
deps: upgrade v8 to 3.19.0

v0.11.2-release
Ben Noordhuis 12 years ago
commit 7ee538ddfe
Changed files (lines changed per file):

1. deps/v8/ChangeLog (27)
2. deps/v8/build/common.gypi (9)
3. deps/v8/include/v8.h (737)
4. deps/v8/samples/lineprocessor.cc (14)
5. deps/v8/samples/process.cc (17)
6. deps/v8/samples/shell.cc (14)
7. deps/v8/src/api.cc (220)
8. deps/v8/src/api.h (67)
9. deps/v8/src/arguments.h (17)
10. deps/v8/src/arm/code-stubs-arm.cc (76)
11. deps/v8/src/arm/debug-arm.cc (9)
12. deps/v8/src/arm/full-codegen-arm.cc (72)
13. deps/v8/src/arm/ic-arm.cc (19)
14. deps/v8/src/arm/lithium-arm.cc (60)
15. deps/v8/src/arm/lithium-arm.h (19)
16. deps/v8/src/arm/lithium-codegen-arm.cc (132)
17. deps/v8/src/arm/lithium-codegen-arm.h (2)
18. deps/v8/src/arm/macro-assembler-arm.cc (42)
19. deps/v8/src/arm/macro-assembler-arm.h (4)
20. deps/v8/src/arm/simulator-arm.cc (134)
21. deps/v8/src/arm/simulator-arm.h (6)
22. deps/v8/src/arm/stub-cache-arm.cc (136)
23. deps/v8/src/arraybuffer.js (100)
24. deps/v8/src/assembler.cc (7)
25. deps/v8/src/assembler.h (2)
26. deps/v8/src/ast.h (19)
27. deps/v8/src/atomicops_internals_x86_gcc.h (2)
28. deps/v8/src/bootstrapper.cc (115)
29. deps/v8/src/builtins-decls.h (40)
30. deps/v8/src/builtins.cc (75)
31. deps/v8/src/builtins.h (6)
32. deps/v8/src/code-stubs-hydrogen.cc (81)
33. deps/v8/src/code-stubs.cc (4)
34. deps/v8/src/code-stubs.h (122)
35. deps/v8/src/compiler.cc (7)
36. deps/v8/src/contexts.h (20)
37. deps/v8/src/d8-debug.cc (12)
38. deps/v8/src/d8.cc (84)
39. deps/v8/src/d8.h (12)
40. deps/v8/src/debug.cc (7)
41. deps/v8/src/debug.h (1)
42. deps/v8/src/factory.cc (60)
43. deps/v8/src/factory.h (5)
44. deps/v8/src/flag-definitions.h (10)
45. deps/v8/src/frames-inl.h (34)
46. deps/v8/src/frames.cc (120)
47. deps/v8/src/frames.h (16)
48. deps/v8/src/full-codegen.h (5)
49. deps/v8/src/global-handles.cc (19)
50. deps/v8/src/global-handles.h (4)
51. deps/v8/src/handles-inl.h (15)
52. deps/v8/src/heap-inl.h (40)
53. deps/v8/src/heap-snapshot-generator.cc (5)
54. deps/v8/src/heap.cc (19)
55. deps/v8/src/heap.h (28)
56. deps/v8/src/hydrogen-instructions.cc (95)
57. deps/v8/src/hydrogen-instructions.h (196)
58. deps/v8/src/hydrogen.cc (403)
59. deps/v8/src/hydrogen.h (10)
60. deps/v8/src/ia32/code-stubs-ia32.cc (75)
61. deps/v8/src/ia32/debug-ia32.cc (9)
62. deps/v8/src/ia32/full-codegen-ia32.cc (71)
63. deps/v8/src/ia32/ic-ia32.cc (20)
64. deps/v8/src/ia32/lithium-codegen-ia32.cc (154)
65. deps/v8/src/ia32/lithium-codegen-ia32.h (2)
66. deps/v8/src/ia32/lithium-ia32.cc (100)
67. deps/v8/src/ia32/lithium-ia32.h (29)
68. deps/v8/src/ia32/macro-assembler-ia32.cc (40)
69. deps/v8/src/ia32/macro-assembler-ia32.h (4)
70. deps/v8/src/ia32/stub-cache-ia32.cc (180)
71. deps/v8/src/ic.cc (204)
72. deps/v8/src/ic.h (8)
73. deps/v8/src/incremental-marking.cc (29)
74. deps/v8/src/incremental-marking.h (11)
75. deps/v8/src/json-parser.h (63)
76. deps/v8/src/json-stringifier.h (2)
77. deps/v8/src/list-inl.h (7)
78. deps/v8/src/list.h (3)
79. deps/v8/src/lithium-allocator.cc (56)
80. deps/v8/src/lithium-allocator.h (22)
81. deps/v8/src/lithium.cc (15)
82. deps/v8/src/lithium.h (181)
83. deps/v8/src/macros.py (2)
84. deps/v8/src/messages.js (10)
85. deps/v8/src/mips/code-stubs-mips.cc (87)
86. deps/v8/src/mips/debug-mips.cc (9)
87. deps/v8/src/mips/full-codegen-mips.cc (75)
88. deps/v8/src/mips/ic-mips.cc (19)
89. deps/v8/src/mips/lithium-codegen-mips.cc (95)
90. deps/v8/src/mips/lithium-codegen-mips.h (2)
91. deps/v8/src/mips/lithium-mips.cc (60)
92. deps/v8/src/mips/lithium-mips.h (19)
93. deps/v8/src/mips/macro-assembler-mips.cc (42)
94. deps/v8/src/mips/macro-assembler-mips.h (4)
95. deps/v8/src/mips/stub-cache-mips.cc (138)
96. deps/v8/src/mksnapshot.cc (44)
97. deps/v8/src/object-observe.js (8)
98. deps/v8/src/objects-debug.cc (5)
99. deps/v8/src/objects-inl.h (134)
100. deps/v8/src/objects-printer.cc (2)

deps/v8/ChangeLog (27)

@ -1,3 +1,30 @@
2013-05-10: Version 3.19.0

Deprecated Context::New which returns Persistent.
Added Persistent<T>::Reset which disposes the handle and redirects it to
point to another object.
Deprecated WriteAscii and MayContainNonAscii.
Exposed AssertNoAllocation to API.
Performance and stability improvements on all platforms.

2013-04-30: Version 3.18.5

Allowed setting debugger breakpoints on CompareNilICs (issue 2660)
Fixed beyond-heap load on x64 Crankshafted StringCharFromCode
(Chromium issue 235311)
Change 'Parse error' to three more informative messages.
(Chromium issue 2636)
Performance and stability improvements on all platforms.

2013-04-26: Version 3.18.4

Added a preliminary API for ES6 ArrayBuffers
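
The 3.19.0 entry above is the change embedders will notice first, and the sample diffs below (lineprocessor.cc, process.cc, shell.cc) all apply the same migration. A minimal sketch of the new pattern, assuming an existing isolate and a global ObjectTemplate as in the samples:

// Sketch only: the isolate-taking Context::New returns a Local rather
// than a Persistent, so it must live inside a HandleScope.
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);

// A long-lived reference is kept with Persistent<T>::Reset, which
// disposes the previous handle (if any) and redirects it to 'context'.
v8::Persistent<v8::Context> persistent_context;
persistent_context.Reset(isolate, context);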

deps/v8/build/common.gypi (9)

@ -454,6 +454,15 @@
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
'cflags!': [
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O3',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [

deps/v8/include/v8.h (737)

File diff suppressed because it is too large

deps/v8/samples/lineprocessor.cc (14)

@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#include <v8.h>
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -124,7 +128,9 @@ void DispatchDebugMessages() {
// "evaluate" command, because it must be executed some context.
// In our sample we have only one context, so there is nothing really to
// think about.
v8::Context::Scope scope(debug_message_context);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
v8::Context::Scope scope(isolate, debug_message_context);
v8::Debug::ProcessDebugMessages();
}
@ -136,8 +142,8 @@ int RunMain(int argc, char* argv[]) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> script_source(NULL);
v8::Handle<v8::Value> script_name(NULL);
v8::Handle<v8::String> script_source;
v8::Handle<v8::Value> script_name;
int script_param_counter = 0;
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -209,7 +215,7 @@ int RunMain(int argc, char* argv[]) {
// Create a new execution environment containing the built-in
// functions
v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);

deps/v8/samples/process.cc (17)

@ -25,6 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove this
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#include <v8.h>
#include <string>
@ -163,11 +168,11 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// is what we need for the reference to remain after we return from
// this method. That persistent handle has to be disposed in the
// destructor.
context_ = Context::New(NULL, global);
context_.Reset(GetIsolate(), Context::New(GetIsolate(), NULL, global));
// Enter the new context so all the following operations take place
// within it.
Context::Scope context_scope(context_);
Context::Scope context_scope(GetIsolate(), context_);
// Make the options mapping available within the context
if (!InstallMaps(opts, output))
@ -250,7 +255,7 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
// Enter this processor's context so all the remaining operations
// take place there
Context::Scope context_scope(context_);
Context::Scope context_scope(GetIsolate(), context_);
// Wrap the C++ request object in a JavaScript wrapper
Handle<Object> request_obj = WrapRequest(request);
@ -303,7 +308,8 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate(GetIsolate());
map_template_ = Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ = map_template_;
Handle<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), map_template_);
// Create an empty map wrapper.
Handle<Object> result = templ->NewInstance();
@ -410,7 +416,8 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
request_template_ =
Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ = request_template_;
Handle<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), request_template_);
// Create an empty http request wrapper.
Handle<Object> result = templ->NewInstance();

deps/v8/samples/shell.cc (14)

@ -25,6 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove this
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#include <v8.h>
#include <assert.h>
#include <fcntl.h>
@ -45,7 +50,7 @@
*/
v8::Persistent<v8::Context> CreateShellContext();
v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate);
void RunShell(v8::Handle<v8::Context> context);
int RunMain(v8::Isolate* isolate, int argc, char* argv[]);
bool ExecuteString(v8::Isolate* isolate,
@ -72,7 +77,7 @@ int main(int argc, char* argv[]) {
int result;
{
v8::HandleScope handle_scope(isolate);
v8::Persistent<v8::Context> context = CreateShellContext();
v8::Handle<v8::Context> context = CreateShellContext(isolate);
if (context.IsEmpty()) {
fprintf(stderr, "Error creating context\n");
return 1;
@ -81,7 +86,6 @@ int main(int argc, char* argv[]) {
result = RunMain(isolate, argc, argv);
if (run_shell) RunShell(context);
context->Exit();
context.Dispose(isolate);
}
v8::V8::Dispose();
return result;
@ -96,7 +100,7 @@ const char* ToCString(const v8::String::Utf8Value& value) {
// Creates a new execution environment containing the built-in
// functions.
v8::Persistent<v8::Context> CreateShellContext() {
v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
@ -110,7 +114,7 @@ v8::Persistent<v8::Context> CreateShellContext() {
// Bind the 'version' function
global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
return v8::Context::New(NULL, global);
return v8::Context::New(isolate, NULL, global);
}

deps/v8/src/api.cc (220)

@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#include "api.h"
#include <string.h> // For memcpy, strlen.
@ -625,7 +628,7 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
void V8::MakeWeak(i::Isolate* isolate,
i::Object** object,
void* parameters,
WeakReferenceCallback weak_reference_callback,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MakeWeak");
@ -2409,6 +2412,46 @@ bool Value::IsArray() const {
}
bool Value::IsArrayBuffer() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
return false;
return Utils::OpenHandle(this)->IsJSArrayBuffer();
}
bool Value::IsTypedArray() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
return false;
return Utils::OpenHandle(this)->IsJSTypedArray();
}
#define TYPED_ARRAY_LIST(F) \
F(Uint8Array, kExternalUnsignedByteArray) \
F(Int8Array, kExternalByteArray) \
F(Uint16Array, kExternalUnsignedShortArray) \
F(Int16Array, kExternalShortArray) \
F(Uint32Array, kExternalUnsignedIntArray) \
F(Int32Array, kExternalIntArray) \
F(Float32Array, kExternalFloatArray) \
F(Float64Array, kExternalDoubleArray) \
F(Uint8ClampedArray, kExternalPixelArray)
#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
bool Value::Is##TypedArray() const { \
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::Is" #TypedArray "()")) \
return false; \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
if (!obj->IsJSTypedArray()) return false; \
return i::JSTypedArray::cast(*obj)->type() == type_const; \
}
TYPED_ARRAY_LIST(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
bool Value::IsObject() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
@ -2755,6 +2798,32 @@ void v8::ArrayBuffer::CheckCast(Value* that) {
}
void v8::TypedArray::CheckCast(Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSTypedArray(),
"v8::TypedArray::Cast()",
"Could not convert to TypedArray");
}
#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
void v8::ApiClass::CheckCast(Value* that) { \
if (IsDeadCheck(i::Isolate::Current(), "v8::" #ApiClass "::Cast()")) \
return; \
i::Handle<i::Object> obj = Utils::OpenHandle(that); \
ApiCheck(obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == typeConst, \
"v8::" #ApiClass "::Cast()", \
"Could not convert to " #ApiClass); \
}
TYPED_ARRAY_LIST(CHECK_TYPED_ARRAY_CAST)
#undef CHECK_TYPED_ARRAY_CAST
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
@ -3281,7 +3350,7 @@ Local<String> v8::Object::ObjectProtoToString() {
const char* postfix = "]";
int prefix_len = i::StrLength(prefix);
int str_len = str->Length();
int str_len = str->Utf8Length();
int postfix_len = i::StrLength(postfix);
int buf_len = prefix_len + str_len + postfix_len;
@ -3293,7 +3362,7 @@ Local<String> v8::Object::ObjectProtoToString() {
ptr += prefix_len;
// Write real content.
str->WriteAscii(ptr, 0, str_len);
str->WriteUtf8(ptr, str_len);
ptr += str_len;
// Write postfix.
@ -4061,7 +4130,7 @@ bool String::IsOneByte() const {
if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
return false;
}
return str->IsOneByteConvertible();
return str->HasOnlyOneByteChars();
}
@ -5806,6 +5875,131 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
}
Local<ArrayBuffer> v8::TypedArray::Buffer() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::TypedArray::Buffer()"))
return Local<ArrayBuffer>();
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
ASSERT(obj->buffer()->IsJSArrayBuffer());
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
return Utils::ToLocal(buffer);
}
size_t v8::TypedArray::ByteOffset() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::TypedArray::ByteOffset()")) return 0;
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_offset()->Number());
}
size_t v8::TypedArray::ByteLength() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::TypedArray::ByteLength()")) return 0;
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
}
size_t v8::TypedArray::Length() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->length()->Number());
}
void* v8::TypedArray::BaseAddress() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::TypedArray::BaseAddress()")) return NULL;
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
void* buffer_data = buffer->backing_store();
size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
return static_cast<uint8_t*>(buffer_data) + byte_offset;
}
template<typename ElementType,
ExternalArrayType array_type,
i::ElementsKind elements_kind>
i::Handle<i::JSTypedArray> NewTypedArray(
i::Isolate* isolate,
Handle<ArrayBuffer> array_buffer, size_t byte_offset, size_t length) {
i::Handle<i::JSTypedArray> obj =
isolate->factory()->NewJSTypedArray(array_type);
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
ASSERT(byte_offset % sizeof(ElementType) == 0);
ASSERT(byte_offset + length * sizeof(ElementType) <=
static_cast<size_t>(buffer->byte_length()->Number()));
obj->set_buffer(*buffer);
i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
static_cast<double>(byte_offset));
obj->set_byte_offset(*byte_offset_object);
i::Handle<i::Object> byte_length_object = isolate->factory()->NewNumber(
static_cast<double>(length * sizeof(ElementType)));
obj->set_byte_length(*byte_length_object);
i::Handle<i::Object> length_object = isolate->factory()->NewNumber(
static_cast<double>(length));
obj->set_length(*length_object);
i::Handle<i::ExternalArray> elements =
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
i::Handle<i::Map> map =
isolate->factory()->GetElementsTransitionMap(
obj, elements_kind);
obj->set_map(*map);
obj->set_elements(*elements);
return obj;
}
#define TYPED_ARRAY_NEW(TypedArray, element_type, array_type, elements_kind) \
Local<TypedArray> TypedArray::New(Handle<ArrayBuffer> array_buffer, \
size_t byte_offset, size_t length) { \
i::Isolate* isolate = i::Isolate::Current(); \
EnsureInitializedForIsolate(isolate, \
"v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
LOG_API(isolate, \
"v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
ENTER_V8(isolate); \
i::Handle<i::JSTypedArray> obj = \
NewTypedArray<element_type, array_type, elements_kind>( \
isolate, array_buffer, byte_offset, length); \
return Utils::ToLocal##TypedArray(obj); \
}
TYPED_ARRAY_NEW(Uint8Array, uint8_t, kExternalUnsignedByteArray,
i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS)
TYPED_ARRAY_NEW(Uint8ClampedArray, uint8_t, kExternalPixelArray,
i::EXTERNAL_PIXEL_ELEMENTS)
TYPED_ARRAY_NEW(Int8Array, int8_t, kExternalByteArray,
i::EXTERNAL_BYTE_ELEMENTS)
TYPED_ARRAY_NEW(Uint16Array, uint16_t, kExternalUnsignedShortArray,
i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS)
TYPED_ARRAY_NEW(Int16Array, int16_t, kExternalShortArray,
i::EXTERNAL_SHORT_ELEMENTS)
TYPED_ARRAY_NEW(Uint32Array, uint32_t, kExternalUnsignedIntArray,
i::EXTERNAL_UNSIGNED_INT_ELEMENTS)
TYPED_ARRAY_NEW(Int32Array, int32_t, kExternalIntArray,
i::EXTERNAL_INT_ELEMENTS)
TYPED_ARRAY_NEW(Float32Array, float, kExternalFloatArray,
i::EXTERNAL_FLOAT_ELEMENTS)
TYPED_ARRAY_NEW(Float64Array, double, kExternalDoubleArray,
i::EXTERNAL_DOUBLE_ELEMENTS)
#undef TYPED_ARRAY_NEW
Local<Symbol> v8::Symbol::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
@ -5883,6 +6077,19 @@ Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
}
#ifdef DEBUG
v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate)
: isolate_(isolate),
last_state_(i::EnterAllocationScope(
reinterpret_cast<i::Isolate*>(isolate), false)) {
}
v8::AssertNoGCScope::~AssertNoGCScope() {
i::ExitAllocationScope(reinterpret_cast<i::Isolate*>(isolate_), last_state_);
}
#endif
void V8::IgnoreOutOfMemoryException() {
EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
}
@ -6295,9 +6502,10 @@ String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
TryCatch try_catch;
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
length_ = str->Length();
length_ = str->Utf8Length();
str_ = i::NewArray<char>(length_ + 1);
str->WriteAscii(str_);
str->WriteUtf8(str_);
ASSERT(i::String::NonAsciiStart(str_, length_) >= length_);
}
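
The additions above, together with the TypedArray accessors and the TYPED_ARRAY_NEW constructors, form the preliminary typed-array API from the 3.18.4 ChangeLog entry. A hedged sketch of embedder usage, assuming a live isolate with an open HandleScope and using only signatures added in this diff:

// Sketch only: wrap 16 caller-owned bytes in an ArrayBuffer, then view
// the second half as two 32-bit elements (byte_offset 8, length 2).
static uint8_t backing_store[16];
v8::Local<v8::ArrayBuffer> buffer =
    v8::ArrayBuffer::New(backing_store, sizeof(backing_store));
v8::Local<v8::Uint32Array> view = v8::Uint32Array::New(buffer, 8, 2);
// Per the accessors above: view->ByteOffset() == 8, view->Length() == 2,
// view->ByteLength() == 8, view->BaseAddress() == backing_store + 8.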

deps/v8/src/api.h (67)

@ -171,6 +171,16 @@ class RegisteredExtension {
V(Object, JSObject) \
V(Array, JSArray) \
V(ArrayBuffer, JSArrayBuffer) \
V(TypedArray, JSTypedArray) \
V(Uint8Array, JSTypedArray) \
V(Uint8ClampedArray, JSTypedArray) \
V(Int8Array, JSTypedArray) \
V(Uint16Array, JSTypedArray) \
V(Int16Array, JSTypedArray) \
V(Uint32Array, JSTypedArray) \
V(Int32Array, JSTypedArray) \
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(String, String) \
V(Symbol, Symbol) \
V(Script, Object) \
@ -208,6 +218,28 @@ class Utils {
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<TypedArray> ToLocal(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8Array> ToLocalUint8Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8ClampedArray> ToLocalUint8ClampedArray(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Int8Array> ToLocalInt8Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint16Array> ToLocalUint16Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Int16Array> ToLocalInt16Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint32Array> ToLocalUint32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Int32Array> ToLocalInt32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float32Array> ToLocalFloat32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@ -262,14 +294,34 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
}
class InternalHandleHelper {
public:
template<class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj) {
return Local<To>(reinterpret_cast<To*>(obj.location()));
}
};
// Implementations of ToLocal
#define MAKE_TO_LOCAL(Name, From, To) \
Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
ASSERT(obj.is_null() || !obj->IsTheHole()); \
return Local<To>(reinterpret_cast<To*>(obj.location())); \
return InternalHandleHelper::Convert<v8::internal::From, v8::To>(obj); \
}
#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
ASSERT(obj.is_null() || !obj->IsTheHole()); \
ASSERT(obj->type() == typeConst); \
return InternalHandleHelper:: \
Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
}
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
@ -279,6 +331,18 @@ MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint8ClampedArray, kExternalPixelArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint32Array, kExternalUnsignedIntArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int32Array, kExternalIntArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Float32Array, kExternalFloatArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Float64Array, kExternalDoubleArray)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@ -293,6 +357,7 @@ MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL

deps/v8/src/arguments.h (17)

@ -115,15 +115,18 @@ class CustomArguments : public Relocatable {
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
Type Name(Arguments args, Isolate* isolate)
Type Name(int args_length, Object** args_object, Isolate* isolate)
#define RUNTIME_FUNCTION(Type, Name) \
Type Name(Arguments args, Isolate* isolate)
#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
Arguments args(args_length, args_object); \
return __RT_impl_##Name(args, isolate); \
} \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_ARGUMENTS(isolate, args) \
args.length(), args.arguments(), isolate
} } // namespace v8::internal
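
To make the new shape concrete, here is the expansion for a hypothetical Runtime_Example (the name is illustrative): the exported symbol now takes the raw (length, arguments) pair, and the body still sees an Arguments wrapper through a static forwarding function.

// RUNTIME_FUNCTION(MaybeObject*, Runtime_Example) now expands to:
static MaybeObject* __RT_impl_Runtime_Example(Arguments args,
                                              Isolate* isolate);
MaybeObject* Runtime_Example(int args_length, Object** args_object,
                             Isolate* isolate) {
  Arguments args(args_length, args_object);         // rebuild the wrapper
  return __RT_impl_Runtime_Example(args, isolate);  // forward to the body
}
static MaybeObject* __RT_impl_Runtime_Example(Arguments args,
                                              Isolate* isolate)
// ...followed by the user-supplied function body.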

deps/v8/src/arm/code-stubs-arm.cc (76)

@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@ -73,6 +74,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r1 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@ -115,9 +138,10 @@ static void InitializeArrayConstructorDescriptor(
int constant_stack_parameter_count) {
// register state
// r0 -- number of arguments
// r1 -- function
// r2 -- type info cell with elements kind
static Register registers[] = { r2 };
descriptor->register_param_count_ = 1;
static Register registers[] = { r1, r2 };
descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &r0;
@ -3776,12 +3800,6 @@ Register InstanceofStub::left() { return r0; }
Register InstanceofStub::right() { return r1; }
void LoadFieldStub::Generate(MacroAssembler* masm) {
StubCompiler::DoGenerateFastPropertyLoad(masm, r0, reg_, inobject_, index_);
__ Ret();
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@ -4733,6 +4751,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
LAST_FAST_ELEMENTS_KIND);
__ JumpIfNotSmi(r3, &miss);
__ cmp(r3, Operand(terminal_kind_sentinel));
__ b(gt, &miss);
// Make sure the function is the Array() function
@ -5941,8 +5960,36 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
Label skip_write_barrier, after_writing;
ExternalReference high_promotion_mode = ExternalReference::
new_space_high_promotion_mode_active_address(masm->isolate());
__ mov(r4, Operand(high_promotion_mode));
__ ldr(r4, MemOperand(r4, 0));
__ cmp(r4, Operand::Zero());
__ b(eq, &skip_write_barrier);
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
__ RecordWriteField(r7,
ConsString::kFirstOffset,
r0,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
__ RecordWriteField(r7,
ConsString::kSecondOffset,
r1,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ jmp(&after_writing);
__ bind(&skip_write_barrier);
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
__ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
__ bind(&after_writing);
__ mov(r0, Operand(r7));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
@ -6788,6 +6835,9 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
// StringAddStub::Generate
{ REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@ -7312,14 +7362,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, kPointerSize));
// There is no info if the call site went megamorphic either
// TODO(mvstanton): Really? I thought if it was the array function that
// the cell wouldn't get stamped as megamorphic.
__ cmp(r3,
Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
__ JumpIfNotSmi(r3, &no_info);
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);

deps/v8/src/arm/debug-arm.cc (9)

@ -224,6 +224,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------

deps/v8/src/arm/full-codegen-arm.cc (72)

@ -1593,7 +1593,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
expr->depth() > 1) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
@ -1939,11 +1940,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ b(ne, &resume);
__ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
// TODO(wingo): Box into { value: VALUE, done: false }.
}
EmitReturnIteratorResult(false);
} else {
__ pop(result_register());
EmitReturnSequence();
}
__ bind(&resume);
context()->Plug(result_register());
@ -1955,18 +1957,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
__ pop(result_register());
// TODO(wingo): Box into { value: VALUE, done: true }.
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
EmitReturnSequence();
EmitReturnIteratorResult(true);
break;
}
@ -2074,6 +2065,55 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
__ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
__ bind(&allocated);
__ mov(r1, Operand(map));
__ pop(r2);
__ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
__ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ str(r2,
FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
__ str(r3,
FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
// Only the value field needs a write barrier, as the other values are in the
// root set.
__ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
if (done) {
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
}
EmitReturnSequence();
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&allocated);
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();

deps/v8/src/arm/ic-arm.cc (19)

@ -1180,6 +1180,25 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
}
void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r2 : key
// -- r1 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(r1, r2, r0);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value

deps/v8/src/arm/lithium-arm.cc (60)

@ -552,6 +552,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
LOperand* LChunkBuilder::UseConstant(HValue* value) {
return chunk_->DefineConstantOperand(HConstant::cast(value));
}
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@ -672,7 +677,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
return NULL;
vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@ -1300,8 +1305,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@ -1479,15 +1484,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left;
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
left = UseRegister(instr->LeastConstantOperand());
left = UseRegister(instr->BetterLeftOperand());
temp = TempRegister();
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
left = UseRegisterAtStart(instr->BetterLeftOperand());
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
@ -1597,8 +1602,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@ -1629,8 +1634,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
left = UseRegisterAtStart(instr->LeastConstantOperand());
right = UseOrConstantAtStart(instr->MostConstantOperand());
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@ -2114,8 +2119,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(
new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
LOperand* obj = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@ -2150,12 +2155,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@ -2319,14 +2318,25 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* val;
if (needs_write_barrier ||
(FLAG_track_fields && instr->field_representation().IsSmi())) {
val = UseTempRegister(instr->value());
} else if (FLAG_track_double_fields &&
instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
}
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
return new(zone()) LStoreNamedField(obj, val, temp);
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
if (FLAG_track_fields && instr->field_representation().IsSmi()) {
return AssignEnvironment(result);
}
return result;
}
@ -2378,7 +2388,9 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = UseTempRegister(instr->size());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
@ -2440,7 +2452,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
if (spill_index > LUnallocated::kMaxFixedIndex) {
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}

deps/v8/src/arm/lithium-arm.h (19)

@ -122,7 +122,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@ -1574,18 +1573,6 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
@ -2142,6 +2129,9 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
}
};
@ -2787,6 +2777,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
// An input operand in a constant operand.
MUST_USE_RESULT LOperand* UseConstant(HValue* value);
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);

deps/v8/src/arm/lithium-codegen-arm.cc (132)

@ -91,6 +91,10 @@ void LCodeGen::FinishCode(Handle<Code> code) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
for (int i = 0 ; i < transition_maps_.length(); i++) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
}
}
@ -1161,14 +1165,14 @@ void LCodeGen::DoModI(LModI* instr) {
Register result = ToRegister(instr->result());
Label done;
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
// Check for (kMinInt % -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
@ -1185,10 +1189,10 @@ void LCodeGen::DoModI(LModI* instr) {
__ sdiv(result, left, right);
__ mls(result, result, right, left);
__ cmp(result, Operand::Zero());
__ b(ne, &done);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(left, Operand::Zero());
DeoptimizeIf(lt, instr->environment());
}
@ -1206,13 +1210,7 @@ void LCodeGen::DoModI(LModI* instr) {
ASSERT(!scratch.is(right));
ASSERT(!scratch.is(result));
Label vfp_modulo, both_positive, right_negative;
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
Label vfp_modulo, right_negative;
__ Move(result, left);
@ -1230,7 +1228,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
scratch,
&right_negative,
&both_positive);
&vfp_modulo);
// Perform modulo operation (scratch contains right - 1).
__ and_(result, scratch, Operand(left));
__ b(&done);
@ -1239,23 +1237,6 @@ void LCodeGen::DoModI(LModI* instr) {
// Negate right. The sign of the divisor does not matter.
__ rsb(right, right, Operand::Zero());
__ bind(&both_positive);
const int kUnfolds = 3;
// If the right hand side is smaller than the (nonnegative)
// left hand side, the left hand side is the result.
// Else try a few subtractions of the left hand side.
__ mov(scratch, left);
for (int i = 0; i < kUnfolds; i++) {
// Check if the left hand side is less or equal than the
// the right hand side.
__ cmp(scratch, Operand(right));
__ mov(result, scratch, LeaveCC, lt);
__ b(lt, &done);
// If not, reduce the left hand side by the right hand
// side and check again.
if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
}
__ bind(&vfp_modulo);
// Load the arguments in VFP registers.
// The divisor value is preloaded before. Be careful that 'right'
@ -3076,13 +3057,20 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
if (instr->hydrogen()->representation().IsDouble()) {
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vldr(result, FieldMemOperand(object, offset));
return;
}
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
__ ldr(result, FieldMemOperand(object, offset));
} else {
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
__ ldr(result, FieldMemOperand(result, offset));
}
}
@ -3228,40 +3216,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->object());
Register scratch = scratch0();
__ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done, fail;
__ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
// |scratch| still contains |input|'s map.
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ cmp(scratch, Operand(GetInitialFastElementsKind()));
__ b(lt, &fail);
__ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ b(le, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(le, &done);
__ bind(&fail);
__ Abort("Check for fast or external elements failed.");
__ bind(&done);
}
}
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@ -4234,8 +4188,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ mov(r0, Operand(instr->arity()));
__ mov(r2, Operand(instr->hydrogen()->property_cell()));
Object* cell_value = instr->hydrogen()->property_cell()->value();
ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@ -4262,15 +4215,34 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Representation representation = instr->representation();
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
int offset = instr->offset();
ASSERT(!object.is(value));
Handle<Map> transition = instr->transition();
if (FLAG_track_fields && representation.IsSmi()) {
Register value = ToRegister(instr->value());
__ SmiTag(value, value, SetCC);
if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
DeoptimizeIf(vs, instr->environment());
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(instr->is_in_object());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
DwVfpRegister value = ToDoubleRegister(instr->value());
__ vstr(value, FieldMemOperand(object, offset));
return;
}
if (!instr->transition().is_null()) {
__ mov(scratch, Operand(instr->transition()));
if (!transition.is_null()) {
if (transition->CanBeDeprecated()) {
transition_maps_.Add(transition, info()->zone());
}
__ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@ -4287,6 +4259,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
Register value = ToRegister(instr->value());
ASSERT(!object.is(value));
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@ -5138,6 +5112,8 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@ -5473,7 +5449,6 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
@ -5482,8 +5457,16 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ mov(result, Operand(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(size, size);
if (instr->size()->IsRegister()) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ SmiTag(size);
__ push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
@ -5566,7 +5549,8 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Pick the right runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
if (instr->hydrogen()->depth() > 1) {
if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
instr->hydrogen()->depth() > 1) {
__ Push(r3, r2, r1, r0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||

deps/v8/src/arm/lithium-codegen-arm.h (2)

@ -57,6 +57,7 @@ class LCodeGen BASE_EMBEDDED {
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@ -418,6 +419,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;

deps/v8/src/arm/macro-assembler-arm.cc (42)

@ -1933,8 +1933,34 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
Label allocate_new_space, install_map;
AllocationFlags flags = TAG_OBJECT;
ExternalReference high_promotion_mode = ExternalReference::
new_space_high_promotion_mode_active_address(isolate());
mov(scratch1, Operand(high_promotion_mode));
ldr(scratch1, MemOperand(scratch1, 0));
cmp(scratch1, Operand::Zero());
b(eq, &allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
jmp(&install_map);
bind(&allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
flags);
bind(&install_map);
InitializeNewString(result,
length,
@ -3473,6 +3499,18 @@ void MacroAssembler::CheckPageFlag(
}
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
mov(scratch, Operand(map));
ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
b(ne, if_deprecated);
}
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,

deps/v8/src/arm/macro-assembler-arm.h (4)

@ -213,6 +213,10 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* condition_met);
void CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,

deps/v8/src/arm/simulator-arm.cc (134)

@ -975,12 +975,14 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
}
// For use in calls that take two double values, constructed either
// from r0-r3 or d0 and d1.
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are constructed here.
void Simulator::GetFpArgs(double* x, double* y) {
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = vfp_registers_[1];
*z = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@ -988,44 +990,12 @@ void Simulator::GetFpArgs(double* x, double* y) {
// Registers 0 and 1 -> x.
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
// Registers 2 and 3 -> y.
OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
OS::MemCopy(y, buffer, sizeof(*y));
}
}
// For use in calls that take one double value, constructed either
// from r0 and r1 or d0.
void Simulator::GetFpArgs(double* x) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
}
}
// For use in calls that take one double value constructed either
// from r0 and r1 or d0 and one integer value.
void Simulator::GetFpArgs(double* x, int32_t* y) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
// Register 2 -> y.
// Register 2 and 3 -> y.
OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
OS::MemCopy(y, buffer, sizeof(*y));
// Register 2 -> z
memcpy(buffer, registers_ + 2, sizeof(*z));
memcpy(z, buffer, sizeof(*z));
}
}
@ -1648,10 +1618,12 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg3,
int32_t arg4,
int32_t arg5);
typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg3);
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPCall)(double darg0);
typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
@ -1717,27 +1689,27 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (fp_call) {
double dval0, dval1; // one or two double parameters
int32_t ival; // zero or one integer parameters
int64_t iresult = 0; // integer return value
double dresult = 0; // double return value
GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
double dval0, dval1;
int32_t ival;
SimulatorRuntimeCall generic_target =
reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
FUNCTION_ADDR(target), dval0, dval1);
FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
FUNCTION_ADDR(target), dval0);
FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
FUNCTION_ADDR(target), dval0, ival);
FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
@ -1749,22 +1721,54 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
switch (redirection->type()) {
case ExternalReference::BUILTIN_COMPARE_CALL: {
SimulatorRuntimeCompareCall target =
reinterpret_cast<SimulatorRuntimeCompareCall>(external);
iresult = target(dval0, dval1);
set_register(r0, static_cast<int32_t>(iresult));
set_register(r1, static_cast<int32_t>(iresult >> 32));
break;
}
case ExternalReference::BUILTIN_FP_FP_CALL: {
SimulatorRuntimeFPFPCall target =
reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
dresult = target(dval0, dval1);
SetFpResult(dresult);
break;
}
case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08x\n", lo_res);
dresult = target(dval0);
SetFpResult(dresult);
break;
}
case ExternalReference::BUILTIN_FP_INT_CALL: {
SimulatorRuntimeFPIntCall target =
reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
dresult = target(dval0, ival);
SetFpResult(dresult);
break;
}
default:
UNREACHABLE();
break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
break;
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_FP_CALL:
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Returned %f\n", dresult);
break;
default:
UNREACHABLE();
break;
}
set_register(r0, lo_res);
set_register(r1, hi_res);
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =

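Summing up the dispatch above, each redirection type now pairs with exactly one of the four typedefs; only a compare call returns its 64-bit result through r0/r1, while the three double-returning variants go back through SetFpResult(). A hedged sketch of plausible target shapes (the example function names are assumptions, not the actual runtime entry points):

int64_t compare_doubles(double x, double y);          // BUILTIN_COMPARE_CALL
double  mod_two_doubles(double x, double y);          // BUILTIN_FP_FP_CALL
double  some_unary_fp_op(double x);                   // BUILTIN_FP_CALL
double  power_double_int(double base, int32_t exp);   // BUILTIN_FP_INT_CALL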
6
deps/v8/src/arm/simulator-arm.h

@ -348,10 +348,8 @@ class Simulator {
void* external_function,
v8::internal::ExternalReference::Type type);
// For use in calls that take double value arguments.
void GetFpArgs(double* x, double* y);
void GetFpArgs(double* x);
void GetFpArgs(double* x, int32_t* y);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();

136
deps/v8/src/arm/stub-cache-arm.cc

@ -315,11 +315,13 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
bool inobject,
int index) {
int index,
Representation representation) {
ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@ -451,8 +453,10 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss_label,
Label* miss_restore_name) {
Label* miss_restore_name,
Label* slow) {
// r0 : value
Label exit;
@ -465,6 +469,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@ -480,7 +493,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, miss_restore_name);
scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@ -499,6 +512,30 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
}
Register storage_reg = name_reg;
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
__ vmov(s0, scratch1);
__ vcvt_f64_s32(d0, s0);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
miss_restore_name, DONT_DO_SMI_CHECK);
__ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@ -527,7 +564,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
name_reg,
scratch2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@ -545,34 +582,53 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
if (FLAG_track_double_fields && representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
}
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (FLAG_track_double_fields && representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ str(value_reg, FieldMemOperand(scratch1, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
}
__ RecordWriteField(scratch1,
offset,
name_reg,
@ -580,6 +636,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
}
// Return the value (register r0).
ASSERT(value_reg.is(r0));
@ -624,12 +681,50 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
__ ldr(scratch1, FieldMemOperand(receiver_reg, offset));
} else {
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ ldr(scratch1, FieldMemOperand(scratch1, offset));
}
// Store the value into the storage.
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch2, value_reg);
__ vmov(s0, scratch2);
__ vcvt_f64_s32(d0, s0);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
miss_label, DONT_DO_SMI_CHECK);
__ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
// Return the value (register r0).
ASSERT(value_reg.is(r0));
__ Ret();
return;
}
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@ -642,6 +737,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@ -650,6 +746,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@ -663,6 +760,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
}
// Return the value (register r0).
ASSERT(value_reg.is(r0));
@ -1270,9 +1368,20 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex index) {
GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
__ Ret();
PropertyIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
} else {
KeyedLoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
}
}
@ -1496,7 +1605,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
@ -2907,19 +3017,25 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
Handle<Map> map = receiver_maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ mov(ip, Operand(receiver_maps->at(current)));
__ cmp(map_reg, ip);
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
}
}
ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
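The vmov/vcvt_f64_s32/vldr sequences in GenerateStoreTransition and GenerateStoreField above both normalize a stored value, either a smi or a heap number, into a raw double before writing the field. In portable terms (a sketch assuming the 32-bit smi layout with a clear low tag bit):

#include <cstdint>

// tagged is either a smi (value << 1, tag bit 0) or a heap-number pointer
// whose double payload has already been loaded into heap_number_value.
static double FieldValueToDouble(intptr_t tagged, double heap_number_value) {
  if ((tagged & 1) == 0) {
    return static_cast<double>(tagged >> 1);  // SmiUntag + vcvt_f64_s32
  }
  return heap_number_value;  // vldr from HeapNumber::kValueOffset
}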

100
deps/v8/src/arraybuffer.js

@ -0,0 +1,100 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"use strict";
var $ArrayBuffer = global.ArrayBuffer;
// -------------------------------------------------------------------
function ArrayBufferConstructor(byteLength) { // length = 1
if (%_IsConstructCall()) {
var l = TO_POSITIVE_INTEGER(byteLength);
%ArrayBufferInitialize(this, l);
} else {
return new $ArrayBuffer(byteLength);
}
}
function ArrayBufferGetByteLength() {
if (!IS_ARRAYBUFFER(this)) {
throw MakeTypeError('incompatible_method_receiver',
['ArrayBuffer.prototype.byteLength', this]);
}
return %ArrayBufferGetByteLength(this);
}
// ES6 Draft 15.13.5.5.3
function ArrayBufferSlice(start, end) {
if (!IS_ARRAYBUFFER(this)) {
throw MakeTypeError('incompatible_method_receiver',
['ArrayBuffer.prototype.slice', this]);
}
var relativeStart = TO_INTEGER(start);
var first;
if (relativeStart < 0) {
first = MathMax(this.byteLength + relativeStart, 0);
} else {
first = MathMin(relativeStart, this.byteLength);
}
var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end);
var fin;
if (relativeEnd < 0) {
fin = MathMax(this.byteLength + relativeEnd, 0);
} else {
fin = MathMin(relativeEnd, this.byteLength);
}
var newLen = fin - first;
// TODO(dslomov): implement inheritance
var result = new $ArrayBuffer(newLen);
%ArrayBufferSliceImpl(this, result, first);
return result;
}
function SetUpArrayBuffer() {
%CheckIsBootstrapping();
// Set up the ArrayBuffer constructor function.
%SetCode($ArrayBuffer, ArrayBufferConstructor);
%FunctionSetPrototype($ArrayBuffer, new $Object());
// Set up the constructor property on the ArrayBuffer prototype object.
%SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
"slice", ArrayBufferSlice
));
}
SetUpArrayBuffer();
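ArrayBufferSlice clamps like Array.prototype.slice: a negative index counts back from the end, and both endpoints are clamped into [0, byteLength] before the new length is computed. The same arithmetic as a standalone C++ sketch (not code from the tree):

#include <algorithm>
#include <cstdint>

static int64_t ClampSliceIndex(int64_t relative, int64_t byte_length) {
  if (relative < 0) return std::max<int64_t>(byte_length + relative, 0);
  return std::min(relative, byte_length);
}
// bytes copied = ClampSliceIndex(end, len) - ClampSliceIndex(start, len)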

7
deps/v8/src/assembler.cc

@ -1203,6 +1203,13 @@ ExternalReference ExternalReference::old_data_space_allocation_limit_address(
}
ExternalReference ExternalReference::
new_space_high_promotion_mode_active_address(Isolate* isolate) {
return ExternalReference(
isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
}
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));

2
deps/v8/src/assembler.h

@ -757,6 +757,8 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference old_data_space_allocation_limit_address(
Isolate* isolate);
static ExternalReference new_space_high_promotion_mode_active_address(
Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);

19
deps/v8/src/ast.h

@ -277,6 +277,14 @@ class SmallMapList {
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
void AddMapIfMissing(Handle<Map> map, Zone* zone) {
map = Map::CurrentMapForDeprecated(map);
for (int i = 0; i < length(); ++i) {
if (at(i).is_identical_to(map)) return;
}
Add(map, zone);
}
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
@ -1324,10 +1332,9 @@ class ObjectLiteral: public MaterializedLiteral {
return constant_properties_;
}
ZoneList<Property*>* properties() const { return properties_; }
bool fast_elements() const { return fast_elements_; }
bool has_function() { return has_function_; }
bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@ -1354,17 +1361,20 @@ class ObjectLiteral: public MaterializedLiteral {
bool is_simple,
bool fast_elements,
int depth,
bool may_store_doubles,
bool has_function)
: MaterializedLiteral(isolate, literal_index, is_simple, depth),
constant_properties_(constant_properties),
properties_(properties),
fast_elements_(fast_elements),
may_store_doubles_(may_store_doubles),
has_function_(has_function) {}
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
bool fast_elements_;
bool may_store_doubles_;
bool has_function_;
};
@ -2849,10 +2859,11 @@ class AstNodeFactory BASE_EMBEDDED {
bool is_simple,
bool fast_elements,
int depth,
bool may_store_doubles,
bool has_function) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
isolate_, constant_properties, properties, literal_index,
is_simple, fast_elements, depth, has_function);
is_simple, fast_elements, depth, may_store_doubles, has_function);
VISIT_AND_RETURN(ObjectLiteral, lit)
}

2
deps/v8/src/atomicops_internals_x86_gcc.h

@ -168,7 +168,7 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
#if defined(__x86_64__)
#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
// 64-bit low-level operations on 64-bit platform.
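The added V8_HOST_ARCH_64_BIT check presumably targets ILP32 configurations of x86-64 (the x32 ABI), where __x86_64__ is defined but pointers, and therefore AtomicWord, are only 32 bits wide; that is an inference from the macro names, not something the diff states:

#if defined(__x86_64__) && !defined(V8_HOST_ARCH_64_BIT)
// x32-style build: 64-bit instruction set, 32-bit pointers. The Atomic64
// helpers guarded above must not be compiled into this configuration.
#endif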

115
deps/v8/src/bootstrapper.cc

@ -201,7 +201,7 @@ class Genesis BASE_EMBEDDED {
ElementsKind elements_kind);
bool InstallNatives();
void InstallTypedArray(const char* name);
Handle<JSFunction> InstallTypedArray(const char* name);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@ -979,28 +979,32 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(heap->source_string(),
JSRegExp::kSourceFieldIndex,
final);
final,
Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(heap->global_string(),
JSRegExp::kGlobalFieldIndex,
final);
final,
Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(heap->ignore_case_string(),
JSRegExp::kIgnoreCaseFieldIndex,
final);
final,
Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(heap->multiline_string(),
JSRegExp::kMultilineFieldIndex,
final);
final,
Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
@ -1009,7 +1013,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
FieldDescriptor field(heap->last_index_string(),
JSRegExp::kLastIndexFieldIndex,
writable);
writable,
Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
@ -1161,7 +1166,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
map->set_instance_descriptors(*descriptors);
{ // length
FieldDescriptor d(*factory->length_string(), 0, DONT_ENUM);
FieldDescriptor d(
*factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d, witness);
}
{ // callee
@ -1270,9 +1276,9 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
void Genesis::InstallTypedArray(const char* name) {
Handle<JSFunction> Genesis::InstallTypedArray(const char* name) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
return InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
JSTypedArray::kSize, isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
}
@ -1311,26 +1317,36 @@ void Genesis::InitializeExperimentalGlobal() {
}
}
if (FLAG_harmony_typed_arrays) {
{ // -- A r r a y B u f f e r
if (FLAG_harmony_array_buffer) {
// -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
native_context()->set_array_buffer_fun(*array_buffer_fun);
}
{
if (FLAG_harmony_typed_arrays) {
// -- T y p e d A r r a y s
InstallTypedArray("__Int8Array");
InstallTypedArray("__Uint8Array");
InstallTypedArray("__Int16Array");
InstallTypedArray("__Uint16Array");
InstallTypedArray("__Int32Array");
InstallTypedArray("__Uint32Array");
InstallTypedArray("__Float32Array");
InstallTypedArray("__Float64Array");
}
Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array");
native_context()->set_int8_array_fun(*int8_fun);
Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array");
native_context()->set_uint8_array_fun(*uint8_fun);
Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array");
native_context()->set_int16_array_fun(*int16_fun);
Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array");
native_context()->set_uint16_array_fun(*uint16_fun);
Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array");
native_context()->set_int32_array_fun(*int32_fun);
Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array");
native_context()->set_uint32_array_fun(*uint32_fun);
Handle<JSFunction> float_fun = InstallTypedArray("Float32Array");
native_context()->set_float_array_fun(*float_fun);
Handle<JSFunction> double_fun = InstallTypedArray("Float64Array");
native_context()->set_double_array_fun(*double_fun);
Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray");
native_context()->set_uint8c_array_fun(*uint8c_fun);
}
if (FLAG_harmony_generators) {
@ -1371,6 +1387,40 @@ void Genesis::InitializeExperimentalGlobal() {
*generator_object_prototype);
native_context()->set_generator_object_prototype_map(
*generator_object_prototype_map);
// Create a map for generator result objects.
ASSERT(object_map->inobject_properties() == 0);
STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
Handle<Map> generator_result_map = factory()->CopyMap(object_map,
JSGeneratorObject::kResultPropertyCount);
ASSERT(generator_result_map->inobject_properties() ==
JSGeneratorObject::kResultPropertyCount);
Handle<DescriptorArray> descriptors = factory()->NewDescriptorArray(0,
JSGeneratorObject::kResultPropertyCount);
DescriptorArray::WhitenessWitness witness(*descriptors);
generator_result_map->set_instance_descriptors(*descriptors);
Handle<String> value_string = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("value"));
FieldDescriptor value_descr(*value_string,
JSGeneratorObject::kResultValuePropertyIndex,
NONE,
Representation::Tagged());
generator_result_map->AppendDescriptor(&value_descr, witness);
Handle<String> done_string = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("done"));
FieldDescriptor done_descr(*done_string,
JSGeneratorObject::kResultDonePropertyIndex,
NONE,
Representation::Tagged());
generator_result_map->AppendDescriptor(&done_descr, witness);
generator_result_map->set_unused_property_fields(0);
ASSERT_EQ(JSGeneratorObject::kResultSize,
generator_result_map->instance_size());
native_context()->set_generator_result_map(*generator_result_map);
}
}
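The map built here pins down the shape of the {value, done} objects a generator's next() returns, so all results share one map with two in-object fields. Schematically (the struct is only an illustration of the layout implied by the two FieldDescriptors):

struct GeneratorResult {  // JSGeneratorObject::kResultPropertyCount == 2
  Object* value;          // kResultValuePropertyIndex
  Object* done;           // kResultDonePropertyIndex
};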
@ -1924,14 +1974,16 @@ bool Genesis::InstallNatives() {
{
FieldDescriptor index_field(heap()->index_string(),
JSRegExpResult::kIndexIndex,
NONE);
NONE,
Representation::Tagged());
initial_map->AppendDescriptor(&index_field, witness);
}
{
FieldDescriptor input_field(heap()->input_string(),
JSRegExpResult::kInputIndex,
NONE);
NONE,
Representation::Tagged());
initial_map->AppendDescriptor(&input_field, witness);
}
@ -1974,6 +2026,11 @@ bool Genesis::InstallExperimentalNatives() {
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_array_buffer &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native arraybuffer.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_typed_arrays &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native typedarray.js") == 0) {
@ -2352,14 +2409,15 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map()->instance_descriptors());
for (int i = 0; i < descs->number_of_descriptors(); i++) {
for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case FIELD: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
Handle<Object> value = Handle<Object>(from->FastPropertyAt(index),
ASSERT(!descs->GetDetails(i).representation().IsDouble());
Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
isolate());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
@ -2386,9 +2444,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// Add to dictionary.
Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
details.descriptor_index());
PropertyDetails d = PropertyDetails(
details.attributes(), CALLBACKS, i + 1);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}

40
deps/v8/src/builtins-decls.h

@ -0,0 +1,40 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_BUILTINS_DECLS_H_
#define V8_BUILTINS_DECLS_H_
#include "arguments.h"
namespace v8 {
namespace internal {
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure);
} } // namespace v8::internal
#endif // V8_BUILTINS_DECLS_H_

75
deps/v8/src/builtins.cc

@ -129,7 +129,8 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate); \
MUST_USE_RESULT static MaybeObject* Builtin_##name( \
name##ArgumentsType args, Isolate* isolate) { \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
ASSERT(isolate == Isolate::Current()); \
args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
@ -140,8 +141,15 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#else // For release mode.
#define BUILTIN(name) \
static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
static MaybeObject* Builtin_impl##name( \
name##ArgumentsType args, Isolate* isolate); \
static MaybeObject* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
return Builtin_impl##name(args, isolate); \
} \
static MaybeObject* Builtin_impl##name( \
name##ArgumentsType args, Isolate* isolate)
#endif
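In both modes the symbol registered in the builtins table now takes a raw (length, slots) pair and rebuilds the typed Arguments wrapper on entry. For a hypothetical BUILTIN(Foo), the release-mode expansion would read roughly as below, where FooArgumentsType stands in for the name##ArgumentsType alias:

static MaybeObject* Builtin_implFoo(FooArgumentsType args, Isolate* isolate);
static MaybeObject* Builtin_Foo(int args_length, Object** args_object,
                                Isolate* isolate) {
  FooArgumentsType args(args_length, args_object);  // re-wrap the raw slots
  return Builtin_implFoo(args, isolate);
}
static MaybeObject* Builtin_implFoo(FooArgumentsType args, Isolate* isolate)
// ...the body written at the BUILTIN(Foo) site follows here...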
@ -186,26 +194,31 @@ BUILTIN(EmptyFunction) {
}
#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
Arguments* name = reinterpret_cast<Arguments*>(args[0]);
RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
ASSERT(args.length() == 2);
Handle<Object> type_info = args.at<Object>(1);
// If we get 2 arguments then they are the stub parameters (constructor, type
// info). If we get 3, then the first one is a pointer to the arguments
// passed by the caller.
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 2;
ASSERT(no_caller_args || args.length() == 3);
int parameters_start = no_caller_args ? 0 : 1;
Arguments* caller_args = no_caller_args
? &empty_args
: reinterpret_cast<Arguments*>(args[0]);
Handle<JSFunction> constructor = args.at<JSFunction>(parameters_start);
Handle<Object> type_info = args.at<Object>(parameters_start + 1);
JSArray* array = NULL;
bool holey = false;
if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
int value = Smi::cast((*caller_args)[0])->value();
holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
}
JSArray* array;
MaybeObject* maybe_array;
if (*type_info != isolate->heap()->undefined_value()) {
if (*type_info != isolate->heap()->undefined_value() &&
JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi()) {
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
if (cell->value()->IsSmi()) {
Smi* smi = Smi::cast(cell->value());
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
if (holey && !IsFastHoleyElementsKind(to_kind)) {
@ -214,27 +227,25 @@ RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
cell->set_value(Smi::FromInt(to_kind));
}
AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind);
if (mode == TRACK_ALLOCATION_SITE) {
maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite(
to_kind, type_info);
maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
*constructor, type_info);
if (!maybe_array->To(&array)) return maybe_array;
} else {
maybe_array = isolate->heap()->AllocateEmptyJSArray(to_kind);
}
ElementsKind kind = constructor->initial_map()->elements_kind();
ASSERT(kind == GetInitialFastElementsKind());
maybe_array = isolate->heap()->AllocateJSObject(*constructor);
if (!maybe_array->To(&array)) return maybe_array;
}
}
ElementsKind kind = GetInitialFastElementsKind();
// We might need to transition to holey
if (holey) {
kind = GetHoleyElementsKind(kind);
maybe_array = array->TransitionElementsKind(kind);
if (maybe_array->IsFailure()) return maybe_array;
}
if (array == NULL) {
maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
if (!maybe_array->To(&array)) return maybe_array;
}
maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
DONT_INITIALIZE_ARRAY_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
maybe_array = ArrayConstructInitializeElements(array, caller_args);
if (maybe_array->IsFailure()) return maybe_array;
return array;
@ -1500,6 +1511,11 @@ static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
KeyedLoadIC::GenerateNonStrictArguments(masm);
}
static void Generate_StoreIC_Slow(MacroAssembler* masm) {
StoreIC::GenerateSlow(masm);
}
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
@ -1617,6 +1633,11 @@ static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
}
static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
Debug::GenerateCompareNilICDebugBreak(masm);
}
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
Debug::GenerateReturnDebugBreak(masm);
}

6
deps/v8/src/builtins.h

@ -124,6 +124,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
@ -230,6 +232,8 @@ enum BuiltinExtraArguments {
DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
DEBUG_BREAK) \
V(CompareNilIC_DebugBreak, COMPARE_NIL_IC, DEBUG_STUB, \
DEBUG_BREAK) \
V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
@ -274,8 +278,6 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
class BuiltinFunctionTable;
class ObjectVisitor;

81
deps/v8/src/code-stubs-hydrogen.cc

@ -82,6 +82,24 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
class ArrayContextChecker {
public:
ArrayContextChecker(HGraphBuilder* builder, HValue* constructor,
HValue* array_function)
: checker_(builder) {
checker_.If<HCompareObjectEqAndBranch, HValue*>(constructor,
array_function);
checker_.Then();
}
~ArrayContextChecker() {
checker_.ElseDeopt();
checker_.End();
}
private:
IfBuilder checker_;
};
private:
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
@ -240,7 +258,8 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
GetStubType(), -1);
GetStubType(),
GetStubFlags());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@ -290,8 +309,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
checker.Then();
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements =
AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.IfCompareMap(elements, factory->fixed_cow_array_map());
@ -377,11 +395,12 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
for (int i = 0; i < size; i += kPointerSize) {
HInstruction* value =
AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
AddInstruction(new(zone) HLoadNamedField(
boilerplate, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
value,
true, i));
value, true,
Representation::Tagged(), i));
}
checker.ElseDeopt();
@ -409,6 +428,36 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
}
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
Representation representation = casted_stub()->representation();
HInstruction* load = AddInstruction(DoBuildLoadNamedField(
GetParameter(0), casted_stub()->is_inobject(),
representation, casted_stub()->offset()));
return load;
}
Handle<Code> LoadFieldStub::GenerateCode() {
return DoGenerateCode(this);
}
template<>
HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
Representation representation = casted_stub()->representation();
HInstruction* load = AddInstruction(DoBuildLoadNamedField(
GetParameter(0), casted_stub()->is_inobject(),
representation, casted_stub()->offset()));
return load;
}
Handle<Code> KeyedLoadFieldStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
@ -452,8 +501,7 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
if_builder.Else();
HInstruction* elements =
AddInstruction(new(zone) HLoadElements(js_array, js_array));
HInstruction* elements = AddLoadElements(js_array);
HInstruction* elements_length =
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
@ -470,12 +518,15 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
AddInstruction(new(zone) HStoreNamedField(js_array,
factory->elements_field_string(),
new_elements, true,
Representation::Tagged(),
JSArray::kElementsOffset));
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
map, true, JSArray::kMapOffset));
map, true,
Representation::Tagged(),
JSArray::kMapOffset));
return js_array;
}
@ -491,6 +542,10 @@ HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
// -- Parameter 1 : type info cell
// -- Parameter 0 : constructor
// -----------------------------------
HInstruction* array_function = BuildGetArrayFunction(context());
ArrayContextChecker(this,
GetParameter(ArrayConstructorStubBase::kConstructor),
array_function);
// Get the right map
// Should be a constant
JSArrayBuilder array_builder(
@ -510,6 +565,10 @@ Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
HInstruction* array_function = BuildGetArrayFunction(context());
ArrayContextChecker(this,
GetParameter(ArrayConstructorStubBase::kConstructor),
array_function);
// Smi check and range check on the input arg.
HValue* constant_one = graph()->GetConstant1();
HValue* constant_zero = graph()->GetConstant0();
@ -563,6 +622,10 @@ Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
HInstruction* array_function = BuildGetArrayFunction(context());
ArrayContextChecker(this,
GetParameter(ArrayConstructorStubBase::kConstructor),
array_function);
ElementsKind kind = casted_stub()->elements_kind();
HValue* length = GetArgumentsLength();
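ArrayContextChecker acts as a scope guard in the three constructor stubs above: its constructor opens the if (constructor == array function) branch, and its destructor emits the deoptimizing else arm and closes the region. With a named guard, a protected fast path would read linearly:

{
  ArrayContextChecker checker(this, constructor, array_function);
  // ...emit the specialized array-construction graph here...
}  // ~ArrayContextChecker(): checker_.ElseDeopt(); checker_.End();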

4
deps/v8/src/code-stubs.cc

@ -431,7 +431,9 @@ CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags(
} else if (object->IsUndefined()) {
types = static_cast<CompareNilICStub::Types>(
types | CompareNilICStub::kCompareAgainstUndefined);
} else if (object->IsUndetectableObject() || !object->IsHeapObject()) {
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
types = CompareNilICStub::kFullCompare;
} else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
types = CompareNilICStub::kFullCompare;

122
deps/v8/src/code-stubs.h

@ -87,7 +87,8 @@ namespace internal {
V(ArrayConstructor) \
V(ProfileEntryHook) \
/* IC Handler stubs */ \
V(LoadField)
V(LoadField) \
V(KeyedLoadField)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@ -185,6 +186,12 @@ class CodeStub BASE_EMBEDDED {
virtual Code::ExtraICState GetExtraICState() {
return Code::kNoExtraICState;
}
virtual Code::StubType GetStubType() {
return Code::NORMAL;
}
virtual int GetStubFlags() {
return -1;
}
protected:
static bool CanUseFPRegisters();
@ -192,9 +199,6 @@ class CodeStub BASE_EMBEDDED {
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode() = 0;
virtual Code::StubType GetStubType() {
return Code::NORMAL;
}
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@ -253,7 +257,6 @@ class PlatformCodeStub : public CodeStub {
virtual Handle<Code> GenerateCode();
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
virtual int GetStubFlags() { return -1; }
protected:
// Generates the assembler code for the stub.
@ -754,42 +757,108 @@ class StoreArrayLengthStub: public StoreICStub {
};
class HandlerStub: public ICStub {
class HICStub: public HydrogenCodeStub {
public:
virtual Code::Kind GetCodeKind() const { return kind(); }
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
protected:
HICStub() : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { }
class KindBits: public BitField<Code::Kind, 0, 4> {};
virtual Code::Kind kind() const = 0;
};
class HandlerStub: public HICStub {
public:
explicit HandlerStub(Code::Kind kind) : ICStub(kind) { }
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
virtual int GetStubFlags() { return kind(); }
protected:
HandlerStub() : HICStub() { }
};
class LoadFieldStub: public HandlerStub {
public:
LoadFieldStub(Register reg, bool inobject, int index)
: HandlerStub(Code::LOAD_IC),
reg_(reg),
inobject_(inobject),
index_(index) { }
virtual void Generate(MacroAssembler* masm);
LoadFieldStub(bool inobject, int index, Representation representation)
: HandlerStub() {
Initialize(Code::LOAD_IC, inobject, index, representation);
}
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
Representation representation() {
if (unboxed_double()) return Representation::Double();
return Representation::Tagged();
}
virtual Code::Kind kind() const {
return KindBits::decode(bit_field_);
}
bool is_inobject() {
return InobjectBits::decode(bit_field_);
}
int offset() {
int index = IndexBits::decode(bit_field_);
int offset = index * kPointerSize;
if (is_inobject()) return offset;
return FixedArray::kHeaderSize + offset;
}
bool unboxed_double() {
return UnboxedDoubleBits::decode(bit_field_);
}
protected:
virtual Code::StubType GetStubType() { return Code::FIELD; }
protected:
LoadFieldStub() : HandlerStub() { }
void Initialize(Code::Kind kind,
bool inobject,
int index,
Representation representation) {
bool unboxed_double = FLAG_track_double_fields && representation.IsDouble();
bit_field_ = KindBits::encode(kind)
| InobjectBits::encode(inobject)
| IndexBits::encode(index)
| UnboxedDoubleBits::encode(unboxed_double);
}
private:
STATIC_ASSERT(KindBits::kSize == 4);
class RegisterBits: public BitField<int, 4, 6> {};
class InobjectBits: public BitField<bool, 10, 1> {};
class IndexBits: public BitField<int, 11, 11> {};
class InobjectBits: public BitField<bool, 4, 1> {};
class IndexBits: public BitField<int, 5, 11> {};
class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
virtual CodeStub::Major MajorKey() { return LoadField; }
virtual int MinorKey() {
return KindBits::encode(kind())
| RegisterBits::encode(reg_.code())
| InobjectBits::encode(inobject_)
| IndexBits::encode(index_);
virtual int NotMissMinorKey() { return bit_field_; }
int bit_field_;
};
class KeyedLoadFieldStub: public LoadFieldStub {
public:
KeyedLoadFieldStub(bool inobject, int index, Representation representation)
: LoadFieldStub() {
Initialize(Code::KEYED_LOAD_IC, inobject, index, representation);
}
Register reg_;
bool inobject_;
int index_;
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
virtual Handle<Code> GenerateCode();
private:
virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
};
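For reference, the layout packed into bit_field_ (and exposed through NotMissMinorKey) by the declarations above is: kind in bits 0..3, the in-object flag in bit 4, the field index in bits 5..15, and the unboxed-double flag in bit 16. An equivalent hand-rolled encoder, as a sketch:

#include <cstdint>

static uint32_t EncodeLoadFieldKey(uint32_t kind, bool inobject,
                                   uint32_t index, bool unboxed_double) {
  return (kind & 0xF)                                    // KindBits: bits 0..3
       | (static_cast<uint32_t>(inobject) << 4)          // InobjectBits: bit 4
       | ((index & 0x7FF) << 5)                          // IndexBits: bits 5..15
       | (static_cast<uint32_t>(unboxed_double) << 16);  // UnboxedDoubleBits: bit 16
}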
@ -1596,7 +1665,8 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kPropertyCell = 0;
static const int kConstructor = 0;
static const int kPropertyCell = 1;
private:
int NotMissMinorKey() { return bit_field_; }

7
deps/v8/src/compiler.cc

@ -144,7 +144,8 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
Code::NORMAL, -1);
code_stub()->GetStubType(),
code_stub()->GetStubFlags());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@ -299,14 +300,14 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
//
// The encoding is as a signed value, with parameters and receiver using
// the negative indices and locals the non-negative ones.
const int parameter_limit = -LUnallocated::kMinFixedIndex;
const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
info()->set_bailout_reason("too many parameters");
return AbortOptimization();
}
const int locals_limit = LUnallocated::kMaxFixedIndex;
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
if (!info()->osr_ast_id().IsNone() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
info()->set_bailout_reason("too many parameters/locals");

20
deps/v8/src/contexts.h

@ -124,6 +124,15 @@ enum BindingFlags {
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
@ -171,6 +180,7 @@ enum BindingFlags {
strict_mode_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@ -278,6 +288,15 @@ class Context: public FixedArray {
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
ARRAY_BUFFER_FUN_INDEX,
UINT8_ARRAY_FUN_INDEX,
INT8_ARRAY_FUN_INDEX,
UINT16_ARRAY_FUN_INDEX,
INT16_ARRAY_FUN_INDEX,
UINT32_ARRAY_FUN_INDEX,
INT32_ARRAY_FUN_INDEX,
FLOAT_ARRAY_FUN_INDEX,
DOUBLE_ARRAY_FUN_INDEX,
UINT8C_ARRAY_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
@ -305,6 +324,7 @@ class Context: public FixedArray {
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
GENERATOR_RESULT_MAP_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.

12
deps/v8/src/d8-debug.cc

@ -77,7 +77,7 @@ void HandleDebugEvent(DebugEvent event,
// Print the event details.
Handle<Object> details =
Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
Shell::DebugMessageDetails(isolate, Handle<String>::Cast(event_json));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
return;
@ -114,7 +114,7 @@ void HandleDebugEvent(DebugEvent event,
// Convert the debugger command to a JSON debugger request.
Handle<Value> request =
Shell::DebugCommandToJSONRequest(String::New(command));
Shell::DebugCommandToJSONRequest(isolate, String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@ -146,7 +146,8 @@ void HandleDebugEvent(DebugEvent event,
Handle<String> response = Handle<String>::Cast(response_val);
// Convert the debugger response into text details and the running state.
Handle<Object> response_details = Shell::DebugMessageDetails(response);
Handle<Object> response_details =
Shell::DebugMessageDetails(isolate, response);
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@ -281,7 +282,8 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
// Print the event details.
TryCatch try_catch;
Handle<Object> details =
Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
Shell::DebugMessageDetails(isolate_,
Handle<String>::Cast(String::New(message)));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
@ -310,7 +312,7 @@ void RemoteDebugger::HandleKeyboardCommand(char* command) {
// Convert the debugger command to a JSON debugger request.
TryCatch try_catch;
Handle<Value> request =
Shell::DebugCommandToJSONRequest(String::New(command));
Shell::DebugCommandToJSONRequest(isolate_, String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();

84
deps/v8/src/d8.cc

@ -40,6 +40,11 @@
#include <string.h>
#include <sys/stat.h>
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#ifdef V8_SHARED
#include <assert.h>
#endif // V8_SHARED
@ -124,8 +129,8 @@ class PerIsolateData {
}
#define DEFINE_STRING_GETTER(name, value) \
static Persistent<String> name##_string(Isolate* isolate) { \
return Get(isolate)->name##_string_; \
static Handle<String> name##_string(Isolate* isolate) { \
return Handle<String>(*Get(isolate)->name##_string_); \
}
FOR_EACH_STRING(DEFINE_STRING_GETTER)
#undef DEFINE_STRING_GETTER
@ -245,7 +250,7 @@ bool Shell::ExecuteString(Isolate* isolate,
} else {
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm =
Local<Context>::New(data->realms_[data->realm_current_]);
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
@ -272,7 +277,7 @@ bool Shell::ExecuteString(Isolate* isolate,
#if !defined(V8_SHARED)
} else {
v8::TryCatch try_catch;
Context::Scope context_scope(utility_context_);
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("Stringify"));
Handle<Value> argv[1] = { result };
@ -421,7 +426,7 @@ Handle<Value> Shell::RealmEval(const Arguments& args) {
}
Handle<Script> script = Script::New(args[1]->ToString());
if (script.IsEmpty()) return Undefined(isolate);
Local<Context> realm = Local<Context>::New(data->realms_[index]);
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
@ -435,7 +440,7 @@ Handle<Value> Shell::RealmSharedGet(Local<String> property,
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (data->realm_shared_.IsEmpty()) return Undefined(isolate);
return data->realm_shared_;
return Local<Value>::New(isolate, data->realm_shared_);
}
void Shell::RealmSharedSet(Local<String> property,
@ -1057,14 +1062,14 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
Persistent<Value> object,
void* data) {
Persistent<Object>* object,
uint8_t* data) {
HandleScope scope(isolate);
int32_t length = object->ToObject()->Get(
int32_t length = (*object)->Get(
PerIsolateData::byteLength_string(isolate))->Uint32Value();
isolate->AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data);
object.Dispose(isolate);
delete[] data;
object->Dispose(isolate);
}
@ -1180,7 +1185,7 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full) {
HandleScope handle_scope(isolate);
Context::Scope context_scope(utility_context_);
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("GetCompletions"));
static const int kArgc = 3;
@ -1191,8 +1196,10 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
Context::Scope context_scope(utility_context_);
Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
Handle<String> message) {
HandleScope handle_scope(isolate);
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
static const int kArgc = 1;
@ -1202,8 +1209,10 @@ Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
}
Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
Context::Scope context_scope(utility_context_);
Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
Handle<String> command) {
HandleScope handle_scope(isolate);
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
static const int kArgc = 1;
@ -1214,7 +1223,9 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
void Shell::DispatchDebugMessages() {
v8::Context::Scope scope(Shell::evaluation_context_);
Isolate* isolate = v8::Isolate::GetCurrent();
HandleScope handle_scope(isolate);
v8::Context::Scope scope(isolate, Shell::evaluation_context_);
v8::Debug::ProcessDebugMessages();
}
#endif // ENABLE_DEBUGGER_SUPPORT
@ -1327,7 +1338,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
// utility, evaluation and debug context can all access each other.
utility_context_->SetSecurityToken(Undefined(isolate));
evaluation_context_->SetSecurityToken(Undefined(isolate));
Context::Scope utility_scope(utility_context_);
Context::Scope utility_scope(isolate, utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
@ -1459,6 +1470,9 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
global_template->Set(String::New("Realm"), realm_template);
// Bind the handlers for external arrays.
#ifndef V8_SHARED
if (!i::FLAG_harmony_typed_arrays) {
#endif // V8_SHARED
PropertyAttribute attr =
static_cast<PropertyAttribute>(ReadOnly | DontDelete);
global_template->Set(PerIsolateData::ArrayBuffer_string(isolate),
@ -1481,6 +1495,9 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
CreateArrayTemplate(Float64Array), attr);
global_template->Set(String::New("Uint8ClampedArray"),
CreateArrayTemplate(Uint8ClampedArray), attr);
#ifndef V8_SHARED
}
#endif // V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
@ -1522,7 +1539,8 @@ void Shell::InitializeDebugger(Isolate* isolate) {
Locker lock(isolate);
HandleScope scope(isolate);
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
utility_context_ = Context::New(NULL, global_template);
utility_context_.Reset(isolate,
Context::New(isolate, NULL, global_template));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the debugger agent if requested.
@ -1535,14 +1553,15 @@ void Shell::InitializeDebugger(Isolate* isolate) {
}
Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
i::ScopedLock lock(context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Persistent<Context> context = Context::New(NULL, global_template);
HandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
@ -1560,7 +1579,7 @@ Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
context->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
return context;
return handle_scope.Close(context);
}
@ -1740,9 +1759,9 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
void Shell::RunShell(Isolate* isolate) {
Locker locker(isolate);
Context::Scope context_scope(evaluation_context_);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
HandleScope outer_scope(isolate);
Context::Scope context_scope(isolate, evaluation_context_);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Handle<String> name = String::New("(d8)");
LineEditor* console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
@ -1791,7 +1810,7 @@ void ShellThread::Run() {
// Prepare the context for this thread.
Locker locker(isolate_);
HandleScope outer_scope(isolate_);
Persistent<Context> thread_context =
Local<Context> thread_context =
Shell::CreateEvaluationContext(isolate_);
Context::Scope context_scope(thread_context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
@ -1815,7 +1834,6 @@ void ShellThread::Run() {
Shell::ExecuteString(isolate_, str, String::New(filename), false, false);
}
thread_context.Dispose(thread_context->GetIsolate());
ptr = next_line;
}
}
@ -1892,15 +1910,16 @@ void SourceGroup::ExecuteInThread() {
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
{
HandleScope scope(isolate);
PerIsolateData data(isolate);
Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
Local<Context> context = Shell::CreateEvaluationContext(isolate);
{
Context::Scope cscope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Execute(isolate);
}
context.Dispose(isolate);
}
if (Shell::options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@ -2091,11 +2110,12 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#endif // V8_SHARED
{ // NOLINT
Locker lock(isolate);
{
HandleScope scope(isolate);
Persistent<Context> context = CreateEvaluationContext(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
if (options.last_run) {
// Keep using the same context in the interactive shell.
evaluation_context_ = context;
evaluation_context_.Reset(isolate, context);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
// If the interactive debugger is enabled make sure to activate
// it before running the files passed on the command line.
@ -2109,8 +2129,8 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
options.isolate_sources[0].Execute(isolate);
}
}
if (!options.last_run) {
context.Dispose(isolate);
if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@ -2155,7 +2175,7 @@ int Shell::Main(int argc, char* argv[]) {
{
Initialize(isolate);
#ifdef ENABLE_VTUNE_JIT_INTERFACE
vTune::InitilizeVtuneForV8();
vTune::InitializeVtuneForV8();
#endif
PerIsolateData data(isolate);
InitializeDebugger(isolate);
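Across these hunks, CreateEvaluationContext now hands back a Local owned by the caller's HandleScope (promoted out of the factory's scope with HandleScope::Close), and only call sites that actually keep the context, such as the last interactive run, pin it with Persistent::Reset; the explicit Dispose() calls disappear. A minimal sketch of the ownership flow, assuming this revision's API (names are illustrative):

static v8::Persistent<v8::Context> saved_context;

v8::Local<v8::Context> MakeContext(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  return handle_scope.Close(context);  // promote into the caller's scope
}

void RunOnce(v8::Isolate* isolate, bool keep) {
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = MakeContext(isolate);
  if (keep) saved_context.Reset(isolate, context);  // pin beyond this scope
  // ... execute sources; the non-kept case needs no Dispose() ...
}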

12
deps/v8/src/d8.h

@ -273,7 +273,7 @@ class Shell : public i::AllStatic {
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Handle<String> ReadFile(Isolate* isolate, const char* name);
static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
static Local<Context> CreateEvaluationContext(Isolate* isolate);
static int RunMain(Isolate* isolate, int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
@ -292,8 +292,10 @@ class Shell : public i::AllStatic {
static void MapCounters(const char* name);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Handle<Object> DebugMessageDetails(Handle<String> message);
static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
static Handle<Object> DebugMessageDetails(Isolate* isolate,
Handle<String> message);
static Handle<Value> DebugCommandToJSONRequest(Isolate* isolate,
Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
@ -414,8 +416,8 @@ class Shell : public i::AllStatic {
ExternalArrayType type,
int32_t element_size);
static void ExternalArrayWeakCallback(Isolate* isolate,
Persistent<Value> object,
void* data);
Persistent<Object>* object,
uint8_t* data);
};

7
deps/v8/src/debug.cc

@ -76,12 +76,12 @@ Debug::~Debug() {
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
ScopedVector<char> data(s->Length() + 1);
ScopedVector<char> data(s->Utf8Length() + 1);
if (data.start() == NULL) {
V8::FatalProcessOutOfMemory("PrintLn");
return;
}
s->WriteAscii(data.start());
s->WriteUtf8(data.start());
PrintF("%s\n", data.start());
}
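The PrintLn fix above matters for non-ASCII output: String::Length() counts UTF-16 code units while Utf8Length() counts bytes, so a Length()-sized buffer can be overrun by WriteUtf8 (for example, U+00E9 is one code unit but two UTF-8 bytes). A standalone sketch of the corrected pattern, with std::vector standing in for the internal ScopedVector and an illustrative function name:

#include <cstdio>
#include <vector>

void PrintLnUtf8(v8::Local<v8::String> s) {
  std::vector<char> buf(s->Utf8Length() + 1);  // byte length, not Length()
  s->WriteUtf8(&buf[0]);                       // writes the string plus NUL
  printf("%s\n", &buf[0]);
}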
@ -1644,6 +1644,9 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
case Code::KEYED_STORE_IC:
return isolate->builtins()->KeyedStoreIC_DebugBreak();
case Code::COMPARE_NIL_IC:
return isolate->builtins()->CompareNilIC_DebugBreak();
default:
UNREACHABLE();
}

1
deps/v8/src/debug.h

@ -418,6 +418,7 @@ class Debug {
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
static void GenerateCompareNilICDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);

60
deps/v8/src/factory.cc

@ -660,6 +660,14 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
}
Handle<HeapNumber> Factory::NewHeapNumber(double value,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateHeapNumber(value, pretenure), HeapNumber);
}
Handle<JSObject> Factory::NewNeanderObject() {
CALL_HEAP_FUNCTION(
isolate(),
@ -1056,6 +1064,58 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
}
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
JSFunction* typed_array_fun;
Context* native_context = isolate()->context()->native_context();
switch (type) {
case kExternalUnsignedByteArray:
typed_array_fun = native_context->uint8_array_fun();
break;
case kExternalByteArray:
typed_array_fun = native_context->int8_array_fun();
break;
case kExternalUnsignedShortArray:
typed_array_fun = native_context->uint16_array_fun();
break;
case kExternalShortArray:
typed_array_fun = native_context->int16_array_fun();
break;
case kExternalUnsignedIntArray:
typed_array_fun = native_context->uint32_array_fun();
break;
case kExternalIntArray:
typed_array_fun = native_context->int32_array_fun();
break;
case kExternalFloatArray:
typed_array_fun = native_context->float_array_fun();
break;
case kExternalDoubleArray:
typed_array_fun = native_context->double_array_fun();
break;
case kExternalPixelArray:
typed_array_fun = native_context->uint8c_array_fun();
break;
default:
UNREACHABLE();
return Handle<JSTypedArray>();
}
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObject(typed_array_fun),
JSTypedArray);
}
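The switch above simply routes each ExternalArrayType to the constructor the bootstrapper cached on the native context; note that kExternalPixelArray maps to uint8c_array_fun, i.e. Uint8ClampedArray. A hypothetical internal call site (not part of this diff), assuming an i::Isolate* named isolate is in scope:

Handle<JSTypedArray> u8 =
    isolate->factory()->NewJSTypedArray(kExternalUnsignedByteArray);  // Uint8Array
Handle<JSTypedArray> clamped =
    isolate->factory()->NewJSTypedArray(kExternalPixelArray);  // Uint8ClampedArray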
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
CALL_HEAP_FUNCTION(

5
deps/v8/src/factory.h

@ -267,6 +267,9 @@ class Factory {
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);
Handle<HeapNumber> NewHeapNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
// These objects are used by the api to create env-independent data
// structures in the heap.
Handle<JSObject> NewNeanderObject();
@ -315,6 +318,8 @@ class Factory {
Handle<JSArrayBuffer> NewJSArrayBuffer();
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.

10
deps/v8/src/flag-definitions.h

@ -166,6 +166,9 @@ DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
DEFINE_bool(harmony_typed_arrays, false,
"enable harmony typed arrays")
DEFINE_bool(harmony_array_buffer, false,
"enable harmony array buffer")
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
@ -177,7 +180,7 @@ DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
DEFINE_implication(harmony, harmony_typed_arrays)
// TODO(dslomov): add harmony => harmony_typed_arrays
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@ -190,6 +193,9 @@ DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
DEFINE_bool(track_fields, false, "track fields with only smi values")
DEFINE_bool(track_double_fields, false, "track fields with double values")
DEFINE_implication(track_double_fields, track_fields)
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@ -225,6 +231,8 @@ DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
DEFINE_bool(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
DEFINE_bool(trace_migration, false, "trace object migration")
DEFINE_bool(trace_generalization, false, "trace map generalization")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
DEFINE_int(deopt_every_n_times,

34
deps/v8/src/frames-inl.h

@ -98,6 +98,12 @@ inline StackHandler::Kind StackHandler::kind() const {
}
inline unsigned StackHandler::index() const {
const int offset = StackHandlerConstants::kStateOffset;
return IndexField::decode(Memory::unsigned_at(address() + offset));
}
inline Object** StackHandler::context_address() const {
const int offset = StackHandlerConstants::kContextOffset;
return reinterpret_cast<Object**>(address() + offset);
@ -213,6 +219,34 @@ Object* JavaScriptFrame::GetParameter(int index) const {
}
inline Address JavaScriptFrame::GetOperandSlot(int index) const {
Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
ASSERT(IsAddressAligned(base, kPointerSize));
ASSERT_EQ(type(), JAVA_SCRIPT);
ASSERT_LT(index, ComputeOperandsCount());
ASSERT_LE(0, index);
// Operand stack grows down.
return base - index * kPointerSize;
}
inline Object* JavaScriptFrame::GetOperand(int index) const {
return Memory::Object_at(GetOperandSlot(index));
}
inline int JavaScriptFrame::ComputeOperandsCount() const {
Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
// Base points to low address of first operand and stack grows down, so add
// kPointerSize to get the actual stack size.
intptr_t stack_size_in_bytes = (base + kPointerSize) - sp();
ASSERT(IsAligned(stack_size_in_bytes, kPointerSize));
ASSERT(type() == JAVA_SCRIPT);
ASSERT(stack_size_in_bytes >= 0);
return static_cast<int>(stack_size_in_bytes >> kPointerSizeLog2);
}
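A worked example of the two accessors above, with made-up addresses on a 32-bit target (kPointerSize == 4, kPointerSizeLog2 == 2):

// base = fp() + kLocal0Offset = 0x0ffc   (slot of operand 0)
// sp() = 0x0ff4                          (three operands pushed)
// GetOperandSlot(0) == 0x0ffc - 0 * 4 == 0x0ffc
// GetOperandSlot(2) == 0x0ffc - 2 * 4 == 0x0ff4   (== sp(), newest operand)
// stack_size_in_bytes == (0x0ffc + 4) - 0x0ff4 == 12
// ComputeOperandsCount() == 12 >> 2 == 3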
inline Object* JavaScriptFrame::receiver() const {
return GetParameter(-1);
}

120
deps/v8/src/frames.cc

@ -840,6 +840,72 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
}
void JavaScriptFrame::SaveOperandStack(FixedArray* store,
int* stack_handler_index) const {
int operands_count = store->length();
ASSERT_LE(operands_count, ComputeOperandsCount());
// Visit the stack in LIFO order, saving operands and stack handlers into the
// array. The saved stack handlers store a link to the next stack handler,
// which will allow RestoreOperandStack to rewind the handlers.
StackHandlerIterator it(this, top_handler());
int i = operands_count - 1;
*stack_handler_index = -1;
for (; !it.done(); it.Advance()) {
StackHandler* handler = it.handler();
// Save operands pushed after the handler was pushed.
for (; GetOperandSlot(i) < handler->address(); i--) {
store->set(i, GetOperand(i));
}
ASSERT_GE(i + 1, StackHandlerConstants::kSlotCount);
ASSERT_EQ(handler->address(), GetOperandSlot(i));
int next_stack_handler_index = i + 1 - StackHandlerConstants::kSlotCount;
handler->Unwind(isolate(), store, next_stack_handler_index,
*stack_handler_index);
*stack_handler_index = next_stack_handler_index;
i -= StackHandlerConstants::kSlotCount;
}
// Save any remaining operands.
for (; i >= 0; i--) {
store->set(i, GetOperand(i));
}
}
void JavaScriptFrame::RestoreOperandStack(FixedArray* store,
int stack_handler_index) {
int operands_count = store->length();
ASSERT_LE(operands_count, ComputeOperandsCount());
int i = 0;
while (i <= stack_handler_index) {
if (i < stack_handler_index) {
// An operand.
ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
Memory::Object_at(GetOperandSlot(i)) = store->get(i);
i++;
} else {
// A stack handler.
ASSERT_EQ(i, stack_handler_index);
// The FixedArray store grows up. The stack grows down. So the operand
// slot for i actually points to the bottom of the top word in the
// handler. The base of the StackHandler* is the address of the bottom
// word, which will be the last slot that is in the handler.
int handler_slot_index = i + StackHandlerConstants::kSlotCount - 1;
StackHandler *handler =
StackHandler::FromAddress(GetOperandSlot(handler_slot_index));
stack_handler_index = handler->Rewind(isolate(), store, i, fp());
i += StackHandlerConstants::kSlotCount;
}
}
for (; i < operands_count; i++) {
ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
Memory::Object_at(GetOperandSlot(i)) = store->get(i);
}
}
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
@ -1436,6 +1502,60 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
}
// -------------------------------------------------------------------------
void StackHandler::Unwind(Isolate* isolate,
FixedArray* array,
int offset,
int previous_handler_offset) const {
STATIC_ASSERT(StackHandlerConstants::kSlotCount == 5);
ASSERT_LE(0, offset);
ASSERT_GE(array->length(), offset + 5);
// Unwinding a stack handler into an array chains it in the opposite
// direction, re-using the "next" slot as a "previous" link, so that stack
// handlers can be later re-wound in the correct order. Decode the "state"
// slot into "index" and "kind" and store them separately, using the fp slot.
array->set(offset, Smi::FromInt(previous_handler_offset)); // next
array->set(offset + 1, *code_address()); // code
array->set(offset + 2, Smi::FromInt(static_cast<int>(index()))); // state
array->set(offset + 3, *context_address()); // context
array->set(offset + 4, Smi::FromInt(static_cast<int>(kind()))); // fp
*isolate->handler_address() = next()->address();
}
int StackHandler::Rewind(Isolate* isolate,
FixedArray* array,
int offset,
Address fp) {
STATIC_ASSERT(StackHandlerConstants::kSlotCount == 5);
ASSERT_LE(0, offset);
ASSERT_GE(array->length(), offset + 5);
Smi* prev_handler_offset = Smi::cast(array->get(offset));
Code* code = Code::cast(array->get(offset + 1));
Smi* smi_index = Smi::cast(array->get(offset + 2));
Object* context = array->get(offset + 3);
Smi* smi_kind = Smi::cast(array->get(offset + 4));
unsigned state = KindField::encode(static_cast<Kind>(smi_kind->value())) |
IndexField::encode(static_cast<unsigned>(smi_index->value()));
Memory::Address_at(address() + StackHandlerConstants::kNextOffset) =
*isolate->handler_address();
Memory::Object_at(address() + StackHandlerConstants::kCodeOffset) = code;
Memory::uintptr_at(address() + StackHandlerConstants::kStateOffset) = state;
Memory::Object_at(address() + StackHandlerConstants::kContextOffset) =
context;
Memory::Address_at(address() + StackHandlerConstants::kFPOffset) = fp;
*isolate->handler_address() = address();
return prev_handler_offset->value();
}
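Reading Unwind and Rewind together, each saved handler occupies five consecutive FixedArray slots (matching StackHandlerConstants::kSlotCount == 5), with the packed state word split across two Smis:

// [offset + 0]  Smi: offset of the previously unwound handler ("previous" link)
// [offset + 1]  the handler's Code object
// [offset + 2]  Smi: index(), decoded from the state word
// [offset + 3]  the saved context
// [offset + 4]  Smi: kind(), decoded from the state word
//
// Rewind re-packs slots 2 and 4 when writing the handler back to the stack:
//   state = KindField::encode(kind) | IndexField::encode(index)

Rewind then relinks the restored handler as the isolate's innermost one and returns the offset of the next handler to restore.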
// -------------------------------------------------------------------------
int NumRegs(RegList reglist) {

16
deps/v8/src/frames.h

@ -93,6 +93,7 @@ class StackHandlerConstants : public AllStatic {
static const int kFPOffset = 4 * kPointerSize;
static const int kSize = kFPOffset + kPointerSize;
static const int kSlotCount = kSize >> kPointerSizeLog2;
};
@ -131,9 +132,15 @@ class StackHandler BASE_EMBEDDED {
inline bool is_catch() const;
inline bool is_finally() const;
// Generator support to preserve stack handlers.
void Unwind(Isolate* isolate, FixedArray* array, int offset,
int previous_handler_offset) const;
int Rewind(Isolate* isolate, FixedArray* array, int offset, Address fp);
private:
// Accessors.
inline Kind kind() const;
inline unsigned index() const;
inline Object** context_address() const;
inline Object** code_address() const;
@ -536,6 +543,15 @@ class JavaScriptFrame: public StandardFrame {
return GetNumberOfIncomingArguments();
}
// Access the operand stack.
inline Address GetOperandSlot(int index) const;
inline Object* GetOperand(int index) const;
inline int ComputeOperandsCount() const;
// Generator support to preserve operand stack and stack handlers.
void SaveOperandStack(FixedArray* store, int* stack_handler_index) const;
void RestoreOperandStack(FixedArray* store, int stack_handler_index);
// Debugger access.
void SetParameterValue(int index, Object* value) const;

5
deps/v8/src/full-codegen.h

@ -410,6 +410,11 @@ class FullCodeGenerator: public AstVisitor {
// this has to be a separate pass _before_ populating or executing any module.
void AllocateModules(ZoneList<Declaration*>* declarations);
// Generator code to return a fresh iterator result object. The "value"
// property is set to a value popped from the stack, and "done" is set
// according to the argument.
void EmitReturnIteratorResult(bool done);
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operation
// has been matched and all code generated; false otherwise.

19
deps/v8/src/global-handles.cc

@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#include "v8.h"
#include "api.h"
@ -232,7 +235,7 @@ class GlobalHandles::Node {
void MakeWeak(GlobalHandles* global_handles,
void* parameter,
WeakReferenceCallback weak_reference_callback,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(state() != FREE);
set_state(WEAK);
@ -264,7 +267,7 @@ class GlobalHandles::Node {
set_state(NEAR_DEATH);
set_parameter(NULL);
v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
v8::Persistent<v8::Value> object = ToApi<v8::Value>(handle());
{
// Check that we are not passing a finalized external string to
// the callback.
@ -276,9 +279,11 @@ class GlobalHandles::Node {
VMState<EXTERNAL> state(isolate);
if (near_death_callback_ != NULL) {
if (IsWeakCallback::decode(flags_)) {
WeakReferenceCallback callback =
reinterpret_cast<WeakReferenceCallback>(near_death_callback_);
callback(object, par);
RevivableCallback callback =
reinterpret_cast<RevivableCallback>(near_death_callback_);
callback(reinterpret_cast<v8::Isolate*>(isolate),
&object,
par);
} else {
near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate),
object,
@ -490,9 +495,9 @@ void GlobalHandles::Destroy(Object** location) {
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
WeakReferenceCallback weak_reference_callback,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(near_death_callback != NULL);
ASSERT((weak_reference_callback == NULL) != (near_death_callback == NULL));
Node::FromLocation(location)->MakeWeak(this,
parameter,
weak_reference_callback,

4
deps/v8/src/global-handles.h

@ -130,6 +130,8 @@ class GlobalHandles {
// Destroy a global handle.
void Destroy(Object** location);
typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
// handles point to an object the handles are cleared and the callback
@ -138,7 +140,7 @@ class GlobalHandles {
// reason is that Smi::FromInt(0) does not change during garbage collection.
void MakeWeak(Object** location,
void* parameter,
WeakReferenceCallback weak_reference_callback,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback);
void RecordStats(HeapStats* stats);
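For reference, the Revivable shape this typedef expands to takes the isolate, a pointer to the persistent handle, and the parameter. A sketch of a conforming callback, assuming WeakReferenceCallbacks<v8::Value, void>::Revivable from this revision's v8.h (the callback name is illustrative):

static void OnNearDeath(v8::Isolate* isolate,
                        v8::Persistent<v8::Value>* object,
                        void* parameter) {
  // The callback may revive the object by taking a new strong reference;
  // otherwise it should dispose of the handle.
  object->Dispose(isolate);
}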

15
deps/v8/src/handles-inl.h

@ -53,8 +53,9 @@ Handle<T>::Handle(T* obj, Isolate* isolate) {
template <typename T>
inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
ASSERT(location_ == NULL ||
reinterpret_cast<Address>(*location_) != kZapValue);
ASSERT(location_ == NULL || !(*location_)->IsFailure());
if (location_ == other.location_) return true;
if (location_ == NULL || other.location_ == NULL) return false;
// Dereferencing deferred handles to check object equality is safe.
SLOW_ASSERT(IsDereferenceAllowed(true) && other.IsDereferenceAllowed(true));
return *location_ == *other.location_;
@ -63,24 +64,22 @@ inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
template <typename T>
inline T* Handle<T>::operator*() const {
ASSERT(location_ != NULL);
ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
ASSERT(location_ != NULL && !(*location_)->IsFailure());
SLOW_ASSERT(IsDereferenceAllowed(false));
return *BitCast<T**>(location_);
}
template <typename T>
inline T** Handle<T>::location() const {
ASSERT(location_ == NULL ||
reinterpret_cast<Address>(*location_) != kZapValue);
SLOW_ASSERT(IsDereferenceAllowed(false));
ASSERT(location_ == NULL || !(*location_)->IsFailure());
SLOW_ASSERT(location_ == NULL || IsDereferenceAllowed(false));
return location_;
}
#ifdef DEBUG
template <typename T>
bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
if (location_ == NULL) return true;
ASSERT(location_ != NULL);
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);

40
deps/v8/src/heap-inl.h

@ -650,6 +650,10 @@ inline bool Heap::allow_allocation(bool new_state) {
return old;
}
inline void Heap::set_allow_allocation(bool allocation_allowed) {
allocation_allowed_ = allocation_allowed;
}
#endif
@ -864,33 +868,41 @@ DisallowAllocationFailure::~DisallowAllocationFailure() {
#ifdef DEBUG
AssertNoAllocation::AssertNoAllocation() {
Isolate* isolate = ISOLATE;
active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active_) {
old_state_ = isolate->heap()->allow_allocation(false);
bool EnterAllocationScope(Isolate* isolate, bool allow_allocation) {
bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
bool last_state = isolate->heap()->IsAllocationAllowed();
if (active) {
// TODO(yangguo): Make HandleDereferenceGuard avoid isolate mutation in the
// same way if running on the optimizer thread.
isolate->heap()->set_allow_allocation(allow_allocation);
}
return last_state;
}
AssertNoAllocation::~AssertNoAllocation() {
if (active_) HEAP->allow_allocation(old_state_);
void ExitAllocationScope(Isolate* isolate, bool last_state) {
bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active) {
isolate->heap()->set_allow_allocation(last_state);
}
}
DisableAssertNoAllocation::DisableAssertNoAllocation() {
Isolate* isolate = ISOLATE;
active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active_) {
old_state_ = isolate->heap()->allow_allocation(true);
AssertNoAllocation::AssertNoAllocation()
: last_state_(EnterAllocationScope(ISOLATE, false)) {
}
AssertNoAllocation::~AssertNoAllocation() {
ExitAllocationScope(ISOLATE, last_state_);
}
DisableAssertNoAllocation::DisableAssertNoAllocation()
: last_state_(EnterAllocationScope(ISOLATE, true)) {
}
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
if (active_) HEAP->allow_allocation(old_state_);
ExitAllocationScope(ISOLATE, last_state_);
}
#else
AssertNoAllocation::AssertNoAllocation() { }
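The refactoring replaces the per-object active_/old_state_ bookkeeping with the shared EnterAllocationScope/ExitAllocationScope helpers; usage of the guard objects is unchanged. A sketch (debug builds only, mirroring the usage comment in heap.h):

{
  AssertNoAllocation no_gc;  // saves the previous state, forbids allocation
  // ... walk raw object pointers that must not move ...
}                            // destructor restores the saved state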

5
deps/v8/src/heap-snapshot-generator.cc

@ -1309,8 +1309,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
int real_size = js_obj->map()->NumberOfOwnDescriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
if (descs->GetDetails(i).descriptor_index() > real_size) continue;
for (int i = 0; i < real_size; i++) {
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
@ -1332,7 +1331,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
js_obj->GetInObjectPropertyOffset(index));
}
} else {
Object* value = js_obj->FastPropertyAt(index);
Object* value = js_obj->RawFastPropertyAt(index);
if (k != heap_->hidden_string()) {
SetPropertyReference(js_obj, entry, k, value);
} else {

19
deps/v8/src/heap.cc

@ -3176,7 +3176,8 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
MaybeObject* Heap::NumberToString(Object* number,
bool check_number_string_cache) {
bool check_number_string_cache,
PretenureFlag pretenure) {
isolate_->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
@ -3197,7 +3198,8 @@ MaybeObject* Heap::NumberToString(Object* number,
}
Object* js_string;
MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
MaybeObject* maybe_js_string =
AllocateStringFromOneByte(CStrVector(str), pretenure);
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@ -4156,7 +4158,9 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsInternalizedString());
FieldDescriptor field(name, i, NONE, i + 1);
// TODO(verwaest): Since we cannot update the boilerplate's map yet,
// initialize to the worst case.
FieldDescriptor field(name, i, NONE, Representation::Tagged());
descriptors->Set(i, &field, witness);
}
descriptors->Sort();
@ -4336,8 +4340,7 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
if (to_kind != initial_map->elements_kind()) {
MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
isolate(), to_kind);
MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
// Possibly alter the mode, since we found an updated elements kind
// in the type info cell.
@ -4585,12 +4588,10 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
details.descriptor_index());
PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
Object* value = descs->GetCallbacksObject(i);
MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
if (!maybe_value->ToObject(&value)) return maybe_value;

28
deps/v8/src/heap.h

@ -1476,6 +1476,7 @@ class Heap {
#ifdef DEBUG
bool IsAllocationAllowed() { return allocation_allowed_; }
inline void set_allow_allocation(bool allocation_allowed);
inline bool allow_allocation(bool enable);
bool disallow_allocation_failure() {
@ -1530,6 +1531,14 @@ class Heap {
return new_space_high_promotion_mode_active_;
}
inline PretenureFlag GetPretenureMode() {
return new_space_high_promotion_mode_active_ ? TENURED : NOT_TENURED;
}
inline Address* NewSpaceHighPromotionModeActiveAddress() {
return reinterpret_cast<Address*>(&new_space_high_promotion_mode_active_);
}
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
@ -1608,7 +1617,8 @@ class Heap {
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
Object* number, bool check_number_string_cache = true,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
@ -1975,7 +1985,8 @@ class Heap {
// Indicates that the new space should be kept small due to high promotion
// rates caused by the mutator allocating a lot of long-lived objects.
bool new_space_high_promotion_mode_active_;
// TODO(hpayer): change to bool if no longer accessed from generated code
intptr_t new_space_high_promotion_mode_active_;
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
@ -2691,6 +2702,13 @@ class DescriptorLookupCache {
// { AssertNoAllocation nogc;
// ...
// }
#ifdef DEBUG
inline bool EnterAllocationScope(Isolate* isolate, bool allow_allocation);
inline void ExitAllocationScope(Isolate* isolate, bool last_state);
#endif
class AssertNoAllocation {
public:
inline AssertNoAllocation();
@ -2698,8 +2716,7 @@ class AssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
bool active_;
bool last_state_;
#endif
};
@ -2711,8 +2728,7 @@ class DisableAssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
bool active_;
bool last_state_;
#endif
};

95
deps/v8/src/hydrogen-instructions.cc

@ -54,20 +54,6 @@ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
const char* Representation::Mnemonic() const {
switch (kind_) {
case kNone: return "v";
case kTagged: return "t";
case kDouble: return "d";
case kInteger32: return "i";
case kExternal: return "x";
default:
UNREACHABLE();
return NULL;
}
}
int HValue::LoopWeight() const {
const int w = FLAG_loop_weight;
static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
@ -1615,15 +1601,6 @@ void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
}
void HLoadElements::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
if (HasTypeCheck()) {
stream->Add(" ");
typecheck()->PrintNameTo(stream);
}
}
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" [%p", *map_set()->first());
@ -1972,6 +1949,10 @@ void HPhi::DeleteFromGraph() {
void HPhi::InitRealUses(int phi_id) {
// Initialize real uses.
phi_id_ = phi_id;
// Compute a conservative approximation of truncating uses before inferring
// representations. The proper, exact computation will be done later, when
// inserting representation changes.
SetFlag(kTruncatingToInt32);
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
@ -1981,6 +1962,9 @@ void HPhi::InitRealUses(int phi_id) {
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
}
if (!value->IsSimulate() && !value->CheckFlag(kTruncatingToInt32)) {
ClearFlag(kTruncatingToInt32);
}
}
}
}
@ -2076,7 +2060,12 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(false),
is_not_in_new_space_(true),
boolean_value_(handle->BooleanValue()) {
if (handle_->IsHeapObject()) {
Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
is_not_in_new_space_ = !heap->InNewSpace(*handle);
}
if (handle_->IsNumber()) {
double n = handle_->Number();
has_int32_value_ = IsInteger32(n);
@ -2105,12 +2094,14 @@ HConstant::HConstant(Handle<Object> handle,
Representation r,
HType type,
bool is_internalize_string,
bool is_not_in_new_space,
bool boolean_value)
: handle_(handle),
unique_id_(unique_id),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(is_internalize_string),
is_not_in_new_space_(is_not_in_new_space),
boolean_value_(boolean_value),
type_from_value_(type) {
ASSERT(!handle.is_null());
@ -2122,12 +2113,14 @@ HConstant::HConstant(Handle<Object> handle,
HConstant::HConstant(int32_t integer_value,
Representation r,
bool is_not_in_new_space,
Handle<Object> optional_handle)
: handle_(optional_handle),
unique_id_(),
has_int32_value_(true),
has_double_value_(true),
is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
@ -2137,12 +2130,14 @@ HConstant::HConstant(int32_t integer_value,
HConstant::HConstant(double double_value,
Representation r,
bool is_not_in_new_space,
Handle<Object> optional_handle)
: handle_(optional_handle),
unique_id_(),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
boolean_value_(double_value != 0 && !std::isnan(double_value)),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
@ -2162,26 +2157,35 @@ void HConstant::Initialize(Representation r) {
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
if (has_int32_value_) return new(zone) HConstant(int32_value_, r, handle_);
if (has_double_value_) return new(zone) HConstant(double_value_, r, handle_);
if (has_int32_value_) {
return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, handle_);
}
if (has_double_value_) {
return new(zone) HConstant(double_value_, r, is_not_in_new_space_, handle_);
}
ASSERT(!handle_.is_null());
return new(zone) HConstant(handle_,
unique_id_,
r,
type_from_value_,
is_internalized_string_,
is_not_in_new_space_,
boolean_value_);
}
HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
if (has_int32_value_) {
return new(zone) HConstant(
int32_value_, Representation::Integer32(), handle_);
return new(zone) HConstant(int32_value_,
Representation::Integer32(),
is_not_in_new_space_,
handle_);
}
if (has_double_value_) {
return new(zone) HConstant(
DoubleToInt32(double_value_), Representation::Integer32(), handle_);
return new(zone) HConstant(DoubleToInt32(double_value_),
Representation::Integer32(),
is_not_in_new_space_,
handle_);
}
return NULL;
}
@ -2517,6 +2521,8 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
// Deprecated maps are updated to the current map in the type oracle.
ASSERT(!map->is_deprecated());
LookupResult lookup(map->GetIsolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
@ -2528,6 +2534,12 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
}
if (FLAG_track_double_fields &&
lookup.representation().IsDouble()) {
// Since the value needs to be boxed, use a generic handler for
// loading doubles.
continue;
}
types_.Add(types->at(i), zone);
break;
}
@ -3501,14 +3513,7 @@ void HPhi::SimplifyConstantInputs() {
void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
// If there are non-Phi uses, and all of them have observed the same
// representation, than that's what this Phi is going to use.
Representation new_rep = RepresentationObservedByAllNonPhiUses();
if (!new_rep.IsNone()) {
UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
return;
}
new_rep = RepresentationFromInputs();
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
@ -3517,22 +3522,6 @@ void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
}
Representation HPhi::RepresentationObservedByAllNonPhiUses() {
int non_phi_use_count = 0;
for (int i = Representation::kInteger32;
i < Representation::kNumRepresentations; ++i) {
non_phi_use_count += non_phi_uses_[i];
}
if (non_phi_use_count <= 1) return Representation::None();
for (int i = 0; i < Representation::kNumRepresentations; ++i) {
if (non_phi_uses_[i] == non_phi_use_count) {
return Representation::FromKind(static_cast<Representation::Kind>(i));
}
}
return Representation::None();
}
Representation HPhi::RepresentationFromInputs() {
bool double_occurred = false;
bool int32_occurred = false;

196
deps/v8/src/hydrogen-instructions.h

@ -135,7 +135,6 @@ class LChunkBuilder;
V(IsUndetectableAndBranch) \
V(LeaveInlined) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@ -207,6 +206,7 @@ class LChunkBuilder;
V(Calls) \
V(InobjectFields) \
V(BackingStoreFields) \
V(DoubleFields) \
V(ElementsKind) \
V(ElementsPointer) \
V(ArrayElements) \
@ -304,58 +304,6 @@ class Range: public ZoneObject {
};
class Representation {
public:
enum Kind {
kNone,
kInteger32,
kDouble,
kTagged,
kExternal,
kNumRepresentations
};
Representation() : kind_(kNone) { }
static Representation None() { return Representation(kNone); }
static Representation Tagged() { return Representation(kTagged); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
static Representation FromKind(Kind kind) { return Representation(kind); }
bool Equals(const Representation& other) {
return kind_ == other.kind_;
}
bool is_more_general_than(const Representation& other) {
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
return kind_ > other.kind_;
}
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
bool IsTagged() const { return kind_ == kTagged; }
bool IsInteger32() const { return kind_ == kInteger32; }
bool IsDouble() const { return kind_ == kDouble; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
return kind_ == kInteger32 || kind_ == kDouble;
}
const char* Mnemonic() const;
private:
explicit Representation(Kind k) : kind_(k) { }
// Make sure kind fits in int8.
STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
int8_t kind_;
};
class UniqueValueId {
public:
UniqueValueId() : raw_address_(NULL) { }
@ -2405,15 +2353,20 @@ class HCallNewArray: public HCallNew {
Handle<JSGlobalPropertyCell> type_cell)
: HCallNew(context, constructor, argument_count),
type_cell_(type_cell) {
elements_kind_ = static_cast<ElementsKind>(
Smi::cast(type_cell->value())->value());
}
Handle<JSGlobalPropertyCell> property_cell() const {
return type_cell_;
}
ElementsKind elements_kind() const { return elements_kind_; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
ElementsKind elements_kind_;
Handle<JSGlobalPropertyCell> type_cell_;
};
@ -2637,39 +2590,6 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
};
class HLoadElements: public HTemplateInstruction<2> {
public:
HLoadElements(HValue* value, HValue* typecheck) {
SetOperandAt(0, value);
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnElementsPointer);
}
HValue* value() { return OperandAt(0); }
HValue* typecheck() {
ASSERT(HasTypeCheck());
return OperandAt(1);
}
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadElements)
protected:
virtual bool DataEquals(HValue* other) { return true; }
private:
virtual bool IsDeletable() const { return true; }
};
class HLoadExternalArrayPointer: public HUnaryOperation {
public:
explicit HLoadExternalArrayPointer(HValue* value)
@ -3065,7 +2985,6 @@ class HPhi: public HValue {
virtual Range* InferRange(Zone* zone);
virtual void InferRepresentation(HInferRepresentation* h_infer);
Representation RepresentationObservedByAllNonPhiUses();
Representation RepresentationFromUseRequirements();
virtual Representation RequiredInputRepresentation(int index) {
return representation();
@ -3238,19 +3157,24 @@ class HConstant: public HTemplateInstruction<0> {
HConstant(Handle<Object> handle, Representation r);
HConstant(int32_t value,
Representation r,
bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(double value,
Representation r,
bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(Handle<Object> handle,
UniqueValueId unique_id,
Representation r,
HType type,
bool is_internalized_string,
bool is_not_in_new_space,
bool boolean_value);
Handle<Object> handle() {
if (handle_.is_null()) {
// Default arguments to is_not_in_new_space depend on this heap number
// to be tenured so that it's guaranteed not to be located in new space.
handle_ = FACTORY->NewNumber(double_value_, TENURED);
}
ALLOW_HANDLE_DEREF(Isolate::Current(), "smi check");
@ -3265,6 +3189,10 @@ class HConstant: public HTemplateInstruction<0> {
std::isnan(double_value_));
}
bool NotInNewSpace() const {
return is_not_in_new_space_;
}
bool ImmortalImmovable() const {
if (has_int32_value_) {
return false;
@ -3411,6 +3339,7 @@ class HConstant: public HTemplateInstruction<0> {
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
bool is_not_in_new_space_ : 1;
bool boolean_value_ : 1;
int32_t int32_value_;
double double_value_;
@ -3434,16 +3363,27 @@ class HBinaryOperation: public HTemplateInstruction<3> {
HValue* left() { return OperandAt(1); }
HValue* right() { return OperandAt(2); }
// TODO(kasperl): Move these helpers to the IA-32 Lithium
// instruction sequence builder.
HValue* LeastConstantOperand() {
if (IsCommutative() && left()->IsConstant()) return right();
return left();
// True if switching left and right operands likely generates better code.
bool AreOperandsBetterSwitched() {
if (!IsCommutative()) return false;
// Constant operands are better off on the right, they can be inlined in
// many situations on most platforms.
if (left()->IsConstant()) return true;
if (right()->IsConstant()) return false;
// Otherwise, if there is only one use of the right operand, it would be
// better off on the left for platforms that only have 2-arg arithmetic
// ops (e.g ia32, x64) that clobber the left operand.
return (right()->UseCount() == 1);
}
HValue* BetterLeftOperand() {
return AreOperandsBetterSwitched() ? right() : left();
}
HValue* MostConstantOperand() {
if (IsCommutative() && left()->IsConstant()) return left();
return right();
HValue* BetterRightOperand() {
return AreOperandsBetterSwitched() ? left() : right();
}
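A hypothetical illustration of the heuristic above, for a commutative add t = c + v where c is a constant and v is a register value:

// left() == c (constant), so AreOperandsBetterSwitched() returns true:
//   BetterLeftOperand()  == v   // may be clobbered by 2-arg ops on ia32/x64
//   BetterRightOperand() == c   // can often be folded into an immediate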
void set_observed_input_representation(int index, Representation rep) {
@ -5265,29 +5205,45 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
class HLoadNamedField: public HTemplateInstruction<2> {
public:
HLoadNamedField(HValue* object, bool is_in_object, int offset,
HValue* typecheck = NULL)
HLoadNamedField(HValue* object, bool is_in_object,
Representation field_representation,
int offset, HValue* typecheck = NULL)
: is_in_object_(is_in_object),
field_representation_(field_representation),
offset_(offset) {
ASSERT(object != NULL);
SetOperandAt(0, object);
SetOperandAt(1, typecheck != NULL ? typecheck : object);
if (FLAG_track_fields && field_representation.IsSmi()) {
set_type(HType::Smi());
set_representation(Representation::Tagged());
} else if (FLAG_track_double_fields && field_representation.IsDouble()) {
set_representation(field_representation);
} else {
set_representation(Representation::Tagged());
}
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
if (is_in_object) {
if (FLAG_track_double_fields && representation().IsDouble()) {
ASSERT(is_in_object);
ASSERT(offset == HeapNumber::kValueOffset);
SetGVNFlag(kDependsOnDoubleFields);
} else if (is_in_object) {
SetGVNFlag(kDependsOnInobjectFields);
SetGVNFlag(kDependsOnMaps);
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
SetGVNFlag(kDependsOnMaps);
}
}
static HLoadNamedField* NewArrayLength(Zone* zone, HValue* object,
HValue* typecheck,
HType type = HType::Tagged()) {
Representation representation =
type.IsSmi() ? Representation::Smi() : Representation::Tagged();
HLoadNamedField* result = new(zone) HLoadNamedField(
object, true, JSArray::kLengthOffset, typecheck);
object, true, representation, JSArray::kLengthOffset, typecheck);
result->set_type(type);
result->SetGVNFlag(kDependsOnArrayLengths);
result->ClearGVNFlag(kDependsOnInobjectFields);
@ -5302,6 +5258,7 @@ class HLoadNamedField: public HTemplateInstruction<2> {
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
bool is_in_object() const { return is_in_object_; }
Representation field_representation() const { return representation_; }
int offset() const { return offset_; }
virtual Representation RequiredInputRepresentation(int index) {
@ -5321,6 +5278,7 @@ class HLoadNamedField: public HTemplateInstruction<2> {
virtual bool IsDeletable() const { return true; }
bool is_in_object_;
Representation field_representation_;
int offset_;
};
@ -5615,29 +5573,41 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
class HStoreNamedField: public HTemplateInstruction<2> {
public:
HStoreNamedField(HValue* obj,
Handle<String> name,
Handle<Name> name,
HValue* val,
bool in_object,
Representation field_representation,
int offset)
: name_(name),
is_in_object_(in_object),
field_representation_(field_representation),
offset_(offset),
transition_unique_id_(),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetFlag(kTrackSideEffectDominators);
SetGVNFlag(kDependsOnNewSpacePromotion);
if (is_in_object_) {
if (FLAG_track_double_fields && field_representation.IsDouble()) {
SetGVNFlag(kChangesDoubleFields);
} else if (is_in_object_) {
SetGVNFlag(kChangesInobjectFields);
SetGVNFlag(kDependsOnNewSpacePromotion);
} else {
SetGVNFlag(kChangesBackingStoreFields);
SetGVNFlag(kDependsOnNewSpacePromotion);
}
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
virtual Representation RequiredInputRepresentation(int index) {
if (FLAG_track_double_fields &&
index == 1 && field_representation_.IsDouble()) {
return field_representation_;
} else if (FLAG_track_fields &&
index == 1 && field_representation_.IsSmi()) {
return Representation::Integer32();
}
return Representation::Tagged();
}
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
@ -5649,7 +5619,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
Handle<String> name() const { return name_; }
Handle<Name> name() const { return name_; }
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
@ -5658,7 +5628,12 @@ class HStoreNamedField: public HTemplateInstruction<2> {
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value()) &&
ASSERT(!(FLAG_track_double_fields && field_representation_.IsDouble()) ||
transition_.is_null());
return (!FLAG_track_fields || !field_representation_.IsSmi()) &&
// If there is a transition, a new storage object needs to be allocated.
!(FLAG_track_double_fields && field_representation_.IsDouble()) &&
StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
@ -5670,9 +5645,14 @@ class HStoreNamedField: public HTemplateInstruction<2> {
transition_unique_id_ = UniqueValueId(transition_);
}
Representation field_representation() const {
return field_representation_;
}
private:
Handle<String> name_;
Handle<Name> name_;
bool is_in_object_;
Representation field_representation_;
int offset_;
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
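The NeedsWriteBarrier() conditions above can be read case by case; the barrier is a remembered-set update that only tagged pointer stores need:

// smi field     -> value is a small integer, never a heap pointer: no barrier
// double field  -> value goes into an unboxed double payload:      no barrier
//                  (a transitioning double store allocates fresh storage anyway)
// tagged field  -> barrier needed only if the value may be a heap object and
//                  the receiver is not known to be a freshly allocated
//                  new-space object (ReceiverObjectNeedsWriteBarrier)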
@ -6159,12 +6139,14 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
bool fast_elements,
int literal_index,
int depth,
bool may_store_doubles,
bool has_function)
: HMaterializedLiteral<1>(literal_index, depth),
constant_properties_(constant_properties),
constant_properties_length_(constant_properties->length()),
literals_(literals),
fast_elements_(fast_elements),
may_store_doubles_(may_store_doubles),
has_function_(has_function) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
@ -6179,6 +6161,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
}
Handle<FixedArray> literals() const { return literals_; }
bool fast_elements() const { return fast_elements_; }
bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
virtual Representation RequiredInputRepresentation(int index) {
@ -6193,6 +6176,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
int constant_properties_length_;
Handle<FixedArray> literals_;
bool fast_elements_ : 1;
bool may_store_doubles_ : 1;
bool has_function_ : 1;
};

403
deps/v8/src/hydrogen.cc

@ -641,6 +641,7 @@ HConstant* HGraph::GetConstant##Name() { \
Representation::Tagged(), \
htype, \
false, \
true, \
boolean_value); \
constant->InsertAfter(GetConstantUndefined()); \
constant_##name##_.set(constant); \
@ -1191,10 +1192,13 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length->ClearFlag(HValue::kCanOverflow);
Factory* factory = isolate()->factory();
Representation representation = IsFastElementsKind(kind)
? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->length_field_string(),
new_length, true,
representation,
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
@ -1267,8 +1271,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
HValue* elements =
AddInstruction(new(zone) HLoadElements(object, mapcheck));
HValue* elements = AddLoadElements(object, mapcheck);
if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = HCheckMaps::New(
@ -1413,9 +1416,12 @@ void HGraphBuilder::BuildInitializeElements(HValue* elements,
BuildStoreMap(elements, map);
Handle<String> fixed_array_length_field_name = factory->length_field_string();
Representation representation = IsFastElementsKind(kind)
? Representation::Smi() : Representation::Tagged();
HInstruction* store_length =
new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
capacity, true, FixedArray::kLengthOffset);
capacity, true, representation,
FixedArray::kLengthOffset);
AddInstruction(store_length);
}
@ -1447,6 +1453,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
isolate()->factory()->properties_field_symbol(),
empty_fixed_array,
true,
Representation::Tagged(),
JSArray::kPropertiesOffset));
HInstruction* length_store = AddInstruction(
@ -1454,6 +1461,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
isolate()->factory()->length_field_string(),
length_field,
true,
Representation::Tagged(),
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
@ -1479,6 +1487,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
isolate()->factory()->elements_field_string(),
elements,
true,
Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
@ -1493,7 +1502,9 @@ HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
Handle<String> map_field_name = factory->map_field_string();
HInstruction* store_map =
new(zone) HStoreNamedField(object, map_field_name, map,
true, JSObject::kMapOffset);
true, Representation::Tagged(),
JSObject::kMapOffset);
store_map->ClearGVNFlag(kChangesInobjectFields);
store_map->SetGVNFlag(kChangesMaps);
AddInstruction(store_map);
return store_map;
@ -1509,6 +1520,18 @@ HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
}
HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
HValue* typecheck) {
HLoadNamedField* instr = new(zone()) HLoadNamedField(object, true,
Representation::Tagged(), JSObject::kElementsOffset, typecheck);
AddInstruction(instr);
instr->SetGVNFlag(kDependsOnElementsPointer);
instr->ClearGVNFlag(kDependsOnMaps);
instr->ClearGVNFlag(kDependsOnInobjectFields);
return instr;
}
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* context,
HValue* old_capacity) {
Zone* zone = this->zone();
@ -1574,7 +1597,7 @@ HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->elements_field_string(),
new_elements, true,
new_elements, true, Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
@ -1711,13 +1734,13 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
HInstruction* value =
AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
HInstruction* value = AddInstruction(new(zone) HLoadNamedField(
boilerplate, true, Representation::Tagged(), i));
if (i != JSArray::kMapOffset) {
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
value,
true, i));
value, true,
Representation::Tagged(), i));
} else {
BuildStoreMap(object, value);
}
@ -1732,24 +1755,24 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
if (length > 0) {
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
HValue* boilerplate_elements =
AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
HValue* boilerplate_elements = AddLoadElements(boilerplate);
HValue* object_elements =
AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
AddInstruction(new(zone) HStoreNamedField(object,
factory->elements_field_string(),
object_elements,
true, JSObject::kElementsOffset));
object_elements, true,
Representation::Tagged(),
JSObject::kElementsOffset));
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
HInstruction* value =
AddInstruction(new(zone) HLoadNamedField(boilerplate_elements,
true, i));
AddInstruction(new(zone) HLoadNamedField(
boilerplate_elements, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object_elements,
factory->empty_string(),
value,
true, i));
value, true,
Representation::Tagged(), i));
}
// Copy the elements array contents.
@ -1834,11 +1857,32 @@ HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
isolate()->factory()->payload_string(),
payload,
true,
Representation::Tagged(),
AllocationSiteInfo::kPayloadOffset));
return alloc_site;
}
HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* context) {
HInstruction* global_object = AddInstruction(new(zone())
HGlobalObject(context));
HInstruction* native_context = AddInstruction(new(zone())
HLoadNamedField(global_object, true, Representation::Tagged(),
GlobalObject::kNativeContextOffset));
return native_context;
}
HInstruction* HGraphBuilder::BuildGetArrayFunction(HValue* context) {
HInstruction* native_context = BuildGetNativeContext(context);
int offset = Context::kHeaderSize +
kPointerSize * Context::ARRAY_FUNCTION_INDEX;
HInstruction* array_function = AddInstruction(new(zone())
HLoadNamedField(native_context, true, Representation::Tagged(), offset));
return array_function;
}
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
@ -1855,17 +1899,14 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
// Get the global object, the native context, and the map array
HInstruction* global_object = AddInstruction(new(zone())
HGlobalObject(context));
HInstruction* native_context = AddInstruction(new(zone())
HLoadNamedField(global_object, true, GlobalObject::kNativeContextOffset));
HInstruction* native_context = builder()->BuildGetNativeContext(context);
int offset = Context::kHeaderSize +
kPointerSize * Context::JS_ARRAY_MAPS_INDEX;
HInstruction* map_array = AddInstruction(new(zone())
HLoadNamedField(native_context, true, offset));
HLoadNamedField(native_context, true, Representation::Tagged(), offset));
offset = kind_ * kPointerSize + FixedArrayBase::kHeaderSize;
return AddInstruction(new(zone()) HLoadNamedField(map_array, true, offset));
return AddInstruction(new(zone()) HLoadNamedField(
map_array, true, Representation::Tagged(), offset));
}
@ -3747,7 +3788,39 @@ void HInferRepresentation::Analyze() {
}
}
// Set truncation flags for groups of connected phis. This is a conservative
// approximation; the flag will be properly re-computed after representations
// have been determined.
if (phi_count > 0) {
BitVector* done = new(zone()) BitVector(phi_count, graph_->zone());
for (int i = 0; i < phi_count; ++i) {
if (done->Contains(i)) continue;
// Check if all uses of all connected phis in this group are truncating.
bool all_uses_everywhere_truncating = true;
for (BitVector::Iterator it(connected_phis.at(i));
!it.Done();
it.Advance()) {
int index = it.Current();
all_uses_everywhere_truncating &=
phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
done->Add(index);
}
if (all_uses_everywhere_truncating) {
continue; // Great, nothing to do.
}
// Clear truncation flag of this group of connected phis.
for (BitVector::Iterator it(connected_phis.at(i));
!it.Done();
it.Advance()) {
int index = it.Current();
phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
}
}
}
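A standalone sketch of the group check above, with std::vector standing in for V8's BitVector and one bool flag per phi (illustrative shapes, not the V8 API):

#include <vector>

// Each group lists the indices of connected phis. A group keeps its
// truncating flags only if every member already has the flag set;
// otherwise the whole group is cleared.
void UpdateTruncationFlags(const std::vector<std::vector<int>>& groups,
                           std::vector<bool>& truncating) {
  std::vector<bool> done(truncating.size(), false);
  for (const std::vector<int>& group : groups) {
    if (group.empty() || done[group[0]]) continue;
    bool all_truncating = true;
    for (int index : group) {
      all_truncating = all_truncating && truncating[index];
      done[index] = true;
    }
    if (all_truncating) continue;  // Great, nothing to do.
    for (int index : group) truncating[index] = false;
  }
}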
// Simplify constant phi inputs where possible.
// This step uses kTruncatingToInt32 flags of phis.
for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->SimplifyConstantInputs();
}
@ -4027,19 +4100,17 @@ void HGraph::InsertRepresentationChanges() {
// int32-phis allow truncation and iteratively remove the ones that
// are used in an operation that does not allow a truncating
// conversion.
// TODO(fschneider): Replace this with a worklist-based iteration.
ZoneList<HPhi*> worklist(8, zone());
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (phi->representation().IsInteger32()) {
phi->SetFlag(HValue::kTruncatingToInt32);
}
}
bool change = true;
while (change) {
change = false;
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
// If a Phi is used as a non-truncating int32 or as a double,
// clear its "truncating" flag.
@ -4054,11 +4125,27 @@ void HGraph::InsertRepresentationChanges() {
phi->id(), it.value()->id(), it.value()->Mnemonic());
}
phi->ClearFlag(HValue::kTruncatingToInt32);
change = true;
worklist.Add(phi, zone());
break;
}
}
}
while (!worklist.is_empty()) {
HPhi* current = worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
input->representation().IsInteger32() &&
input->CheckFlag(HValue::kTruncatingToInt32)) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
input->ClearFlag(HValue::kTruncatingToInt32);
worklist.Add(HPhi::cast(input), zone());
}
}
}
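The worklist replaces the earlier fixed-point re-scan: clearing a phi's flag queues it, and only that phi's inputs are revisited instead of every phi in the graph. A generic sketch of the pattern:

#include <vector>

// operands[n] lists the phi inputs of phi n. Once a phi loses the flag it
// is queued, and only its inputs are re-examined -- no full re-scan.
void PropagateClears(const std::vector<std::vector<int>>& operands,
                     std::vector<bool>& truncating,
                     std::vector<int> worklist) {
  while (!worklist.empty()) {
    int current = worklist.back();
    worklist.pop_back();
    for (int input : operands[current]) {
      if (truncating[input]) {
        truncating[input] = false;   // Input can no longer truncate either.
        worklist.push_back(input);   // Revisit its own inputs next.
      }
    }
  }
}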
for (int i = 0; i < blocks_.length(); ++i) {
@ -5364,6 +5451,9 @@ void HGraph::DeadCodeElimination() {
while (!worklist.is_empty()) {
HInstruction* instr = worklist.RemoveLast();
// This happens when an instruction is used multiple times as an operand. That
// in turn could happen through GVN.
if (!instr->IsLinked()) continue;
if (FLAG_trace_dead_code_elimination) {
HeapStringAllocator allocator;
StringStream stream(&allocator);
@ -6604,10 +6694,16 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (properties->length() > 0) {
return false;
} else {
int nof = boilerplate->map()->inobject_properties();
for (int i = 0; i < nof; i++) {
Handle<DescriptorArray> descriptors(
boilerplate->map()->instance_descriptors());
int limit = boilerplate->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
Representation representation = details.representation();
int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
Handle<Object> value(boilerplate->InObjectPropertyAt(i), isolate);
Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
@ -6617,6 +6713,8 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
pointer_size)) {
return false;
}
} else if (representation.IsDouble()) {
*data_size += HeapNumber::kSize;
}
}
}
@ -6666,6 +6764,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
expr->fast_elements(),
expr->literal_index(),
expr->depth(),
expr->may_store_doubles(),
expr->has_function()));
}
@ -6814,7 +6913,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// of the property values and is the value of the entire expression.
Push(literal);
HLoadElements* elements = NULL;
HInstruction* elements = NULL;
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
@ -6826,10 +6925,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
// Pass in literal as dummy dependency, since the receiver always has
// elements.
elements = new(zone()) HLoadElements(literal, literal);
AddInstruction(elements);
elements = AddLoadElements(literal);
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i), isolate()),
@ -6887,14 +6983,29 @@ static bool ComputeLoadStoreField(Handle<Map> type,
static int ComputeLoadStoreFieldIndex(Handle<Map> type,
Handle<String> name,
LookupResult* lookup) {
ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
if (lookup->IsField()) {
return lookup->GetLocalFieldIndexFromMap(*type);
} else {
Map* transition = lookup->GetTransitionMapFromMap(*type);
return transition->PropertyIndexFor(*name) - type->inobject_properties();
int descriptor = transition->LastAdded();
int index = transition->instance_descriptors()->GetFieldIndex(descriptor);
return index - type->inobject_properties();
}
}
static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
LookupResult* lookup) {
if (lookup->IsField()) {
return lookup->representation();
} else {
Map* transition = lookup->GetTransitionMapFromMap(*type);
int descriptor = transition->LastAdded();
PropertyDetails details =
transition->instance_descriptors()->GetDetails(descriptor);
return details.representation();
}
}
@ -6949,8 +7060,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
zone()));
}
int index = ComputeLoadStoreFieldIndex(map, name, lookup);
int index = ComputeLoadStoreFieldIndex(map, lookup);
bool is_in_object = index < 0;
Representation representation = ComputeLoadStoreRepresentation(map, lookup);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
@ -6959,9 +7071,33 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
} else {
offset += FixedArray::kHeaderSize;
}
HStoreNamedField* instr =
new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
if (lookup->IsTransitionToField(*map)) {
bool transition_to_field = lookup->IsTransitionToField(*map);
if (FLAG_track_double_fields && representation.IsDouble()) {
if (transition_to_field) {
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = AddInstruction(new(zone()) HConstant(
HeapNumber::kSize, Representation::Integer32()));
HInstruction* double_box = AddInstruction(new(zone()) HAllocate(
environment()->LookupContext(), heap_number_size,
HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
BuildStoreMap(double_box, isolate()->factory()->heap_number_map());
AddInstruction(new(zone()) HStoreNamedField(
double_box, name, value, true,
Representation::Double(), HeapNumber::kValueOffset));
value = double_box;
representation = Representation::Tagged();
} else {
HInstruction* double_box = AddInstruction(new(zone()) HLoadNamedField(
object, is_in_object, Representation::Tagged(), offset));
double_box->set_type(HType::HeapNumber());
return new(zone()) HStoreNamedField(
double_box, name, value, true,
Representation::Double(), HeapNumber::kValueOffset);
}
}
HStoreNamedField* instr = new(zone()) HStoreNamedField(
object, name, value, is_in_object, representation, offset);
if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
// TODO(fschneider): Record the new map type of the object in the IR to
@ -7043,22 +7179,31 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name) {
int count = 0;
int previous_field_offset = 0;
bool previous_field_is_in_object = false;
bool is_monomorphic_field = true;
if (HandlePolymorphicArrayLengthLoad(expr, object, types, name))
return;
Handle<Map> map;
AddInstruction(new(zone()) HCheckNonSmi(object));
// Use monomorphic load if property lookup results in the same field index
// for all maps. Requires special map check on the set of all handled maps.
HInstruction* instr = NULL;
if (types->length() > 0 && types->length() <= kMaxLoadPolymorphism) {
LookupResult lookup(isolate());
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
map = types->at(i);
if (ComputeLoadStoreField(map, name, &lookup, false)) {
int index = ComputeLoadStoreFieldIndex(map, name, &lookup);
int previous_field_offset = 0;
bool previous_field_is_in_object = false;
Representation representation = Representation::None();
int count;
for (count = 0; count < types->length(); ++count) {
Handle<Map> map = types->at(count);
if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
int index = ComputeLoadStoreFieldIndex(map, &lookup);
Representation new_representation =
ComputeLoadStoreRepresentation(map, &lookup);
bool is_in_object = index < 0;
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
@ -7066,31 +7211,33 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
} else {
offset += FixedArray::kHeaderSize;
}
if (count == 0) {
previous_field_offset = offset;
previous_field_is_in_object = is_in_object;
} else if (is_monomorphic_field) {
is_monomorphic_field = (offset == previous_field_offset) &&
(is_in_object == previous_field_is_in_object);
}
++count;
representation = new_representation;
} else if (offset != previous_field_offset ||
is_in_object != previous_field_is_in_object ||
(FLAG_track_fields &&
!representation.IsCompatibleForLoad(new_representation))) {
break;
}
representation = representation.generalize(new_representation);
}
// Use monomorphic load if property lookup results in the same field index
// for all maps. Requires special map check on the set of all handled maps.
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* instr;
if (count == types->length() && is_monomorphic_field) {
if (count == types->length()) {
AddInstruction(HCheckMaps::New(object, types, zone()));
instr = BuildLoadNamedField(object, map, &lookup);
} else {
instr = DoBuildLoadNamedField(
object, previous_field_is_in_object,
representation, previous_field_offset);
}
}
if (instr == NULL) {
HValue* context = environment()->LookupContext();
instr = new(zone()) HLoadNamedFieldPolymorphic(context,
object,
types,
name,
zone());
instr = new(zone()) HLoadNamedFieldPolymorphic(
context, object, types, name, zone());
}
instr->set_position(expr->position());
@ -7239,14 +7386,15 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
// Keyed store.
CHECK_ALIVE(VisitForValue(prop->key()));
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* value = Pop();
HValue* key = Pop();
HValue* object = Pop();
HValue* value = environment()->ExpressionStackAt(0);
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
expr->position(),
true, // is_store
&has_side_effects);
Drop(3);
Push(value);
AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
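A sketch of the peek-then-drop discipline introduced above, with a toy integer stack standing in for the HEnvironment (assumed shape, not the V8 API):

#include <vector>

// At(0) is the top of the stack. Peeking keeps the operands visible to a
// deopt that happens during the store; only after the store and its
// simulate are they dropped and replaced by the result.
struct ExpressionStack {
  std::vector<int> slots;
  int At(int depth) const { return slots[slots.size() - 1 - depth]; }
  void Drop(int n) { slots.resize(slots.size() - n); }
  void Push(int v) { slots.push_back(v); }
};

void KeyedStoreSketch(ExpressionStack& env) {
  int value = env.At(0);    // Peek, do not pop.
  int key = env.At(1);
  int object = env.At(2);
  (void)key; (void)object;  // ... the store itself may deoptimize here ...
  env.Drop(3);              // Operands leave the environment only now.
  env.Push(value);          // The assignment's value is the result.
}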
@ -7648,16 +7796,37 @@ HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
Handle<Map> map,
LookupResult* lookup) {
int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + map->instance_size();
return new(zone()) HLoadNamedField(object, true, offset);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
return new(zone()) HLoadNamedField(object, false, offset);
// Negative property indices are in-object properties, indexed from the end of
// the fixed part of the object. Non-negative property indices are in the
// properties array.
int inobject = index < 0;
Representation representation = lookup->representation();
int offset = inobject
? index * kPointerSize + map->instance_size()
: index * kPointerSize + FixedArray::kHeaderSize;
return DoBuildLoadNamedField(object, inobject, representation, offset);
}
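A tiny worked version of the offset rule above (values are illustrative; kPointerSize of 4 as on ia32):

#include <cassert>

// Negative indices are in-object, counted back from the end of the
// instance; non-negative ones index the out-of-line properties array.
int FieldOffset(int index, int instance_size, int header_size,
                int pointer_size) {
  return index < 0 ? index * pointer_size + instance_size
                   : index * pointer_size + header_size;
}

int main() {
  assert(FieldOffset(-2, 64, 8, 4) == 56);  // in-object, 64-byte instance
  assert(FieldOffset(3, 64, 8, 4) == 20);   // properties array, 8-byte header
  return 0;
}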
HLoadNamedField* HGraphBuilder::DoBuildLoadNamedField(
HValue* object,
bool inobject,
Representation representation,
int offset) {
bool load_double = false;
if (representation.IsDouble()) {
representation = Representation::Tagged();
load_double = FLAG_track_double_fields;
}
HLoadNamedField* field =
new(zone()) HLoadNamedField(object, inobject, representation, offset);
if (load_double) {
AddInstruction(field);
field->set_type(HType::HeapNumber());
return new(zone()) HLoadNamedField(
field, true, Representation::Double(), HeapNumber::kValueOffset);
}
return field;
}
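Under FLAG_track_double_fields the double lives in a heap-number box, so the load above decomposes into two dependent loads, roughly:

// The field itself stays tagged: it holds a pointer to a HeapNumber-like
// box, and the double is a second load from inside that box. Struct names
// are illustrative stand-ins for the real heap layout.
struct HeapNumberBox { double value; };
struct Holder { HeapNumberBox* boxed_field; };  // field at a fixed offset

double LoadNamedDoubleField(const Holder& holder) {
  HeapNumberBox* box = holder.boxed_field;  // First load: tagged pointer.
  return box->value;                        // Second: HeapNumber value slot.
}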
@ -7927,8 +8096,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* elements_kind_instr =
AddInstruction(new(zone()) HElementsKind(object));
HInstruction* elements =
AddInstruction(new(zone()) HLoadElements(object, checkspec));
HInstruction* elements = AddLoadElements(object, checkspec);
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
@ -9555,16 +9723,13 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HCallNew* call;
if (use_call_new_array) {
AddInstruction(new(zone()) HCheckFunction(constructor,
Handle<JSFunction>(isolate()->global_context()->array_function())));
Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
ASSERT(feedback->IsSmi());
// TODO(mvstanton): It would be better to use the already created global
// property cell that is shared by full code gen. That way, any transition
// information that happened after crankshaft won't be lost. The right
// way to do that is to begin passing the cell to the type feedback oracle
// instead of just the value in the cell. Do this in a follow-up checkin.
Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
ASSERT(feedback->IsSmi());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(feedback);
@ -9977,7 +10142,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
after = BuildIncrement(returns_original_input, expr);
input = Pop();
input = environment()->ExpressionStackAt(0);
expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
@ -9985,10 +10150,10 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
true, // is_store
&has_side_effects);
// Drop the key from the bailout environment. Overwrite the receiver
// with the result of the operation, and the placeholder with the
// original value if necessary.
Drop(1);
// Drop the key and the original value from the bailout environment.
// Overwrite the receiver with the result of the operation, and the
// placeholder with the original value if necessary.
Drop(2);
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
@ -10660,7 +10825,6 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
int elements_offset = *offset + object_size;
int inobject_properties = boilerplate_object->map()->inobject_properties();
if (create_allocation_site_info) {
elements_offset += AllocationSiteInfo::kSize;
*offset += AllocationSiteInfo::kSize;
@ -10674,28 +10838,49 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
// Copy in-object properties.
HValue* object_properties =
AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
for (int i = 0; i < inobject_properties; i++) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
int index = descriptors->GetFieldIndex(i);
int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
Handle<Name> name(descriptors->GetKey(i));
Handle<Object> value =
Handle<Object>(boilerplate_object->InObjectPropertyAt(i),
Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<JSObject> original_value_object = Handle<JSObject>::cast(
Handle<Object>(original_boilerplate_object->InObjectPropertyAt(i),
Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
isolate()));
HInstruction* value_instruction =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
true, boilerplate_object->GetInObjectPropertyOffset(i)));
object_properties, name, value_instruction, true,
Representation::Tagged(), property_offset));
BuildEmitDeepCopy(value_object, original_value_object, target,
offset, DONT_TRACK_ALLOCATION_SITE);
} else {
Representation representation = details.representation();
HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
value, Representation::Tagged()));
if (representation.IsDouble()) {
HInstruction* double_box =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
BuildStoreMap(double_box, factory->heap_number_map());
AddInstruction(new(zone) HStoreNamedField(
double_box, name, value_instruction, true,
Representation::Double(), HeapNumber::kValueOffset));
value_instruction = double_box;
*offset += HeapNumber::kSize;
}
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
true, boilerplate_object->GetInObjectPropertyOffset(i)));
object_properties, name, value_instruction, true,
Representation::Tagged(), property_offset));
}
}
@ -10790,7 +10975,7 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
object_header,
factory->elements_field_string(),
elements,
true, JSObject::kElementsOffset));
true, Representation::Tagged(), JSObject::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
Handle<Object> properties_field =
@ -10800,8 +10985,9 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
properties_field, Representation::None()));
AddInstruction(new(zone) HStoreNamedField(object_header,
factory->empty_string(),
properties,
true, JSObject::kPropertiesOffset));
properties, true,
Representation::Tagged(),
JSObject::kPropertiesOffset));
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
@ -10810,11 +10996,15 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
Handle<Object>(boilerplate_array->length(), isolate());
HInstruction* length = AddInstruction(new(zone) HConstant(
length_field, Representation::None()));
ASSERT(boilerplate_array->length()->IsSmi());
Representation representation =
IsFastElementsKind(boilerplate_array->GetElementsKind())
? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object_header,
factory->length_field_string(),
length,
true, JSArray::kLengthOffset));
true, representation, JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
@ -11207,6 +11397,7 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
name,
value,
true, // in-object store.
Representation::Tagged(),
JSValue::kValueOffset));
if_js_value->Goto(join);
join->SetJoinId(call->id());

10
deps/v8/src/hydrogen.h

@ -982,6 +982,11 @@ class HGraphBuilder {
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
// Building common constructs
HLoadNamedField* DoBuildLoadNamedField(HValue* object,
bool inobject,
Representation representation,
int offset);
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
@ -1025,6 +1030,8 @@ class HGraphBuilder {
HInstruction* BuildStoreMap(HValue* object, HValue* map);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);
HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck = NULL);
class IfBuilder {
public:
explicit IfBuilder(HGraphBuilder* builder,
@ -1317,6 +1324,9 @@ class HGraphBuilder {
int previous_object_size,
HValue* payload);
HInstruction* BuildGetNativeContext(HValue* context);
HInstruction* BuildGetArrayFunction(HValue* context);
private:
HGraphBuilder();
CompilationInfo* info_;

75
deps/v8/src/ia32/code-stubs-ia32.cc

@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
#include "builtins-decls.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
@ -79,6 +80,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@ -107,9 +130,10 @@ static void InitializeArrayConstructorDescriptor(
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- type info cell with elements kind
static Register registers[] = { ebx };
descriptor->register_param_count_ = 1;
static Register registers[] = { edi, ebx };
descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs two slots (constructor pointer and single argument)
@ -3298,12 +3322,6 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
}
void LoadFieldStub::Generate(MacroAssembler* masm) {
StubCompiler::DoGenerateFastPropertyLoad(masm, eax, reg_, inobject_, index_);
__ ret(0);
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@ -4758,6 +4776,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(isolate,
LAST_FAST_ELEMENTS_KIND);
__ JumpIfNotSmi(ecx, &miss);
__ cmp(ecx, Immediate(terminal_kind_sentinel));
__ j(above, &miss);
// Load the global or builtins object from the current context
@ -5821,8 +5840,33 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
__ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
Label skip_write_barrier, after_writing;
ExternalReference high_promotion_mode = ExternalReference::
new_space_high_promotion_mode_active_address(masm->isolate());
__ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
__ j(zero, &skip_write_barrier);
__ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
__ RecordWriteField(ecx,
ConsString::kFirstOffset,
eax,
ebx,
kDontSaveFPRegs);
__ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
__ RecordWriteField(ecx,
ConsString::kSecondOffset,
edx,
ebx,
kDontSaveFPRegs);
__ jmp(&after_writing);
__ bind(&skip_write_barrier);
__ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
__ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
__ bind(&after_writing);
__ mov(eax, ecx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@ -7354,8 +7398,10 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
// FastNewClosureStub
// FastNewClosureStub and StringAddStub::Generate
{ REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
// StringAddStub::Generate
{ REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@ -7876,15 +7922,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, kPointerSize));
// There is no info if the call site went megamorphic either
// TODO(mvstanton): Really? I thought if it was the array function that
// the cell wouldn't get stamped as megamorphic.
__ cmp(edx, Immediate(TypeFeedbackCells::MegamorphicSentinel(
masm->isolate())));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
__ JumpIfNotSmi(edx, &no_info);
__ SmiUntag(edx);
__ jmp(&switch_ready);
__ bind(&no_info);

9
deps/v8/src/ia32/debug-ia32.cc

@ -240,6 +240,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- eax : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC call (from ic-ia32.cc)
// ----------- S t a t e -------------

71
deps/v8/src/ia32/full-codegen-ia32.cc

@ -1529,7 +1529,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
expr->depth() > 1) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
@ -1900,11 +1901,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ j(not_equal, &resume);
__ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
// TODO(wingo): Box into { value: VALUE, done: false }.
}
EmitReturnIteratorResult(false);
} else {
__ pop(result_register());
EmitReturnSequence();
}
__ bind(&resume);
context()->Plug(result_register());
@ -1916,18 +1918,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ pop(result_register());
// TODO(wingo): Box into { value: VALUE, done: true }.
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
EmitReturnSequence();
EmitReturnIteratorResult(true);
break;
}
@ -2033,6 +2024,54 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
__ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
__ bind(&allocated);
__ mov(ebx, map);
__ pop(ecx);
__ mov(edx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
__ mov(FieldOperand(eax, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
__ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
__ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
// Only the value field needs a write barrier, as the other values are in the
// root set.
__ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
ecx, edx, kDontSaveFPRegs);
if (done) {
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
}
EmitReturnSequence();
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ jmp(&allocated);
}
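A sketch of the object shape the stub allocates, mirroring the 5 * kPointerSize assertion above (field names are descriptive, not V8 declarations):

// Only the value slot can reference a young-generation object and thus
// needs a write barrier; map, properties, elements, and done are all in
// the root set.
struct IteratorResult {
  const void* map;         // generator_result_map()
  const void* properties;  // empty_fixed_array()
  const void* elements;    // empty_fixed_array()
  const void* value;       // kResultValuePropertyOffset, needs the barrier
  const void* done;        // true/false
};
static_assert(sizeof(IteratorResult) == 5 * sizeof(void*),
              "matches the 5 * kPointerSize instance size asserted above");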
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();

20
deps/v8/src/ia32/ic-ia32.cc

@ -1530,6 +1530,26 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
}
void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
__ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx); // return address
// Do tail-call to runtime routine.
ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value

154
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -113,6 +113,10 @@ void LCodeGen::FinishCode(Handle<Code> code) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
for (int i = 0 ; i < transition_maps_.length(); i++) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
}
}
@ -1230,7 +1234,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(dividend, divisor - 1);
__ bind(&done);
} else {
Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
Label done, remainder_eq_dividend, slow, both_positive;
Register left_reg = ToRegister(instr->left());
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
@ -1266,23 +1270,10 @@ void LCodeGen::DoModI(LModI* instr) {
__ mov(scratch, right_reg);
__ sub(Operand(scratch), Immediate(1));
__ test(scratch, Operand(right_reg));
__ j(not_zero, &do_subtraction, Label::kNear);
__ j(not_zero, &slow, Label::kNear);
__ and_(left_reg, Operand(scratch));
__ jmp(&remainder_eq_dividend, Label::kNear);
__ bind(&do_subtraction);
const int kUnfolds = 3;
// Try a few subtractions of the dividend.
__ mov(scratch, left_reg);
for (int i = 0; i < kUnfolds; i++) {
// Reduce the dividend by the divisor.
__ sub(left_reg, Operand(right_reg));
// Check if the dividend is less than the divisor.
__ cmp(left_reg, Operand(right_reg));
__ j(less, &remainder_eq_dividend, Label::kNear);
}
__ mov(left_reg, scratch);
// Slow case, using idiv instruction.
__ bind(&slow);
@ -1915,18 +1906,26 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
__ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
__ lea(ToRegister(instr->result()), address);
}
} else {
if (right->IsConstantOperand()) {
__ add(ToOperand(left), ToInteger32Immediate(right));
} else {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
}
}
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
@ -2956,13 +2955,27 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
if (FLAG_track_double_fields &&
instr->hydrogen()->representation().IsDouble()) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, FieldOperand(object, offset));
} else {
PushX87DoubleOperand(FieldOperand(object, offset));
CurrentInstructionReturnsX87Result();
}
return;
}
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
__ mov(result, FieldOperand(object, offset));
} else {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
__ mov(result, FieldOperand(result, offset));
}
}
@ -3146,41 +3159,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->object());
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done, ok, fail;
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
__ j(equal, &done, Label::kNear);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(factory()->fixed_cow_array_map()));
__ j(equal, &done, Label::kNear);
Register temp((result.is(eax)) ? ebx : eax);
__ push(temp);
__ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
__ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Map::kElementsKindMask);
__ shr(temp, Map::kElementsKindShift);
__ cmp(temp, GetInitialFastElementsKind());
__ j(less, &fail, Label::kNear);
__ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
__ j(less_equal, &ok, Label::kNear);
__ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less, &fail, Label::kNear);
__ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less_equal, &ok, Label::kNear);
__ bind(&fail);
__ Abort("Check for fast or external elements failed.");
__ bind(&ok);
__ pop(temp);
__ bind(&done);
}
}
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@ -4213,8 +4191,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Set(eax, Immediate(instr->arity()));
__ mov(ebx, instr->hydrogen()->property_cell());
Object* cell_value = instr->hydrogen()->property_cell()->value();
ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@ -4241,16 +4218,51 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Representation representation = instr->representation();
Register object = ToRegister(instr->object());
int offset = instr->offset();
if (!instr->transition().is_null()) {
Handle<Map> transition = instr->transition();
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (!IsInteger32(operand_value)) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
Register value = ToRegister(instr->value());
__ SmiTag(value);
if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
DeoptimizeIf(overflow, instr->environment());
}
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(instr->is_in_object());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(object, offset), value);
} else {
__ fstp_d(FieldOperand(object, offset));
}
return;
}
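A sketch of the guard in the smi branch above: tagging shifts the value left by one, so only 31-bit payloads can be stored without deoptimizing (ia32 tagging assumed; helper name is illustrative):

#include <cstdint>

// Returns false where the generated code would deoptimize.
bool TrySmiTag(int32_t value, int32_t* tagged) {
  if (value < -(1 << 30) || value > (1 << 30) - 1) return false;  // deopt
  *tagged = value * 2;  // same bits as SmiTag's left shift, tag bit 0 clear
  return true;
}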
if (!transition.is_null()) {
if (transition->CanBeDeprecated()) {
transition_maps_.Add(transition, info()->zone());
}
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
__ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
__ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
Register temp_map = ToRegister(instr->temp_map());
__ mov(temp_map, instr->transition());
__ mov(temp_map, transition);
__ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
__ RecordWriteField(object,
@ -4286,6 +4298,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ mov(FieldOperand(write_register, offset), handle_value);
}
} else {
@ -5459,6 +5472,8 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@ -6025,18 +6040,24 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
__ SmiTag(size);
PushSafepointRegistersScope scope(this);
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
if (!size.is(result)) {
__ StoreToSafepointRegisterSlot(result, size);
}
__ mov(result, Immediate(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ SmiTag(ToRegister(instr->size()));
__ push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ push(Immediate(Smi::FromInt(size)));
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
@ -6125,7 +6146,8 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Set up the parameters to the stub/runtime call and pick the right
// runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
if (instr->hydrogen()->depth() > 1) {
if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
instr->hydrogen()->depth() > 1) {
__ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_properties));

2
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -59,6 +59,7 @@ class LCodeGen BASE_EMBEDDED {
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@ -415,6 +416,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;

100
deps/v8/src/ia32/lithium-ia32.cc

@ -601,6 +601,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
LOperand* LChunkBuilder::UseConstant(HValue* value) {
return chunk_->DefineConstantOperand(HConstant::cast(value));
}
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@ -731,7 +736,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
return NULL;
vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@ -829,8 +834,8 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@ -1387,8 +1392,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@ -1555,8 +1560,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
@ -1599,13 +1604,24 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
// Check to see if it would be advantageous to use an lea instruction rather
// than an add. This is the case when no overflow check is needed and there
// are multiple uses of the add's inputs, so using a 3-register add will
// preserve all input values for later uses.
bool use_lea = LAddI::UseLea(instr);
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
HValue* right_candidate = instr->BetterRightOperand();
LOperand* right = use_lea
? UseRegisterOrConstantAtStart(right_candidate)
: UseOrConstantAtStart(right_candidate);
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineSameAsFirst(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
LInstruction* result = use_lea
? DefineAsRegister(add)
: DefineSameAsFirst(add);
if (can_overflow) {
result = AssignEnvironment(result);
}
return result;
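The difference the lea heuristic exploits, reduced to C++: a destructive two-operand add versus a non-destructive three-operand one (a sketch of the idea, not generated code):

// Two-operand form (like "add"): the left input is clobbered and CPU
// flags are set, which is what enables the overflow deopt check.
int AddInPlace(int& left, int right) { return left += right; }

// Three-operand form (like "lea"): both inputs stay live for later uses
// and no flags are set, so it only applies when overflow cannot occur.
int AddToFresh(int left, int right) { return left + right; }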
@ -1624,8 +1640,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
left = UseRegisterAtStart(instr->LeastConstantOperand());
right = UseOrConstantAtStart(instr->MostConstantOperand());
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@ -2166,7 +2182,6 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@ -2207,12 +2222,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@ -2287,19 +2296,6 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
}
// DoStoreKeyed and DoStoreNamedField have special considerations for allowing
// use of a constant instead of a register.
static bool StoreConstantValueAllowed(HValue* value) {
if (value->IsConstant()) {
HConstant* constant_value = HConstant::cast(value);
return constant_value->HasSmiValue()
|| constant_value->HasDoubleValue()
|| constant_value->ImmortalImmovable();
}
return false;
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
@ -2327,17 +2323,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
if (StoreConstantValueAllowed(instr->value())) {
val = UseRegisterOrConstantAtStart(instr->value());
} else {
val = UseRegisterAtStart(instr->value());
}
if (StoreConstantValueAllowed(instr->key())) {
key = UseRegisterOrConstantAtStart(instr->key());
} else {
key = UseRegisterAtStart(instr->key());
}
}
return new(zone()) LStoreKeyed(obj, key, val);
}
@ -2438,11 +2425,24 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
!(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (StoreConstantValueAllowed(instr->value())) {
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
} else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
} else if (FLAG_track_double_fields &&
instr->field_representation().IsDouble()) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseX87TopOfStack(instr->value());
}
} else {
val = UseRegister(instr->value());
}
@ -2455,7 +2455,12 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
LStoreNamedField* result =
new(zone()) LStoreNamedField(obj, val, temp, temp_map);
if (FLAG_track_fields && instr->field_representation().IsSmi()) {
return AssignEnvironment(result);
}
return result;
}
@ -2516,8 +2521,9 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
// TODO(mvstanton): why can't size be a constant if possible?
LOperand* size = UseTempRegister(instr->size());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
@ -2587,7 +2593,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
if (spill_index > LUnallocated::kMaxFixedIndex) {
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}

29
deps/v8/src/ia32/lithium-ia32.h

@ -117,7 +117,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@ -1370,6 +1369,11 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
static bool UseLea(HAdd* add) {
return !add->CheckFlag(HValue::kCanOverflow) &&
add->BetterLeftOperand()->UseCount() > 1;
}
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
@ -1496,6 +1500,11 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
virtual bool ClobbersDoubleRegisters() const {
return !CpuFeatures::IsSupported(SSE2) &&
!hydrogen()->representation().IsDouble();
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
@ -1550,18 +1559,6 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
};
class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
@ -2206,6 +2203,9 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
}
};
@ -2908,6 +2908,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
// An input operand in a constant operand.
MUST_USE_RESULT LOperand* UseConstant(HValue* value);
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);

40
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -1603,10 +1603,32 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate cons string in new space.
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
Label allocate_new_space, install_map;
AllocationFlags flags = TAG_OBJECT;
ExternalReference high_promotion_mode = ExternalReference::
new_space_high_promotion_mode_active_address(isolate());
test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
j(zero, &allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
jmp(&install_map);
bind(&allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
flags);
bind(&install_map);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(isolate()->factory()->cons_ascii_string_map()));
@ -2889,6 +2911,18 @@ void MacroAssembler::CheckPageFlagForMap(
}
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
mov(scratch, map);
mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
j(not_zero, if_deprecated);
}
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,

4
deps/v8/src/ia32/macro-assembler-ia32.h

@ -91,6 +91,10 @@ class MacroAssembler: public Assembler {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
void CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,

180
deps/v8/src/ia32/stub-cache-ia32.cc

@ -369,11 +369,13 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
bool inobject,
int index) {
int index,
Representation representation) {
ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@ -763,8 +765,10 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register value_reg,
Register scratch1,
Register scratch2,
Register unused,
Label* miss_label,
Label* miss_restore_name) {
Label* miss_restore_name,
Label* slow) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
@ -774,6 +778,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
}
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@ -790,7 +803,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// We need an extra register, push
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, miss_restore_name);
scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@ -809,6 +822,46 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
}
Register storage_reg = name_reg;
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
__ pop(value_reg);
}
__ SmiTag(value_reg);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
miss_restore_name, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
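
Note: the block above is the core of the tracked-double store. The incoming value reaches xmm0 either via cvtsi2sd (untagged smi) or movdbl (existing heap number), with fild_s/fld_d as the x87 fallback when SSE2 is unavailable, and is then spilled into the freshly allocated storage box. A hedged C++ model of that normalization (the struct and names are illustrative, not v8 types):

#include <cstdint>
#include <optional>

// Illustrative tagged value: either a small integer or a boxed double.
struct Tagged {
  bool is_smi;
  int32_t smi_value;    // valid when is_smi
  bool is_heap_number;
  double number_value;  // valid when is_heap_number
};

// Mirrors the heap_number/do_store control flow: smis are converted
// (cvtsi2sd / fild_s), heap numbers are unboxed (movdbl / fld_d), anything
// else takes the miss path (nullopt here).
std::optional<double> NormalizeForDoubleField(const Tagged& v) {
  if (v.is_smi) return static_cast<double>(v.smi_value);
  if (v.is_heap_number) return v.number_value;
  return std::nullopt;
}
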
@@ -820,7 +873,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ pop(scratch1); // Return address.
__ push(receiver_reg);
__ push(Immediate(transition));
__ push(eax);
__ push(value_reg);
__ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -834,12 +887,11 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ mov(scratch1, Immediate(transition));
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
name_reg,
scratch2,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -856,32 +908,52 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
if (FLAG_track_double_fields && representation.IsDouble()) {
__ mov(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ mov(FieldOperand(receiver_reg, offset), value_reg);
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
}
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch1,
kDontSaveFPRegs);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), eax);
if (FLAG_track_double_fields && representation.IsDouble()) {
__ mov(FieldOperand(scratch1, offset), storage_reg);
} else {
__ mov(FieldOperand(scratch1, offset), value_reg);
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
}
__ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
kDontSaveFPRegs);
}
}
// Return the value (register eax).
ASSERT(value_reg.is(eax));
@@ -920,12 +992,65 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(scratch1, FieldOperand(receiver_reg, offset));
} else {
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ mov(scratch1, FieldOperand(scratch1, offset));
}
// Store the value into the storage.
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
__ pop(value_reg);
}
__ SmiTag(value_reg);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
}
// Return the value (register eax).
ASSERT(value_reg.is(eax));
__ ret(0);
return;
}
ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
@@ -934,13 +1059,15 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
name_reg,
scratch1,
kDontSaveFPRegs);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), eax);
__ mov(FieldOperand(scratch1, offset), value_reg);
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
@@ -950,6 +1077,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
receiver_reg,
kDontSaveFPRegs);
}
}
// Return the value (register eax).
ASSERT(value_reg.is(eax));
@@ -1197,10 +1325,20 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex index) {
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
__ ret(0);
PropertyIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
} else {
KeyedLoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
}
}
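
Note: GenerateLoadField no longer emits an inline property load; it tail-calls a LoadFieldStub or KeyedLoadFieldStub whose generated code is fully determined by three coordinates, so one stub can be shared by every property load with the same shape. A sketch of that cache key (illustrative types, not the stub machinery):

#include <tuple>

struct FieldLoadKey {
  bool inobject;       // field.is_inobject(holder)
  int index;           // field.translate(holder)
  int representation;  // Tagged, Smi or Double
};

// Two loads with equal keys can reuse the same compiled handler.
inline bool operator==(const FieldLoadKey& a, const FieldLoadKey& b) {
  return std::tie(a.inobject, a.index, a.representation) ==
         std::tie(b.inobject, b.index, b.representation);
}
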
@@ -1455,7 +1593,9 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
GenerateFastPropertyLoad(
masm(), edi, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
// Check that the function really is a function.
__ JumpIfSmi(edi, &miss);
@@ -2984,17 +3124,23 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Register map_reg = scratch1();
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
__ cmp(map_reg, receiver_maps->at(current));
Handle<Map> map = receiver_maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ cmp(map_reg, map);
__ j(equal, handlers->at(current));
}
}
ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
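
Note: stripped of the assembler, the IC body compiled here is a linear map-compare chain that skips deprecated maps (their instances will miss and be migrated instead), which is also why the final state can legitimately collapse back to MONOMORPHIC. A toy model, with function pointers standing in for handler code objects:

#include <vector>

struct HandlerCase {
  const void* map;    // expected receiver map
  bool deprecated;    // filtered out, exactly as in the loop above
  void (*handler)();  // compiled load handler
};

// Falling off the end corresponds to the jump to the miss builtin.
void Dispatch(const void* receiver_map,
              const std::vector<HandlerCase>& cases,
              void (*miss)()) {
  for (const HandlerCase& c : cases) {
    if (!c.deprecated && c.map == receiver_map) {
      c.handler();
      return;
    }
  }
  miss();
}
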

204
deps/v8/src/ic.cc

@@ -182,6 +182,15 @@ Address IC::OriginalCodeAddress() const {
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
Object* receiver,
Object* name) {
if (target->is_keyed_load_stub() ||
target->is_keyed_call_stub() ||
target->is_keyed_store_stub()) {
// Determine whether the failure is due to a name mismatch.
if (!name->IsName()) return false;
Name* stub_name = target->FindFirstName();
if (Name::cast(name) != stub_name) return false;
}
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
@@ -208,10 +217,30 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
map->RemoveFromCodeCache(String::cast(name), target, index);
// For loads, handlers are stored in addition to the ICs on the map. Remove
// those, too.
if (target->is_load_stub() || target->is_keyed_load_stub()) {
Code* handler = target->FindFirstCode();
index = map->IndexInCodeCache(name, handler);
if (index >= 0) {
map->RemoveFromCodeCache(String::cast(name), handler, index);
}
}
return true;
}
return false;
// If the IC is shared between multiple receivers (slow dictionary mode), then
// the map cannot be deprecated and the stub invalidated.
if (cache_holder != OWN_MAP) return false;
// The stub is not in the cache. We've ruled out all other kinds of failure
// except for prototype chain changes, a deprecated map, or a map that's
// different from the one that the stub expects. If the map hasn't changed,
// assume it's a prototype failure. Treat deprecated maps in the same way as
// prototype failures (stay monomorphic if possible).
Map* old_map = target->FindFirstMap();
if (old_map == NULL) return false;
return old_map == map || old_map->is_deprecated();
}
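
Note: the decision sequence above, condensed. This paraphrase uses booleans in place of the real queries and drops the code-cache-removal side effects, so treat it as a reading aid rather than the actual control flow:

// true  => report MONOMORPHIC_PROTOTYPE_FAILURE and repatch the IC
// false => keep the current state
bool ShouldTreatAsPrototypeFailure(bool keyed_stub_name_mismatch,
                                   bool removed_from_code_cache,
                                   bool shared_cache_holder,
                                   bool map_unchanged,
                                   bool old_map_deprecated) {
  if (keyed_stub_name_mismatch) return false;  // key changed, not the chain
  if (removed_from_code_cache) return true;    // stale stub was evicted
  if (shared_cache_holder) return false;       // shared ICs can't be invalidated
  return map_unchanged || old_map_deprecated;  // stay monomorphic if possible
}
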
@@ -221,22 +250,13 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
if (state != MONOMORPHIC || !name->IsString()) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
// For keyed load/store/call, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
// prototype and non-prototype failures for keyed access.
Code::Kind kind = target->kind();
if (kind == Code::KEYED_LOAD_IC ||
kind == Code::KEYED_STORE_IC ||
kind == Code::KEYED_CALL_IC) {
return MONOMORPHIC;
}
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
// Call stubs handle this later to allow extra IC state
// transitions.
if (kind != Code::CALL_IC &&
if (kind != Code::CALL_IC && kind != Code::KEYED_CALL_IC &&
TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
return MONOMORPHIC_PROTOTYPE_FAILURE;
}
@@ -506,6 +526,13 @@ MaybeObject* CallICBase::LoadFunction(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
}
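
Note: this guard is the recurring idiom of the patch: every IC entry point (call, keyed call, load, keyed load, store, keyed store) now migrates an instance off a deprecated map before doing any lookup or cache update. Schematically, with toy types in place of v8's handles:

struct ToyMap { bool is_deprecated; ToyMap* migration_target; };
struct ToyObject { ToyMap* map; };

// Stand-in for JSObject::MigrateInstance: move the object to the current,
// non-deprecated map before the IC inspects it.
void MigrateInstance(ToyObject* o) {
  while (o->map->is_deprecated) o->map = o->map->migration_target;
}

void EnsureInstanceIsUpToDate(ToyObject* receiver) {
  if (receiver->map->is_deprecated) MigrateInstance(receiver);
}
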
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@@ -708,8 +735,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
object, name);
} else if (kind_ == Code::CALL_IC &&
TryRemoveInvalidPrototypeDependentStub(target(),
} else if (TryRemoveInvalidPrototypeDependentStub(target(),
*object,
*name)) {
state = MONOMORPHIC_PROTOTYPE_FAILURE;
@@ -732,15 +758,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
case UNINITIALIZED:
case MONOMORPHIC_PROTOTYPE_FAILURE:
case PREMONOMORPHIC:
set_target(*code);
break;
case MONOMORPHIC:
if (code->ic_state() != MONOMORPHIC) {
Map* map = target()->FindFirstMap();
if (map != NULL) {
UpdateMegamorphicCache(map, *name, target());
}
}
set_target(*code);
break;
case MEGAMORPHIC: {
@@ -777,6 +795,13 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
Handle<String>::cast(key));
}
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
}
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
@@ -891,6 +916,13 @@ MaybeObject* LoadIC::Load(State state,
return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
}
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
}
// Named lookup in the object.
LookupResult lookup(isolate());
LookupForRead(object, name, &lookup);
@@ -955,11 +987,30 @@ bool IC::UpdatePolymorphicIC(State state,
MapHandleList receiver_maps;
CodeHandleList handlers;
int number_of_valid_maps;
int handler_to_overwrite = -1;
Handle<Map> new_receiver_map(receiver->map());
{
AssertNoAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
int number_of_maps = receiver_maps.length();
if (number_of_maps >= 4) return false;
number_of_valid_maps = number_of_maps;
for (int i = 0; i < number_of_maps; i++) {
Handle<Map> map = receiver_maps.at(i);
// Filter out deprecated maps to ensure their instances get migrated.
if (map->is_deprecated()) {
number_of_valid_maps--;
// If the receiver map is already in the polymorphic IC, this indicates
// there was a prototype chain failure. In that case, just overwrite the
// handler.
} else if (map.is_identical_to(new_receiver_map)) {
number_of_valid_maps--;
handler_to_overwrite = i;
}
}
if (number_of_valid_maps >= 4) return false;
// Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC.
// In that case, allow the IC to go back monomorphic.
@@ -969,14 +1020,16 @@ bool IC::UpdatePolymorphicIC(State state,
target()->FindAllCode(&handlers, receiver_maps.length());
}
if (!AddOneReceiverMapIfMissing(&receiver_maps,
Handle<Map>(receiver->map()))) {
return false;
number_of_valid_maps++;
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
} else {
receiver_maps.Add(new_receiver_map);
handlers.Add(code);
}
handlers.Add(code);
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
&receiver_maps, &handlers, name);
&receiver_maps, &handlers, number_of_valid_maps, name);
set_target(*ic);
return true;
}
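
Note: the update policy in prose: deprecated maps no longer count against the four-map budget, and a receiver map already present in the IC gets its handler overwritten in place (a prototype-chain failure) instead of being appended; the new List::Set further down exists for exactly that overwrite. A compact model of the bookkeeping (illustrative types):

#include <vector>

struct PolyIC {
  std::vector<const void*> maps;
  std::vector<const void*> handlers;
};

// Returns false when the IC should give up and go megamorphic.
bool UpdatePoly(PolyIC* ic, const void* new_map, const void* new_handler,
                bool (*is_deprecated)(const void*)) {
  int valid = 0;
  int overwrite = -1;
  for (int i = 0; i < static_cast<int>(ic->maps.size()); i++) {
    if (is_deprecated(ic->maps[i])) continue;   // does not count
    if (ic->maps[i] == new_map) overwrite = i;  // prototype failure
    else valid++;
  }
  if (valid >= 4) return false;
  if (overwrite >= 0) {
    ic->handlers[overwrite] = new_handler;      // handlers.Set(...)
  } else {
    ic->maps.push_back(new_map);
    ic->handlers.push_back(new_handler);
  }
  return true;
}
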
@@ -1063,16 +1116,9 @@ void IC::PatchCache(State state,
if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
break;
}
}
if (target()->type() != Code::NORMAL) {
if (target()->is_load_stub()) {
CopyICToMegamorphicCache(name);
} else {
Code* handler = target();
Map* map = handler->FindFirstMap();
if (map != NULL) {
UpdateMegamorphicCache(map, *name, handler);
}
}
}
@@ -1175,7 +1221,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
void IC::UpdateMegamorphicCache(Map* map, String* name, Code* code) {
void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
// Cache code holding map should be consistent with
// GenerateMonomorphicCacheProbe.
isolate()->stub_cache()->Set(name, map, code);
@@ -1195,7 +1241,8 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeLoadField(
name, receiver, holder, lookup->GetFieldIndex());
name, receiver, holder,
lookup->GetFieldIndex(), lookup->representation());
case CONSTANT_FUNCTION: {
Handle<JSFunction> constant(lookup->GetConstantFunction());
return isolate()->stub_cache()->ComputeLoadConstant(
@@ -1237,7 +1284,7 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
PropertyIndex lengthIndex =
PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
return isolate()->stub_cache()->ComputeLoadField(
name, receiver, holder, lengthIndex);
name, receiver, holder, lengthIndex, Representation::Tagged());
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1366,6 +1413,10 @@ MaybeObject* KeyedLoadIC::Load(State state,
}
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
@@ -1400,7 +1451,8 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeKeyedLoadField(
name, receiver, holder, lookup->GetFieldIndex());
name, receiver, holder,
lookup->GetFieldIndex(), lookup->representation());
case CONSTANT_FUNCTION: {
Handle<JSFunction> constant(lookup->GetConstantFunction(), isolate());
return isolate()->stub_cache()->ComputeKeyedLoadConstant(
@@ -1432,7 +1484,9 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
LookupResult* lookup) {
Handle<Object> value,
LookupResult* lookup,
IC::State* state) {
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
@@ -1444,9 +1498,10 @@ static bool LookupForWrite(Handle<JSObject> receiver,
receiver->LocalLookupRealNamedProperty(*name, lookup);
return lookup->IsFound() &&
!lookup->IsReadOnly() &&
lookup->CanHoldValue(value) &&
lookup->IsCacheable();
}
return true;
return lookup->CanHoldValue(value);
}
if (lookup->IsPropertyCallbacks()) return true;
@@ -1464,8 +1519,25 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// chain check. This avoids a double lookup, but requires us to pass in the
// receiver when trying to fetch extra information from the transition.
receiver->map()->LookupTransition(*holder, *name, lookup);
return lookup->IsTransition() &&
!lookup->GetTransitionDetails(receiver->map()).IsReadOnly();
if (!lookup->IsTransition()) return false;
PropertyDetails target_details =
lookup->GetTransitionDetails(receiver->map());
if (target_details.IsReadOnly()) return false;
// If the value that's being stored does not fit in the field that the
// instance would transition to, create a new transition that fits the value.
// This has to be done before generating the IC, since that IC will embed the
// transition target.
// Ensure the instance and its map were migrated before trying to update the
// transition target.
ASSERT(!receiver->map()->is_deprecated());
if (!value->FitsRepresentation(target_details.representation())) {
Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
Map::GeneralizeRepresentation(
target, target->LastAdded(), value->OptimalRepresentation());
*state = MONOMORPHIC_PROTOTYPE_FAILURE;
}
return true;
}
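
Note: the new LookupForWrite contract: a store is cacheable only if the value fits the field's representation, and a transitioning store that does not fit first generalizes the transition target, then forces a repatch via MONOMORPHIC_PROTOTYPE_FAILURE so the IC embeds the updated map. The fitting rule itself is small; a sketch under the assumption that representations order as Smi < Double < Tagged:

enum class Rep { kSmi, kDouble, kTagged };

// Assumed simplification of Object::FitsRepresentation.
bool Fits(bool is_smi, bool is_number, Rep field) {
  switch (field) {
    case Rep::kSmi:    return is_smi;     // only small integers
    case Rep::kDouble: return is_number;  // any number, boxed or not
    case Rep::kTagged: return true;       // anything
  }
  return false;
}
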
@@ -1499,6 +1571,10 @@ MaybeObject* StoreIC::Store(State state,
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
@@ -1545,7 +1621,7 @@ MaybeObject* StoreIC::Store(State state,
}
LookupResult lookup(isolate());
if (LookupForWrite(receiver, name, &lookup)) {
if (LookupForWrite(receiver, name, value, &lookup, &state)) {
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
}
@@ -1954,6 +2030,9 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (miss_mode != MISS_FORCE_GENERIC) {
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
bool key_is_smi_like = key->IsSmi() ||
(FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
if (receiver->elements()->map() ==
@@ -2183,11 +2262,24 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
int new_unused = transition->unused_property_fields();
int new_size = old_storage->length() + new_unused + 1;
Object* result;
{ MaybeObject* maybe_result = old_storage->CopySize(new_size);
MaybeObject* maybe_result = old_storage->CopySize(new_size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* new_storage = FixedArray::cast(result);
new_storage->set(old_storage->length(), value);
Object* to_store = value;
if (FLAG_track_double_fields) {
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
if (details.representation().IsDouble()) {
MaybeObject* maybe_storage =
isolate->heap()->AllocateHeapNumber(value->Number());
if (!maybe_storage->To(&to_store)) return maybe_storage;
}
}
new_storage->set(old_storage->length(), to_store);
// Set the new property value and do the map transition.
object->set_properties(new_storage);
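
Note: when the transition's last descriptor has double representation, the raw number cannot be written into the properties array directly; the runtime boxes it in a fresh HeapNumber first so the field always holds a tagged, pointer-sized payload. The same shape in plain C++ (illustrative names, allocation-failure handling omitted):

#include <memory>

struct HeapNumberBox { double value; };

// Counterpart of the FLAG_track_double_fields branch above.
std::unique_ptr<HeapNumberBox> BoxForDoubleField(double number) {
  return std::make_unique<HeapNumberBox>(HeapNumberBox{number});
}
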
@@ -2229,6 +2321,24 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
}
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
NoHandleAllocation na(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
return Runtime::SetObjectProperty(isolate,
object,
key,
value,
NONE,
strict_mode);
}
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
NoHandleAllocation na(isolate);
ASSERT(args.length() == 3);

8
deps/v8/src/ic.h

@@ -45,6 +45,7 @@ namespace internal {
ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
ICU(StoreIC_Slow) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
ICU(KeyedStoreIC_MissForceGeneric) \
@@ -184,7 +185,7 @@ class IC {
Handle<JSObject> receiver,
Handle<String> name,
Handle<Code> code);
virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
virtual Handle<Code> megamorphic_stub() {
UNREACHABLE();
return Handle<Code>::null();
@@ -471,7 +472,7 @@ class KeyedLoadIC: public LoadIC {
virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name);
virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
private:
// Stub accessors.
@@ -504,6 +505,7 @@ class StoreIC: public IC {
}
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
@@ -620,7 +622,7 @@ class KeyedStoreIC: public StoreIC {
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name);
virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
virtual Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedStoreIC_Generic();

29
deps/v8/src/incremental-marking.cc

@@ -490,10 +490,16 @@ bool IncrementalMarking::WorthActivating() {
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
// Only start incremental marking in a safe state: 1) when expose GC is
// deactivated, 2) when incremental marking is turned on, 3) when we are
// currently not in a GC, and 4) when we are currently not serializing
// or deserializing the heap.
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
FLAG_incremental_marking_steps &&
heap_->gc_state() == Heap::NOT_IN_GC &&
!Serializer::enabled() &&
heap_->isolate()->IsInitialized() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
@@ -561,17 +567,21 @@ void IncrementalMarking::UncommitMarkingDeque() {
}
void IncrementalMarking::Start() {
void IncrementalMarking::Start(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start\n");
}
ASSERT(FLAG_incremental_marking);
ASSERT(FLAG_incremental_marking_steps);
ASSERT(state_ == STOPPED);
ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
ASSERT(!Serializer::enabled());
ASSERT(heap_->isolate()->IsInitialized());
ResetStepCounters();
if (heap_->IsSweepingComplete()) {
StartMarking(ALLOW_COMPACTION);
StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -860,6 +870,17 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
}
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
// TODO(hpayer): Let's play it safe for now, but compaction should in
// principle be possible.
Start(PREVENT_COMPACTION);
} else {
Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
}
}
void IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action) {
if (heap_->gc_state() != Heap::NOT_IN_GC ||
@@ -965,7 +986,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
PrintPID("Postponing speeding up marking until marking starts\n");
}
} else {
marking_speed_ += kMarkingSpeedAccellerationInterval;
marking_speed_ += kMarkingSpeedAccelleration;
marking_speed_ = static_cast<int>(
Min(kMaxMarkingSpeed,
static_cast<intptr_t>(marking_speed_ * 1.3)));

11
deps/v8/src/incremental-marking.h

@@ -75,7 +75,9 @@ class IncrementalMarking {
bool WorthActivating();
void Start();
enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
void Start(CompactionFlag flag = ALLOW_COMPACTION);
void Stop();
@@ -110,10 +112,7 @@ class IncrementalMarking {
static const intptr_t kMarkingSpeedAccelleration = 2;
static const intptr_t kMaxMarkingSpeed = 1000;
void OldSpaceStep(intptr_t allocated) {
Step(allocated * kFastMarking / kInitialMarkingSpeed,
GC_VIA_STACK_GUARD);
}
void OldSpaceStep(intptr_t allocated);
void Step(intptr_t allocated, CompletionAction action);
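
Note: the header change keeps every existing caller compiling: CompactionFlag moves from the private section into the public interface and Start() gains a defaulted parameter, so only OldSpaceStep has to spell out PREVENT_COMPACTION. A stand-alone illustration of that API shape:

#include <cstdio>

class MarkerModel {
 public:
  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
  void Start(CompactionFlag flag = ALLOW_COMPACTION) {
    std::printf("marking started, compaction %s\n",
                flag == ALLOW_COMPACTION ? "allowed" : "prevented");
  }
};

int main() {
  MarkerModel m;
  m.Start();                                 // pre-existing call sites
  m.Start(MarkerModel::PREVENT_COMPACTION);  // the new OldSpaceStep call
  return 0;
}
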
@@ -226,8 +225,6 @@ class IncrementalMarking {
void ResetStepCounters();
enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
void StartMarking(CompactionFlag flag);
void ActivateIncrementalWriteBarrier(PagedSpace* space);

63
deps/v8/src/json-parser.h

@@ -381,33 +381,23 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// First check whether there is a single expected transition. If so, try
// to parse it first.
bool follow_expected = false;
Handle<Map> target;
if (seq_ascii) {
key = JSObject::ExpectedTransitionKey(map);
follow_expected = !key.is_null() && ParseJsonString(key);
}
// If the expected transition hits, follow it.
if (follow_expected) {
map = JSObject::ExpectedTransitionTarget(map);
target = JSObject::ExpectedTransitionTarget(map);
} else {
// If the expected transition failed, parse an internalized string and
// try to find a matching transition.
key = ParseJsonInternalizedString();
if (key.is_null()) return ReportUnexpectedCharacter();
Handle<Map> target = JSObject::FindTransitionToField(map, key);
target = JSObject::FindTransitionToField(map, key);
// If a transition was found, follow it and continue.
if (!target.is_null()) {
map = target;
} else {
// If no transition was found, commit the intermediate state to the
// object and stop transitioning.
JSObject::TransitionToMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
json_object->FastPropertyAtPut(i, *properties[i]);
}
transitioning = false;
}
transitioning = !target.is_null();
}
if (c0_ != ':') return ReportUnexpectedCharacter();
@@ -415,8 +405,36 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
if (transitioning) {
int descriptor = map->NumberOfOwnDescriptors();
PropertyDetails details =
target->instance_descriptors()->GetDetails(descriptor);
Representation expected_representation = details.representation();
if (value->FitsRepresentation(expected_representation)) {
// If the target representation is double and the value is already
// double, use the existing box.
if (FLAG_track_double_fields &&
value->IsSmi() &&
expected_representation.IsDouble()) {
value = factory()->NewHeapNumber(
Handle<Smi>::cast(value)->value());
}
properties.Add(value, zone());
if (transitioning) continue;
map = target;
continue;
} else {
transitioning = false;
}
}
// Commit the intermediate state to the object and stop transitioning.
JSObject::AllocateStorageForMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
Handle<Object> value = properties[i];
json_object->FastPropertyAtPut(i, *value);
}
} else {
key = ParseJsonInternalizedString();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
@@ -435,10 +453,21 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// If we transitioned until the very end, transition the map now.
if (transitioning) {
JSObject::TransitionToMap(json_object, map);
JSObject::AllocateStorageForMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
json_object->FastPropertyAtPut(i, *properties[i]);
Handle<Object> value = properties[i];
// If the target representation is double and the value is already
// double, use the existing box.
if (FLAG_track_double_fields && value->IsSmi()) {
Representation representation =
map->instance_descriptors()->GetDetails(i).representation();
if (representation.IsDouble()) {
value = factory()->NewHeapNumber(
Handle<Smi>::cast(value)->value());
}
}
json_object->FastPropertyAtPut(i, *value);
}
}
}
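
Note: the rewritten loop buffers parsed values in properties while speculatively walking the map transition tree keyed by property names, and only falls back to committing storage once a value fails the expected field representation. A toy version of the transition walk (toy types, not v8's):

#include <map>
#include <string>

// Objects parsed from JSON with the same key sequence share one chain of
// transitions, so they end up with one shared map instead of dictionaries.
struct ShapeMap {
  std::map<std::string, ShapeMap*> transitions;
};

// Returns the next map, or nullptr when transitioning must stop and the
// buffered properties get committed via AllocateStorageForMap.
ShapeMap* FollowTransition(ShapeMap* current, const std::string& key) {
  auto it = current->transitions.find(key);
  return it == current->transitions.end() ? nullptr : it->second;
}
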

2
deps/v8/src/json-stringifier.h

@@ -644,7 +644,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
Handle<Object> property;
if (details.type() == FIELD && *map == object->map()) {
property = Handle<Object>(
object->FastPropertyAt(
object->RawFastPropertyAt(
map->instance_descriptors()->GetFieldIndex(i)),
isolate_);
} else {

7
deps/v8/src/list-inl.h

@@ -103,6 +103,13 @@ Vector<T> List<T, P>::AddBlock(T value, int count, P alloc) {
}
template<typename T, class P>
void List<T, P>::Set(int index, const T& elm) {
ASSERT(index >= 0 && index < length_);
data_[index] = elm;
}
template<typename T, class P>
void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
ASSERT(index >= 0 && index <= length_);

3
deps/v8/src/list.h

@@ -115,6 +115,9 @@ class List {
void InsertAt(int index, const T& element,
AllocationPolicy allocator = AllocationPolicy());
// Overwrites the element at the specific index.
void Set(int index, const T& element);
// Adds 'count' elements with the value 'value' and returns a
// vector that allows access to the elements. The vector is valid
// until the next change is made to this list.

56
deps/v8/src/lithium-allocator.cc

@@ -56,9 +56,11 @@ static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
}
UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
UsePosition::UsePosition(LifetimePosition pos,
LOperand* operand,
LOperand* hint)
: operand_(operand),
hint_(NULL),
hint_(hint),
pos_(pos),
next_(NULL),
requires_reg_(false),
@@ -138,6 +140,7 @@ LiveRange::LiveRange(int id, Zone* zone)
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
current_hint_operand_(NULL),
spill_operand_(new(zone) LOperand()),
spill_start_index_(kMaxInt) { }
@@ -227,13 +230,6 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) {
}
UsePosition* LiveRange::FirstPosWithHint() const {
UsePosition* pos = first_pos_;
while (pos != NULL && !pos->HasHint()) pos = pos->next();
return pos;
}
LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
@@ -375,7 +371,7 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
LifetimePosition start = Start();
LifetimePosition other_start = other->Start();
if (start.Value() == other_start.Value()) {
UsePosition* pos = FirstPosWithHint();
UsePosition* pos = first_pos();
if (pos == NULL) return false;
UsePosition* other_pos = other->first_pos();
if (other_pos == NULL) return true;
@@ -449,16 +445,19 @@ void LiveRange::AddUseInterval(LifetimePosition start,
}
UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
void LiveRange::AddUsePosition(LifetimePosition pos,
LOperand* operand,
LOperand* hint,
Zone* zone) {
LAllocator::TraceAlloc("Add to live range %d use position %d\n",
id_,
pos.Value());
UsePosition* use_pos = new(zone) UsePosition(pos, operand);
UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
UsePosition* prev_hint = NULL;
UsePosition* prev = NULL;
UsePosition* current = first_pos_;
while (current != NULL && current->pos().Value() < pos.Value()) {
prev_hint = current->HasHint() ? current : prev_hint;
prev = current;
current = current->next();
}
@@ -471,7 +470,9 @@ UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
prev->next_ = use_pos;
}
return use_pos;
if (prev_hint == NULL && use_pos->HasHint()) {
current_hint_operand_ = hint;
}
}
@@ -624,13 +625,13 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
bool is_tagged) {
TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
ASSERT(operand->HasFixedPolicy());
if (operand->policy() == LUnallocated::FIXED_SLOT) {
operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
} else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
int reg_index = operand->fixed_index();
if (operand->HasFixedSlotPolicy()) {
operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::REGISTER, reg_index);
} else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
int reg_index = operand->fixed_index();
} else if (operand->HasFixedDoubleRegisterPolicy()) {
int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
} else {
UNREACHABLE();
@@ -725,14 +726,14 @@ void LAllocator::Define(LifetimePosition position,
if (range->IsEmpty() || range->Start().Value() > position.Value()) {
// Can happen if there is a definition without use.
range->AddUseInterval(position, position.NextInstruction(), zone_);
range->AddUsePosition(position.NextInstruction(), NULL, zone_);
range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone_);
} else {
range->ShortenTo(position);
}
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
range->AddUsePosition(position, unalloc_operand, hint, zone_);
}
}
@@ -745,7 +746,7 @@ void LAllocator::Use(LifetimePosition block_start,
if (range == NULL) return;
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
range->AddUsePosition(position, unalloc_operand, hint, zone_);
}
range->AddUseInterval(block_start, position, zone_);
}
@@ -845,7 +846,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
} else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
} else if (cur_input->HasWritableRegisterPolicy()) {
// The live range of writable input registers always goes until the end
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
@@ -924,7 +925,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (phi != NULL) {
// This is a phi resolving move.
if (!phi->block()->IsLoopHeader()) {
hint = LiveRangeFor(phi->id())->FirstHint();
hint = LiveRangeFor(phi->id())->current_hint_operand();
}
} else {
if (to->IsUnallocated()) {
@@ -1812,10 +1813,8 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
UsePosition* hinted_use = current->FirstPosWithHint();
if (hinted_use != NULL) {
LOperand* hint = hinted_use->hint();
if (hint->IsRegister() || hint->IsDoubleRegister()) {
LOperand* hint = current->FirstHint();
if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
int register_index = hint->index();
TraceAlloc(
"Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
@@ -1833,7 +1832,6 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
return true;
}
}
}
// Find the register which stays free for the longest time.
int reg = 0;

22
deps/v8/src/lithium-allocator.h

@@ -244,13 +244,12 @@ class UseInterval: public ZoneObject {
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
UsePosition(LifetimePosition pos, LOperand* operand);
UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
void set_hint(LOperand* hint) { hint_ = hint; }
bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
@@ -261,9 +260,9 @@ class UsePosition: public ZoneObject {
private:
void set_next(UsePosition* next) { next_ = next; }
LOperand* operand_;
LOperand* hint_;
LifetimePosition pos_;
LOperand* const operand_;
LOperand* const hint_;
LifetimePosition const pos_;
UsePosition* next_;
bool requires_reg_;
bool register_beneficial_;
@@ -329,10 +328,14 @@ class LiveRange: public ZoneObject {
return assigned_register_ != kInvalidAssignment;
}
bool IsSpilled() const { return spilled_; }
UsePosition* FirstPosWithHint() const;
LOperand* current_hint_operand() const {
ASSERT(current_hint_operand_ == FirstHint());
return current_hint_operand_;
}
LOperand* FirstHint() const {
UsePosition* pos = FirstPosWithHint();
UsePosition* pos = first_pos_;
while (pos != NULL && !pos->HasHint()) pos = pos->next();
if (pos != NULL) return pos->hint();
return NULL;
}
@@ -367,8 +370,9 @@ class LiveRange: public ZoneObject {
void AddUseInterval(LifetimePosition start,
LifetimePosition end,
Zone* zone);
UsePosition* AddUsePosition(LifetimePosition pos,
void AddUsePosition(LifetimePosition pos,
LOperand* operand,
LOperand* hint,
Zone* zone);
// Shorten the most recently added interval by setting a new start.
@@ -398,6 +402,8 @@ class LiveRange: public ZoneObject {
// This is used as a cache, it doesn't affect correctness.
mutable UseInterval* current_interval_;
UsePosition* last_processed_use_;
// This is used as a cache, it's invalid outside of BuildLiveRanges.
LOperand* current_hint_operand_;
LOperand* spill_operand_;
int spill_start_index_;
};

15
deps/v8/src/lithium.cc

@@ -58,24 +58,27 @@ void LOperand::PrintTo(StringStream* stream) {
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
stream->Add("v%d", unalloc->virtual_register());
switch (unalloc->policy()) {
if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
stream->Add("(=%dS)", unalloc->fixed_slot_index());
break;
}
switch (unalloc->extended_policy()) {
case LUnallocated::NONE:
break;
case LUnallocated::FIXED_REGISTER: {
int reg_index = unalloc->fixed_register_index();
const char* register_name =
Register::AllocationIndexToString(unalloc->fixed_index());
Register::AllocationIndexToString(reg_index);
stream->Add("(=%s)", register_name);
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
int reg_index = unalloc->fixed_register_index();
const char* double_register_name =
DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
DoubleRegister::AllocationIndexToString(reg_index);
stream->Add("(=%s)", double_register_name);
break;
}
case LUnallocated::FIXED_SLOT:
stream->Add("(=%dS)", unalloc->fixed_index());
break;
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;

181
deps/v8/src/lithium.h

@@ -92,12 +92,16 @@ class LOperand: public ZoneObject {
class LUnallocated: public LOperand {
public:
enum Policy {
enum BasicPolicy {
FIXED_SLOT,
EXTENDED_POLICY
};
enum ExtendedPolicy {
NONE,
ANY,
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT
@@ -117,99 +121,152 @@ class LUnallocated: public LOperand {
USED_AT_END
};
explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
Initialize(policy, 0, USED_AT_END);
explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
}
LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
ASSERT(policy == FIXED_SLOT);
value_ |= BasicPolicyField::encode(policy);
value_ |= index << FixedSlotIndexField::kShift;
ASSERT(this->fixed_slot_index() == index);
}
LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
Initialize(policy, fixed_index, USED_AT_END);
LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
value_ |= FixedRegisterField::encode(index);
}
LUnallocated(ExtendedPolicy policy, Lifetime lifetime)
: LOperand(UNALLOCATED, 0) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(lifetime);
}
LUnallocated* CopyUnconstrained(Zone* zone) {
LUnallocated* result = new(zone) LUnallocated(ANY);
result->set_virtual_register(virtual_register());
return result;
}
LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
Initialize(policy, 0, lifetime);
static LUnallocated* cast(LOperand* op) {
ASSERT(op->IsUnallocated());
return reinterpret_cast<LUnallocated*>(op);
}
// The superclass has a KindField. Some policies have a signed fixed
// index in the upper bits.
static const int kPolicyWidth = 3;
static const int kLifetimeWidth = 1;
static const int kVirtualRegisterWidth = 15;
// The encoding used for LUnallocated operands depends on the policy that is
// stored within the operand. The FIXED_SLOT policy uses a compact encoding
// because it accommodates a larger payload.
//
// For FIXED_SLOT policy:
// +------------------------------------------+
// | slot_index | vreg | 0 | 001 |
// +------------------------------------------+
//
// For all other (extended) policies:
// +------------------------------------------+
// | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
// +------------------------------------------+ P ... Policy
//
// The slot index is a signed value which requires us to decode it manually
// instead of using the BitField utility class.
static const int kPolicyShift = kKindFieldWidth;
static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
static const int kFixedIndexShift =
kVirtualRegisterShift + kVirtualRegisterWidth;
static const int kFixedIndexWidth = 32 - kFixedIndexShift;
STATIC_ASSERT(kFixedIndexWidth > 5);
// The superclass has a KindField.
STATIC_ASSERT(kKindFieldWidth == 3);
class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
// BitFields for all unallocated operands.
class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
class LifetimeField
: public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
};
// BitFields specific to BasicPolicy::FIXED_SLOT.
class FixedSlotIndexField : public BitField<int, 22, 10> {};
class VirtualRegisterField
: public BitField<unsigned,
kVirtualRegisterShift,
kVirtualRegisterWidth> {
};
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
class LifetimeField : public BitField<Lifetime, 25, 1> {};
class FixedRegisterField : public BitField<int, 26, 6> {};
static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
static const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;
static const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));
static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return policy() == ANY;
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == ANY;
}
bool HasFixedPolicy() const {
return policy() == FIXED_REGISTER ||
policy() == FIXED_DOUBLE_REGISTER ||
policy() == FIXED_SLOT;
return basic_policy() == FIXED_SLOT ||
extended_policy() == FIXED_REGISTER ||
extended_policy() == FIXED_DOUBLE_REGISTER;
}
bool HasRegisterPolicy() const {
return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
return basic_policy() == EXTENDED_POLICY && (
extended_policy() == WRITABLE_REGISTER ||
extended_policy() == MUST_HAVE_REGISTER);
}
bool HasSameAsInputPolicy() const {
return policy() == SAME_AS_FIRST_INPUT;
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == SAME_AS_FIRST_INPUT;
}
bool HasFixedSlotPolicy() const {
return basic_policy() == FIXED_SLOT;
}
Policy policy() const { return PolicyField::decode(value_); }
void set_policy(Policy policy) {
value_ = PolicyField::update(value_, policy);
bool HasFixedRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_REGISTER;
}
int fixed_index() const {
return static_cast<int>(value_) >> kFixedIndexShift;
bool HasFixedDoubleRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_DOUBLE_REGISTER;
}
bool HasWritableRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == WRITABLE_REGISTER;
}
int virtual_register() const {
return VirtualRegisterField::decode(value_);
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
BasicPolicy basic_policy() const {
return BasicPolicyField::decode(value_);
}
void set_virtual_register(unsigned id) {
value_ = VirtualRegisterField::update(value_, id);
// [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
ExtendedPolicy extended_policy() const {
ASSERT(basic_policy() == EXTENDED_POLICY);
return ExtendedPolicyField::decode(value_);
}
LUnallocated* CopyUnconstrained(Zone* zone) {
LUnallocated* result = new(zone) LUnallocated(ANY);
result->set_virtual_register(virtual_register());
return result;
// [fixed_slot_index]: Only for FIXED_SLOT.
int fixed_slot_index() const {
ASSERT(HasFixedSlotPolicy());
return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
}
static LUnallocated* cast(LOperand* op) {
ASSERT(op->IsUnallocated());
return reinterpret_cast<LUnallocated*>(op);
// [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
int fixed_register_index() const {
ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
return FixedRegisterField::decode(value_);
}
bool IsUsedAtStart() {
return LifetimeField::decode(value_) == USED_AT_START;
// [virtual_register]: The virtual register ID for this operand.
int virtual_register() const {
return VirtualRegisterField::decode(value_);
}
void set_virtual_register(unsigned id) {
value_ = VirtualRegisterField::update(value_, id);
}
private:
void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
value_ |= PolicyField::encode(policy);
value_ |= LifetimeField::encode(lifetime);
value_ |= fixed_index << kFixedIndexShift;
ASSERT(this->fixed_index() == fixed_index);
// [lifetime]: Only for non-FIXED_SLOT.
bool IsUsedAtStart() {
ASSERT(basic_policy() == EXTENDED_POLICY);
return LifetimeField::decode(value_) == USED_AT_START;
}
};
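
Note: a stand-alone model of the encoding documented in the comment block above. BitField mirrors v8's utility template, and the positions match the fields declared in this class (the kind bits 0-2 are ignored for brevity). The fixed slot index is signed and lives in the top bits, so it is encoded with a plain shift and decoded with an arithmetic right shift, exactly as fixed_slot_index() does:

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static constexpr int kShift = shift;
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static constexpr uint32_t encode(T v) {
    return static_cast<uint32_t>(v) << shift;
  }
  static constexpr T decode(uint32_t x) {
    return static_cast<T>((x & kMask) >> shift);
  }
};

enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
using BasicPolicyField     = BitField<BasicPolicy, 3, 1>;
using VirtualRegisterField = BitField<unsigned, 4, 18>;
using FixedSlotIndexField  = BitField<int, 22, 10>;

int main() {
  uint32_t v = BasicPolicyField::encode(FIXED_SLOT) |
               VirtualRegisterField::encode(7) |
               (static_cast<uint32_t>(-3) << FixedSlotIndexField::kShift);
  assert((static_cast<int32_t>(v) >> FixedSlotIndexField::kShift) == -3);
  assert(VirtualRegisterField::decode(v) == 7u);
  return 0;
}
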

2
deps/v8/src/macros.py

@@ -116,7 +116,7 @@ macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === '__ArrayBuffer');
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);

10
deps/v8/src/messages.js

@@ -101,16 +101,24 @@ var kMessages = {
observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
parameterless_typed_array_constr:
["%0"," constructor should have at least one argument."],
not_typed_array: ["this is not a typed array."],
invalid_argument: ["invalid_argument"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
invalid_typed_array_offset: ["Start offset is too large"],
invalid_typed_array_length: ["Length is too large"],
invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
typed_array_set_source_too_large:
["Source is too large"],
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
// SyntaxError
unable_to_parse: ["Parse error"],
paren_in_arg_string: ["Function arg string contains parenthesis"],
not_isvar: ["builtin %IS_VAR: not a variable"],
single_function_literal: ["Single function literal required"],
invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
illegal_break: ["Illegal break statement"],

87
deps/v8/src/mips/code-stubs-mips.cc

@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
#include "builtins-decls.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
@@ -74,6 +75,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -116,9 +139,10 @@ static void InitializeArrayConstructorDescriptor(
int constant_stack_parameter_count) {
// register state
// a0 -- number of arguments
// a1 -- function
// a2 -- type info cell with elements kind
static Register registers[] = { a2 };
descriptor->register_param_count_ = 1;
static Register registers[] = { a1, a2 };
descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// The stack parameter count needs the constructor pointer and a single argument.
descriptor->stack_parameter_count_ = &a0;
@@ -4099,12 +4123,6 @@ Register InstanceofStub::left() { return a0; }
Register InstanceofStub::right() { return a1; }
void LoadFieldStub::Generate(MacroAssembler* masm) {
StubCompiler::DoGenerateFastPropertyLoad(masm, v0, reg_, inobject_, index_);
__ Ret();
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -5103,6 +5121,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
LAST_FAST_ELEMENTS_KIND);
__ JumpIfNotSmi(a3, &miss);
__ Branch(&miss, gt, a3, Operand(terminal_kind_sentinel));
// Make sure the function is the Array() function
__ LoadArrayFunction(a3);
@@ -6300,25 +6319,53 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
Label skip_write_barrier, after_writing;
ExternalReference high_promotion_mode = ExternalReference::
new_space_high_promotion_mode_active_address(masm->isolate());
__ li(t0, Operand(high_promotion_mode));
__ lw(t0, MemOperand(t0, 0));
__ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
__ mov(t3, v0);
__ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
__ RecordWriteField(t3,
ConsString::kFirstOffset,
a0,
t0,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
__ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
__ RecordWriteField(t3,
ConsString::kSecondOffset,
a1,
t0,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
__ jmp(&after_writing);
__ bind(&skip_write_barrier);
__ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
__ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
__ bind(&after_writing);
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
// to contain only ASCII characters.
// to contain only one-byte characters.
// t0: first instance type.
// t1: second instance type.
// Branch to if _both_ instances have kAsciiDataHintMask set.
__ And(at, t0, Operand(kAsciiDataHintMask));
// Branch to ascii_data if _both_ instances have kOneByteDataHintMask set.
__ And(at, t0, Operand(kOneByteDataHintMask));
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
__ Xor(t0, t0, Operand(t1));
STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
__ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
__ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
__ Branch(&ascii_data, eq, t0,
Operand(kOneByteStringTag | kAsciiDataHintTag));
Operand(kOneByteStringTag | kOneByteDataHintTag));
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
@@ -7181,6 +7228,9 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
// StringAddStub::Generate
{ REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
{ REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7703,13 +7753,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
__ lw(a3, FieldMemOperand(a2, kPointerSize));
// There is no info if the call site went megamorphic either
// TODO(mvstanton): Really? I thought if it was the array function that
// the cell wouldn't get stamped as megamorphic.
__ Branch(&no_info, eq, a3,
Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
__ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
__ JumpIfNotSmi(a3, &no_info);
__ SmiUntag(a3);
__ jmp(&switch_ready);
__ bind(&no_info);

9
deps/v8/src/mips/debug-mips.cc

@@ -236,6 +236,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- a0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-mips.cc).
// ----------- S t a t e -------------

75
deps/v8/src/mips/full-codegen-mips.cc

@@ -1592,7 +1592,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
expr->depth() > 1) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
@ -1943,11 +1944,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label resume;
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&resume, ne, result_register(), Operand(at));
__ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
// TODO(wingo): Box into { value: VALUE, done: false }.
}
EmitReturnIteratorResult(false);
} else {
__ pop(result_register());
EmitReturnSequence();
}
__ bind(&resume);
context()->Plug(result_register());
@ -1959,18 +1961,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ sw(a1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
__ pop(result_register());
// TODO(wingo): Box into { value: VALUE, done: true }.
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
EmitReturnSequence();
EmitReturnIteratorResult(true);
break;
}
@ -2057,7 +2048,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Subu(a3, a3, Operand(1));
__ Branch(&call_resume, lt, a3, Operand(zero_reg));
__ push(a2);
__ b(&push_operand_holes);
__ Branch(&push_operand_holes);
__ bind(&call_resume);
__ push(a1);
__ push(result_register());
@ -2076,6 +2067,56 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
__ Allocate(map->instance_size(), a0, a2, a3, &gc_required, TAG_OBJECT);
__ bind(&allocated);
__ li(a1, Operand(map));
__ pop(a2);
__ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
__ li(t0, Operand(isolate()->factory()->empty_fixed_array()));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
__ sw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
__ sw(t0, FieldMemOperand(a0, JSObject::kPropertiesOffset));
__ sw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
__ sw(a2,
FieldMemOperand(a0, JSGeneratorObject::kResultValuePropertyOffset));
__ sw(a3,
FieldMemOperand(a0, JSGeneratorObject::kResultDonePropertyOffset));
// Only the value field needs a write barrier, as the other values are in the
// root set.
__ RecordWriteField(a0, JSGeneratorObject::kResultValuePropertyOffset,
a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
if (done) {
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
}
__ mov(result_register(), a0);
EmitReturnSequence();
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ lw(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&allocated);
}
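EmitReturnIteratorResult builds the { value, done } iterator object inline: five pointer-size slots, with empty_fixed_array for both backing stores and a boolean root for done, so only the value slot can point into new space and needs a write barrier. A compile-checkable sketch of the layout implied by the stores above (struct and field names mine):

    #include <cstddef>
    // Mirrors the five stores in the hunk; on 32-bit MIPS kPointerSize is 4.
    struct IteratorResultLayout {
      void* map;         // native_context()->generator_result_map()
      void* properties;  // empty_fixed_array (root set, no barrier)
      void* elements;    // empty_fixed_array (root set, no barrier)
      void* value;       // popped operand; the only barriered store
      void* done;        // true/false root object
    };
    static_assert(sizeof(IteratorResultLayout) == 5 * sizeof(void*),
                  "matches ASSERT_EQ(map->instance_size(), 5 * kPointerSize)");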
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();

19
deps/v8/src/mips/ic-mips.cc

@ -1440,6 +1440,25 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
}
void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a2 : key
// -- a1 : receiver
// -- ra : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(a1, a2, a0);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value

95
deps/v8/src/mips/lithium-codegen-mips.cc

@ -91,6 +91,10 @@ void LCodeGen::FinishCode(Handle<Code> code) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
for (int i = 0 ; i < transition_maps_.length(); i++) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
}
}
@ -2684,13 +2688,20 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
if (instr->hydrogen()->representation().IsDouble()) {
DoubleRegister result = ToDoubleRegister(instr->result());
__ ldc1(result, FieldMemOperand(object, offset));
return;
}
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
__ lw(result, FieldMemOperand(object, offset));
} else {
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
__ lw(result, FieldMemOperand(result, offset));
}
}
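With FLAG_track_double_fields, DoLoadNamedField gains a third shape: when the field's representation is Double, the raw IEEE payload at the offset is loaded straight into an FPU register with ldc1, and the tagged in-object/properties-array paths are skipped. A plain-C++ model of that raw load (memcpy standing in for ldc1):

    #include <cstring>
    // Sketch: read the unboxed double payload at a field offset.
    double LoadRawDoubleField(const char* object_base, int field_offset) {
      double result;
      std::memcpy(&result, object_base + field_offset, sizeof(result));
      return result;
    }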
@ -2835,38 +2846,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->object());
Register scratch = scratch0();
__ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done, fail;
__ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
__ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
__ Branch(&done, eq, scratch, Operand(at));
// |scratch| still contains |input|'s map.
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ Ext(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ Branch(&fail, lt, scratch,
Operand(GetInitialFastElementsKind()));
__ Branch(&done, le, scratch,
Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ Branch(&fail, lt, scratch,
Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ Branch(&done, le, scratch,
Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ bind(&fail);
__ Abort("Check for fast or external elements failed.");
__ bind(&done);
}
}
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@ -3891,8 +3870,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ li(a0, Operand(instr->arity()));
__ li(a2, Operand(instr->hydrogen()->property_cell()));
Object* cell_value = instr->hydrogen()->property_cell()->value();
ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@ -3919,15 +3897,34 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Representation representation = instr->representation();
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
int offset = instr->offset();
ASSERT(!object.is(value));
Handle<Map> transition = instr->transition();
if (!instr->transition().is_null()) {
__ li(scratch, Operand(instr->transition()));
if (FLAG_track_fields && representation.IsSmi()) {
Register value = ToRegister(instr->value());
__ SmiTagCheckOverflow(value, value, scratch);
if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(instr->is_in_object());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
DoubleRegister value = ToDoubleRegister(instr->value());
__ sdc1(value, FieldMemOperand(object, offset));
return;
}
if (!transition.is_null()) {
if (transition->CanBeDeprecated()) {
transition_maps_.Add(transition, info()->zone());
}
__ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@ -3944,6 +3941,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
Register value = ToRegister(instr->value());
ASSERT(!object.is(value));
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@ -4811,6 +4810,8 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@ -5150,7 +5151,6 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
@ -5159,8 +5159,16 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ mov(result, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(size, size);
if (instr->size()->IsRegister()) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ SmiTag(size);
__ push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
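DoDeferredAllocate now handles a constant size operand: a register size is asserted distinct from result, smi-tagged and pushed, while a constant is pushed directly as Smi::FromInt(size); either way the runtime allocator receives one smi argument. A small model of the tagging, assuming 31-bit smis (one tag bit) as on 32-bit MIPS:

    #include <cstdint>
    // Sketch: both paths push the same smi encoding of the byte size.
    int32_t SmiTag(int32_t value) { return value << 1; }
    int32_t SizeArgToPush(bool is_constant, int32_t constant_size,
                          int32_t register_value) {
      return SmiTag(is_constant ? constant_size : register_value);
    }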
@ -5246,7 +5254,8 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Pick the right runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
if (instr->hydrogen()->depth() > 1) {
if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
instr->hydrogen()->depth() > 1) {
__ Push(a3, a2, a1, a0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||

2
deps/v8/src/mips/lithium-codegen-mips.h

@ -56,6 +56,7 @@ class LCodeGen BASE_EMBEDDED {
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@ -416,6 +417,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;

60
deps/v8/src/mips/lithium-mips.cc

@ -552,6 +552,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
LOperand* LChunkBuilder::UseConstant(HValue* value) {
return chunk_->DefineConstantOperand(HConstant::cast(value));
}
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@ -672,7 +677,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
return NULL;
vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@ -1301,8 +1306,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@ -1397,15 +1402,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left;
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
left = UseRegister(instr->LeastConstantOperand());
left = UseRegister(instr->BetterLeftOperand());
temp = TempRegister();
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
left = UseRegisterAtStart(instr->BetterLeftOperand());
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
@ -1470,8 +1475,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@ -1502,8 +1507,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
left = UseRegisterAtStart(instr->LeastConstantOperand());
right = UseOrConstantAtStart(instr->MostConstantOperand());
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@ -1988,8 +1993,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(
new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
LOperand* obj = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@ -2024,12 +2029,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@ -2194,14 +2193,25 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* val;
if (needs_write_barrier ||
(FLAG_track_fields && instr->field_representation().IsSmi())) {
val = UseTempRegister(instr->value());
} else if (FLAG_track_double_fields &&
instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
}
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
return new(zone()) LStoreNamedField(obj, val, temp);
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
if (FLAG_track_fields && instr->field_representation().IsSmi()) {
return AssignEnvironment(result);
}
return result;
}
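The operand policy in DoStoreNamedField now keys off the field representation: smi fields tag the value in place (so it needs a temp register, and the instruction gets an environment for the possible deopt), double fields only read the value (register at start), and everything else keeps the old rules. A sketch of the selection, with enum and function names of my own:

    // Sketch of the LOperand policy chosen per field representation.
    enum class FieldRep { Tagged, Smi, Double };
    const char* ValueOperandPolicy(bool needs_write_barrier, FieldRep rep) {
      if (needs_write_barrier || rep == FieldRep::Smi)
        return "UseTempRegister";     // value gets clobbered (barrier/tagging)
      if (rep == FieldRep::Double)
        return "UseRegisterAtStart";  // read once, stored via sdc1
      return "UseRegister";
    }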
@ -2253,7 +2263,9 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = UseTempRegister(instr->size());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
@ -2315,7 +2327,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
if (spill_index > LUnallocated::kMaxFixedIndex) {
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}

19
deps/v8/src/mips/lithium-mips.h

@ -122,7 +122,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@ -1521,18 +1520,6 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
@ -2089,6 +2076,9 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
}
};
@ -2722,6 +2712,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
// An input operand in a constant operand.
MUST_USE_RESULT LOperand* UseConstant(HValue* value);
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);

42
deps/v8/src/mips/macro-assembler-mips.cc

@ -3135,8 +3135,34 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
Label allocate_new_space, install_map;
AllocationFlags flags = TAG_OBJECT;
ExternalReference high_promotion_mode = ExternalReference::
new_space_high_promotion_mode_active_address(isolate());
li(scratch1, Operand(high_promotion_mode));
lw(scratch1, MemOperand(scratch1, 0));
Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
jmp(&install_map);
bind(&allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
flags);
bind(&install_map);
InitializeNewString(result,
length,
Heap::kConsAsciiStringMapRootIndex,
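AllocateAsciiConsString mirrors the stub change: under high promotion mode it allocates the cons string pretenured by OR-ing PRETENURE_OLD_POINTER_SPACE into the allocation flags, otherwise it takes the plain new-space path, and both branches meet at install_map. A sketch of the flag composition; the bit values are placeholders, only the OR is taken from the hunk:

    // Sketch: pretenuring is just one extra allocation flag bit.
    typedef int AllocationFlags;
    const AllocationFlags TAG_OBJECT = 1 << 0;                   // assumed bit
    const AllocationFlags PRETENURE_OLD_POINTER_SPACE = 1 << 1;  // assumed bit
    AllocationFlags ConsAllocFlags(bool high_promotion_mode_active) {
      AllocationFlags flags = TAG_OBJECT;
      if (high_promotion_mode_active)
        flags = static_cast<AllocationFlags>(
            flags | PRETENURE_OLD_POINTER_SPACE);
      return flags;
    }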
@ -5135,6 +5161,18 @@ void MacroAssembler::CheckPageFlag(
}
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
li(scratch, Operand(map));
lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
Branch(if_deprecated, ne, scratch, Operand(zero_reg));
}
}
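CheckMapDeprecated is emitted only for maps that can deprecate at all; it loads bit_field3 (a smi) and tests the Deprecated bit mask, branching to if_deprecated when set. The equivalent predicate in plain C++ (the smi encoding of the mask is handled by Smi::FromInt in the hunk):

    #include <cstdint>
    // Sketch: a map is deprecated iff the Deprecated bit of bit_field3 is set.
    bool MapIsDeprecated(uint32_t bit_field3, uint32_t deprecated_mask) {
      return (bit_field3 & deprecated_mask) != 0;
    }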
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,

4
deps/v8/src/mips/macro-assembler-mips.h

@ -325,6 +325,10 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* condition_met);
void CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfNotInNewSpace(Register object,

138
deps/v8/src/mips/stub-cache-mips.cc

@ -307,11 +307,13 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
bool inobject,
int index) {
int index,
Representation representation) {
ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@ -442,8 +444,10 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss_label,
Label* miss_restore_name) {
Label* miss_restore_name,
Label* slow) {
// a0 : value.
Label exit;
@ -456,6 +460,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@ -471,7 +484,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, miss_restore_name);
scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@ -490,6 +503,30 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
}
Register storage_reg = name_reg;
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
__ mtc1(scratch1, f6);
__ cvt_d_w(f4, f6);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
miss_restore_name, DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
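For a transition to a double field, the incoming value is normalized into a freshly allocated HeapNumber (storage_reg): a smi is untagged and converted with mtc1/cvt.d.w, a heap number has its payload read with ldc1, and either way sdc1 writes the result into the box. The conversion, modeled in C++:

    #include <cstdint>
    // Sketch: both tagged shapes end up as one raw double for the new box.
    double NormalizeForDoubleField(bool value_is_smi, int32_t smi_value,
                                   double heap_number_payload) {
      return value_is_smi ? static_cast<double>(smi_value)  // mtc1 + cvt.d.w
                          : heap_number_payload;            // ldc1
    }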
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@ -517,7 +554,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
name_reg,
scratch2,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@ -535,33 +572,53 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
if (FLAG_track_double_fields && representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
}
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch1,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (FLAG_track_double_fields && representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sw(value_reg, FieldMemOperand(scratch1, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
}
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
offset,
@ -570,6 +627,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
}
}
// Return the value (register v0).
ASSERT(value_reg.is(a0));
@ -615,12 +673,51 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
__ lw(scratch1, FieldMemOperand(receiver_reg, offset));
} else {
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ lw(scratch1, FieldMemOperand(scratch1, offset));
}
// Store the value into the storage.
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch2, value_reg);
__ mtc1(scratch2, f6);
__ cvt_d_w(f4, f6);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
miss_label, DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
// Return the value (register v0).
ASSERT(value_reg.is(a0));
__ mov(v0, a0);
__ Ret();
return;
}
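Unlike the transition path, GenerateStoreField writes into storage that already exists: the field already holds a HeapNumber, so the new payload is stored into it in place with sdc1 and no allocation (and no write barrier) is needed. In effect:

    // Sketch: same-map double stores mutate the existing box in place.
    void UpdateBoxedDouble(double* heap_number_payload, double new_value) {
      *heap_number_payload = new_value;  // sdc1 into HeapNumber::kValueOffset
    }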
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@ -633,6 +730,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
scratch1,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@ -641,6 +739,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ sw(value_reg, FieldMemOperand(scratch1, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@ -654,6 +753,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
}
}
// Return the value (register v0).
ASSERT(value_reg.is(a0));
@ -1269,9 +1369,20 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex index) {
GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
__ Ret();
PropertyIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
} else {
KeyedLoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
}
}
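GenerateLoadField no longer inlines the property load; it normalizes the receiver and tail-calls a shared LoadFieldStub (or KeyedLoadFieldStub for keyed ICs), parameterized by whether the field is in-object, its translated index, and its representation. The stub key, sketched as a struct with my own names:

    // Sketch: the triple that distinguishes one field-load stub from another.
    struct FieldLoadStubKey {
      bool inobject;       // in the object proper vs. the properties array
      int index;           // field.translate(holder)
      int representation;  // Tagged / Smi / Double, an enum in V8 itself
    };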
@ -1501,7 +1612,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
@ -2935,18 +3047,24 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
Handle<Map> map = receiver_maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
eq, map_reg, Operand(receiver_maps->at(current)));
}
}
ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}

44
deps/v8/src/mksnapshot.cc

@ -32,6 +32,10 @@
#endif
#include <signal.h>
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#include "v8.h"
#include "bootstrapper.h"
@ -291,6 +295,18 @@ class BZip2Decompressor : public StartupDataDecompressor {
#endif
void DumpException(Handle<Message> message) {
String::Utf8Value message_string(message->Get());
String::Utf8Value message_line(message->GetSourceLine());
fprintf(stderr, "%s at line %d\n", *message_string, message->GetLineNumber());
fprintf(stderr, "%s\n", *message_line);
for (int i = 0; i <= message->GetEndColumn(); ++i) {
fprintf(stderr, "%c", i < message->GetStartColumn() ? ' ' : '^');
}
fprintf(stderr, "\n");
}
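DumpException centralizes the error reporting that mksnapshot previously hand-rolled inline (including the buggy `new char(len)` single-char allocation visible in the removed block further down): it prints the message with its line number, then the source line, then a caret run from the start column through the end column. The caret loop, isolated:

    #include <cstdio>
    // Sketch: spaces up to the start column, '^' through the end column.
    void PrintCaretSpan(int start_column, int end_column) {
      for (int i = 0; i <= end_column; ++i)
        std::fputc(i < start_column ? ' ' : '^', stderr);
      std::fputc('\n', stderr);
    }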
int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@ -312,13 +328,18 @@ int main(int argc, char** argv) {
}
#endif
i::Serializer::Enable();
Persistent<Context> context = v8::Context::New();
Isolate* isolate = Isolate::GetCurrent();
Persistent<Context> context;
{
HandleScope handle_scope(isolate);
context.Reset(isolate, Context::New(isolate));
}
if (context.IsEmpty()) {
fprintf(stderr,
"\nException thrown while compiling natives - see above.\n\n");
exit(1);
}
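This is the 3.19-era handle API migration: Context::New now takes an Isolate and returns a Local, so the long-lived handle is populated via Persistent::Reset inside a temporary HandleScope, exactly as the hunk does. A minimal sketch of the shape being adopted (assuming the v8.h of this release):

    #include <v8.h>
    // Sketch: initialize a Persistent from a scoped Local.
    void InitSnapshotContext(v8::Isolate* isolate,
                             v8::Persistent<v8::Context>* out) {
      v8::HandleScope handle_scope(isolate);
      out->Reset(isolate, v8::Context::New(isolate));
    }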
Isolate* isolate = context->GetIsolate();
if (i::FLAG_extra_code != NULL) {
context->Enter();
// Capture 100 frames if anything happens.
@ -350,27 +371,14 @@ int main(int argc, char** argv) {
TryCatch try_catch;
Local<Script> script = Script::Compile(source);
if (try_catch.HasCaught()) {
fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
fprintf(stderr, "Failure compiling '%s'\n", name);
DumpException(try_catch.Message());
exit(1);
}
script->Run();
if (try_catch.HasCaught()) {
fprintf(stderr, "Failure running '%s'\n", name);
Local<Message> message = try_catch.Message();
Local<String> message_string = message->Get();
Local<String> message_line = message->GetSourceLine();
int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
char* buf = new char(len);
message_string->WriteUtf8(buf);
fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
message_line->WriteUtf8(buf);
fprintf(stderr, "%s\n", buf);
int from = message->GetStartColumn();
int to = message->GetEndColumn();
int i;
for (i = 0; i < from; i++) fprintf(stderr, " ");
for ( ; i <= to; i++) fprintf(stderr, "^");
fprintf(stderr, "\n");
DumpException(try_catch.Message());
exit(1);
}
context->Exit();

8
deps/v8/src/object-observe.js

@ -138,7 +138,9 @@ function NotifyChange(type, object, name, oldValue) {
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
ObjectFreeze(changeRecord);
// TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
// slow.
// ObjectFreeze(changeRecord);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
}
@ -164,7 +166,9 @@ function ObjectNotifierNotify(changeRecord) {
%DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
READ_ONLY + DONT_DELETE);
}
ObjectFreeze(newRecord);
// TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
// slow.
// ObjectFreeze(newRecord);
EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
}

5
deps/v8/src/objects-debug.cc

@ -323,10 +323,6 @@ void Map::MapVerify() {
instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
CHECK_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
}
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
if (HasTransitionArray()) {
SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
@ -419,6 +415,7 @@ void JSGeneratorObject::JSGeneratorObjectVerify() {
VerifyObjectField(kReceiverOffset);
VerifyObjectField(kOperandStackOffset);
VerifyObjectField(kContinuationOffset);
VerifyObjectField(kStackHandlerIndexOffset);
}

134
deps/v8/src/objects-inl.h

@ -283,6 +283,16 @@ bool Object::HasValidElements() {
return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
}
MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
Representation representation,
PretenureFlag tenure) {
if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
return heap->AllocateHeapNumber(Number(), tenure);
}
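AllocateNewStorageFor is the boxing hook for field-representation tracking: values headed for a Double field get fresh HeapNumber storage (respecting the pretenure flag), everything else is returned unchanged. The decision, reduced to a predicate:

    // Sketch: new storage is needed only for tracked double fields.
    enum class Rep { None, Smi, Double, Tagged };
    bool NeedsNewStorage(bool track_double_fields, Rep target) {
      return track_double_fields && target == Rep::Double;
    }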
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
@ -357,12 +367,8 @@ bool String::IsTwoByteRepresentationUnderneath() {
bool String::HasOnlyOneByteChars() {
uint32_t type = map()->instance_type();
return (type & kOneByteDataHintMask) == kOneByteDataHintTag;
}
bool String::IsOneByteConvertible() {
return HasOnlyOneByteChars() || IsOneByteRepresentation();
return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
IsOneByteRepresentation();
}
@ -1513,22 +1519,7 @@ MaybeObject* JSObject::ResetElements() {
}
MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
map->NumberOfOwnDescriptors());
if (this->map()->unused_property_fields() == 0) {
int new_size = properties()->length() + map->unused_property_fields() + 1;
FixedArray* new_properties;
MaybeObject* maybe_properties = properties()->CopySize(new_size);
if (!maybe_properties->To(&new_properties)) return maybe_properties;
set_properties(new_properties);
}
set_map(map);
return this;
}
MaybeObject* JSObject::TransitionToMap(Map* map) {
MaybeObject* JSObject::AllocateStorageForMap(Map* map) {
ASSERT(this->map()->inobject_properties() == map->inobject_properties());
ElementsKind expected_kind = this->map()->elements_kind();
if (map->elements_kind() != expected_kind) {
@ -1549,6 +1540,14 @@ MaybeObject* JSObject::TransitionToMap(Map* map) {
}
MaybeObject* JSObject::MigrateInstance() {
// Converting any field to the most specific type will cause the
// GeneralizeFieldRepresentation algorithm to create the most general existing
// transition that matches the object. This achieves what is needed.
return GeneralizeFieldRepresentation(0, Representation::Smi());
}
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
AssertNoAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
@ -1710,10 +1709,17 @@ void JSObject::SetInternalField(int index, Smi* value) {
}
MaybeObject* JSObject::FastPropertyAt(Representation representation,
int index) {
Object* raw_value = RawFastPropertyAt(index);
return raw_value->AllocateNewStorageFor(GetHeap(), representation);
}
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
Object* JSObject::FastPropertyAt(int index) {
Object* JSObject::RawFastPropertyAt(int index) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@ -1726,7 +1732,7 @@ Object* JSObject::FastPropertyAt(int index) {
}
Object* JSObject::FastPropertyAtPut(int index, Object* value) {
void JSObject::FastPropertyAtPut(int index, Object* value) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@ -1737,7 +1743,6 @@ Object* JSObject::FastPropertyAtPut(int index, Object* value) {
ASSERT(index < properties()->length());
properties()->set(index, value);
}
return value;
}
@ -2277,6 +2282,23 @@ void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
}
void DescriptorArray::SetRepresentation(int descriptor_index,
Representation representation) {
ASSERT(!representation.IsNone());
PropertyDetails details = GetDetails(descriptor_index);
set(ToDetailsIndex(descriptor_index),
details.CopyWithRepresentation(representation).AsSmi());
}
void DescriptorArray::InitializeRepresentations(Representation representation) {
int length = number_of_descriptors();
for (int i = 0; i < length; i++) {
SetRepresentation(i, representation);
}
}
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return HeapObject::RawField(
@ -2338,10 +2360,8 @@ void DescriptorArray::Set(int descriptor_number,
const WhitenessWitness&) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() <=
number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() > 0);
ASSERT(!desc->GetDetails().representation().IsNone());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
@ -2357,9 +2377,7 @@ void DescriptorArray::Set(int descriptor_number,
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() <=
number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() > 0);
ASSERT(!desc->GetDetails().representation().IsNone());
set(ToKeyIndex(descriptor_number), desc->GetKey());
set(ToValueIndex(descriptor_number), desc->GetValue());
@ -2370,9 +2388,7 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
void DescriptorArray::Append(Descriptor* desc,
const WhitenessWitness& witness) {
int descriptor_number = number_of_descriptors();
int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc, witness);
uint32_t hash = desc->GetKey()->Hash();
@ -2391,9 +2407,7 @@ void DescriptorArray::Append(Descriptor* desc,
void DescriptorArray::Append(Descriptor* desc) {
int descriptor_number = number_of_descriptors();
int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc);
uint32_t hash = desc->GetKey()->Hash();
@ -3573,6 +3587,38 @@ bool Map::is_observed() {
}
void Map::deprecate() {
set_bit_field3(Deprecated::update(bit_field3(), true));
}
bool Map::is_deprecated() {
if (!FLAG_track_fields) return false;
return Deprecated::decode(bit_field3());
}
bool Map::CanBeDeprecated() {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
PropertyDetails details = instance_descriptors()->GetDetails(i);
if (FLAG_track_fields && details.representation().IsSmi()) {
return true;
}
if (FLAG_track_double_fields && details.representation().IsDouble()) {
return true;
}
}
return false;
}
Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
if (!map->is_deprecated()) return map;
return GeneralizeRepresentation(map, 0, Representation::Smi());
}
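The deprecation machinery hangs together here: CanBeDeprecated scans the descriptors up to LastAdded and answers true if any field has a Smi or Double representation (the only ones that can later generalize), and CurrentMapForDeprecated walks a deprecated map forward by re-running generalization from descriptor 0 with Smi, yielding the current live map. The predicate, modeled over a plain vector:

    #include <vector>
    // Sketch of CanBeDeprecated over the descriptors' representations.
    enum class Rep { None, Smi, Double, Tagged };
    bool CanBeDeprecated(const std::vector<Rep>& reps,
                         bool track_fields, bool track_double_fields) {
      for (Rep r : reps) {
        if (track_fields && r == Rep::Smi) return true;
        if (track_double_fields && r == Rep::Double) return true;
      }
      return false;
    }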
void Map::NotifyLeafMapLayoutChange() {
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(),
@ -4109,23 +4155,6 @@ static MaybeObject* EnsureHasTransitionArray(Map* map) {
void Map::InitializeDescriptors(DescriptorArray* descriptors) {
int len = descriptors->number_of_descriptors();
#ifdef DEBUG
ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
for (int i = 0; i < len; ++i) used_indices[i] = false;
// Ensure that all enumeration indexes between 1 and length occur uniquely in
// the descriptor array.
for (int i = 0; i < len; ++i) {
int enum_index = descriptors->GetDetails(i).descriptor_index() -
PropertyDetails::kInitialIndex;
ASSERT(0 <= enum_index && enum_index < len);
ASSERT(!used_indices[enum_index]);
used_indices[enum_index] = true;
}
#endif
set_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(len);
}
@ -5095,6 +5124,7 @@ ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {

2
deps/v8/src/objects-printer.cc

@ -271,7 +271,7 @@ void JSObject::PrintProperties(FILE* out) {
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
FastPropertyAt(index)->ShortPrint(out);
RawFastPropertyAt(index)->ShortPrint(out);
PrintF(out, " (field at offset %d)\n", index);
break;
}
