
Revert "Upgrade V8 to 3.7.1"

This reverts commit 92f5a5d3ca.

V8 3.7.1 in debug mode on ia32 has a curious race-like bug where an fs.Stats
object is not fully formed until some time after it's created. This is easy
to demonstrate by running `make test-debug`.
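
Not part of the original report, but as an illustration: a minimal sketch, in ordinary Node.js, of the kind of check that would surface the symptom described above, assuming it shows up as fields of a freshly returned fs.Stats object not yet being populated. The actual failure was observed via `make test-debug`, not via this snippet.

    // Hypothetical repro sketch (assumption): immediately after statSync()
    // returns, the fs.Stats object should already have its fields populated.
    // Under the affected V8 3.7.1 debug build on ia32 it reportedly was not.
    var assert = require('assert');
    var fs = require('fs');

    var st = fs.statSync(__filename);
    assert.strictEqual(typeof st.size, 'number');  // size should be set
    assert.ok(st.isFile());                        // type bits should be set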

V8 3.7.0 does not exhibit this behaviour so back we go.

Fixes #1981.
v0.7.4-release
Ben Noordhuis 13 years ago
parent
commit edea4122b1
  1. deps/v8/ChangeLog (45)
  2. deps/v8/preparser/preparser-process.cc (35)
  3. deps/v8/src/SConscript (2)
  4. deps/v8/src/accessors.cc (11)
  5. deps/v8/src/api.cc (57)
  6. deps/v8/src/arm/assembler-arm-inl.h (15)
  7. deps/v8/src/arm/assembler-arm.h (6)
  8. deps/v8/src/arm/builtins-arm.cc (78)
  9. deps/v8/src/arm/code-stubs-arm.cc (98)
  10. deps/v8/src/arm/code-stubs-arm.h (12)
  11. deps/v8/src/arm/codegen-arm.cc (249)
  12. deps/v8/src/arm/codegen-arm.h (1)
  13. deps/v8/src/arm/deoptimizer-arm.cc (22)
  14. deps/v8/src/arm/full-codegen-arm.cc (169)
  15. deps/v8/src/arm/ic-arm.cc (104)
  16. deps/v8/src/arm/lithium-arm.cc (36)
  17. deps/v8/src/arm/lithium-arm.h (32)
  18. deps/v8/src/arm/lithium-codegen-arm.cc (191)
  19. deps/v8/src/arm/lithium-codegen-arm.h (14)
  20. deps/v8/src/arm/macro-assembler-arm.cc (36)
  21. deps/v8/src/arm/macro-assembler-arm.h (61)
  22. deps/v8/src/arm/regexp-macro-assembler-arm.cc (5)
  23. deps/v8/src/arm/simulator-arm.cc (4)
  24. deps/v8/src/arm/stub-cache-arm.cc (656)
  25. deps/v8/src/array.js (18)
  26. deps/v8/src/assembler.cc (21)
  27. deps/v8/src/assembler.h (15)
  28. deps/v8/src/ast-inl.h (14)
  29. deps/v8/src/ast.cc (7)
  30. deps/v8/src/ast.h (19)
  31. deps/v8/src/bootstrapper.cc (144)
  32. deps/v8/src/builtins.cc (8)
  33. deps/v8/src/builtins.h (5)
  34. deps/v8/src/checks.h (11)
  35. deps/v8/src/code-stubs.cc (25)
  36. deps/v8/src/code-stubs.h (45)
  37. deps/v8/src/codegen.h (15)
  38. deps/v8/src/compiler.cc (46)
  39. deps/v8/src/compiler.h (54)
  40. deps/v8/src/contexts.cc (29)
  41. deps/v8/src/contexts.h (53)
  42. deps/v8/src/d8.cc (3)
  43. deps/v8/src/debug.cc (242)
  44. deps/v8/src/debug.h (7)
  45. deps/v8/src/deoptimizer.cc (9)
  46. deps/v8/src/deoptimizer.h (3)
  47. deps/v8/src/factory.cc (53)
  48. deps/v8/src/factory.h (7)
  49. deps/v8/src/flag-definitions.h (8)
  50. deps/v8/src/frames.cc (63)
  51. deps/v8/src/frames.h (2)
  52. deps/v8/src/full-codegen.cc (16)
  53. deps/v8/src/full-codegen.h (6)
  54. deps/v8/src/globals.h (3)
  55. deps/v8/src/handles.cc (114)
  56. deps/v8/src/handles.h (46)
  57. deps/v8/src/heap-inl.h (5)
  58. deps/v8/src/heap.cc (135)
  59. deps/v8/src/heap.h (40)
  60. deps/v8/src/hydrogen-instructions.cc (79)
  61. deps/v8/src/hydrogen-instructions.h (92)
  62. deps/v8/src/hydrogen.cc (286)
  63. deps/v8/src/hydrogen.h (32)
  64. deps/v8/src/ia32/assembler-ia32-inl.h (15)
  65. deps/v8/src/ia32/builtins-ia32.cc (13)
  66. deps/v8/src/ia32/code-stubs-ia32.cc (138)
  67. deps/v8/src/ia32/code-stubs-ia32.h (11)
  68. deps/v8/src/ia32/codegen-ia32.cc (258)
  69. deps/v8/src/ia32/deoptimizer-ia32.cc (15)
  70. deps/v8/src/ia32/disasm-ia32.cc (12)
  71. deps/v8/src/ia32/full-codegen-ia32.cc (177)
  72. deps/v8/src/ia32/ic-ia32.cc (142)
  73. deps/v8/src/ia32/lithium-codegen-ia32.cc (192)
  74. deps/v8/src/ia32/lithium-codegen-ia32.h (12)
  75. deps/v8/src/ia32/lithium-ia32.cc (46)
  76. deps/v8/src/ia32/lithium-ia32.h (32)
  77. deps/v8/src/ia32/macro-assembler-ia32.cc (44)
  78. deps/v8/src/ia32/macro-assembler-ia32.h (6)
  79. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (5)
  80. deps/v8/src/ia32/stub-cache-ia32.cc (707)
  81. deps/v8/src/ic.cc (1087)
  82. deps/v8/src/ic.h (190)
  83. deps/v8/src/incremental-marking-inl.h (3)
  84. deps/v8/src/incremental-marking.cc (26)
  85. deps/v8/src/incremental-marking.h (11)
  86. deps/v8/src/interpreter-irregexp.cc (37)
  87. deps/v8/src/interpreter-irregexp.h (12)
  88. deps/v8/src/isolate.cc (14)
  89. deps/v8/src/isolate.h (11)
  90. deps/v8/src/jsregexp.cc (18)
  91. deps/v8/src/list-inl.h (5)
  92. deps/v8/src/list.h (4)
  93. deps/v8/src/liveobjectlist.cc (6)
  94. deps/v8/src/macros.py (5)
  95. deps/v8/src/mark-compact-inl.h (8)
  96. deps/v8/src/mark-compact.cc (73)
  97. deps/v8/src/mark-compact.h (56)
  98. deps/v8/src/messages.js (44)
  99. deps/v8/src/mips/assembler-mips-inl.h (15)
  100. deps/v8/src/mips/assembler-mips.h (2)

45
deps/v8/ChangeLog

@ -1,48 +1,3 @@
2011-10-26: Version 3.7.1
Achieved 33% speedup in debug-mode tests.
Removed special casing of calls to RegExp test and exec methods with no
argument. Now matches new JSC behaviour. crbug.com/75740.
Return the empty string on cyclic references in toString (ES5
conformance).
Fixed bug triggered by JSBeautifier. crbug.com/100409.
Made Math.random state per-context instead of per-process (issue 864).
Fixed stack traces to skip native functions.
Make snapshots (new contexts) smaller and faster.
Fixed handling of Function.apply for non-array arguments.
Fixed evaluation order in defineProperties to match FireFox.
Fixed handling of non-object receivers for array builtins,
crbug.com/100702.
Multiple fixes to improve compliance with test262.
Fixed compatibility with older Android releases.
Fixed compilation with gcc-4.5.3.
Improved performance of WriteUtf8, issue 1665.
Made native syntax an early error in the preparser.
Fixed issues 793 and 893 relating to Function.prototype.bind.
Improved let, const, Set and Map support and other Harmony features
(behind the --harmony flag).
Changed evaluation order for > and <= to match ES5 instead of ES3.
Bug fixes and performance improvements on all platforms.
2011-10-13: Version 3.7.0
Fixed array handling for Object.defineOwnProperty (ES5 conformance).

35
deps/v8/preparser/preparser-process.cc

@ -267,22 +267,34 @@ void CheckException(v8::PreParserData* data,
ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
// Parse ["throws" [<exn-type> [<start> [<end>]]]].
ExceptionExpectation expects;
// Parse exception expectations from (the remainder of) the command line.
int arg_index = 0;
while (argc > arg_index && strncmp("throws", argv[arg_index], 7)) {
arg_index++;
}
// Skip any flags.
while (argc > arg_index && IsFlag(argv[arg_index])) arg_index++;
if (argc > arg_index) {
if (strncmp("throws", argv[arg_index], 7)) {
// First argument after filename, if present, must be the verbatim
// "throws", marking that the preparsing should fail with an exception.
fail(NULL, "ERROR: Extra arguments not prefixed by \"throws\".\n");
}
expects.throws = true;
arg_index++;
if (argc > arg_index && !IsFlag(argv[arg_index])) {
expects.type = argv[arg_index];
do {
arg_index++;
if (argc > arg_index && !IsFlag(argv[arg_index])) {
expects.beg_pos = atoi(argv[arg_index]); // NOLINT
} while (argc > arg_index && IsFlag(argv[arg_index]));
if (argc > arg_index) {
// Next argument is the exception type identifier.
expects.type = argv[arg_index];
do {
arg_index++;
if (argc > arg_index && !IsFlag(argv[arg_index])) {
} while (argc > arg_index && IsFlag(argv[arg_index]));
if (argc > arg_index) {
expects.beg_pos = atoi(argv[arg_index]); // NOLINT
do {
arg_index++;
} while (argc > arg_index && IsFlag(argv[arg_index]));
if (argc > arg_index) {
expects.end_pos = atoi(argv[arg_index]); // NOLINT
}
}
@ -296,8 +308,7 @@ int main(int argc, const char* argv[]) {
// Parse command line.
// Format: preparser (<scriptfile> | -e "<source>")
// ["throws" [<exn-type> [<start> [<end>]]]]
// Any flags (except an initial -e) are ignored.
// Flags must not separate "throws" and its arguments.
// Any flags (except an initial -s) are ignored.
// Check for mandatory filename argument.
int arg_index = 1;

2
deps/v8/src/SConscript

@ -321,7 +321,7 @@ debug-debugger.js
EXPERIMENTAL_LIBRARY_FILES = '''
proxy.js
collection.js
weakmap.js
'''.split()

11
deps/v8/src/accessors.cc

@ -527,9 +527,7 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
// correctly yet. Compile it now and return the right length.
HandleScope scope;
Handle<JSFunction> handle(function);
if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
return Failure::Exception();
}
if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
return Smi::FromInt(handle->shared()->length());
} else {
return Smi::FromInt(function->shared()->length());
@ -761,12 +759,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
caller = potential_caller;
potential_caller = it.next();
}
// If caller is bound, return null. This is compatible with JSC, and
// allows us to make bound functions use the strict function map
// and its associated throwing caller and arguments.
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
return CheckNonStrictCallerOrThrow(isolate, caller);
}

57
deps/v8/src/api.cc

@ -2794,7 +2794,7 @@ Local<Value> v8::Object::Get(uint32_t index) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::Object::GetElement(self, index);
i::Handle<i::Object> result = i::GetElement(self, index);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@ -2874,10 +2874,8 @@ Local<Array> v8::Object::GetPropertyNames() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
bool threw = false;
i::Handle<i::FixedArray> value =
i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw);
if (threw) return Local<v8::Array>();
i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
@ -2895,10 +2893,8 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
bool threw = false;
i::Handle<i::FixedArray> value =
i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw);
if (threw) return Local<v8::Array>();
i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
@ -3097,10 +3093,7 @@ static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
// If the property being looked up is a callback, it can throw
// an exception.
EXCEPTION_PREAMBLE(isolate);
PropertyAttributes ignored;
i::Handle<i::Object> result =
i::Object::GetProperty(receiver, receiver, lookup, name,
&ignored);
i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
@ -3117,7 +3110,7 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup(isolate);
i::LookupResult lookup;
self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
@ -3130,7 +3123,7 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup(isolate);
i::LookupResult lookup;
self_obj->LookupRealNamedProperty(*key_obj, &lookup);
return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
@ -3641,30 +3634,13 @@ int String::WriteUtf8(char* buffer,
if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
if (str->IsAsciiRepresentation()) {
int len;
if (capacity == -1) {
capacity = str->length() + 1;
len = str->length();
} else {
len = i::Min(capacity, str->length());
}
i::String::WriteToFlat(*str, buffer, 0, len);
if (nchars_ref != NULL) *nchars_ref = len;
if (!(options & NO_NULL_TERMINATION) && capacity > len) {
buffer[len] = '\0';
return len + 1;
}
return len;
}
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
i::Handle<i::String> str = Utils::OpenHandle(this);
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
FlattenString(str);
str->TryFlatten();
}
write_input_buffer.Reset(0, *str);
int len = str->length();
@ -3985,15 +3961,6 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
if (!i::Isolate::Current()->IsInitialized()) {
// Isolate is unitialized thus heap is not configured yet.
heap_statistics->set_total_heap_size(0);
heap_statistics->set_total_heap_size_executable(0);
heap_statistics->set_used_heap_size(0);
heap_statistics->set_heap_size_limit(0);
return;
}
i::Heap* heap = i::Isolate::Current()->heap();
heap_statistics->set_total_heap_size(heap->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
@ -4006,15 +3973,14 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
bool v8::V8::IdleNotification() {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
i::Isolate* isolate = i::Isolate::Current();
if (isolate == NULL || !isolate->IsInitialized()) return true;
if (!i::Isolate::Current()->IsInitialized()) return true;
return i::V8::IdleNotification();
}
void v8::V8::LowMemoryNotification() {
i::Isolate* isolate = i::Isolate::Current();
if (isolate == NULL || !isolate->IsInitialized()) return;
if (!isolate->IsInitialized()) return;
isolate->heap()->CollectAllAvailableGarbage();
}
@ -4109,9 +4075,8 @@ Persistent<Context> v8::Context::New(
}
// Leave V8.
if (env.is_null()) {
if (env.is_null())
return Persistent<Context>();
}
return Persistent<Context>(Utils::ToLocal(env));
}

15
deps/v8/src/arm/assembler-arm-inl.h

@ -74,10 +74,10 @@ int RelocInfo::target_address_size() {
}
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@ -103,12 +103,10 @@ Object** RelocInfo::target_object_address() {
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
@ -138,12 +136,11 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode) {
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(

6
deps/v8/src/arm/assembler-arm.h

@ -304,9 +304,9 @@ const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// Aliases for double registers.
static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8;
static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15;
static const DwVfpRegister& kDoubleRegZero = d14;
const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
const DwVfpRegister kDoubleRegZero = d14;
// Coprocessor register

78
deps/v8/src/arm/builtins-arm.cc

@ -86,6 +86,12 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
// This constant has the same value as JSArray::kPreallocatedArrayElements and
// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
// below should be reconsidered.
static const int kLoopUnfoldLimit = 4;
// Allocate an empty JSArray. The allocated array is put into the result
// register. An elements backing store is allocated with size initial_capacity
// and filled with the hole values.
@ -95,9 +101,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
int initial_capacity,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
ASSERT(initial_capacity > 0);
// Load the initial map from the array function.
__ ldr(scratch1, FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
@ -147,24 +153,12 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
// Fill the FixedArray with the hole value. Inline the code if short.
if (initial_capacity == 0) return;
// Fill the FixedArray with the hole value.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
ASSERT(initial_capacity <= kLoopUnfoldLimit);
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
static const int kLoopUnfoldLimit = 4;
if (initial_capacity <= kLoopUnfoldLimit) {
for (int i = 0; i < initial_capacity; i++) {
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
}
} else {
Label loop, entry;
__ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
__ b(&entry);
__ bind(&loop);
for (int i = 0; i < initial_capacity; i++) {
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(scratch1, scratch2);
__ b(lt, &loop);
}
}
@ -179,7 +173,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array_storage is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
Register array_size, // As a smi, cannot be 0.
Register array_size, // As a smi.
Register result,
Register elements_array_storage,
Register elements_array_end,
@ -187,18 +181,32 @@ static void AllocateJSArray(MacroAssembler* masm,
Register scratch2,
bool fill_with_hole,
Label* gc_required) {
Label not_empty, allocated;
// Load the initial map from the array function.
__ ldr(elements_array_storage,
FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
__ Assert(ne, "array size is unexpectedly 0");
}
// Check whether an empty sized array is requested.
__ tst(array_size, array_size);
__ b(ne, &not_empty);
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
int size = JSArray::kSize +
FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size,
result,
elements_array_end,
scratch1,
gc_required,
TAG_OBJECT);
__ jmp(&allocated);
// Allocate the JSArray object together with space for a FixedArray with the
// requested number of elements.
__ bind(&not_empty);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ mov(elements_array_end,
Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
@ -218,6 +226,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array_storage: initial map
// array_size: size of array (smi)
__ bind(&allocated);
__ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
__ str(elements_array_storage,
@ -247,6 +256,14 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
STATIC_ASSERT(kSmiTag == 0);
__ tst(array_size, array_size);
// Length of the FixedArray is the number of pre-allocated elements if
// the actual JSArray has length 0 and the size of the JSArray for non-empty
// JSArrays. The length of a FixedArray is stored as a smi.
__ mov(array_size,
Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
LeaveCC,
eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@ -294,20 +311,20 @@ static void AllocateJSArray(MacroAssembler* masm,
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
__ bind(&empty_array);
AllocateEmptyJSArray(masm,
r1,
r2,
r3,
r4,
r5,
JSArray::kPreallocatedArrayElements,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r3, r4);
// Setup return value, remove receiver from stack and return.
@ -322,13 +339,6 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(ne, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
__ tst(r2, r2);
__ b(ne, &not_empty_array);
__ Drop(1); // Adjust stack.
__ mov(r0, Operand(0)); // Treat this as a call with argc of zero.
__ b(&empty_array);
__ bind(&not_empty_array);
__ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
__ b(ne, call_generic_code);
@ -1017,9 +1027,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Set up the roots register.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
__ mov(r10, Operand(roots_array_start));
ExternalReference roots_address =
ExternalReference::roots_address(masm->isolate());
__ mov(r10, Operand(roots_address));
// Push the function and the receiver onto the stack.
__ push(r1);

98
deps/v8/src/arm/code-stubs-arm.cc

@ -263,12 +263,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [sp + (2 * kPointerSize)]: literals array.
// All sizes here are multiples of kPointerSize.
int elements_size = 0;
if (length_ > 0) {
elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
? FixedDoubleArray::SizeFor(length_)
: FixedArray::SizeFor(length_);
}
int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
int size = JSArray::kSize + elements_size;
// Load boilerplate object into r3 and check if we need to create a
@ -288,9 +283,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
if (mode_ == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
} else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
message = "Expected (writable) fixed double array";
expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
@ -330,7 +322,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
// Copy the elements array.
ASSERT((elements_size % kPointerSize) == 0);
__ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
}
@ -3922,7 +3913,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
// Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@ -6677,82 +6668,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<String> name,
Register scratch0) {
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
for (int i = 0; i < kInlinedProbes; i++) {
// scratch0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
Register index = scratch0;
// Capacity is smi 2^n.
__ ldr(index, FieldMemOperand(properties, kCapacityOffset));
__ sub(index, index, Operand(1));
__ and_(index, index, Operand(
Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
ASSERT(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ cmp(entity_name, tmp);
__ b(eq, done);
if (i != kInlinedProbes - 1) {
// Stop if found the property.
__ cmp(entity_name, Operand(Handle<String>(name)));
__ b(eq, miss);
// Check if the entry name is not a symbol.
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
__ tst(entity_name, Operand(kIsSymbolMask));
__ b(eq, miss);
// Restore the properties.
__ ldr(properties,
FieldMemOperand(receiver, JSObject::kPropertiesOffset));
}
}
const int spill_mask =
(lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
r2.bit() | r1.bit() | r0.bit());
__ stm(db_w, sp, spill_mask);
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ tst(r0, Operand(r0));
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
__ b(ne, miss);
}
// TODO(kmillikin): Eliminate this function when the stub cache is fully
// handlified.
MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
MacroAssembler* masm,
Label* miss,
Label* done,
@ -7011,13 +6927,6 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ r3, r1, r2, EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ r4, r2, r3, EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ r2, r3, r9, EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
{ r6, r2, r0, EMIT_REMEMBERED_SET },
{ r2, r6, r9, EMIT_REMEMBERED_SET },
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
@ -7254,6 +7163,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
#undef __
} } // namespace v8::internal

12
deps/v8/src/arm/code-stubs-arm.h

@ -799,17 +799,7 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<String> name,
Register scratch0);
// TODO(kmillikin): Eliminate this function when the stub cache is fully
// handlified.
MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
MacroAssembler* masm,
Label* miss,
Label* done,

249
deps/v8/src/arm/codegen-arm.cc

@ -30,13 +30,10 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@ -54,252 +51,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
// Set transitioned map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
HeapObject::kMapOffset,
r3,
r9,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required;
bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
__ push(lr);
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedArray
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
__ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
__ add(lr, lr, Operand(r5, LSL, 2));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedDoubleArray, not tagged as heap object
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
// Update receiver's map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
HeapObject::kMapOffset,
r3,
r9,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(r3, r6, Operand(kHeapObjectTag));
__ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
JSObject::kElementsOffset,
r3,
r9,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
__ add(r6, r7, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
if (!vfp3_supported) __ Push(r1, r0);
__ b(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(lr);
__ b(fail);
// Convert and copy elements.
__ bind(&loop);
__ ldr(r9, MemOperand(r3, 4, PostIndex));
// r9: current element
__ JumpIfNotSmi(r9, &convert_hole);
// Normal smi, convert to double and store.
__ SmiUntag(r9);
if (vfp3_supported) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
__ add(r7, r7, Operand(8));
} else {
FloatingPointHelper::ConvertIntToDouble(masm,
r9,
FloatingPointHelper::kCoreRegisters,
d0,
r0,
r1,
lr,
s0);
__ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
}
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
__ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
__ bind(&entry);
__ cmp(r7, r6);
__ b(lt, &loop);
if (!vfp3_supported) __ Pop(r1, r0);
__ pop(lr);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
Label entry, loop, convert_hole, gc_required;
__ push(lr);
__ Push(r3, r2, r1, r0);
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
// r5: number of elements (smi-tagged)
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
__ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
// Prepare for conversion loop.
__ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
__ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in r4 to fully take advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
// r7: the-hole pointer
// r9: heap number map
__ b(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(r3, r2, r1, r0);
__ pop(lr);
__ b(fail);
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
// lr: current element's upper 32 bit
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
// r2: new heap number
__ ldr(r0, MemOperand(r4, 12, NegOffset));
__ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
__ mov(r0, r3);
__ str(r2, MemOperand(r3, 4, PostIndex));
__ RecordWrite(r6,
r0,
r2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ b(&entry);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ str(r7, MemOperand(r3, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
__ b(lt, &loop);
__ Pop(r3, r2, r1, r0);
// Update receiver's map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
HeapObject::kMapOffset,
r3,
r9,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
JSObject::kElementsOffset,
r6,
r9,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(lr);
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

1
deps/v8/src/arm/codegen-arm.h

@ -29,6 +29,7 @@
#define V8_ARM_CODEGEN_ARM_H_
#include "ast.h"
#include "code-stubs-arm.h"
#include "ic-inl.h"
namespace v8 {

22
deps/v8/src/arm/deoptimizer-arm.cc

@ -100,6 +100,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
}
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
@ -177,13 +178,16 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
RelocInfo rinfo(pc_after - 2 * kInstrSize,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
}
void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@ -205,8 +209,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, check_code);
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code);
}
@ -723,6 +727,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
// __ add(r6, r2, Operand(r3, LSL, 1));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
@ -756,9 +761,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(ip); // remove lr
// Set up the roots register.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate);
__ mov(r10, Operand(roots_array_start));
ExternalReference roots_address = ExternalReference::roots_address(isolate);
__ mov(r10, Operand(roots_address));
__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack

169
deps/v8/src/arm/full-codegen-arm.cc

@ -269,10 +269,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY);
EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@ -721,8 +718,6 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
bool binding_needs_init =
mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@ -734,7 +729,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), StackOperand(variable));
} else if (binding_needs_init) {
} else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
@ -768,7 +763,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (binding_needs_init) {
} else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
@ -780,13 +775,9 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Declaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR ||
mode == CONST ||
mode == CONST_HARMONY ||
mode == LET);
PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
? READ_ONLY : NONE;
// Declaration nodes are always introduced in one of three modes.
ASSERT(mode == VAR || mode == CONST || mode == LET);
PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@ -796,7 +787,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(function);
} else if (binding_needs_init) {
} else if (mode == CONST || mode == LET) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
@ -938,17 +929,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(r0);
// Check for proxies.
Label call_runtime;
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
__ b(le, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
Label next;
Label next, call_runtime;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@ -1027,16 +1012,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register r0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
__ b(gt, &non_proxy);
__ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ Push(r1, r0); // Smi and array
__ mov(r1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
__ Push(r1, r0);
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
@ -1053,23 +1031,18 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
// Get the expected map from the stack or a smi in the
// Get the expected map from the stack or a zero map in the
// permanent slow case into register r2.
__ ldr(r2, MemOperand(sp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
// If not, we have to filter the key.
Label update_each;
__ ldr(r1, MemOperand(sp, 4 * kPointerSize));
__ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
// For proxies, no filtering is done.
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
__ cmp(r2, Operand(Smi::FromInt(0)));
__ b(eq, &update_each);
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
@ -1124,7 +1097,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->strict_mode_flag());
FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
__ mov(r0, Operand(info));
__ push(r0);
__ CallStub(&stub);
@ -1155,7 +1128,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_non_strict_eval()) {
if (s->calls_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@ -1168,7 +1141,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@ -1212,7 +1185,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_non_strict_eval()) {
if (s->calls_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@ -1251,12 +1224,11 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == CONST ||
local->mode() == CONST_HARMONY ||
local->mode() == LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
} else { // LET || CONST_HARMONY
} else { // LET
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
@ -1294,15 +1266,13 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
if (!var->binding_needs_init()) {
if (var->mode() != LET && var->mode() != CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (var->mode() == LET || var->mode() == CONST_HARMONY) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
if (var->mode() == LET) {
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
@ -1310,8 +1280,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&done);
} else {
// Uninitalized const bindings outside of harmony mode are unholed.
ASSERT(var->mode() == CONST);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
context()->Plug(r0);
@ -1499,19 +1467,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
__ mov(r1, Operand(expr->constant_elements()));
__ Push(r3, r2, r1);
if (constant_elements_values->map() ==
if (expr->constant_elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@ -1523,14 +1485,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
__ CallStub(&stub);
}
@ -1553,56 +1509,24 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r6, JSObject::kMapOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
Label element_done;
Label double_elements;
Label smi_element;
Label slow_elements;
Label fast_elements;
__ CheckFastElements(r2, r3, &double_elements);
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(result_register(), &smi_element);
__ CheckFastSmiOnlyElements(r2, r3, &fast_elements);
// Store into the array literal requires a elements transition. Call into
// the runtime.
__ bind(&slow_elements);
__ push(r6); // Copy of array literal.
__ mov(r1, Operand(Smi::FromInt(i)));
__ mov(r2, Operand(Smi::FromInt(NONE))); // PropertyAttributes
__ mov(r3, Operand(Smi::FromInt(strict_mode_flag()))); // Strict mode.
__ Push(r1, result_register(), r2, r3);
__ CallRuntime(Runtime::kSetProperty, 5);
__ b(&element_done);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ mov(r3, Operand(Smi::FromInt(i)));
__ StoreNumberToDoubleElements(result_register(), r3, r6, r1, r4, r5, r9,
r7, &slow_elements);
__ b(&element_done);
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ str(result_register(), FieldMemOperand(r1, offset));
// Update the write barrier for the array store.
Label no_map_change;
__ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with r0 as the scratch
// register.
__ RecordWriteField(
r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ b(&element_done);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ str(result_register(), FieldMemOperand(r1, offset));
// Fall through
__ bind(&element_done);
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ CheckFastSmiOnlyElements(r3, r2, &no_map_change);
__ push(r6); // Copy of array literal.
__ CallRuntime(Runtime::kNonSmiElementStored, 1);
__ bind(&no_map_change);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@ -1979,9 +1903,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
} else if (var->mode() != CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
if (FLAG_debug_code && op == Token::INIT_LET) {
@ -2861,8 +2784,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(VFP3)) {
__ PrepareCallCFunction(1, r0);
__ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP3);
@ -2882,9 +2804,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
__ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ mov(r0, Operand(r4));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
__ mov(r1, Operand(ExternalReference::isolate_address()));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@ -4150,25 +4071,33 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cond = eq;
__ pop(r1);
break;
case Token::LT:
cond = lt;
__ pop(r1);
break;
case Token::GT:
cond = gt;
// Reverse left and right sides to obtain ECMA-262 conversion order.
cond = lt;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::LTE:
cond = le;
// Reverse left and right sides to obtain ECMA-262 conversion order.
cond = ge;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::GTE:
cond = ge;
__ pop(r1);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);

104
deps/v8/src/arm/ic-arm.cc

@ -382,10 +382,10 @@ Object* CallIC_Miss(Arguments args);
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Code::ExtraICState extra_state) {
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r1 : receiver
// -- r2 : name
@ -395,7 +395,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
extra_ic_state,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
@ -464,7 +464,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
}
void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@ -486,10 +486,10 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
}
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
Code::ExtraICState extra_state) {
static void GenerateCallMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@ -541,7 +541,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
}
// Invoke the function.
CallKind call_kind = CallICBase::Contextual::decode(extra_state)
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@ -553,6 +553,18 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
}
void CallIC::GenerateMiss(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
@ -568,6 +580,27 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc, Code::kNoExtraICState);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
@ -685,7 +718,7 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(r2, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
CallICBase::GenerateNormal(masm, argc);
GenerateCallNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@ -1211,47 +1244,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r2 : receiver
// -- r3 : target map
// -- lr : return address
// -----------------------------------
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
}
__ push(r2);
__ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
}
void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r2 : receiver
// -- r3 : target map
// -- lr : return address
// -----------------------------------
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
}
__ push(r2);
__ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
}
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
@ -1567,9 +1559,11 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return lt;
case Token::GT:
return gt;
// Reverse left and right operands to obtain ECMA-262 conversion order.
return lt;
case Token::LTE:
return le;
// Reverse left and right operands to obtain ECMA-262 conversion order.
return ge;
case Token::GTE:
return ge;
default:
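Note on the GT/LTE cases above: the variant that returns lt for GT and ge for LTE pairs with a code generator that swaps the operands handed to the compare stub (see the DoCompareGeneric hunk further down, where left/right are pinned to r0/r1 in reversed order for GT and LTE), so that ToPrimitive still runs on the left operand first, as ECMA-262 requires; once the operands are swapped, the token's condition has to be mirrored. A minimal standalone sketch of that equivalence, assuming simplified Token/Condition enums rather than the real V8 types:

#include <cassert>

enum Token { LT, GT, LTE, GTE };
enum Condition { lt, gt, le, ge };

bool Test(Condition cc, int left, int right) {
  switch (cc) {
    case lt: return left < right;
    case gt: return left > right;
    case le: return left <= right;
    case ge: return left >= right;
  }
  return false;
}

// Condition to test on the stub's result when GT and LTE are emitted with
// their operands swapped (so the original left operand is converted first).
Condition ComputeCondition(Token op) {
  switch (op) {
    case LT:  return lt;
    case GT:  return lt;  // stub compared (right, left): a > b  is  b < a
    case LTE: return ge;  // stub compared (right, left): a <= b is  b >= a
    case GTE: return ge;
  }
  return lt;
}

int main() {
  for (int a = -2; a <= 2; ++a) {
    for (int b = -2; b <= 2; ++b) {
      assert((a > b)  == Test(ComputeCondition(GT),  b, a));  // swapped operands
      assert((a <= b) == Test(ComputeCondition(LTE), b, a));  // swapped operands
      assert((a < b)  == Test(ComputeCondition(LT),  a, b));
      assert((a >= b) == Test(ComputeCondition(GTE), a, b));
    }
  }
  return 0;
}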

36
deps/v8/src/arm/lithium-arm.cc

@ -391,12 +391,6 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
}
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
@ -1410,10 +1404,12 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@ -1425,8 +1421,8 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
@ -1974,26 +1970,6 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new LTransitionElementsKind(object, new_map_reg, NULL);
return DefineSameAsFirst(result);
} else {
LOperand* object = UseFixed(instr->object(), r0);
LOperand* fixed_object_reg = FixedTemp(r2);
LOperand* new_map_reg = FixedTemp(r3);
LTransitionElementsKind* result =
new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
return MarkAsCall(DefineFixed(result, r0), instr);
}
}
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();

32
deps/v8/src/arm/lithium-arm.h

@ -162,7 +162,6 @@ class LCodeGen;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@ -1261,6 +1260,7 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@ -1277,9 +1277,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@ -1563,6 +1561,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@ -1582,8 +1581,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
bool strict_mode() { return strict_mode_flag() == kStrictMode; }
bool strict_mode() { return hydrogen()->strict_mode(); }
};
@ -1671,30 +1669,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
};
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
LOperand* temp_reg) {
inputs_[0] = object;
temps_[0] = new_map_temp;
temps_[1] = temp_reg;
}
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* new_map_reg() { return temps_[0]; }
LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
class LStringAdd: public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {

191
deps/v8/src/arm/lithium-codegen-arm.cc

@ -410,12 +410,6 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
return value->Number();
}
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
@ -1711,44 +1705,30 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
__ cmp(ToRegister(left), ToRegister(right));
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cond = TokenToCondition(instr->op(), false);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
int next_block =
EvalComparison(instr->op(), left_val, right_val) ? true_block
: false_block;
EmitGoto(next_block);
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
__ b(vs, chunk_->GetAssemblyLabel(false_block));
} else {
if (instr->is_double()) {
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
__ b(vs, chunk_->GetAssemblyLabel(false_block));
} else {
if (right->IsConstantOperand()) {
__ cmp(ToRegister(left),
Operand(ToInteger32(LConstantOperand::cast(right))));
} else if (left->IsConstantOperand()) {
__ cmp(ToRegister(right),
Operand(ToInteger32(LConstantOperand::cast(left))));
// We transposed the operands. Reverse the condition.
cond = ReverseCondition(cond);
} else {
__ cmp(ToRegister(left), ToRegister(right));
}
}
EmitBranch(true_block, false_block, cond);
EmitCmpI(left, right);
}
Condition cc = TokenToCondition(instr->op(), instr->is_double());
EmitBranch(true_block, false_block, cc);
}
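Note on the constant-operand paths shown in the longer DoCmpIDAndBranch variant above: when both inputs are constants the comparison can be evaluated at compile time and replaced by an unconditional goto, and when only the left input is a constant the emitted cmp has to put the register first (cmp takes its immediate as the second operand), so the operands are transposed and the condition reversed. A standalone sketch with simplified types, not V8 code:

#include <cassert>

enum Condition { lt, gt, le, ge, eq, ne };

Condition ReverseCondition(Condition cc) {
  switch (cc) {
    case lt: return gt;  // a < b  is  b > a
    case gt: return lt;
    case le: return ge;
    case ge: return le;
    default: return cc;  // eq and ne are symmetric
  }
}

bool Test(Condition cc, int lhs, int rhs) {
  switch (cc) {
    case lt: return lhs < rhs;
    case gt: return lhs > rhs;
    case le: return lhs <= rhs;
    case ge: return lhs >= rhs;
    case eq: return lhs == rhs;
    case ne: return lhs != rhs;
  }
  return false;
}

int main() {
  const int kLeftConstant = 7;
  int right = 12;
  // Wanted: branch on (kLeftConstant < right). The compare is emitted as
  // cmp(right_reg, #7) with the immediate second, so the condition flips.
  assert((kLeftConstant < right) == Test(ReverseCondition(lt), right, kLeftConstant));
  // Both operands constant: fold the comparison and emit an unconditional goto.
  assert((3 <= 3) == Test(le, 3, 3));
  return 0;
}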
@ -2196,6 +2176,9 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
@ -2268,19 +2251,13 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
// Cells are always in the remembered set.
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteField(scratch,
JSGlobalPropertyCell::kValueOffset,
value,
scratch2,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET,
check_needed);
}
__ RecordWriteField(scratch,
JSGlobalPropertyCell::kValueOffset,
value,
scratch2,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
}
@ -2308,18 +2285,13 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register value = ToRegister(instr->value());
MemOperand target = ContextOperand(context, instr->slot_index());
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->needs_write_barrier()) {
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch0(),
kLRHasBeenSaved,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
kSaveFPRegs);
}
}
@ -2340,7 +2312,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
LookupResult lookup(isolate());
LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsProperty() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@ -2806,7 +2778,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
LoadHeapObject(result, instr->hydrogen()->closure());
__ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
@ -3325,36 +3297,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ str(value, FieldMemOperand(object, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
value,
scratch,
kLRHasBeenSaved,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteField(
object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ str(value, FieldMemOperand(scratch, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWriteField(scratch,
offset,
value,
object,
kLRHasBeenSaved,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteField(
scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs);
}
}
}
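Note on the HType/SmiCheck lines that appear in one variant of this and the earlier store hunks: the write barrier only matters for heap-object values, so when the value's type is statically known to be a heap object the barrier's inline smi test can be dropped from the generated code. A simplified standalone model of that idea; the names below are illustrative, not V8's actual RecordWriteField:

#include <cassert>
#include <cstdint>

enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

bool IsSmi(uintptr_t value) { return (value & 1) == 0; }  // V8 smis have tag bit 0

int barrier_work_done = 0;

// The barrier only records stores of heap-object values. If the compiler has
// already proved the value is a heap object, the dynamic IsSmi test (and its
// branch) can be omitted from the emitted code.
void RecordWrite(uintptr_t value, SmiCheck check) {
  if (check == INLINE_SMI_CHECK && IsSmi(value)) return;  // smis need no barrier
  ++barrier_work_done;  // stand-in for the remembered-set / incremental marking work
}

int main() {
  uintptr_t smi = 42 << 1;         // tagged small integer
  uintptr_t heap_object = 0x1001;  // tagged pointer (low bit set)
  RecordWrite(smi, INLINE_SMI_CHECK);          // skipped by the inline check
  RecordWrite(heap_object, INLINE_SMI_CHECK);  // recorded
  RecordWrite(heap_object, OMIT_SMI_CHECK);    // recorded, no check emitted
  assert(barrier_work_done == 2);
  return 0;
}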
@ -3405,18 +3362,9 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ RecordWrite(elements,
key,
value,
kLRHasBeenSaved,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs);
}
}
@ -3539,48 +3487,6 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register new_map_reg = ToRegister(instr->new_map_reg());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
ElementsKind from_kind = from_map->elements_kind();
ElementsKind to_kind = to_map->elements_kind();
Label not_applicable;
__ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
} else {
UNREACHABLE();
}
__ bind(&not_applicable);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
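Note on the DoTransitionElementsKind block above: a FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS transition only needs the map pointer swapped in place (plus a write barrier on the map field) because smis are already valid tagged elements, while any transition involving FAST_DOUBLE_ELEMENTS must also rewrite the backing store, which is why those cases call out to a builtin. A compact standalone sketch of that dispatch, with simplified enums rather than V8 types:

#include <cassert>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };
enum Strategy { SWAP_MAP_IN_PLACE, CALL_TRANSITION_BUILTIN, UNSUPPORTED };

Strategy ChooseTransition(ElementsKind from, ElementsKind to) {
  if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_ELEMENTS)
    return SWAP_MAP_IN_PLACE;        // smis are already valid tagged elements
  if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_DOUBLE_ELEMENTS)
    return CALL_TRANSITION_BUILTIN;  // elements must be converted to unboxed doubles
  if (from == FAST_DOUBLE_ELEMENTS && to == FAST_ELEMENTS)
    return CALL_TRANSITION_BUILTIN;  // doubles must be boxed as HeapNumbers
  return UNSUPPORTED;
}

int main() {
  assert(ChooseTransition(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS) == SWAP_MAP_IN_PLACE);
  assert(ChooseTransition(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS) == CALL_TRANSITION_BUILTIN);
  assert(ChooseTransition(FAST_DOUBLE_ELEMENTS, FAST_SMI_ONLY_ELEMENTS) == UNSUPPORTED);
  return 0;
}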
@ -4297,15 +4203,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(r1, Operand(constant_elements));
__ mov(r1, Operand(instr->hydrogen()->constant_elements()));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@ -4322,9 +4223,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@ -4416,7 +4315,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
FastNewClosureStub stub(shared_info->strict_mode_flag());
FastNewClosureStub stub(
shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ mov(r1, Operand(shared_info));
__ push(r1);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@ -4449,9 +4349,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
false_label,
input,
instr->type_literal());
if (final_branch_condition != kNoCondition) {
EmitBranch(true_block, false_block, final_branch_condition);
}
EmitBranch(true_block, false_block, final_branch_condition);
}
@ -4521,7 +4420,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = eq;
} else {
final_branch_condition = ne;
__ b(false_label);
// A dead branch instruction will be generated after this point.
}
return final_branch_condition;

14
deps/v8/src/arm/lithium-codegen-arm.h

@ -86,7 +86,6 @@ class LCodeGen BASE_EMBEDDED {
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
@ -140,8 +139,8 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
StrictModeFlag strict_mode_flag() const {
return info()->strict_mode_flag();
int strict_mode_flag() const {
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@ -207,7 +206,7 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr);
// Generate a direct call to a known function. Expects the function
// to be in r1.
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
@ -264,6 +263,7 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
@ -272,10 +272,8 @@ class LCodeGen BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name);
Condition EmitTypeofIs(Label* true_label, Label* false_label,
Register input, Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to

36
deps/v8/src/arm/macro-assembler-arm.cc

@ -1101,16 +1101,24 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function->is_compiled());
// Get the function and setup the context.
mov(r1, Operand(Handle<JSFunction>(function)));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Invoke the cached code.
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
} else {
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
}
}
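Note on the InvokeFunction hunk above: loading the call target from the function's code-entry field at every call means a later recompilation takes effect the next time the function is invoked, without patching any call sites, whereas embedding the current Code handle keeps calling whatever code existed when the call site was generated. A tiny standalone model of the difference, with illustrative names rather than V8 code:

#include <cassert>

using Entry = int (*)(int);

int unoptimized(int x) { return x + 1; }
int optimized(int x) { return x + 1; }  // same behaviour; stands in for faster code

struct JSFunctionModel {
  Entry code_entry;  // what "ldr r3, [function, #kCodeEntryOffset]" reads at call time
};

int CallThroughCodeField(JSFunctionModel* f, int arg) { return f->code_entry(arg); }

int main() {
  JSFunctionModel f{unoptimized};
  Entry baked_in = f.code_entry;  // analogue of embedding Handle<Code> in the call site
  f.code_entry = optimized;       // "recompilation" installs a new entry point
  assert(CallThroughCodeField(&f, 1) == 2);  // the new entry is picked up automatically
  assert(baked_in == unoptimized);           // the embedded target still names the old code
  return 0;
}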
@ -1594,7 +1602,6 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
ASSERT(!object_size.is(ip));
ASSERT(!result.is(ip));
ASSERT(!scratch1.is(ip));
ASSERT(!scratch2.is(ip));
@ -2023,8 +2030,7 @@ void MacroAssembler::DispatchMap(Register obj,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss,
bool miss_on_bound_function) {
Label* miss) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
@ -2032,16 +2038,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
b(ne, miss);
if (miss_on_bound_function) {
ldr(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
ldr(scratch,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
tst(scratch,
Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
b(ne, miss);
}
// Make sure that the function has an instance prototype.
Label non_instance;
ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@ -3151,10 +3147,8 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
// Order of the next two lines is important: zeros register
// can be the same as source register.
Move(scratch, source);
mov(zeros, Operand(0, RelocInfo::NONE));
Move(scratch, source);
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
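Note on the ordering comment in the CountLeadingZeros hunk above: the destination register is allowed to alias the source, so the source must be copied into the scratch register before the destination is zeroed. A standalone sketch of the hazard, with registers modelled as references (not V8 code):

#include <cassert>

void WrongOrder(unsigned& zeros, unsigned& source, unsigned& scratch) {
  zeros = 0;         // if zeros aliases source, the input is destroyed here
  scratch = source;
}

void RightOrder(unsigned& zeros, unsigned& source, unsigned& scratch) {
  scratch = source;  // copy first ...
  zeros = 0;         // ... then it is safe to clobber the (possibly aliased) output
}

int main() {
  unsigned r0 = 0xff, scratch = 0;
  WrongOrder(r0, r0, scratch);   // zeros and source are the same "register"
  assert(scratch == 0);          // the input was lost before the copy
  r0 = 0xff;
  RightOrder(r0, r0, scratch);
  assert(scratch == 0xff);       // the input survives in the scratch register
  return 0;
}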

61
deps/v8/src/arm/macro-assembler-arm.h

@ -320,11 +320,8 @@ class MacroAssembler: public Assembler {
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1,
Register src2,
Register src3,
Register src4,
Condition cond = al) {
void Push(Register src1, Register src2,
Register src3, Register src4, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
@ -363,57 +360,6 @@ class MacroAssembler: public Assembler {
}
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
} else {
ldr(src3, MemOperand(sp, 4, PostIndex), cond);
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
}
} else {
Pop(src2, src3, cond);
ldr(src1, MemOperand(sp, 4, PostIndex), cond);
}
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1,
Register src2,
Register src3,
Register src4,
Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
ASSERT(!src1.is(src4));
ASSERT(!src2.is(src4));
ASSERT(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
ldm(ia_w,
sp,
src1.bit() | src2.bit() | src3.bit() | src4.bit(),
cond);
} else {
ldr(src4, MemOperand(sp, 4, PostIndex), cond);
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
}
} else {
Pop(src3, src4, cond);
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
}
} else {
Pop(src2, src3, src4, cond);
ldr(src1, MemOperand(sp, 4, PostIndex), cond);
}
}
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
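Note on the Pop helpers above: ldm always hands the lowest-numbered register the value at the lowest address, and Pop pops its rightmost argument from that lowest address, so a single ldm is only correct when the register numbers strictly decrease from left to right, which is exactly what the src1.code() > src2.code() checks enforce before falling back to individual ldr loads. A simplified standalone model (not V8 code):

#include <algorithm>
#include <cassert>
#include <map>
#include <vector>

std::vector<int> memory(16);
int sp = 16;  // full-descending stack: sp points at the last value pushed

void Push(int value) { memory[--sp] = value; }

// ldm ia_w: ascending register numbers receive values from ascending addresses.
void LdmPop(std::vector<int> regs, std::map<int, int>* regfile) {
  std::sort(regs.begin(), regs.end());
  for (int r : regs) (*regfile)[r] = memory[sp++];
}

int main() {
  std::map<int, int> regfile;
  // Push(r4, r3, r2) pushes the leftmost register first (to the highest address).
  Push(40);  // r4
  Push(30);  // r3
  Push(20);  // r2 ends up at the lowest address
  // Pop(r4, r3, r2): register numbers decrease left to right, so one ldm is
  // enough and r2 (the rightmost argument) receives the value at the top.
  LdmPop({4, 3, 2}, &regfile);
  assert(regfile[2] == 20 && regfile[3] == 30 && regfile[4] == 40);
  return 0;
}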
@ -726,8 +672,7 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss,
bool miss_on_bound_function = false);
Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both

5
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -1111,11 +1111,6 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
} else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
// Subject string might have been a ConsString that underwent
// short-circuiting during GC. That will not change start_address but
// will change pointer inside the subject handle.
frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;

4
deps/v8/src/arm/simulator-arm.cc

@ -1268,9 +1268,9 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 512 bytes to prevent overrunning the stack when
// Leave a safety margin of 256 bytes to prevent overrunning the stack when
// pushing values.
return reinterpret_cast<uintptr_t>(stack_) + 512;
return reinterpret_cast<uintptr_t>(stack_) + 256;
}

656
deps/v8/src/arm/stub-cache-arm.cc

File diff suppressed because it is too large

18
deps/v8/src/array.js

@ -1013,22 +1013,18 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver)) {
receiver = ToObject(receiver);
}
var result = new $Array();
var accumulator = new InternalArray();
var accumulator_length = 0;
var result = [];
var result_length = 0;
for (var i = 0; i < length; i++) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
if (%_CallFunction(receiver, current, i, array, f)) {
accumulator[accumulator_length++] = current;
result[result_length++] = current;
}
}
}
%MoveArrayContents(accumulator, result);
return result;
}
@ -1049,8 +1045,6 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver)) {
receiver = ToObject(receiver);
}
for (var i = 0; i < length; i++) {
@ -1080,8 +1074,6 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver)) {
receiver = ToObject(receiver);
}
for (var i = 0; i < length; i++) {
@ -1110,8 +1102,6 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver)) {
receiver = ToObject(receiver);
}
for (var i = 0; i < length; i++) {
@ -1139,8 +1129,6 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver)) {
receiver = ToObject(receiver);
}
var result = new $Array();

21
deps/v8/src/assembler.cc

@ -834,8 +834,8 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
}
ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_array_start());
ExternalReference ExternalReference::roots_address(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_address());
}
@ -1137,23 +1137,6 @@ static int native_compare_doubles(double y, double x) {
}
bool EvalComparison(Token::Value op, double op1, double op2) {
ASSERT(Token::IsCompareOp(op));
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: return (op1 == op2);
case Token::NE: return (op1 != op2);
case Token::LT: return (op1 < op2);
case Token::GT: return (op1 > op2);
case Token::LTE: return (op1 <= op2);
case Token::GTE: return (op1 >= op2);
default:
UNREACHABLE();
return false;
}
}
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation, Isolate* isolate) {
typedef double BinaryFPOperation(double x, double y);

15
deps/v8/src/assembler.h

@ -279,17 +279,14 @@ class RelocInfo BASE_EMBEDDED {
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
INLINE(Address target_address());
INLINE(void set_target_address(Address target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(void set_target_address(Address target));
INLINE(Object* target_object());
INLINE(Handle<Object> target_object_handle(Assembler* origin));
INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(void set_target_object(Object* target));
INLINE(JSGlobalPropertyCell* target_cell());
INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
// Read the address of the word containing the target_address in an
@ -596,8 +593,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
// Static variable Heap::roots_array_start()
static ExternalReference roots_array_start(Isolate* isolate);
// Static variable Heap::roots_address()
static ExternalReference roots_address(Isolate* isolate);
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_limit(Isolate* isolate);
@ -850,8 +847,6 @@ static inline int NumberOfBitsSet(uint32_t x) {
return num_bits_set;
}
bool EvalComparison(Token::Value op, double op1, double op2);
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_double_int(double x, int y);
double power_double_double(double x, double y);

14
deps/v8/src/ast-inl.h

@ -111,18 +111,8 @@ ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
}
int FunctionLiteral::start_position() const {
return scope()->start_position();
}
int FunctionLiteral::end_position() const {
return scope()->end_position();
}
StrictModeFlag FunctionLiteral::strict_mode_flag() const {
return scope()->strict_mode_flag();
bool FunctionLiteral::strict_mode() const {
return scope()->is_strict_mode();
}

7
deps/v8/src/ast.cc

@ -66,6 +66,7 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
inside_with_(false),
is_trivial_(false),
position_(RelocInfo::kNoPosition) {
BindTo(var);
@ -75,11 +76,13 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
VariableProxy::VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
bool inside_with,
int position)
: Expression(isolate),
name_(name),
var_(NULL),
is_this_(is_this),
inside_with_(inside_with),
is_trivial_(false),
position_(position) {
// Names must be canonicalized for fast equality checks.
@ -465,7 +468,7 @@ bool FunctionLiteral::IsInlineable() const {
bool ThisFunction::IsInlineable() const {
return true;
return false;
}
@ -720,7 +723,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
holder_ = Handle<JSObject>::null();
}
while (true) {
LookupResult lookup(type->GetIsolate());
LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup);
// If the function wasn't found directly in the map, we start
// looking upwards through the prototype chain.

19
deps/v8/src/ast.h

@ -405,10 +405,7 @@ class Declaration: public AstNode {
mode_(mode),
fun_(fun),
scope_(scope) {
ASSERT(mode == VAR ||
mode == CONST ||
mode == CONST_HARMONY ||
mode == LET);
ASSERT(mode == VAR || mode == CONST || mode == LET);
// At the moment there are no "const functions"'s in JavaScript...
ASSERT(fun == NULL || mode == VAR || mode == LET);
}
@ -1131,6 +1128,7 @@ class VariableProxy: public Expression {
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
int position() const { return position_; }
void MarkAsTrivial() { is_trivial_ = true; }
@ -1142,12 +1140,14 @@ class VariableProxy: public Expression {
Handle<String> name_;
Variable* var_; // resolved variable, or NULL
bool is_this_;
bool inside_with_;
bool is_trivial_;
int position_;
VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
bool inside_with,
int position = RelocInfo::kNoPosition);
friend class Scope;
@ -1620,6 +1620,8 @@ class FunctionLiteral: public Expression {
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
int num_parameters,
int start_position,
int end_position,
Type type,
bool has_duplicate_parameters)
: Expression(isolate),
@ -1632,6 +1634,8 @@ class FunctionLiteral: public Expression {
has_only_simple_this_property_assignments),
this_property_assignments_(this_property_assignments),
num_parameters_(num_parameters),
start_position_(start_position),
end_position_(end_position),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(HEAP->empty_string()),
is_expression_(type != DECLARATION),
@ -1647,12 +1651,11 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
int function_token_position() const { return function_token_position_; }
int start_position() const;
int end_position() const;
int start_position() const { return start_position_; }
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
bool is_anonymous() const { return is_anonymous_; }
bool strict_mode() const { return strict_mode_flag() == kStrictMode; }
StrictModeFlag strict_mode_flag() const;
bool strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }

144
deps/v8/src/bootstrapper.cc

@ -38,7 +38,6 @@
#include "macro-assembler.h"
#include "natives.h"
#include "objects-visiting.h"
#include "platform.h"
#include "snapshot.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
@ -363,7 +362,6 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
if (is_ecma_native) {
function->shared()->set_instance_class_name(*symbol);
}
function->shared()->set_native(true);
return function;
}
@ -377,28 +375,26 @@ Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
DescriptorArray::WhitenessWitness witness(*descriptors);
{ // Add length.
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
descriptors->Set(0, &d, witness);
descriptors->Set(0, &d);
}
{ // Add name.
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
descriptors->Set(1, &d, witness);
descriptors->Set(1, &d);
}
{ // Add arguments.
Handle<Foreign> foreign =
factory()->NewForeign(&Accessors::FunctionArguments);
CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
descriptors->Set(2, &d, witness);
descriptors->Set(2, &d);
}
{ // Add caller.
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
descriptors->Set(3, &d, witness);
descriptors->Set(3, &d);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
@ -408,9 +404,9 @@ Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
Handle<Foreign> foreign =
factory()->NewForeign(&Accessors::FunctionPrototype);
CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
descriptors->Set(4, &d, witness);
descriptors->Set(4, &d);
}
descriptors->Sort(witness);
descriptors->Sort();
return descriptors;
}
@ -526,43 +522,41 @@ Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
? 4
: 5);
PropertyAttributes attributes = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
DescriptorArray::WhitenessWitness witness(*descriptors);
DONT_ENUM | DONT_DELETE | READ_ONLY);
{ // length
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
descriptors->Set(0, &d, witness);
descriptors->Set(0, &d);
}
{ // name
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
descriptors->Set(1, &d, witness);
descriptors->Set(1, &d);
}
{ // arguments
CallbacksDescriptor d(*factory()->arguments_symbol(),
*arguments,
attributes);
descriptors->Set(2, &d, witness);
descriptors->Set(2, &d);
}
{ // caller
CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
descriptors->Set(3, &d, witness);
descriptors->Set(3, &d);
}
// prototype
if (prototypeMode != DONT_ADD_PROTOTYPE) {
if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
attributes = static_cast<PropertyAttributes>(attributes | READ_ONLY);
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
}
Handle<Foreign> foreign =
factory()->NewForeign(&Accessors::FunctionPrototype);
CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
descriptors->Set(4, &d, witness);
descriptors->Set(4, &d);
}
descriptors->Sort(witness);
descriptors->Sort();
return descriptors;
}
@ -947,7 +941,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
ASSERT_EQ(0, initial_map->inobject_properties());
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
DescriptorArray::WhitenessWitness witness(*descriptors);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
int enum_index = 0;
@ -957,7 +950,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kSourceFieldIndex,
final,
enum_index++);
descriptors->Set(0, &field, witness);
descriptors->Set(0, &field);
}
{
// ECMA-262, section 15.10.7.2.
@ -965,7 +958,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kGlobalFieldIndex,
final,
enum_index++);
descriptors->Set(1, &field, witness);
descriptors->Set(1, &field);
}
{
// ECMA-262, section 15.10.7.3.
@ -973,7 +966,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kIgnoreCaseFieldIndex,
final,
enum_index++);
descriptors->Set(2, &field, witness);
descriptors->Set(2, &field);
}
{
// ECMA-262, section 15.10.7.4.
@ -981,7 +974,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kMultilineFieldIndex,
final,
enum_index++);
descriptors->Set(3, &field, witness);
descriptors->Set(3, &field);
}
{
// ECMA-262, section 15.10.7.5.
@ -991,10 +984,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kLastIndexFieldIndex,
writable,
enum_index++);
descriptors->Set(4, &field, witness);
descriptors->Set(4, &field);
}
descriptors->SetNextEnumerationIndex(enum_index);
descriptors->Sort(witness);
descriptors->Sort();
initial_map->set_inobject_properties(5);
initial_map->set_pre_allocated_property_fields(5);
@ -1072,7 +1065,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup(isolate);
LookupResult lookup;
result->LocalLookup(heap->callee_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
@ -1091,6 +1084,11 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // --- aliased_arguments_boilerplate_
Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
new_map->set_pre_allocated_property_fields(2);
Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
// Set up a well-formed parameter map to make assertions happy.
Handle<FixedArray> elements = factory->NewFixedArray(2);
elements->set_map(heap->non_strict_arguments_elements_map());
@ -1099,16 +1097,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
new_map->set_pre_allocated_property_fields(2);
Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
// Set elements kind after allocating the object because
// NewJSObjectFromMap assumes a fast elements map.
new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_elements(*elements);
Handle<Map> non_strict_arguments_elements_map =
factory->GetElementsTransitionMap(result,
NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_map(*non_strict_arguments_elements_map);
ASSERT(result->HasNonStrictArgumentsElements());
result->set_elements(*elements);
global_context()->set_aliased_arguments_boilerplate(*result);
}
@ -1131,20 +1125,19 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
DescriptorArray::WhitenessWitness witness(*descriptors);
{ // length
FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
descriptors->Set(0, &d, witness);
descriptors->Set(0, &d);
}
{ // callee
CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
descriptors->Set(1, &d, witness);
descriptors->Set(1, &d);
}
{ // caller
CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
descriptors->Set(2, &d, witness);
descriptors->Set(2, &d);
}
descriptors->Sort(witness);
descriptors->Sort();
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
@ -1169,7 +1162,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup(isolate);
LookupResult lookup;
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
@ -1228,14 +1221,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the data slot.
global_context()->set_data(heap->undefined_value());
{
// Initialize the random seed slot.
Handle<ByteArray> zeroed_byte_array(
factory->NewByteArray(kRandomStateSize));
global_context()->set_random_seed(*zeroed_byte_array);
memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
}
}
@ -1243,26 +1228,12 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
// TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
// longer need to live behind a flag, so functions get added to the snapshot.
if (FLAG_harmony_collections) {
{ // -- S e t
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
prototype, Builtins::kIllegal, true);
}
{ // -- M a p
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
prototype, Builtins::kIllegal, true);
}
{ // -- W e a k M a p
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
prototype, Builtins::kIllegal, true);
}
// longer need to live behind a flag, so WeakMap gets added to the snapshot.
if (FLAG_harmony_weakmaps) { // -- W e a k M a p
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
prototype, Builtins::kIllegal, true);
}
}
@ -1391,7 +1362,6 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
}
@ -1726,9 +1696,7 @@ bool Genesis::InstallNatives() {
Handle<DescriptorArray> reresult_descriptors =
factory()->NewDescriptorArray(3);
DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
reresult_descriptors->CopyFrom(0, *array_descriptors, 0, witness);
reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
int enum_index = 0;
{
@ -1736,7 +1704,7 @@ bool Genesis::InstallNatives() {
JSRegExpResult::kIndexIndex,
NONE,
enum_index++);
reresult_descriptors->Set(1, &index_field, witness);
reresult_descriptors->Set(1, &index_field);
}
{
@ -1744,9 +1712,9 @@ bool Genesis::InstallNatives() {
JSRegExpResult::kInputIndex,
NONE,
enum_index++);
reresult_descriptors->Set(2, &input_field, witness);
reresult_descriptors->Set(2, &input_field);
}
reresult_descriptors->Sort(witness);
reresult_descriptors->Sort();
initial_map->set_inobject_properties(2);
initial_map->set_pre_allocated_property_fields(2);
@ -1773,9 +1741,9 @@ bool Genesis::InstallExperimentalNatives() {
"native proxy.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_collections &&
if (FLAG_harmony_weakmaps &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native collection.js") == 0) {
"native weakmap.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
@ -2021,12 +1989,6 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
false);
ASSERT(isolate->has_pending_exception() != result);
if (!result) {
// We print out the name of the extension that fail to install.
// When an error is thrown during bootstrapping we automatically print
// the line number at which this happened to the console in the isolate
// error throwing functionality.
OS::PrintError("Error installing extension '%s'.\n",
current->extension()->name());
isolate->clear_pending_exception();
}
current->set_state(v8::INSTALLED);
@ -2046,9 +2008,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
builtins->set_javascript_builtin(id, *function);
Handle<SharedFunctionInfo> shared
= Handle<SharedFunctionInfo>(function->shared());
if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
return false;
}
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
// Set the code object on the function object.
function->ReplaceCode(function->shared()->code());
builtins->set_javascript_builtin_code(id, shared->code());
@ -2128,7 +2088,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
break;
}
case CALLBACKS: {
LookupResult result(isolate());
LookupResult result;
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
if (result.IsProperty()) continue;
@ -2166,7 +2126,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (properties->IsKey(raw_key)) {
ASSERT(raw_key->IsString());
// If the property is already there we skip it.
LookupResult result(isolate());
LookupResult result;
to->LocalLookup(String::cast(raw_key), &result);
if (result.IsProperty()) continue;
// Set the property.

8
deps/v8/src/builtins.cc

@ -1507,14 +1507,6 @@ static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
KeyedStoreIC::GenerateNonStrictArguments(masm);
}
static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
}
static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
Debug::GenerateLoadICDebugBreak(masm);

5
deps/v8/src/builtins.h

@ -167,10 +167,6 @@ enum BuiltinExtraArguments {
kStrictMode) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(TransitionElementsSmiToDouble, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(TransitionElementsDoubleToObject, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
@ -238,6 +234,7 @@ enum BuiltinExtraArguments {
V(DELETE, 2) \
V(IN, 1) \
V(INSTANCE_OF, 1) \
V(GET_KEYS, 0) \
V(FILTER_KEY, 1) \
V(CALL_NON_FUNCTION, 0) \
V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \

11
deps/v8/src/checks.h

@ -63,9 +63,7 @@ static inline void CheckHelper(const char* file,
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
#define CHECK(condition) do { \
if (!(condition)) CheckHelper(__FILE__, __LINE__, #condition, false); \
} while (0)
#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
// Helper function used by the CHECK_EQ function when given int
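Note on the two CHECK definitions above: the do { ... } while (0) form keeps the macro usable as a single statement while testing the condition inline, so the reporting helper is only called when the check fails; the one-line form always calls CheckHelper and lets it inspect the passed-in flag. A standalone sketch of the inline-test form (CheckHelper below is an assumed stand-in, not V8's):

#include <cstdio>
#include <cstdlib>

static void CheckHelper(const char* file, int line, const char* source, bool ok) {
  if (!ok) {
    std::fprintf(stderr, "%s:%d: CHECK(%s) failed\n", file, line, source);
    std::abort();
  }
}

#define CHECK(condition) do {                                              \
    if (!(condition)) CheckHelper(__FILE__, __LINE__, #condition, false);  \
  } while (0)

int main(int argc, char**) {
  // The do/while (0) wrapper keeps the macro a single statement, so it nests
  // in an unbraced if/else; the helper is only called when the check fails.
  if (argc > 0)
    CHECK(argc > 0);
  else
    CHECK(argc == 0);
  return 0;
}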
@ -259,8 +257,11 @@ template <int> class StaticAssertionHelper { };
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
extern bool FLAG_enable_slow_asserts;
namespace v8 { namespace internal {
bool EnableSlowAsserts();
} } // namespace v8::internal
// The ASSERT macro is equivalent to CHECK except that it only
// generates code in debug builds.
@ -272,7 +273,7 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)

25
deps/v8/src/code-stubs.cc

@ -415,29 +415,4 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
}
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
if (!FLAG_trace_elements_transitions) {
if (to_ == FAST_ELEMENTS) {
if (from_ == FAST_SMI_ONLY_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
} else if (from_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
} else {
UNREACHABLE();
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
FAST_ELEMENTS);
} else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_);
} else {
UNREACHABLE();
}
}
masm->bind(&fail);
KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
}
} } // namespace v8::internal

45
deps/v8/src/code-stubs.h

@ -30,7 +30,6 @@
#include "allocation.h"
#include "globals.h"
#include "codegen.h"
namespace v8 {
namespace internal {
@ -70,8 +69,7 @@ namespace internal {
V(KeyedLoadElement) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryLookup) \
V(ElementsTransitionAndStore)
V(StringDictionaryLookup)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@ -364,7 +362,6 @@ class FastCloneShallowArrayStub : public CodeStub {
enum Mode {
CLONE_ELEMENTS,
CLONE_DOUBLE_ELEMENTS,
COPY_ON_WRITE_ELEMENTS
};
@ -383,8 +380,8 @@ class FastCloneShallowArrayStub : public CodeStub {
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() {
ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2);
return length_ * 3 + mode_;
ASSERT(mode_ == 0 || mode_ == 1);
return (length_ << 1) | mode_;
}
};
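Note on the MinorKey hunk above: this is plain key packing. With three clone modes the mode needs length * 3 + mode, while two modes fit in a single low bit as (length << 1) | mode; either way each (length, mode) pair must map to a distinct stub key. A quick standalone check of both encodings (sketch, not V8 code):

#include <cassert>
#include <set>

int main() {
  std::set<int> keys3, keys2;
  for (int length = 0; length < 16; ++length) {
    for (int mode = 0; mode < 3; ++mode) {
      bool fresh3 = keys3.insert(length * 3 + mode).second;  // three modes: scale by 3
      assert(fresh3);
    }
    for (int mode = 0; mode < 2; ++mode) {
      bool fresh2 = keys2.insert((length << 1) | mode).second;  // two modes: one bit
      assert(fresh2);
    }
  }
  return 0;
}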
@ -1028,42 +1025,6 @@ class ToBooleanStub: public CodeStub {
Types types_;
};
class ElementsTransitionAndStoreStub : public CodeStub {
public:
ElementsTransitionAndStoreStub(ElementsKind from,
ElementsKind to,
bool is_jsarray,
StrictModeFlag strict_mode)
: from_(from),
to_(to),
is_jsarray_(is_jsarray),
strict_mode_(strict_mode) {}
private:
class FromBits: public BitField<ElementsKind, 0, 8> {};
class ToBits: public BitField<ElementsKind, 8, 8> {};
class IsJSArrayBits: public BitField<bool, 16, 8> {};
class StrictModeBits: public BitField<StrictModeFlag, 24, 8> {};
Major MajorKey() { return ElementsTransitionAndStore; }
int MinorKey() {
return FromBits::encode(from_) |
ToBits::encode(to_) |
IsJSArrayBits::encode(is_jsarray_) |
StrictModeBits::encode(strict_mode_);
}
void Generate(MacroAssembler* masm);
ElementsKind from_;
ElementsKind to_;
bool is_jsarray_;
StrictModeFlag strict_mode_;
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
};
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_

15
deps/v8/src/codegen.h

@ -81,19 +81,4 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#error Unsupported target architecture.
#endif
namespace v8 {
namespace internal {
class ElementsTransitionGenerator : public AllStatic {
public:
static void GenerateSmiOnlyToObject(MacroAssembler* masm);
static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
private:
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
} } // namespace v8::internal
#endif // V8_CODEGEN_H_

46
deps/v8/src/compiler.cc

@ -59,6 +59,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
script_(script),
extension_(NULL),
pre_parse_data_(NULL),
supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(NONOPT);
}
@ -73,6 +74,7 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
script_(Handle<Script>(Script::cast(shared_info->script()))),
extension_(NULL),
pre_parse_data_(NULL),
supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(BASE);
}
@ -88,6 +90,7 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
script_(Handle<Script>(Script::cast(shared_info_->script()))),
extension_(NULL),
pre_parse_data_(NULL),
supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(BASE);
}
@ -306,9 +309,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
static bool GenerateCode(CompilationInfo* info) {
return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
FullCodeGenerator::MakeCode(info) :
MakeCrankshaftCode(info);
return V8::UseCrankshaft() ?
MakeCrankshaftCode(info) :
FullCodeGenerator::MakeCode(info);
}
@ -477,22 +480,20 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// that would be compiled lazily anyway, so we skip the preparse step
// in that case too.
ScriptDataImpl* pre_data = input_pre_data;
int flags = kNoParsingFlags;
if ((natives == NATIVES_CODE) || FLAG_allow_natives_syntax) {
flags |= kAllowNativesSyntax;
}
if (natives != NATIVES_CODE && FLAG_harmony_scoping) {
flags |= kHarmonyScoping;
}
bool harmony_scoping = natives != NATIVES_CODE && FLAG_harmony_scoping;
if (pre_data == NULL
&& source_length >= FLAG_min_preparse_length) {
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream, extension, flags);
pre_data = ParserApi::PartialPreParse(&stream,
extension,
harmony_scoping);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream, extension, flags);
pre_data = ParserApi::PartialPreParse(&stream,
extension,
harmony_scoping);
}
}
@ -515,6 +516,9 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
if (natives == NATIVES_CODE) {
info.MarkAsAllowingNativesSyntax();
}
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
@ -558,7 +562,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
CompilationInfo info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetStrictModeFlag(strict_mode);
if (strict_mode == kStrictMode) info.MarkAsStrictMode();
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
@ -566,7 +570,6 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
// If caller is strict mode, the result must be strict as well,
// but not the other way around. Consider:
// eval("'use strict'; ...");
// TODO(keuchel): adapt this for extended mode.
ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
compilation_cache->PutEval(source, context, is_global, result);
}
@ -598,13 +601,10 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
HistogramTimerScope timer(isolate->counters()->compile_lazy());
// After parsing we know function's strict mode. Remember it.
StrictModeFlag strict_mode = info->function()->strict_mode_flag();
ASSERT(info->strict_mode_flag() == kNonStrictMode ||
info->strict_mode_flag() == strict_mode);
ASSERT(shared->strict_mode_flag() == kNonStrictMode ||
shared->strict_mode_flag() == strict_mode);
info->SetStrictModeFlag(strict_mode);
shared->set_strict_mode_flag(strict_mode);
if (info->function()->strict_mode()) {
shared->set_strict_mode(true);
info->MarkAsStrictMode();
}
// Compile the code.
if (!MakeCode(info)) {
@ -684,7 +684,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
CompilationInfo info(script);
info.SetFunction(literal);
info.SetScope(literal->scope());
info.SetStrictModeFlag(literal->scope()->strict_mode_flag());
if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
// Determine if the function can be lazily compiled. This is necessary to
@ -750,7 +750,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_strict_mode_flag(lit->strict_mode_flag());
function_info->set_strict_mode(lit->strict_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
}
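
The CompileEval hunk above keeps strict mode flowing in one direction only: a strict caller forces a strict result, while a sloppy caller can still end up with strict code when the eval source carries its own directive, which is why the assert is one-sided. A minimal sketch of that rule in plain C++ (CompileEvalSketch and its directive check are hypothetical, not V8 API):

#include <cassert>
#include <string>

enum StrictModeFlag { kNonStrictMode, kStrictMode };

// Toy model: the eval result is strict if either the caller is strict or the
// eval source itself starts with a 'use strict' directive.
StrictModeFlag CompileEvalSketch(StrictModeFlag caller_mode,
                                 const std::string& source) {
  bool has_directive = source.rfind("'use strict';", 0) == 0;
  return (caller_mode == kStrictMode || has_directive) ? kStrictMode
                                                       : kNonStrictMode;
}

int main() {
  // A strict caller always yields a strict result...
  assert(CompileEvalSketch(kStrictMode, "x = 1;") == kStrictMode);
  // ...but a sloppy caller may still get strict code from the directive,
  // so the reverse implication does not hold.
  assert(CompileEvalSketch(kNonStrictMode, "'use strict'; x = 1;") ==
         kStrictMode);
  assert(CompileEvalSketch(kNonStrictMode, "x = 1;") == kNonStrictMode);
  return 0;
}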

54
deps/v8/src/compiler.h

@ -52,10 +52,7 @@ class CompilationInfo BASE_EMBEDDED {
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
bool is_strict_mode() const { return strict_mode_flag() == kStrictMode; }
StrictModeFlag strict_mode_flag() const {
return StrictModeFlagField::decode(flags_);
}
bool is_strict_mode() const { return IsStrictMode::decode(flags_); }
bool is_in_loop() const { return IsInLoop::decode(flags_); }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@ -76,15 +73,22 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
void SetStrictModeFlag(StrictModeFlag strict_mode_flag) {
ASSERT(StrictModeFlagField::decode(flags_) == kNonStrictMode ||
StrictModeFlagField::decode(flags_) == strict_mode_flag);
flags_ = StrictModeFlagField::update(flags_, strict_mode_flag);
void MarkAsStrictMode() {
flags_ |= IsStrictMode::encode(true);
}
StrictModeFlag StrictMode() {
return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
void MarkAsInLoop() {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
}
void MarkAsAllowingNativesSyntax() {
flags_ |= IsNativesSyntaxAllowed::encode(true);
}
bool allows_natives_syntax() const {
return IsNativesSyntaxAllowed::decode(flags_);
}
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
@ -116,19 +120,6 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(IsOptimizing());
osr_ast_id_ = osr_ast_id;
}
void MarkCompilingForDebugging(Handle<Code> current_code) {
ASSERT(mode_ != OPTIMIZE);
ASSERT(current_code->kind() == Code::FUNCTION);
flags_ |= IsCompilingForDebugging::encode(true);
if (current_code->is_compiled_optimizable()) {
EnableDeoptimizationSupport();
} else {
mode_ = CompilationInfo::NONOPT;
}
}
bool IsCompilingForDebugging() {
return IsCompilingForDebugging::decode(flags_);
}
bool has_global_object() const {
return !closure().is_null() && (closure()->context()->global() != NULL);
@ -148,12 +139,10 @@ class CompilationInfo BASE_EMBEDDED {
void DisableOptimization();
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return SupportsDeoptimization::decode(flags_);
}
bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
void EnableDeoptimizationSupport() {
ASSERT(IsOptimizable());
flags_ |= SupportsDeoptimization::encode(true);
supports_deoptimization_ = true;
}
// Determine whether or not we can adaptively optimize.
@ -188,9 +177,8 @@ class CompilationInfo BASE_EMBEDDED {
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
if (!shared_info_.is_null()) {
ASSERT(strict_mode_flag() == kNonStrictMode);
SetStrictModeFlag(shared_info_->strict_mode_flag());
if (!shared_info_.is_null() && shared_info_->strict_mode()) {
MarkAsStrictMode();
}
}
@ -210,14 +198,11 @@ class CompilationInfo BASE_EMBEDDED {
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
class StrictModeFlagField: public BitField<StrictModeFlag, 4, 1> {};
class IsStrictMode: public BitField<bool, 4, 1> {};
// Native syntax (%-stuff) allowed?
class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
// Is this a function from our natives.
class IsNative: public BitField<bool, 6, 1> {};
// Is this code being compiled with support for deoptimization?
class SupportsDeoptimization: public BitField<bool, 7, 1> {};
// If compiling for debugging, produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
unsigned flags_;
@ -246,6 +231,7 @@ class CompilationInfo BASE_EMBEDDED {
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
bool supports_deoptimization_;
int osr_ast_id_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
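
Both sides of this header pack the per-compilation booleans (strict mode, natives syntax and so on) into the single flags_ word through V8's BitField helper. A simplified stand-in for that helper, assuming the usual shift-and-mask encoding rather than the real V8 template:

#include <cassert>

// Minimal BitField look-alike: a value of type T stored in `size` bits
// starting at `shift` inside an unsigned word.
template <class T, int shift, int size>
struct BitFieldSketch {
  static const unsigned kMask = ((1u << size) - 1u) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned word) {
    return static_cast<T>((word & kMask) >> shift);
  }
  static unsigned update(unsigned word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

// Flag layout mirroring the hunk above (bit positions are illustrative).
typedef BitFieldSketch<bool, 4, 1> IsStrictMode;
typedef BitFieldSketch<bool, 5, 1> IsNativesSyntaxAllowed;

int main() {
  unsigned flags = 0;
  flags |= IsStrictMode::encode(true);  // what MarkAsStrictMode() does
  assert(IsStrictMode::decode(flags));
  assert(!IsNativesSyntaxAllowed::decode(flags));
  flags = IsNativesSyntaxAllowed::update(flags, true);
  assert(IsStrictMode::decode(flags) && IsNativesSyntaxAllowed::decode(flags));
  return 0;
}

Packing into one word keeps CompilationInfo cheap to copy and pass around, at the cost of having to reserve a distinct bit index for every new flag.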

29
deps/v8/src/contexts.cc

@ -174,10 +174,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_CHECK_INITIALIZED;
break;
case CONST_HARMONY:
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_CHECK_INITIALIZED_HARMONY;
break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
@ -191,8 +187,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Check the slot corresponding to the intermediate context holding
// only the function name variable.
if (follow_context_chain && context->IsFunctionContext()) {
VariableMode mode;
int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
int function_index = scope_info->FunctionContextSlotIndex(*name);
if (function_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
@ -200,9 +195,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
*index = function_index;
*attributes = READ_ONLY;
ASSERT(mode == CONST || mode == CONST_HARMONY);
*binding_flags = (mode == CONST)
? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
*binding_flags = IMMUTABLE_IS_INITIALIZED;
return context;
}
}
@ -262,7 +255,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
if (param_index >= 0) return false;
// Check context only holding the function name variable.
index = scope_info->FunctionContextSlotIndex(*name, NULL);
index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) return false;
context = context->previous();
}
@ -273,7 +266,8 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
}
void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_non_strict_eval) {
void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
bool* outer_scope_calls_non_strict_eval) {
// Skip up the context chain checking all the function contexts to see
// whether they call eval.
Context* context = this;
@ -281,11 +275,14 @@ void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_non_strict_eval) {
if (context->IsFunctionContext()) {
Handle<SerializedScopeInfo> scope_info(
context->closure()->shared()->scope_info());
if (scope_info->CallsEval() && !scope_info->IsStrictMode()) {
// No need to go further since the answers will not change from
// here.
*outer_scope_calls_non_strict_eval = true;
return;
if (scope_info->CallsEval()) {
*outer_scope_calls_eval = true;
if (!scope_info->IsStrictMode()) {
// No need to go further since the answers will not change from
// here.
*outer_scope_calls_non_strict_eval = true;
return;
}
}
}
context = context->previous();
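
In both variants shown here, ComputeEvalScopeInfo walks the chain of function contexts outward and stops as soon as a non-strict eval has been seen, since nothing further out can change the answer. A small linked-list sketch of that early-exit walk (ScopeSketch is a made-up stand-in for Context):

#include <cassert>
#include <cstddef>

struct ScopeSketch {
  bool calls_eval;
  bool is_strict;
  const ScopeSketch* previous;  // outer scope, NULL at the top
};

// Walk outward; once a non-strict eval is found the flags can no longer
// change, so the loop returns immediately (mirroring the early return above).
void ComputeEvalScopeInfoSketch(const ScopeSketch* scope,
                                bool* outer_calls_eval,
                                bool* outer_calls_non_strict_eval) {
  for (const ScopeSketch* s = scope; s != NULL; s = s->previous) {
    if (s->calls_eval) {
      *outer_calls_eval = true;
      if (!s->is_strict) {
        *outer_calls_non_strict_eval = true;
        return;  // No need to go further.
      }
    }
  }
}

int main() {
  ScopeSketch top = { true, false, NULL };    // non-strict eval at the top
  ScopeSketch middle = { true, true, &top };  // strict eval in between
  ScopeSketch inner = { false, true, &middle };
  bool calls_eval = false;
  bool calls_non_strict_eval = false;
  ComputeEvalScopeInfoSketch(&inner, &calls_eval, &calls_non_strict_eval);
  assert(calls_eval && calls_non_strict_eval);
  return 0;
}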

53
deps/v8/src/contexts.h

@ -46,43 +46,24 @@ enum ContextLookupFlags {
// ES5 10.2 defines lexical environments with mutable and immutable bindings.
// Immutable bindings have two states, initialized and uninitialized, and
// their state is changed by the InitializeImmutableBinding method. The
// BindingFlags enum represents whether a binding has definitely been
// initialized. A mutable binding does not need to be checked and thus has
// the BindingFlag MUTABLE_IS_INITIALIZED.
//
// There are two possibilities for immutable bindings
// * 'const' declared variables. They are initialized when evaluating the
// corresponding declaration statement. They need to be checked for being
// initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
// * The function name of a named function literal. The binding is immediately
// initialized when entering the function and thus does not need to be
// checked. It gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
// Accessing an uninitialized binding produces the undefined value.
// their state is changed by the InitializeImmutableBinding method.
//
// The harmony proposal for block scoped bindings also introduces the
// uninitialized state for mutable bindings.
// * A 'let' declared variable. It is initialized when evaluating the
// corresponding declaration statement. It needs to be checked for being
// initialized and thus gets the flag MUTABLE_CHECK_INITIALIZED.
// * A 'var' declared variable. It is initialized immediately upon creation
// and thus doesn't need to be checked. It gets the flag
// MUTABLE_IS_INITIALIZED.
// * Catch bound variables, function parameters and variables introduced by
// function declarations are initialized immediately and do not need to be
// checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
// an uninitialized binding produces a reference error.
//
// In V8 uninitialized bindings are set to the hole value upon creation and set
// to a different value upon initialization.
// uninitialized state for mutable bindings. A 'let' declared variable
// is a mutable binding that is created uninitialized upon activation of its
// lexical environment and it is initialized when evaluating its declaration
// statement. Var declared variables are mutable bindings that are
// immediately initialized upon creation. The BindingFlags enum represents
// whether a binding has definitely been initialized. 'const' declared
// variables are created as uninitialized immutable bindings.
// In harmony mode accessing an uninitialized binding produces a reference
// error.
enum BindingFlags {
MUTABLE_IS_INITIALIZED,
MUTABLE_CHECK_INITIALIZED,
IMMUTABLE_IS_INITIALIZED,
IMMUTABLE_CHECK_INITIALIZED,
IMMUTABLE_IS_INITIALIZED_HARMONY,
IMMUTABLE_CHECK_INITIALIZED_HARMONY,
MISSING_BINDING
};
@ -157,9 +138,7 @@ enum BindingFlags {
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@ -215,8 +194,7 @@ class Context: public FixedArray {
PREVIOUS_INDEX,
// The extension slot is used for either the global object (in global
// contexts), eval extension object (function contexts), subject of with
// (with contexts), or the variable name (catch contexts), the serialized
// scope info (block contexts).
// (with contexts), or the variable name (catch contexts).
EXTENSION_INDEX,
GLOBAL_INDEX,
MIN_CONTEXT_SLOTS,
@ -280,8 +258,6 @@ class Context: public FixedArray {
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
PROXY_ENUMERATE,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@ -409,7 +385,8 @@ class Context: public FixedArray {
// Determine if any function scope in the context calls eval and if
// any of those calls are in non-strict mode.
void ComputeEvalScopeInfo(bool* outer_scope_calls_non_strict_eval);
void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
bool* outer_scope_calls_non_strict_eval);
// Code generation support.
static int SlotOffset(int index) {
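
The comment block above the enum describes bindings that start out holding the hole sentinel and only become ordinary values once initialized, with the effect of an early read depending on the binding flag. A toy model of that check, using an int-valued slot instead of V8 heap objects and an illustrative flag-to-behaviour mapping:

#include <cassert>
#include <stdexcept>

// Toy binding slot: holds either "the hole" (uninitialized) or an int value.
struct BindingSketch {
  bool is_hole;
  int value;
};

enum BindingFlagsSketch {
  MUTABLE_IS_INITIALIZED,      // e.g. 'var': never needs a check
  MUTABLE_CHECK_INITIALIZED,   // e.g. 'let': an early read is an error
  IMMUTABLE_CHECK_INITIALIZED  // e.g. old-style 'const': early read is undefined
};

// Initialized bindings skip the check entirely; the others either produce a
// placeholder value or throw, mirroring the comment above.
int LoadBinding(const BindingSketch& slot, BindingFlagsSketch flag) {
  if (flag == MUTABLE_IS_INITIALIZED || !slot.is_hole) return slot.value;
  if (flag == IMMUTABLE_CHECK_INITIALIZED) return 0;  // "undefined" stand-in
  throw std::runtime_error("ReferenceError: uninitialized binding");
}

int main() {
  BindingSketch uninitialized = { true, 0 };
  BindingSketch initialized = { false, 42 };
  assert(LoadBinding(initialized, MUTABLE_CHECK_INITIALIZED) == 42);
  assert(LoadBinding(uninitialized, IMMUTABLE_CHECK_INITIALIZED) == 0);
  bool threw = false;
  try {
    LoadBinding(uninitialized, MUTABLE_CHECK_INITIALIZED);
  } catch (const std::runtime_error&) {
    threw = true;
  }
  assert(threw);
  return 0;
}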

3
deps/v8/src/d8.cc

@ -178,8 +178,7 @@ bool Shell::ExecuteString(Handle<String> source,
// If all went well and the result wasn't undefined then print
// the returned value.
v8::String::Utf8Value str(result);
size_t count = fwrite(*str, sizeof(**str), str.length(), stdout);
(void) count; // Silence GCC-4.5.x "unused result" warning.
fwrite(*str, sizeof(**str), str.length(), stdout);
printf("\n");
}
return true;
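
The d8.cc hunk only adds or drops the cast-to-void idiom that keeps GCC's unused-result warning quiet around fwrite. For reference, the idiom on its own:

#include <cstdio>

int main() {
  const char message[] = "hello\n";
  // fwrite is declared warn_unused_result in some C libraries; assigning the
  // result and casting it to void silences -Wunused-result without changing
  // behaviour.
  std::size_t count = fwrite(message, sizeof(char), sizeof(message) - 1, stdout);
  (void) count;
  return 0;
}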

242
deps/v8/src/debug.cc

@ -87,13 +87,19 @@ static void PrintLn(v8::Local<v8::Value> value) {
static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
Isolate* isolate = Isolate::Current();
return isolate->stub_cache()->ComputeCallDebugBreak(argc, kind);
CALL_HEAP_FUNCTION(
isolate,
isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
Code);
}
static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
Isolate* isolate = Isolate::Current();
return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
CALL_HEAP_FUNCTION(
isolate,
isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
Code);
}
@ -1721,203 +1727,50 @@ void Debug::ClearStepNext() {
}
// Helper function to compile full code for debugging. This code will
// have debug break slots and deoptimization
// information. Deoptimization information is required in case an
// optimized version of this function is still activated on the
// stack. It also makes sure that the full code is compiled with
// the same flags as the previous version - that is, flags which can
// change the generated code. The current method of mapping from
// already compiled full code without debug break slots to full code
// with debug break slots depends on the generated code being otherwise
// exactly the same.
static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
Handle<Code> current_code) {
ASSERT(!current_code->has_debug_break_slots());
CompilationInfo info(shared);
info.MarkCompilingForDebugging(current_code);
ASSERT(!info.shared_info()->is_compiled());
ASSERT(!info.isolate()->has_pending_exception());
// Use compile lazy which will end up compiling the full code in the
// configuration set up above.
bool result = Compiler::CompileLazy(&info);
ASSERT(result != Isolate::Current()->has_pending_exception());
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
Handle<Code> new_code(shared->code());
ASSERT(new_code->has_debug_break_slots());
ASSERT(current_code->is_compiled_optimizable() ==
new_code->is_compiled_optimizable());
ASSERT(current_code->instruction_size() <= new_code->instruction_size());
}
#endif
return result;
}
void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
// Keep the list of activated functions in a handlified list as it
// is used both in GC and non-GC code.
List<Handle<JSFunction> > active_functions(100);
{
// We are going to iterate the heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
// Ensure no GC in this scope as we are comparing raw pointer
// values and performing a heap iteration.
AssertNoAllocation no_allocation;
// Find all non-optimized code functions with activation frames on
// the stack.
for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->function()->IsJSFunction()) {
JSFunction* function = JSFunction::cast(frame->function());
if (function->code()->kind() == Code::FUNCTION &&
!function->code()->has_debug_break_slots())
active_functions.Add(Handle<JSFunction>(function));
}
}
// Sort the functions on the object pointer value to prepare for
// the binary search below.
active_functions.Sort(HandleObjectPointerCompare<JSFunction>);
// Scan the heap for all non-optimized functions which have no
// debug break slots.
HeapIterator iterator;
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
if (function->shared()->allows_lazy_compilation() &&
function->shared()->script()->IsScript() &&
function->code()->kind() == Code::FUNCTION &&
!function->code()->has_debug_break_slots()) {
bool has_activation =
SortedListBSearch<Handle<JSFunction> >(
active_functions,
Handle<JSFunction>(function),
HandleObjectPointerCompare<JSFunction>) != -1;
if (!has_activation) {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
}
}
}
// We are going to iterate the heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
AssertNoAllocation no_allocation;
Builtins* builtins = isolate_->builtins();
Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile);
// Find all non-optimized code functions with activation frames on
// the stack.
List<JSFunction*> active_functions(100);
for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->function()->IsJSFunction()) {
JSFunction* function = JSFunction::cast(frame->function());
if (function->code()->kind() == Code::FUNCTION)
active_functions.Add(function);
}
}
// Now that the non-GC scope has been left, the sorting of the functions
// in active_functions is no longer ensured. The code below does
// not rely on it.
// Now recompile all functions with activation frames and
// patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it.
if (shared->is_toplevel() ||
!shared->allows_lazy_compilation() ||
shared->code()->kind() == Code::BUILTIN) {
continue;
}
// Make sure that the shared full code is compiled with debug
// break slots.
Handle<Code> current_code(function->code());
if (shared->code()->has_debug_break_slots()) {
// If the code is already recompiled to have break slots, skip
// recompilation.
ASSERT(!function->code()->has_debug_break_slots());
} else {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
ASSERT(shared->code() == *current_code);
ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
CompileFullCodeForDebugging(shared, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
if (!shared->is_compiled()) {
shared->set_code(*current_code);
continue;
}
}
Handle<Code> new_code(shared->code());
// Find the function and patch return address.
for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
// If the current frame is for this function in its
// non-optimized form rewrite the return address to continue
// in the newly compiled full code with debug break slots.
if (frame->function()->IsJSFunction() &&
frame->function() == *function &&
frame->LookupCode()->kind() == Code::FUNCTION) {
intptr_t delta = frame->pc() - current_code->instruction_start();
int debug_break_slot_count = 0;
int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
// Check if the pc in the new code with debug break
// slots is before this slot.
RelocInfo* info = it.rinfo();
int debug_break_slot_bytes =
debug_break_slot_count * Assembler::kDebugBreakSlotLength;
intptr_t new_delta =
info->pc() -
new_code->instruction_start() -
debug_break_slot_bytes;
if (new_delta > delta) {
break;
}
// Passed a debug break slot in the full code with debug
// break slots.
debug_break_slot_count++;
}
int debug_break_slot_bytes =
debug_break_slot_count * Assembler::kDebugBreakSlotLength;
if (FLAG_trace_deopt) {
PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
"with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
"for debugging, "
"changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
reinterpret_cast<intptr_t>(
current_code->instruction_start()),
reinterpret_cast<intptr_t>(
current_code->instruction_start()) +
current_code->instruction_size(),
current_code->instruction_size(),
reinterpret_cast<intptr_t>(new_code->instruction_start()),
reinterpret_cast<intptr_t>(new_code->instruction_start()) +
new_code->instruction_size(),
new_code->instruction_size(),
reinterpret_cast<intptr_t>(frame->pc()),
reinterpret_cast<intptr_t>(new_code->instruction_start()) +
delta + debug_break_slot_bytes);
active_functions.Sort();
// Scan the heap for all non-optimized functions which have no
// debug break slots.
HeapIterator iterator;
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
if (function->shared()->allows_lazy_compilation() &&
function->shared()->script()->IsScript() &&
function->code()->kind() == Code::FUNCTION &&
!function->code()->has_debug_break_slots()) {
bool has_activation =
SortedListBSearch<JSFunction*>(active_functions, function) != -1;
if (!has_activation) {
function->set_code(lazy_compile);
function->shared()->set_code(lazy_compile);
}
// Patch the return address to return into the code with
// debug break slots.
frame->set_pc(
new_code->instruction_start() + delta + debug_break_slot_bytes);
}
}
}
@ -1934,9 +1787,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
}
// Ensure shared is compiled. Return false if this failed.
if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
return false;
}
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
// Create the debug info object.
Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
@ -2226,7 +2077,6 @@ Debugger::Debugger(Isolate* isolate)
compiling_natives_(false),
is_loading_debugger_(false),
never_unload_debugger_(false),
force_debugger_active_(false),
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
@ -2994,9 +2844,7 @@ void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
bool Debugger::IsDebuggerActive() {
ScopedLock with(debugger_access_);
return message_handler_ != NULL ||
!event_listener_.is_null() ||
force_debugger_active_;
return message_handler_ != NULL || !event_listener_.is_null();
}
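
The heart of the removed PrepareForBreakPoints code is the return-address patching loop: the old pc's offset into the function (delta) is carried over to the recompiled code, shifted by one Assembler::kDebugBreakSlotLength for every debug break slot emitted before that point, which is only valid because the two code objects are otherwise generated identically. A standalone sketch of that offset translation over plain integers (the slot length and offsets below are made up):

#include <cassert>
#include <cstddef>
#include <vector>

// Translate an offset into the old code (no debug break slots) into the
// equivalent offset in the new code, where slot_offsets holds the positions
// of the debug break slots in the new code, in ascending order.
int TranslatePc(int delta, const std::vector<int>& slot_offsets,
                int slot_length) {
  int slots_before = 0;
  for (std::size_t i = 0; i < slot_offsets.size(); ++i) {
    // Strip the slots counted so far to compare against the old-code offset.
    int adjusted = slot_offsets[i] - slots_before * slot_length;
    if (adjusted > delta) break;
    ++slots_before;
  }
  return delta + slots_before * slot_length;
}

int main() {
  const int kSlotLength = 4;  // stand-in for Assembler::kDebugBreakSlotLength
  std::vector<int> slots;
  slots.push_back(8);         // new-code offsets of the debug break slots
  slots.push_back(20);
  // An old pc at offset 10 lies past the first slot (old offset 8) but before
  // the second (old offset 16), so it shifts by exactly one slot length.
  assert(TranslatePc(10, slots, kSlotLength) == 14);
  // An old pc before the first slot is unchanged.
  assert(TranslatePc(4, slots, kSlotLength) == 4);
  return 0;
}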

7
deps/v8/src/debug.h

@ -810,15 +810,11 @@ class Debugger {
}
void set_compiling_natives(bool compiling_natives) {
compiling_natives_ = compiling_natives;
Debugger::compiling_natives_ = compiling_natives;
}
bool compiling_natives() const { return compiling_natives_; }
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
void set_force_debugger_active(bool force_debugger_active) {
force_debugger_active_ = force_debugger_active;
}
bool force_debugger_active() const { return force_debugger_active_; }
bool IsDebuggerActive();
@ -844,7 +840,6 @@ class Debugger {
bool compiling_natives_; // Are we compiling natives?
bool is_loading_debugger_; // Are we loading the debugger?
bool never_unload_debugger_; // Can we unload the debugger?
bool force_debugger_active_; // Activate debugger without event listeners.
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;

9
deps/v8/src/deoptimizer.cc

@ -882,12 +882,10 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
output_offset,
input_value,
*input_offset);
reinterpret_cast<Object*>(input_value)->ShortPrint();
PrintF("\n");
}
output->SetFrameSlot(output_offset, input_value);
break;
@ -1009,10 +1007,7 @@ void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
RevertStackCheckCodeAt(unoptimized_code,
pc_after,
check_code,
replacement_code);
RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
stack_check_cursor += 2 * kIntSize;
}
}

3
deps/v8/src/deoptimizer.h

@ -186,8 +186,7 @@ class Deoptimizer : public Malloced {
// Change all patched stack guard checks in the unoptimized code
// back to a normal stack guard check.
static void RevertStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
static void RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code);

53
deps/v8/src/factory.cc

@ -59,13 +59,13 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
}
Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
PretenureFlag pretenure) {
Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
FixedDoubleArray);
FixedArray);
}
@ -85,14 +85,6 @@ Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
}
Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
ObjectHashSet::Allocate(at_least_space_for),
ObjectHashSet);
}
Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
@ -479,12 +471,6 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
}
Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
}
Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
@ -511,20 +497,16 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
pretenure);
result->set_context(*context);
if (!function_info->bound()) {
int number_of_literals = function_info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating
// object, regexp and array literals in this function.
literals->set(JSFunction::kLiteralGlobalContextIndex,
context->global_context());
}
result->set_literals(*literals);
} else {
result->set_function_bindings(isolate()->heap()->empty_fixed_array());
int number_of_literals = function_info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating
// object, regexp and array literals in this function.
literals->set(JSFunction::kLiteralGlobalContextIndex,
context->global_context());
}
result->set_literals(*literals);
result->set_next_function_link(isolate()->heap()->undefined_value());
if (V8::UseCrankshaft() &&
@ -839,13 +821,10 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
// Number of descriptors added to the result so far.
int descriptor_count = 0;
// Ensure that marking will not progress and change color of objects.
DescriptorArray::WhitenessWitness witness(*result);
// Copy the descriptors from the array.
for (int i = 0; i < array->number_of_descriptors(); i++) {
if (array->GetType(i) != NULL_DESCRIPTOR) {
result->CopyFrom(descriptor_count++, *array, i, witness);
result->CopyFrom(descriptor_count++, *array, i);
}
}
@ -865,7 +844,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
if (result->LinearSearch(*key, descriptor_count) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
result->Set(descriptor_count, &desc, witness);
result->Set(descriptor_count, &desc);
descriptor_count++;
} else {
duplicates++;
@ -879,13 +858,13 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
Handle<DescriptorArray> new_result =
NewDescriptorArray(number_of_descriptors);
for (int i = 0; i < number_of_descriptors; i++) {
new_result->CopyFrom(i, *result, i, witness);
new_result->CopyFrom(i, *result, i);
}
result = new_result;
}
// Sort the result before returning.
result->Sort(witness);
result->Sort();
return result;
}
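
CopyAppendCallbackDescriptors above copies the existing descriptors, appends each callback entry only when a linear search finds no descriptor with the same name, allocates a tighter array if duplicates were skipped, and sorts at the end. The same control flow sketched with std::vector and strings rather than DescriptorArray:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Append `extra` to `existing`, skipping names already present and sorting
// the merged result.
std::vector<std::string> CopyAppendSketch(
    const std::vector<std::string>& existing,
    const std::vector<std::string>& extra) {
  std::vector<std::string> result(existing);
  for (std::size_t i = 0; i < extra.size(); ++i) {
    // Linear search over what has been copied so far, in the spirit of
    // DescriptorArray::LinearSearch.
    if (std::find(result.begin(), result.end(), extra[i]) == result.end()) {
      result.push_back(extra[i]);
    }
  }
  // With std::vector the "reallocate to drop the duplicate slots" step is
  // implicit; the second DescriptorArray in the hunk plays that role.
  std::sort(result.begin(), result.end());
  return result;
}

int main() {
  std::vector<std::string> existing;
  existing.push_back("length");
  existing.push_back("name");
  std::vector<std::string> extra;
  extra.push_back("name");    // duplicate, must be skipped
  extra.push_back("caller");
  std::vector<std::string> merged = CopyAppendSketch(existing, extra);
  assert(merged.size() == 3);
  assert(std::count(merged.begin(), merged.end(), "name") == 1);
  return 0;
}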

7
deps/v8/src/factory.h

@ -50,7 +50,7 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
// Allocate a new uninitialized fixed double array.
Handle<FixedDoubleArray> NewFixedDoubleArray(
Handle<FixedArray> NewFixedDoubleArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
@ -58,8 +58,6 @@ class Factory {
Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
@ -224,9 +222,6 @@ class Factory {
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
// Numbers (eg, literals) are pretenured by the parser.
Handle<Object> NewNumber(double value,
PretenureFlag pretenure = NOT_TENURED);

8
deps/v8/src/flag-definitions.h

@ -100,8 +100,7 @@ private:
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
DEFINE_bool(harmony, false, "enable all harmony features")
// Flags for experimental implementation features.
@ -187,8 +186,6 @@ DEFINE_bool(expose_gc, false, "expose gc extension")
DEFINE_bool(expose_externalize_string, false,
"expose externalize string extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_bool(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
@ -530,9 +527,6 @@ DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
#define FLAG FLAG_READONLY
#endif
// elements.cc
DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
// code-stubs.cc
DEFINE_bool(print_code_stubs, false, "print code stubs")

63
deps/v8/src/frames.cc

@ -711,69 +711,6 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
}
void JavaScriptFrame::PrintTop(FILE* file,
bool print_args,
bool print_line_number) {
// constructor calls
HandleScope scope;
AssertNoAllocation no_allocation;
JavaScriptFrameIterator it;
while (!it.done()) {
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
// function name
Object* fun = frame->function();
if (fun->IsJSFunction()) {
SharedFunctionInfo* shared = JSFunction::cast(fun)->shared();
shared->DebugName()->ShortPrint(file);
if (print_line_number) {
Address pc = frame->pc();
Code* code = Code::cast(
v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
int source_pos = code->SourcePosition(pc);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
Handle<Script> script(Script::cast(maybe_script));
int line = GetScriptLineNumberSafe(script, source_pos) + 1;
Object* script_name_raw = script->name();
if (script_name_raw->IsString()) {
String* script_name = String::cast(script->name());
SmartArrayPointer<char> c_script_name =
script_name->ToCString(DISALLOW_NULLS,
ROBUST_STRING_TRAVERSAL);
PrintF(file, " at %s:%d", *c_script_name, line);
} else {
PrintF(file, "at <unknown>:%d", line);
}
} else {
PrintF(file, " at <unknown>:<unknown>");
}
}
} else {
fun->ShortPrint(file);
}
if (print_args) {
// function arguments
// (we are intentionally only printing the actually
// supplied parameters, not all parameters required)
PrintF(file, "(this=");
frame->receiver()->ShortPrint(file);
const int length = frame->ComputeParametersCount();
for (int i = 0; i < length; i++) {
PrintF(file, ", ");
frame->GetParameter(i)->ShortPrint(file);
}
PrintF(file, ")");
}
break;
}
it.Advance();
}
}
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
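
The deleted PrintTop body resolves a frame's pc to a source position and then to a line number through GetScriptLineNumberSafe. The usual way to map a character position to a line is a binary search over line-end offsets; a self-contained sketch of that idea (the real code works over the script's precomputed line ends):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Collect the end offset of every line, then map a character position to a
// 1-based line number with a binary search.
int LineFromPosition(const std::string& source, int position) {
  std::vector<int> line_ends;
  for (int i = 0; i < static_cast<int>(source.size()); ++i) {
    if (source[i] == '\n') line_ends.push_back(i);
  }
  line_ends.push_back(static_cast<int>(source.size()));
  std::vector<int>::iterator it =
      std::lower_bound(line_ends.begin(), line_ends.end(), position);
  return static_cast<int>(it - line_ends.begin()) + 1;
}

int main() {
  std::string source = "var a = 1;\nfunction f() {\n  return a;\n}\n";
  assert(LineFromPosition(source, 0) == 1);   // start of line 1
  assert(LineFromPosition(source, 12) == 2);  // inside "function f() {"
  assert(LineFromPosition(source, 28) == 3);  // inside "  return a;"
  return 0;
}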

2
deps/v8/src/frames.h

@ -512,8 +512,6 @@ class JavaScriptFrame: public StandardFrame {
return static_cast<JavaScriptFrame*>(frame);
}
static void PrintTop(FILE* file, bool print_args, bool print_line_number);
protected:
inline explicit JavaScriptFrame(StackFrameIterator* iterator);

16
deps/v8/src/full-codegen.cc

@ -289,12 +289,11 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
code->set_has_debug_break_slots(
info->isolate()->debugger()->IsDebuggerActive());
code->set_compiled_optimizable(info->IsOptimizable());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
info->SetCode(code); // may be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
@ -521,8 +520,8 @@ void FullCodeGenerator::VisitDeclarations(
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
if (decl->fun() == NULL) {
if (var->binding_needs_init()) {
// In case this binding needs initialization, use the hole.
if (var->mode() == CONST) {
// In case this is a const property, use the hole.
array->set_the_hole(j++);
} else {
array->set_undefined(j++);
@ -547,10 +546,11 @@ void FullCodeGenerator::VisitDeclarations(
int FullCodeGenerator::DeclareGlobalsFlags() {
ASSERT(DeclareGlobalsStrictModeFlag::is_valid(strict_mode_flag()));
return DeclareGlobalsEvalFlag::encode(is_eval()) |
DeclareGlobalsStrictModeFlag::encode(strict_mode_flag()) |
DeclareGlobalsNativeFlag::encode(is_native());
int flags = 0;
if (is_eval()) flags |= kDeclareGlobalsEvalFlag;
if (is_strict_mode()) flags |= kDeclareGlobalsStrictModeFlag;
if (is_native()) flags |= kDeclareGlobalsNativeFlag;
return flags;
}

6
deps/v8/src/full-codegen.h

@ -577,11 +577,9 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
bool is_strict_mode() {
return strict_mode_flag() == kStrictMode;
}
bool is_strict_mode() { return function()->strict_mode(); }
StrictModeFlag strict_mode_flag() {
return function()->strict_mode_flag();
return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }

3
deps/v8/src/globals.h

@ -230,9 +230,6 @@ const int kPointerSize = sizeof(void*); // NOLINT
const int kDoubleSizeLog2 = 3;
// Size of the state of the random number generator.
const int kRandomStateSize = 2 * kIntSize;
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);

114
deps/v8/src/handles.cc

@ -376,6 +376,24 @@ Handle<Object> GetProperty(Handle<Object> obj,
}
Handle<Object> GetProperty(Handle<JSReceiver> obj,
Handle<String> name,
LookupResult* result) {
PropertyAttributes attributes;
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(isolate,
obj->GetProperty(*obj, result, *name, &attributes),
Object);
}
Handle<Object> GetElement(Handle<Object> obj,
uint32_t index) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
}
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@ -486,14 +504,6 @@ Handle<Object> SetOwnElement(Handle<JSObject> object,
}
Handle<Object> TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->TransitionElementsKind(to_kind),
Object);
}
Handle<JSObject> Copy(Handle<JSObject> obj) {
Isolate* isolate = obj->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
@ -691,7 +701,7 @@ void CustomArguments::IterateInstance(ObjectVisitor* v) {
// Compute the property keys from the interceptor.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
@ -713,7 +723,7 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
// Compute the element keys from the interceptor.
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
@ -744,9 +754,8 @@ static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
}
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
KeyCollectionType type,
bool* threw) {
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type) {
USE(ContainsOnlyValidKeys);
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
@ -761,16 +770,6 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
for (Handle<Object> p = object;
*p != isolate->heap()->null_value();
p = Handle<Object>(p->GetPrototype(), isolate)) {
if (p->IsJSProxy()) {
Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
Handle<Object> args[] = { proxy };
Handle<Object> names = Execution::Call(
isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
if (*threw) return content;
content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
break;
}
Handle<JSObject> current(JSObject::cast(*p), isolate);
// Check access rights if required.
@ -837,11 +836,11 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
}
Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
isolate->counters()->for_in()->Increment();
Handle<FixedArray> elements =
GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw);
Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
INCLUDE_PROTOS);
return isolate->factory()->NewJSArrayWithElements(elements);
}
@ -891,29 +890,62 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
Handle<Object> key) {
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<JSReceiver> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(table->GetIsolate(),
table->Add(*key),
ObjectHashSet);
table->Put(*key, *value),
ObjectHashTable);
}
Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
Handle<Object> key) {
CALL_HEAP_FUNCTION(table->GetIsolate(),
table->Remove(*key),
ObjectHashSet);
bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
return shared->is_compiled() || CompileLazyShared(shared, flag);
}
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(table->GetIsolate(),
table->Put(*key, *value),
ObjectHashTable);
static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
ASSERT(!info->isolate()->has_pending_exception());
bool result = Compiler::CompileLazy(info);
ASSERT(result != Isolate::Current()->has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) {
info->isolate()->clear_pending_exception();
}
return result;
}
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
CompilationInfo info(shared);
return CompileLazyHelper(&info, flag);
}
bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
bool result = true;
if (function->shared()->is_compiled()) {
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
} else {
CompilationInfo info(function);
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
}
return result;
}
bool CompileOptimized(Handle<JSFunction> function,
int osr_ast_id,
ClearExceptionFlag flag) {
CompilationInfo info(function);
info.SetOptimizing(osr_ast_id);
return CompileLazyHelper(&info, flag);
}
} } // namespace v8::internal
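
CompileLazyHelper funnels the lazy-compilation entry points above through one place where ClearExceptionFlag decides whether a failed compile leaves the pending exception for the caller or clears it. A condensed sketch of that pattern with a fake isolate (hypothetical types, not the V8 API):

#include <cassert>

enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };

// Stand-in for the isolate's pending-exception slot.
struct IsolateSketch {
  bool has_pending_exception;
};

// Pretend compiler: fails and raises an exception when should_fail is set.
bool CompileSketch(IsolateSketch* isolate, bool should_fail) {
  if (should_fail) {
    isolate->has_pending_exception = true;
    return false;
  }
  return true;
}

// Mirrors CompileLazyHelper: on failure, the flag decides whether the
// exception is swallowed here or left pending for the caller to handle.
bool CompileLazyHelperSketch(IsolateSketch* isolate, bool should_fail,
                             ClearExceptionFlag flag) {
  bool result = CompileSketch(isolate, should_fail);
  assert(result != isolate->has_pending_exception);
  if (!result && flag == CLEAR_EXCEPTION) {
    isolate->has_pending_exception = false;
  }
  return result;
}

int main() {
  IsolateSketch isolate = { false };
  bool ok = CompileLazyHelperSketch(&isolate, false, KEEP_EXCEPTION);
  assert(ok);
  ok = CompileLazyHelperSketch(&isolate, true, CLEAR_EXCEPTION);
  assert(!ok && !isolate.has_pending_exception);  // failure swallowed here
  ok = CompileLazyHelperSketch(&isolate, true, KEEP_EXCEPTION);
  assert(!ok && isolate.has_pending_exception);   // left for the caller
  (void) ok;
  return 0;
}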

46
deps/v8/src/handles.h

@ -240,15 +240,20 @@ Handle<Object> SetOwnElement(Handle<JSObject> object,
Handle<Object> value,
StrictModeFlag strict_mode);
Handle<Object> TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind);
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key);
Handle<Object> GetProperty(Handle<JSReceiver> obj,
Handle<String> name,
LookupResult* result);
Handle<Object> GetElement(Handle<Object> obj,
uint32_t index);
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@ -295,19 +300,18 @@ int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object);
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object);
enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
// Computes the enumerable keys for a JSObject. Used for implementing
// "for (n in object) { }".
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
KeyCollectionType type,
bool* threw);
Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type);
Handle<JSArray> GetKeysFor(Handle<JSObject> object);
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result);
@ -342,16 +346,26 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> PreventExtensions(Handle<JSObject> object);
Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
Handle<Object> key);
Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
Handle<Object> key);
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<JSReceiver> key,
Handle<Object> value);
// Does lazy compilation of the given function. Returns true on success and
// false if the compilation resulted in a stack overflow.
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileOptimized(Handle<JSFunction> function,
int osr_ast_id,
ClearExceptionFlag flag);
class NoHandleAllocation BASE_EMBEDDED {
public:
#ifndef DEBUG

5
deps/v8/src/heap-inl.h

@ -359,6 +359,7 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
byte_size / kPointerSize);
@ -590,9 +591,7 @@ void ExternalStringTable::AddOldString(String* string) {
void ExternalStringTable::ShrinkNewStrings(int position) {
new_space_strings_.Rewind(position);
if (FLAG_verify_heap) {
Verify();
}
Verify();
}

135
deps/v8/src/heap.cc

@ -693,9 +693,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PROFILE(isolate_, CodeMovingGCEvent());
}
if (FLAG_verify_heap) {
VerifySymbolTable();
}
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@ -791,9 +789,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_epilogue_callback_();
}
if (FLAG_verify_heap) {
VerifySymbolTable();
}
VerifySymbolTable();
return next_gc_likely_to_collect_more;
}
@ -987,7 +983,7 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif
gc_state_ = SCAVENGE;
@ -1116,9 +1112,7 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
if (FLAG_verify_heap) {
external_string_table_.Verify();
}
external_string_table_.Verify();
if (external_string_table_.new_space_strings_.is_empty()) return;
@ -1449,9 +1443,9 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object,
int object_size) {
SLOW_ASSERT((size_restriction != SMALL) ||
(object_size <= Page::kMaxHeapObjectSize));
SLOW_ASSERT(object->Size() == object_size);
ASSERT((size_restriction != SMALL) ||
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
Heap* heap = map->GetHeap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
@ -1684,9 +1678,9 @@ void Heap::SelectScavengingVisitorsTable() {
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
SLOW_ASSERT(HEAP->InFromSpace(object));
ASSERT(HEAP->InFromSpace(object));
MapWord first_word = object->map_word();
SLOW_ASSERT(!first_word.IsForwardingAddress());
ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
map->GetHeap()->DoScavengeObject(map, p, object);
}
@ -2916,9 +2910,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
ASSERT(buffer->IsFlat());
#if DEBUG
if (FLAG_verify_heap) {
buffer->StringVerify();
}
buffer->StringVerify();
#endif
Object* result;
@ -3164,9 +3156,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->CopyFrom(desc);
#ifdef DEBUG
if (FLAG_verify_heap) {
code->Verify();
}
code->Verify();
#endif
return code;
}
@ -3246,9 +3236,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
new_code->Relocate(new_addr - old_addr);
#ifdef DEBUG
if (FLAG_verify_heap) {
code->Verify();
}
code->Verify();
#endif
return new_code;
}
@ -3281,7 +3269,7 @@ void Heap::InitializeFunction(JSFunction* function,
function->set_code(shared->code());
function->set_prototype_or_initial_map(prototype);
function->set_context(undefined_value());
function->set_literals_or_bindings(empty_fixed_array());
function->set_literals(empty_fixed_array());
function->set_next_function_link(undefined_value());
}
@ -3446,22 +3434,22 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// Inline constructor can only handle inobject properties.
fun->shared()->ForbidInlineConstructor();
} else {
DescriptorArray* descriptors;
Object* descriptors_obj;
{ MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
return maybe_descriptors_obj;
}
}
DescriptorArray::WhitenessWitness witness(descriptors);
DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsSymbol());
FieldDescriptor field(name, i, NONE);
field.SetEnumerationIndex(i);
descriptors->Set(i, &field, witness);
descriptors->Set(i, &field);
}
descriptors->SetNextEnumerationIndex(count);
descriptors->SortUnchecked(witness);
descriptors->SortUnchecked();
// The descriptors may contain duplicates because the compiler does not
// guarantee the uniqueness of property names (it would have required
@ -3700,15 +3688,13 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
MaybeObject* Heap::CopyJSObject(JSObject* source) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
ASSERT(!source->IsJSFunction());
// Make the clone.
Map* map = source->map();
int object_size = map->instance_size();
Object* clone;
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
// If we're forced to always allocate, we use the general allocation
// functions which may leave us with an object in old space.
if (always_allocate()) {
@ -3725,11 +3711,10 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
JSObject::kHeaderSize,
(object_size - JSObject::kHeaderSize) / kPointerSize);
} else {
wb_mode = SKIP_WRITE_BARRIER;
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
SLOW_ASSERT(InNewSpace(clone));
ASSERT(InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
CopyBlock(HeapObject::cast(clone)->address(),
@ -3737,8 +3722,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
object_size);
}
SLOW_ASSERT(
JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
@ -3754,7 +3738,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
}
if (!maybe_elem->ToObject(&elem)) return maybe_elem;
}
JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
}
// Update properties if necessary.
if (properties->length() > 0) {
@ -3762,7 +3746,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
{ MaybeObject* maybe_prop = CopyFixedArray(properties);
if (!maybe_prop->ToObject(&prop)) return maybe_prop;
}
JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
}
// Return the new clone.
return clone;
@ -4818,12 +4802,12 @@ void Heap::IterateAndMarkPointersToFromSpace(Address start,
HeapObject::cast(object));
Object* new_object = *slot;
if (InNewSpace(new_object)) {
SLOW_ASSERT(Heap::InToSpace(new_object));
SLOW_ASSERT(new_object->IsHeapObject());
ASSERT(Heap::InToSpace(new_object));
ASSERT(new_object->IsHeapObject());
store_buffer_.EnterDirectlyIntoStoreBuffer(
reinterpret_cast<Address>(slot));
}
SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
} else if (record_slots &&
MarkCompactCollector::IsOnEvacuationCandidate(object)) {
mark_compact_collector()->RecordSlot(slot, slot, object);
@ -5377,7 +5361,6 @@ class HeapDebugUtils {
bool Heap::Setup(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
debug_utils_ = new HeapDebugUtils(this);
#endif
@ -5463,7 +5446,7 @@ bool Heap::Setup(bool create_heap_objects) {
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
if (create_heap_objects) {
@ -5779,51 +5762,56 @@ class HeapObjectsFilter {
class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
UnreachableObjectsFilter() {
MarkReachableObjects();
}
~UnreachableObjectsFilter() {
Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
MarkUnreachableObjects();
}
bool SkipObject(HeapObject* object) {
MarkBit mark_bit = Marking::MarkBitFrom(object);
return !mark_bit.Get();
if (IntrusiveMarking::IsMarked(object)) {
IntrusiveMarking::ClearMark(object);
return true;
} else {
return false;
}
}
private:
class MarkingVisitor : public ObjectVisitor {
class UnmarkingVisitor : public ObjectVisitor {
public:
MarkingVisitor() : marking_stack_(10) {}
UnmarkingVisitor() : list_(10) {}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!mark_bit.Get()) {
mark_bit.Set();
marking_stack_.Add(obj);
if (IntrusiveMarking::IsMarked(obj)) {
IntrusiveMarking::ClearMark(obj);
list_.Add(obj);
}
}
}
void TransitiveClosure() {
while (!marking_stack_.is_empty()) {
HeapObject* obj = marking_stack_.RemoveLast();
obj->Iterate(this);
}
bool can_process() { return !list_.is_empty(); }
void ProcessNext() {
HeapObject* obj = list_.RemoveLast();
obj->Iterate(this);
}
private:
List<HeapObject*> marking_stack_;
List<HeapObject*> list_;
};
void MarkReachableObjects() {
Heap* heap = Isolate::Current()->heap();
MarkingVisitor visitor;
heap->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
void MarkUnreachableObjects() {
HeapIterator iterator;
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
IntrusiveMarking::SetMark(obj);
}
UnmarkingVisitor visitor;
HEAP->IterateRoots(&visitor, VISIT_ALL);
while (visitor.can_process())
visitor.ProcessNext();
}
AssertNoAllocation no_alloc;
@ -5851,8 +5839,13 @@ HeapIterator::~HeapIterator() {
void HeapIterator::Init() {
// Start the iteration.
space_iterator_ = new SpaceIterator;
space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
new SpaceIterator(Isolate::Current()->heap()->
GcSafeSizeOfOldObjectFunction());
switch (filtering_) {
case kFilterFreeListNodes:
// TODO(gc): Not handled.
break;
case kFilterUnreachable:
filter_ = new UnreachableObjectsFilter;
break;
@ -6357,9 +6350,7 @@ void ExternalStringTable::CleanUp() {
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
if (FLAG_verify_heap) {
Verify();
}
Verify();
}
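
The UnreachableObjectsFilter rewrite above switches to a plain marking pass: seed a worklist from the roots, drain it to mark the transitive closure, and treat whatever stays unmarked as unreachable. A compact worklist sketch over an adjacency-list graph instead of the heap:

#include <cassert>
#include <cstddef>
#include <vector>

// Mark every node reachable from `roots` in a graph given as adjacency lists,
// using an explicit worklist (the TransitiveClosure loop above).
std::vector<bool> MarkReachable(const std::vector<std::vector<int> >& edges,
                                const std::vector<int>& roots) {
  std::vector<bool> marked(edges.size(), false);
  std::vector<int> worklist;
  for (std::size_t i = 0; i < roots.size(); ++i) {
    if (!marked[roots[i]]) {
      marked[roots[i]] = true;
      worklist.push_back(roots[i]);
    }
  }
  while (!worklist.empty()) {
    int node = worklist.back();
    worklist.pop_back();
    for (std::size_t j = 0; j < edges[node].size(); ++j) {
      int target = edges[node][j];
      if (!marked[target]) {
        marked[target] = true;
        worklist.push_back(target);
      }
    }
  }
  return marked;
}

int main() {
  // 0 -> 1 -> 2, node 3 is disconnected; with root 0, node 3 is unreachable.
  std::vector<std::vector<int> > edges(4);
  edges[0].push_back(1);
  edges[1].push_back(2);
  std::vector<int> roots(1, 0);
  std::vector<bool> marked = MarkReachable(edges, roots);
  assert(marked[0] && marked[1] && marked[2] && !marked[3]);
  return 0;
}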

40
deps/v8/src/heap.h

@ -64,31 +64,18 @@ inline Heap* _inline_get_heap_();
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
V(Map, ascii_symbol_map, AsciiSymbolMap) \
V(Map, ascii_string_map, AsciiStringMap) \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, code_map, CodeMap) \
V(Map, serialized_scope_info_map, SerializedScopeInfoMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, meta_map, MetaMap) \
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The first 32 roots above this line should be boring from a GC point of */ \
/* view. This means they are never in new space and never on a page that */ \
/* is being compacted. */ \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@ -96,12 +83,19 @@ inline Heap* _inline_get_heap_();
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(Object, termination_exception, TerminationException) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Map, string_map, StringMap) \
V(Map, ascii_string_map, AsciiStringMap) \
V(Map, symbol_map, SymbolMap) \
V(Map, cons_string_map, ConsStringMap) \
V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, sliced_string_map, SlicedStringMap) \
V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
V(Map, ascii_symbol_map, AsciiSymbolMap) \
V(Map, cons_symbol_map, ConsSymbolMap) \
V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
V(Map, external_symbol_map, ExternalSymbolMap) \
@ -126,7 +120,10 @@ inline Heap* _inline_get_heap_();
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, code_map, CodeMap) \
V(Map, oddball_map, OddballMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
V(HeapNumber, nan_value, NanValue) \
@ -1100,7 +1097,7 @@ class Heap {
inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
Object** roots_address() { return roots_; }
Address* store_buffer_top_address() {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
@ -1422,9 +1419,6 @@ class Heap {
// around a GC).
inline void CompletelyClearInstanceofCache();
// The roots that have an index less than this are always in old space.
static const int kOldSpaceRoots = 0x20;
private:
Heap();
@ -1480,10 +1474,7 @@ class Heap {
int unflattened_strings_length_;
#define ROOT_ACCESSOR(type, name, camel_name) \
inline void set_##name(type* value) { \
/* The deserializer makes use of the fact that these common roots are */ \
/* never in new space and never on a page that is being compacted. */ \
ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
inline void set_##name(type* value) { \
roots_[k##camel_name##RootIndex] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
@ -1963,6 +1954,7 @@ class HeapIterator BASE_EMBEDDED {
public:
enum HeapObjectsFiltering {
kNoFiltering,
kFilterFreeListNodes,
kFilterUnreachable
};
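
The heap.h hunks above reorder entries in an X-macro root list (ROOT_LIST) and adjust the accessors that macro generates, including the assert tied to the kOldSpaceRoots ordering. A minimal sketch of how such an X-macro expands into an index enum plus accessors; MyHeap and MY_ROOT_LIST are illustrative names, and every root is reduced to a plain double for brevity:

#include <cassert>

#define MY_ROOT_LIST(V) \
  V(stack_limit)        \
  V(nan_value)

class MyHeap {
 public:
#define DECLARE_INDEX(name) k_##name##_RootIndex,
  enum RootIndex { MY_ROOT_LIST(DECLARE_INDEX) kRootCount };
#undef DECLARE_INDEX

#define ROOT_ACCESSOR(name)                                               \
  double name() const { return roots_[k_##name##_RootIndex]; }            \
  void set_##name(double value) { roots_[k_##name##_RootIndex] = value; }
  MY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

 private:
  double roots_[kRootCount] = {};
};

int main() {
  MyHeap heap;
  heap.set_stack_limit(4096);
  assert(heap.stack_limit() == 4096);
  return 0;
}

Because the index enum and the accessors are generated from the same list, reordering the list (as this diff does) keeps indices and accessors in sync automatically.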

79
deps/v8/src/hydrogen-instructions.cc

@ -587,10 +587,11 @@ void HInstruction::Verify() {
HBasicBlock* other_block = other_operand->block();
if (cur_block == other_block) {
if (!other_operand->IsPhi()) {
HInstruction* cur = this->previous();
HInstruction* cur = cur_block->first();
while (cur != NULL) {
ASSERT(cur != this); // We should reach other_operand before!
if (cur == other_operand) break;
cur = cur->previous();
cur = cur->next();
}
// Must reach other operand in the same block!
ASSERT(cur == other_operand);
@ -782,21 +783,12 @@ void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == %o", *type_literal_);
stream->Add(" == ");
stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
HControlInstruction::PrintDataTo(stream);
}
HValue* HConstant::Canonicalize() {
return HasNoUses() && !IsBlockEntry() ? NULL : this;
}
HValue* HTypeof::Canonicalize() {
return HasNoUses() && !IsBlockEntry() ? NULL : this;
}
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@ -1146,16 +1138,15 @@ void HPhi::AddIndirectUsesTo(int* dest) {
void HSimulate::PrintDataTo(StringStream* stream) {
stream->Add("id=%d", ast_id());
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
stream->Add("id=%d ", ast_id());
if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
for (int i = 0; i < values_.length(); ++i) {
if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
} else {
if (!HasAssignedIndexAt(i)) {
stream->Add(" push ");
} else {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
}
values_[i]->PrintNameTo(stream);
}
@ -1236,10 +1227,7 @@ void HConstant::PrintDataTo(StringStream* stream) {
bool HArrayLiteral::IsCopyOnWrite() const {
Handle<FixedArray> constant_elements = this->constant_elements();
FixedArrayBase* constant_elements_values =
FixedArrayBase::cast(constant_elements->get(1));
return constant_elements_values->map() == HEAP->fixed_cow_array_map();
return constant_elements()->map() == HEAP->fixed_cow_array_map();
}
@ -1404,7 +1392,7 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(map->GetIsolate());
LookupResult lookup;
map->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsProperty()) {
switch (lookup.type()) {
@ -1457,14 +1445,14 @@ bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
stream->Add(" .");
stream->Add(*String::cast(*name())->ToCString());
}
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
stream->Add(" .");
stream->Add(*String::cast(*name())->ToCString());
}
@ -1561,10 +1549,10 @@ void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
void HStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
ASSERT(name()->IsString());
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" = ");
value()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (!transition().is_null()) {
stream->Add(" (transition map %p)", *transition());
}
@ -1645,12 +1633,6 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
}
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
}
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
@ -1764,12 +1746,6 @@ HType HInstanceOfKnownGlobal::CalculateInferredType() {
}
HType HChange::CalculateInferredType() {
if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
return type();
}
HType HBitwiseBinaryOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
@ -1825,31 +1801,6 @@ HType HSar::CalculateInferredType() {
}
HType HStringCharFromCode::CalculateInferredType() {
return HType::String();
}
HType HArrayLiteral::CalculateInferredType() {
return HType::JSArray();
}
HType HObjectLiteral::CalculateInferredType() {
return HType::JSObject();
}
HType HRegExpLiteral::CalculateInferredType() {
return HType::JSObject();
}
HType HFunctionLiteral::CalculateInferredType() {
return HType::JSObject();
}
HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
BitVector* visited) {
visited->Add(id());
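
The hydrogen-instructions.cc hunks above add or remove Canonicalize() overrides such as HConstant::Canonicalize(), which return NULL when an instruction has no uses so the graph can drop it. A minimal sketch of that idea with a simplified stand-in type (the real code also excludes block-entry instructions via IsBlockEntry()):

#include <cassert>

struct Instr {
  int use_count = 0;
  bool HasNoUses() const { return use_count == 0; }
  // Returns the value that should replace this instruction, or nullptr if
  // the instruction has no uses and can simply be dropped from the graph.
  Instr* Canonicalize() { return HasNoUses() ? nullptr : this; }
};

int main() {
  Instr dead;
  Instr live;
  live.use_count = 1;
  assert(dead.Canonicalize() == nullptr);  // unused: safe to delete
  assert(live.Canonicalize() == &live);    // still referenced: keep it
  return 0;
}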

92
deps/v8/src/hydrogen-instructions.h

@ -171,7 +171,6 @@ class LChunkBuilder;
V(Throw) \
V(ToFastProperties) \
V(ToInt32) \
V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@ -398,11 +397,6 @@ class HType {
return type_ == kUninitialized;
}
bool IsHeapObject() {
ASSERT(type_ != kUninitialized);
return IsHeapNumber() || IsString() || IsNonPrimitive();
}
static HType TypeFromValue(Handle<Object> value);
const char* ToString();
@ -1107,14 +1101,12 @@ class HChange: public HUnaryOperation {
ASSERT(!value->representation().IsNone() && !to.IsNone());
ASSERT(!value->representation().Equals(to));
set_representation(to);
set_type(HType::TaggedNumber());
SetFlag(kUseGVN);
if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
if (is_truncating) SetFlag(kTruncatingToInt32);
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual HType CalculateInferredType();
Representation from() { return value()->representation(); }
Representation to() { return representation(); }
@ -1348,7 +1340,7 @@ class HPushArgument: public HUnaryOperation {
class HThisFunction: public HTemplateInstruction<0> {
public:
explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@ -1357,18 +1349,10 @@ class HThisFunction: public HTemplateInstruction<0> {
return Representation::None();
}
Handle<JSFunction> closure() const { return closure_; }
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
virtual bool DataEquals(HValue* other) {
HThisFunction* b = HThisFunction::cast(other);
return *closure() == *b->closure();
}
private:
Handle<JSFunction> closure_;
virtual bool DataEquals(HValue* other) { return true; }
};
@ -2296,7 +2280,6 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() const { return handle_->IsSmi(); }
@ -3277,13 +3260,6 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
return !value->type().IsBoolean()
&& !value->type().IsSmi()
&& !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
}
class HStoreGlobalCell: public HUnaryOperation {
public:
HStoreGlobalCell(HValue* value,
@ -3299,9 +3275,6 @@ class HStoreGlobalCell: public HUnaryOperation {
bool RequiresHoleCheck() {
return !details_.IsDontDelete() || details_.IsReadOnly();
}
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
@ -3382,6 +3355,13 @@ class HLoadContextSlot: public HUnaryOperation {
};
static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
return !value->type().IsBoolean()
&& !value->type().IsSmi()
&& !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
}
class HStoreContextSlot: public HTemplateInstruction<2> {
public:
HStoreContextSlot(HValue* context, int slot_index, HValue* value)
@ -3720,9 +3700,9 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
HValue* object,
Handle<String> name,
HValue* value,
StrictModeFlag strict_mode_flag)
bool strict_mode)
: name_(name),
strict_mode_flag_(strict_mode_flag) {
strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@ -3733,7 +3713,7 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
Handle<String> name() { return name_; }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
bool strict_mode() { return strict_mode_; }
virtual void PrintDataTo(StringStream* stream);
@ -3745,7 +3725,7 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
private:
Handle<String> name_;
StrictModeFlag strict_mode_flag_;
bool strict_mode_;
};
@ -3906,44 +3886,6 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
};
class HTransitionElementsKind: public HTemplateInstruction<1> {
public:
HTransitionElementsKind(HValue* object,
Handle<Map> original_map,
Handle<Map> transitioned_map)
: original_map_(original_map),
transitioned_map_(transitioned_map) {
SetOperandAt(0, object);
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
set_representation(Representation::Tagged());
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
HValue* object() { return OperandAt(0); }
Handle<Map> original_map() { return original_map_; }
Handle<Map> transitioned_map() { return transitioned_map_; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
virtual bool DataEquals(HValue* other) {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
return original_map_.is_identical_to(instr->original_map()) &&
transitioned_map_.is_identical_to(instr->transitioned_map());
}
private:
Handle<Map> original_map_;
Handle<Map> transitioned_map_;
};
class HStringAdd: public HBinaryOperation {
public:
HStringAdd(HValue* context, HValue* left, HValue* right)
@ -4006,7 +3948,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
HStringCharFromCode(HValue* context, HValue* char_code) {
SetOperandAt(0, context);
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@ -4015,7 +3957,6 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
? Representation::Tagged()
: Representation::Integer32();
}
virtual HType CalculateInferredType();
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
@ -4093,7 +4034,6 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
@ -4128,7 +4068,6 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
@ -4158,7 +4097,6 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
@ -4183,7 +4121,6 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
@ -4207,7 +4144,6 @@ class HTypeof: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {

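The hydrogen-instructions.h hunks above move the StoringValueNeedsWriteBarrier() helper, which lets stores such as HStoreGlobalCell and HStoreContextSlot skip the write barrier for values that can never be young heap objects. A minimal sketch of the same predicate over simplified stand-in types:

#include <cassert>

struct Value {
  bool is_boolean = false;
  bool is_smi = false;
  bool is_immortal_immovable_constant = false;
};

// A store can skip the write barrier when the stored value can never be a
// freshly allocated new-space object: booleans, small integers, and
// immortal immovable constants.
static bool StoringValueNeedsWriteBarrier(const Value& v) {
  return !v.is_boolean &&
         !v.is_smi &&
         !v.is_immortal_immovable_constant;
}

int main() {
  Value smi;
  smi.is_smi = true;
  Value heap_object;  // an ordinary, possibly new-space object
  assert(!StoringValueNeedsWriteBarrier(smi));
  assert(StoringValueNeedsWriteBarrier(heap_object));
  return 0;
}
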
286
deps/v8/src/hydrogen.cc

@ -164,11 +164,10 @@ void HBasicBlock::Finish(HControlInstruction* end) {
}
void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
void HBasicBlock::Goto(HBasicBlock* block) {
if (block->IsInlineReturnTarget()) {
AddInstruction(new(zone()) HLeaveInlined);
last_environment_ = last_environment()->outer();
if (drop_extra) last_environment_->Drop(1);
}
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(block);
@ -176,14 +175,11 @@ void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
}
void HBasicBlock::AddLeaveInlined(HValue* return_value,
HBasicBlock* target,
bool drop_extra) {
void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
AddInstruction(new(zone()) HLeaveInlined);
last_environment_ = last_environment()->outer();
if (drop_extra) last_environment_->Drop(1);
last_environment()->Push(return_value);
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(target);
@ -545,7 +541,7 @@ HConstant* HGraph::GetConstantHole() {
HGraphBuilder::HGraphBuilder(CompilationInfo* info,
TypeFeedbackOracle* oracle)
: function_state_(NULL),
initial_function_state_(this, info, oracle, false),
initial_function_state_(this, info, oracle),
ast_context_(NULL),
break_scope_(NULL),
graph_(NULL),
@ -1503,9 +1499,6 @@ int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
block->block_id() < dominated->block_id() &&
visited_on_paths_.Add(block->block_id())) {
side_effects |= block_side_effects_[block->block_id()];
if (block->IsLoopHeader()) {
side_effects |= loop_side_effects_[block->block_id()];
}
side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
dominator, block);
}
@ -2012,13 +2005,11 @@ void HGraph::ComputeMinusZeroChecks() {
// a (possibly inlined) function.
FunctionState::FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
bool drop_extra)
TypeFeedbackOracle* oracle)
: owner_(owner),
compilation_info_(info),
oracle_(oracle),
call_context_(NULL),
drop_extra_(drop_extra),
function_return_(NULL),
test_context_(NULL),
outer_(owner->function_state()) {
@ -2177,8 +2168,8 @@ void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
owner()->current_block()->Finish(instr);
empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
empty_true->Goto(if_true());
empty_false->Goto(if_false());
owner()->set_current_block(NULL);
}
@ -2199,8 +2190,8 @@ void TestContext::BuildBranch(HValue* value) {
HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
empty_true->Goto(if_true());
empty_false->Goto(if_false());
builder->set_current_block(NULL);
}
@ -2661,14 +2652,12 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
current_block()->Goto(function_return(), function_state()->drop_extra());
current_block()->Goto(function_return());
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = environment()->Pop();
current_block()->AddLeaveInlined(return_value,
function_return(),
function_state()->drop_extra());
current_block()->AddLeaveInlined(return_value, function_return());
}
set_current_block(NULL);
}
@ -3167,7 +3156,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
LookupResult lookup(isolate());
LookupResult lookup;
GlobalPropertyAccess type =
LookupGlobalProperty(variable, &lookup, false);
@ -3287,7 +3276,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
literal,
name,
value,
function_strict_mode_flag());
function_strict_mode());
AddInstruction(store);
AddSimulate(key->id());
} else {
@ -3348,8 +3337,11 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
elements = new(zone()) HLoadElements(literal);
AddInstruction(elements);
// Load the elements array before the first store.
if (elements == NULL) {
elements = new(zone()) HLoadElements(literal);
AddInstruction(elements);
}
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
@ -3373,10 +3365,10 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
set_current_block(check_smi_only_elements);
HCompareConstantEqAndBranch* smi_elements_check =
new(zone()) HCompareConstantEqAndBranch(elements_kind,
FAST_ELEMENTS,
FAST_SMI_ONLY_ELEMENTS,
Token::EQ_STRICT);
smi_elements_check->SetSuccessorAt(0, store_fast_edgesplit2);
smi_elements_check->SetSuccessorAt(1, store_generic);
smi_elements_check->SetSuccessorAt(0, store_generic);
smi_elements_check->SetSuccessorAt(1, store_fast_edgesplit2);
current_block()->Finish(smi_elements_check);
store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
@ -3465,7 +3457,7 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
object,
name,
value,
function_strict_mode_flag());
function_strict_mode());
}
@ -3479,7 +3471,7 @@ HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
Handle<String> name = Handle<String>::cast(key->handle());
ASSERT(!name.is_null());
LookupResult lookup(isolate());
LookupResult lookup;
SmallMapList* types = expr->GetReceiverTypes();
bool is_monomorphic = expr->IsMonomorphic() &&
ComputeStoredField(types->first(), name, &lookup);
@ -3503,7 +3495,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
HBasicBlock* join = NULL;
for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(isolate());
LookupResult lookup;
if (ComputeStoredField(map, name, &lookup)) {
if (count == 0) {
AddInstruction(new(zone()) HCheckNonSmi(object)); // Only needed once.
@ -3586,7 +3578,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
ASSERT(!name.is_null());
SmallMapList* types = expr->GetReceiverTypes();
LookupResult lookup(isolate());
LookupResult lookup;
if (expr->IsMonomorphic()) {
instr = BuildStoreNamed(object, value, expr);
@ -3631,7 +3623,7 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
HValue* value,
int position,
int ast_id) {
LookupResult lookup(isolate());
LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
Handle<GlobalObject> global(info()->global_object());
@ -3650,7 +3642,7 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
global_object,
var->name(),
value,
function_strict_mode_flag());
function_strict_mode());
instr->set_position(position);
AddInstruction(instr);
ASSERT(instr->HasSideEffects());
@ -3946,7 +3938,7 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
Property* expr,
Handle<Map> map,
Handle<String> name) {
LookupResult lookup(isolate());
LookupResult lookup;
map->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsProperty() && lookup.type() == FIELD) {
return BuildLoadNamedField(obj,
@ -4045,8 +4037,11 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
Handle<Map> map,
Expression* expr,
bool is_store) {
ASSERT(expr->IsMonomorphic());
Handle<Map> map = expr->GetMonomorphicReceiverType();
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
bool fast_smi_only_elements = map->has_fast_smi_only_elements();
bool fast_elements = map->has_fast_elements();
@ -4096,6 +4091,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
bool* has_side_effects) {
*has_side_effects = false;
AddInstruction(new(zone()) HCheckNonSmi(object));
AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
SmallMapList* maps = prop->GetReceiverTypes();
bool todo_external_array = false;
@ -4105,55 +4101,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
type_todo[i] = false;
}
// Elements_kind transition support.
MapHandleList transition_target(maps->length());
// Collect possible transition targets.
MapHandleList possible_transitioned_maps(maps->length());
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ElementsKind elements_kind = map->elements_kind();
if (elements_kind == FAST_DOUBLE_ELEMENTS ||
elements_kind == FAST_ELEMENTS) {
possible_transitioned_maps.Add(map);
ASSERT(maps->at(i)->IsMap());
type_todo[maps->at(i)->elements_kind()] = true;
if (maps->at(i)->elements_kind()
>= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
todo_external_array = true;
}
}
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
Handle<Map> transitioned_map =
map->FindTransitionedMap(&possible_transitioned_maps);
transition_target.Add(transitioned_map);
}
int num_untransitionable_maps = 0;
Handle<Map> untransitionable_map;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ASSERT(map->IsMap());
if (!transition_target.at(i).is_null()) {
object = AddInstruction(new(zone()) HTransitionElementsKind(
object, map, transition_target.at(i)));
} else {
type_todo[map->elements_kind()] = true;
if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
todo_external_array = true;
}
num_untransitionable_maps++;
untransitionable_map = map;
}
}
// If only one map is left after transitioning, handle this case
// monomorphically.
if (num_untransitionable_maps == 1) {
HInstruction* instr = AddInstruction(BuildMonomorphicElementAccess(
object, key, val, untransitionable_map, is_store));
*has_side_effects |= instr->HasSideEffects();
instr->set_position(position);
return is_store ? NULL : instr;
}
AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
HBasicBlock* join = graph()->CreateBasicBlock();
HInstruction* elements_kind_instr =
@ -4285,9 +4241,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
if (expr->IsMonomorphic()) {
Handle<Map> map = expr->GetMonomorphicReceiverType();
AddInstruction(new(zone()) HCheckNonSmi(obj));
instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
} else if (expr->GetReceiverTypes() != NULL &&
!expr->GetReceiverTypes()->is_empty()) {
return HandlePolymorphicElementAccess(
@ -4315,7 +4269,7 @@ HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
object,
key,
value,
function_strict_mode_flag());
function_strict_mode());
}
bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
@ -4557,7 +4511,7 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
}
bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
bool HGraphBuilder::TryInline(Call* expr) {
if (!FLAG_use_inlining) return false;
// The function call we are inlining is a method call if the call
@ -4585,9 +4539,9 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
return false;
}
CompilationInfo* outer_info = info();
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
CompilationInfo* outer_info = info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
@ -4601,7 +4555,9 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
HEnvironment* env = environment();
int current_level = 1;
while (env->outer() != NULL) {
if (current_level == Compiler::kMaxInliningLevels) {
if (current_level == (FLAG_limit_inlining
? Compiler::kMaxInliningLevels
: 2 * Compiler::kMaxInliningLevels)) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
@ -4610,13 +4566,9 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
}
// Don't inline recursive functions.
for (FunctionState* state = function_state();
state != NULL;
state = state->outer()) {
if (state->compilation_info()->closure()->shared() == *target_shared) {
TraceInline(target, caller, "target is recursive");
return false;
}
if (*target_shared == outer_info->closure()->shared()) {
TraceInline(target, caller, "target is recursive");
return false;
}
// We don't want to add more than a certain number of nodes from inlining.
@ -4713,10 +4665,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
Handle<Code>(target_shared->code()),
Handle<Context>(target->context()->global_context()),
isolate());
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state =
new FunctionState(this, &target_info, &target_oracle, drop_extra);
FunctionState target_state(this, &target_info, &target_oracle);
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
@ -4750,7 +4699,6 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
TraceInline(target, caller, "inline graph construction failed");
target_shared->DisableOptimization(*target);
inline_bailout_ = true;
delete target_state;
return true;
}
@ -4766,11 +4714,9 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
ASSERT(function_return() != NULL);
ASSERT(call_context()->IsEffect() || call_context()->IsValue());
if (call_context()->IsEffect()) {
current_block()->Goto(function_return(), drop_extra);
current_block()->Goto(function_return());
} else {
current_block()->AddLeaveInlined(undefined,
function_return(),
drop_extra);
current_block()->AddLeaveInlined(undefined, function_return());
}
} else {
// The graph builder assumes control can reach both branches of a
@ -4778,14 +4724,13 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
// simply jumping to the false target.
//
// TODO(3168478): refactor to avoid this.
ASSERT(call_context()->IsTest());
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
current_block()->Finish(test);
empty_true->Goto(inlined_test_context()->if_true(), drop_extra);
empty_false->Goto(inlined_test_context()->if_false(), drop_extra);
empty_true->Goto(inlined_test_context()->if_true());
empty_false->Goto(inlined_test_context()->if_false());
}
}
@ -4797,21 +4742,19 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
// Pop the return test context from the expression context stack.
ASSERT(ast_context() == inlined_test_context());
ClearInlinedTestContext();
delete target_state;
// Forward to the real test context.
if (if_true->HasPredecessor()) {
if_true->SetJoinId(expr->id());
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
if_true->Goto(true_target, function_state()->drop_extra());
if_true->Goto(true_target);
}
if (if_false->HasPredecessor()) {
if_false->SetJoinId(expr->id());
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
if_false->Goto(false_target, function_state()->drop_extra());
if_false->Goto(false_target);
}
set_current_block(NULL);
return true;
} else if (function_return()->HasPredecessor()) {
function_return()->SetJoinId(expr->id());
@ -4819,7 +4762,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
} else {
set_current_block(NULL);
}
delete target_state;
return true;
}
@ -5071,7 +5014,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
// If there is a global property cell for the name at compile time and
// access check is not enabled we assume that the function will not change
// and generate optimized code for calling the function.
LookupResult lookup(isolate());
LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
if (type == kUseCell &&
!info()->global_object()->IsAccessCheckNeeded()) {
@ -5126,17 +5069,32 @@ void HGraphBuilder::VisitCall(Call* expr) {
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
if (TryInline(expr, true)) { // Drop function from environment.
if (TryInline(expr)) {
// The function is lingering in the deoptimization environment.
// Handle it by case analysis on the AST context.
if (ast_context()->IsEffect()) {
Drop(1);
} else if (ast_context()->IsValue()) {
HValue* result = Pop();
Drop(1);
Push(result);
} else if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
if (context->if_true()->HasPredecessor()) {
context->if_true()->last_environment()->Drop(1);
}
if (context->if_false()->HasPredecessor()) {
context->if_false()->last_environment()->Drop(1);
}
} else {
UNREACHABLE();
}
return;
} else {
call = PreProcessCall(new(zone()) HInvokeFunction(context,
function,
argument_count));
call->set_position(expr->position());
AddInstruction(call);
AddSimulate(expr->id());
Drop(1); // The function.
return ast_context()->ReturnValue(call);
}
} else {
@ -5346,6 +5304,7 @@ void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
void HGraphBuilder::VisitNot(UnaryOperation* expr) {
// TODO(svenpanne) Perhaps a switch/virtual function is nicer here.
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
VisitForControl(expr->expression(),
@ -5832,65 +5791,35 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
HTypeof* typeof_expr,
Expression* sub_expr,
Handle<String> check) {
// Note: The HTypeof itself is removed during canonicalization, if possible.
HValue* value = typeof_expr->value();
CHECK_ALIVE(VisitForTypeOf(sub_expr));
HValue* value = Pop();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
}
static bool MatchLiteralCompareNil(HValue* left,
Token::Value op,
HValue* right,
Handle<Object> nil,
HValue** expr) {
if (left->IsConstant() &&
HConstant::cast(left)->handle().is_identical_to(nil) &&
Token::IsEqualityOp(op)) {
*expr = right;
bool HGraphBuilder::TryLiteralCompare(CompareOperation* expr) {
Expression *sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
HandleLiteralCompareTypeof(expr, sub_expr, check);
return true;
}
return false;
}
static bool MatchLiteralCompareTypeof(HValue* left,
Token::Value op,
HValue* right,
HTypeof** typeof_expr,
Handle<String>* check) {
if (left->IsTypeof() &&
Token::IsEqualityOp(op) &&
right->IsConstant() &&
HConstant::cast(right)->HasStringValue()) {
*typeof_expr = HTypeof::cast(left);
*check = Handle<String>::cast(HConstant::cast(right)->handle());
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
return false;
}
static bool IsLiteralCompareTypeof(HValue* left,
Token::Value op,
HValue* right,
HTypeof** typeof_expr,
Handle<String>* check) {
return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
HandleLiteralCompareNil(expr, sub_expr, kNullValue);
return true;
}
static bool IsLiteralCompareNil(HValue* left,
Token::Value op,
HValue* right,
Handle<Object> nil,
HValue** expr) {
return MatchLiteralCompareNil(left, op, right, nil, expr) ||
MatchLiteralCompareNil(right, op, left, nil, expr);
return false;
}
@ -5911,9 +5840,11 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
// Check for special cases that compare against literals.
if (TryLiteralCompare(expr)) return;
TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback.
// Note that for the special typeof/null/undefined cases we get unknown here.
if (type_info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
@ -5928,20 +5859,6 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
HTypeof* typeof_expr = NULL;
Handle<String> check;
if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
return HandleLiteralCompareTypeof(expr, typeof_expr, check);
}
HValue* sub_expr = NULL;
Factory* f = graph()->isolate()->factory();
if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
}
if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
// residing in new space. If it is we assume that the function will stay the
@ -5954,7 +5871,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
!info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
Handle<GlobalObject> global(info()->global_object());
LookupResult lookup(isolate());
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty() &&
lookup.type() == NORMAL &&
@ -6030,11 +5947,13 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
HValue* value,
Expression* sub_expr,
NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
EqualityKind kind =
expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
@ -6047,8 +5966,7 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HThisFunction* self = new(zone()) HThisFunction(
function_state()->compilation_info()->closure());
HThisFunction* self = new(zone()) HThisFunction;
return ast_context()->ReturnInstruction(self, expr->id());
}
@ -6061,9 +5979,7 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function) {
if (mode == LET || mode == CONST_HARMONY) {
return Bailout("unsupported harmony declaration");
}
if (mode == LET) return Bailout("unsupported let declaration");
Variable* var = proxy->var();
switch (var->location()) {
case Variable::UNALLOCATED:

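One of the TryInline hunks above changes the inlining depth check so the cap depends on FLAG_limit_inlining. A minimal sketch of that walk over the chain of environments, with stand-in types and an illustrative limit value rather than V8's actual constant:

#include <cstdio>

static bool FLAG_limit_inlining = true;   // hypothetical stand-in flag
static const int kMaxInliningLevels = 8;  // illustrative value only

struct Environment {
  Environment* outer = nullptr;
};

// Walks the chain of (inlined) environments and reports whether the
// nesting depth has reached the configured limit.
static bool DepthLimitReached(Environment* env) {
  int current_level = 1;
  while (env->outer != nullptr) {
    int limit = FLAG_limit_inlining ? kMaxInliningLevels
                                    : 2 * kMaxInliningLevels;
    if (current_level == limit) return true;  // too deep: do not inline
    ++current_level;
    env = env->outer;
  }
  return false;
}

int main() {
  Environment outermost, middle, innermost;
  middle.outer = &outermost;
  innermost.outer = &middle;
  std::printf("depth limit reached: %s\n",
              DepthLimitReached(&innermost) ? "yes" : "no");
  return 0;
}
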
32
deps/v8/src/hydrogen.h

@ -121,7 +121,7 @@ class HBasicBlock: public ZoneObject {
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
void Goto(HBasicBlock* block, bool drop_extra = false);
void Goto(HBasicBlock* block);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
@ -133,9 +133,7 @@ class HBasicBlock: public ZoneObject {
// Add the inlined function exit sequence, adding an HLeaveInlined
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
HBasicBlock* target,
bool drop_extra = false);
void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
@ -605,18 +603,16 @@ class TestContext: public AstContext {
};
class FunctionState {
class FunctionState BASE_EMBEDDED {
public:
FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
bool drop_extra);
TypeFeedbackOracle* oracle);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
bool drop_extra() { return drop_extra_; }
HBasicBlock* function_return() { return function_return_; }
TestContext* test_context() { return test_context_; }
void ClearInlinedTestContext() {
@ -636,10 +632,6 @@ class FunctionState {
// inlined. NULL when not inlining.
AstContext* call_context_;
// Indicate if we have to drop an extra value from the environment on
// return from inlined functions.
bool drop_extra_;
// When inlining in an effect of value context, this is the return block.
// It is NULL otherwise. When inlining in a test context, there are a
// pair of return blocks in the context. When not inlining, there is no
@ -736,8 +728,6 @@ class HGraphBuilder: public AstVisitor {
TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
FunctionState* function_state() const { return function_state_; }
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@ -756,6 +746,7 @@ class HGraphBuilder: public AstVisitor {
static const int kMaxSourceSize = 600;
// Simple accessors.
FunctionState* function_state() const { return function_state_; }
void set_function_state(FunctionState* state) { function_state_ = state; }
AstContext* ast_context() const { return ast_context_; }
@ -778,8 +769,8 @@ class HGraphBuilder: public AstVisitor {
void ClearInlinedTestContext() {
function_state()->ClearInlinedTestContext();
}
StrictModeFlag function_strict_mode_flag() {
return function_state()->compilation_info()->strict_mode_flag();
bool function_strict_mode() {
return function_state()->compilation_info()->is_strict_mode();
}
// Generators for inline runtime functions.
@ -892,7 +883,7 @@ class HGraphBuilder: public AstVisitor {
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
bool TryInline(Call* expr, bool drop_extra = false);
bool TryInline(Call* expr);
bool TryInlineBuiltinFunction(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
@ -921,11 +912,12 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
SmallMapList* types,
Handle<String> name);
bool TryLiteralCompare(CompareOperation* expr);
void HandleLiteralCompareTypeof(CompareOperation* expr,
HTypeof* typeof_expr,
Expression* sub_expr,
Handle<String> check);
void HandleLiteralCompareNil(CompareOperation* expr,
HValue* value,
Expression* sub_expr,
NilValue nil);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
@ -959,7 +951,7 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
Handle<Map> map,
Expression* expr,
bool is_store);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,

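The hydrogen.h and hydrogen.cc hunks above switch FunctionState between a heap-allocated object (with explicit deletes on every exit path) and a stack-embedded, BASE_EMBEDDED-style scope whose constructor installs it as the builder's current state and whose destructor restores the outer one. A minimal sketch of that scope pattern with simplified stand-in classes:

#include <cassert>

class Builder;

class FunctionState {
 public:
  explicit FunctionState(Builder* owner);
  ~FunctionState();
  FunctionState* outer() const { return outer_; }

 private:
  Builder* owner_;
  FunctionState* outer_;
};

class Builder {
 public:
  FunctionState* function_state() const { return function_state_; }
  void set_function_state(FunctionState* state) { function_state_ = state; }

 private:
  FunctionState* function_state_ = nullptr;
};

// Entering a (possibly inlined) function pushes a new state; leaving the
// scope pops it again, with no explicit delete required.
FunctionState::FunctionState(Builder* owner)
    : owner_(owner), outer_(owner->function_state()) {
  owner_->set_function_state(this);
}

FunctionState::~FunctionState() {
  owner_->set_function_state(outer_);
}

int main() {
  Builder builder;
  FunctionState top(&builder);  // outermost function being compiled
  {
    FunctionState inlined(&builder);  // an inlined callee
    assert(builder.function_state() == &inlined);
  }
  assert(builder.function_state() == &top);  // restored automatically
  return 0;
}

The stack-embedded form makes it impossible to forget the matching cleanup on early-exit paths, which is exactly the bookkeeping the heap-allocated variant has to do by hand.
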
15
deps/v8/src/ia32/assembler-ia32-inl.h

@ -88,10 +88,10 @@ int RelocInfo::target_address_size() {
}
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
void RelocInfo::set_target_address(Address target) {
Assembler::set_target_address_at(pc_, target);
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@ -117,13 +117,11 @@ Object** RelocInfo::target_object_address() {
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
@ -153,13 +151,12 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode) {
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(

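The assembler-ia32-inl.h hunks above add or drop a WriteBarrierMode parameter on the RelocInfo setters, so callers can skip the incremental-marking notification when they know it is unnecessary. A minimal sketch of that parameterized setter with stand-in types, not V8's real RelocInfo:

#include <cstdio>

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

struct Object {
  bool is_heap_object = false;
};

struct Host {
  // Stand-in for notifying the incremental marker about a recorded write.
  void RecordWrite(Object** slot, Object* value) {
    (void)slot;
    std::printf("write barrier recorded for %p\n",
                static_cast<void*>(value));
  }
};

struct RelocInfo {
  Object* target = nullptr;
  Host* host = nullptr;

  void set_target_object(Object* value,
                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
    target = value;
    // Only record the write when asked to and when the value is actually
    // a heap object that the incremental marker could care about.
    if (mode == UPDATE_WRITE_BARRIER && host != nullptr &&
        value->is_heap_object) {
      host->RecordWrite(&target, value);
    }
  }
};

int main() {
  Host host;
  Object heap_obj;
  heap_obj.is_heap_object = true;
  RelocInfo reloc;
  reloc.host = &host;
  reloc.set_target_object(&heap_obj);                      // barrier taken
  reloc.set_target_object(&heap_obj, SKIP_WRITE_BARRIER);  // barrier skipped
  return 0;
}
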
13
deps/v8/src/ia32/builtins-ia32.cc

@ -915,6 +915,10 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
// Number of empty elements to allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
// Allocate an empty JSArray. The allocated array is put into the result
// register. If the parameter initial_capacity is larger than zero an elements
// backing store is allocated with this size and filled with the hole values.
@ -925,9 +929,10 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
int initial_capacity,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ mov(scratch1, FieldOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
@ -985,6 +990,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
STATIC_ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
@ -1147,6 +1153,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
ebx,
ecx,
edi,
kPreallocatedArrayElements,
&prepare_generic_code_call);
__ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
__ pop(ebx);
@ -1175,7 +1182,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ mov(eax, Operand(esp, i * kPointerSize));
__ mov(Operand(esp, (i + 1) * kPointerSize), eax);
}
__ Drop(2); // Drop two stack slots.
__ add(esp, Immediate(2 * kPointerSize)); // Drop two stack slots.
__ push(Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);

138
deps/v8/src/ia32/code-stubs-ia32.cc

@ -34,7 +34,6 @@
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
@ -239,12 +238,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [esp + (3 * kPointerSize)]: literals array.
// All sizes here are multiples of kPointerSize.
int elements_size = 0;
if (length_ > 0) {
elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
? FixedDoubleArray::SizeFor(length_)
: FixedArray::SizeFor(length_);
}
int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
int size = JSArray::kSize + elements_size;
// Load boilerplate object into ecx and check if we need to create a
@ -267,9 +261,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
if (mode_ == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map = factory->fixed_array_map();
} else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
message = "Expected (writable) fixed double array";
expected_map = factory->fixed_double_array_map();
} else {
ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
@ -302,24 +293,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
// Copy the elements array.
if (mode_ == CLONE_ELEMENTS) {
for (int i = 0; i < elements_size; i += kPointerSize) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(edx, i), ebx);
}
} else {
ASSERT(mode_ == CLONE_DOUBLE_ELEMENTS);
int i;
for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(edx, i), ebx);
}
while (i < elements_size) {
__ fld_d(FieldOperand(ecx, i));
__ fstp_d(FieldOperand(edx, i));
i += kDoubleSize;
}
ASSERT(i == elements_size);
for (int i = 0; i < elements_size; i += kPointerSize) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(edx, i), ebx);
}
}
@ -3882,11 +3858,11 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch = scratch2;
// Load the number string cache.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
ExternalReference roots_address =
ExternalReference::roots_address(masm->isolate());
__ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
__ mov(number_string_cache,
Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
Operand::StaticArray(scratch, times_pointer_size, roots_address));
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
@ -4854,8 +4830,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
ExternalReference roots_address =
ExternalReference::roots_address(masm->isolate());
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
@ -4877,23 +4853,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Look up the function and the map in the instanceof cache.
Label miss;
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(function, Operand::StaticArray(scratch,
times_pointer_size,
roots_array_start));
__ cmp(function,
Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(map, Operand::StaticArray(
scratch, times_pointer_size, roots_array_start));
scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(
scratch, times_pointer_size, roots_array_start));
scratch, times_pointer_size, roots_address));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&miss);
}
// Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@ -4903,10 +4878,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
map);
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
function);
} else {
// The constants for the code patching are based on no push instructions
@ -4943,7 +4917,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(0));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch,
times_pointer_size, roots_array_start), eax);
times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->true_value());
@ -4965,7 +4939,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(Smi::FromInt(1)));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(
scratch, times_pointer_size, roots_array_start), eax);
scratch, times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->false_value());
@ -5754,11 +5728,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Load the symbol table.
Register symbol_table = c2;
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
ExternalReference roots_address =
ExternalReference::roots_address(masm->isolate());
__ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
__ mov(symbol_table,
Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
Operand::StaticArray(scratch, times_pointer_size, roots_address));
// Calculate capacity mask from the symbol table capacity.
Register mask = scratch2;
@ -6541,67 +6515,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register properties,
Handle<String> name,
Register r0) {
ASSERT(name->IsSymbol());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
Register index = r0;
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
__ and_(index,
Immediate(Smi::FromInt(name->Hash() +
StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(entity_name, masm->isolate()->factory()->undefined_value());
__ j(equal, done);
// Stop if found the property.
__ cmp(entity_name, Handle<String>(name));
__ j(equal, miss);
// Check if the entry name is not a symbol.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
kIsSymbolMask);
__ j(zero, miss);
}
StringDictionaryLookupStub stub(properties,
r0,
r0,
StringDictionaryLookupStub::NEGATIVE_LOOKUP);
__ push(Immediate(Handle<Object>(name)));
__ push(Immediate(name->Hash()));
__ CallStub(&stub);
__ test(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
// TODO(kmillikin): Eliminate this function when the stub cache is fully
// handlified.
MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
MacroAssembler* masm,
Label* miss,
Label* done,
@ -6835,13 +6749,6 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ ebx, edx, ecx, EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ edi, edx, ecx, EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ edx, ebx, edi, EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
{ eax, edx, esi, EMIT_REMEMBERED_SET},
{ edx, eax, edi, EMIT_REMEMBERED_SET},
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
@ -7084,6 +6991,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
#undef __
} } // namespace v8::internal
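
The StringDictionaryLookupStub::GenerateNegativeLookup code above probes a handful of dictionary slots inline, treating an undefined (never-used) slot as proof that the property is absent and falling back to the full stub otherwise. A minimal sketch of that probing scheme in plain C++; the probe offsets and constants here are simplified, illustrative values rather than V8's exact ones:

#include <cassert>
#include <string>
#include <vector>

static const size_t kInlinedProbes = 4;  // illustrative probe count

struct Dictionary {
  // An empty string stands in for the "undefined" (never used) slot.
  std::vector<std::string> names;
  explicit Dictionary(size_t capacity_pow2) : names(capacity_pow2) {}
};

// Probes the first kInlinedProbes slots for `name`. Returns true when an
// empty slot is hit first, which proves the name cannot be present; returns
// false if the name is found or the quick probes are inconclusive (the real
// stub then falls back to a full dictionary lookup).
static bool NegativeLookup(const Dictionary& dict,
                           const std::string& name,
                           size_t hash) {
  size_t mask = dict.names.size() - 1;  // capacity is a power of two
  for (size_t i = 0; i < kInlinedProbes; ++i) {
    size_t index = (hash + i + i * i) & mask;  // quadratic-style probing
    if (dict.names[index].empty()) return true;
    if (dict.names[index] == name) return false;
  }
  return false;
}

int main() {
  Dictionary dict(8);
  dict.names[3] = "length";
  assert(NegativeLookup(dict, "foo", 42));     // hits an empty slot quickly
  assert(!NegativeLookup(dict, "length", 3));  // finds the name itself
  return 0;
}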

11
deps/v8/src/ia32/code-stubs-ia32.h

@ -421,16 +421,7 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register properties,
Handle<String> name,
Register r0);
// TODO(kmillikin): Eliminate this function when the stub cache is fully
// handlified.
MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
MacroAssembler* masm,
Label* miss,
Label* done,

258
deps/v8/src/ia32/codegen-ia32.cc

@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
@ -266,263 +265,6 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#undef __
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
__ RecordWriteField(edx,
HeapObject::kMapOffset,
ebx,
edi,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label loop, entry, convert_hole, gc_required;
__ push(eax);
__ push(ebx);
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
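// edi holds the length as a smi (length << 1), so scaling by 4 yields
// length * kDoubleSize bytes of payload in addition to the header.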
__ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
__ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
// eax: destination FixedDoubleArray
// edi: number of elements
// edx: receiver
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_double_array_map()));
__ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
__ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
// Replace receiver's backing store with newly created FixedDoubleArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
__ mov(ebx, eax);
__ RecordWriteField(edx,
JSObject::kElementsOffset,
ebx,
edi,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
// Prepare for conversion loop.
ExternalReference canonical_the_hole_nan_reference =
ExternalReference::address_of_the_hole_nan();
XMMRegister the_hole_nan = xmm1;
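// If SSE2 is available, cache the canonical hole NaN in xmm1 so each hole in
// the conversion loop below can be stored with a single movdbl.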
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(the_hole_nan,
Operand::StaticVariable(canonical_the_hole_nan_reference));
}
__ jmp(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
// Restore registers before jumping into runtime.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(ebx);
__ pop(eax);
__ jmp(fail);
// Convert and copy elements
// esi: source FixedArray
// edi: number of elements to convert/copy
__ bind(&loop);
__ sub(edi, Immediate(Smi::FromInt(1)));
__ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
// ebx: current element from source
// edi: index of current element
__ JumpIfNotSmi(ebx, &convert_hole);
// Normal smi, convert it to double and store.
__ SmiUntag(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
__ push(ebx);
__ fild_s(Operand(esp, 0));
__ pop(ebx);
__ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
}
__ jmp(&entry);
// Found hole, store hole_nan_as_double instead.
__ bind(&convert_hole);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
the_hole_nan);
} else {
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
__ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
}
__ bind(&entry);
__ test(edi, edi);
__ j(not_zero, &loop);
__ pop(ebx);
__ pop(eax);
// eax: value
// ebx: target map
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
__ RecordWriteField(edx,
HeapObject::kMapOffset,
ebx,
edi,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Restore esi.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label loop, entry, convert_hole, gc_required;
__ push(eax);
__ push(edx);
__ push(ebx);
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
// Allocate new FixedArray.
// ebx: length of source FixedDoubleArray (smi-tagged)
__ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
__ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
// eax: destination FixedArray
// ebx: number of elements
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_array_map()));
__ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(ebx);
__ pop(edx);
__ pop(eax);
__ jmp(fail);
// Box doubles into heap numbers.
// edi: source FixedDoubleArray
// eax: destination FixedArray
__ bind(&loop);
__ sub(ebx, Immediate(Smi::FromInt(1)));
// ebx: index of current element (smi-tagged)
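// The hole is stored as a NaN with a distinctive upper word; the check below
// compares only the upper 32 bits (header offset + sizeof(kHoleNanLower32)).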
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
__ j(equal, &convert_hole);
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
// edx: new heap number
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
__ mov(esi, FieldOperand(edi, ebx, times_4, offset));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
}
__ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
__ mov(esi, ebx);
__ RecordWriteArray(eax,
edx,
esi,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ jmp(&entry, Label::kNear);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
masm->isolate()->factory()->the_hole_value());
__ bind(&entry);
__ test(ebx, ebx);
__ j(not_zero, &loop);
__ pop(ebx);
__ pop(edx);
// ebx: target map
// edx: receiver
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
__ RecordWriteField(edx,
HeapObject::kMapOffset,
ebx,
edi,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
__ RecordWriteField(edx,
JSObject::kElementsOffset,
eax,
edi,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Restore registers.
__ pop(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

15
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -258,13 +258,16 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
RelocInfo rinfo(call_target_address,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
}
void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@ -280,8 +283,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Assembler::set_target_address_at(call_target_address,
check_code->entry());
check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, check_code);
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(call_target_address, check_code);
}

12
deps/v8/src/ia32/disasm-ia32.cc

@ -179,10 +179,6 @@ class InstructionTable {
public:
InstructionTable();
const InstructionDesc& Get(byte x) const { return instructions_[x]; }
static InstructionTable* get_instance() {
static InstructionTable table;
return &table;
}
private:
InstructionDesc instructions_[256];
@ -263,13 +259,15 @@ void InstructionTable::AddJumpConditionalShort() {
}
static InstructionTable instruction_table;
// The IA32 disassembler implementation.
class DisassemblerIA32 {
public:
DisassemblerIA32(const NameConverter& converter,
bool abort_on_unimplemented = true)
: converter_(converter),
instruction_table_(InstructionTable::get_instance()),
tmp_buffer_pos_(0),
abort_on_unimplemented_(abort_on_unimplemented) {
tmp_buffer_[0] = '\0';
@ -283,11 +281,11 @@ class DisassemblerIA32 {
private:
const NameConverter& converter_;
InstructionTable* instruction_table_;
v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
unsigned int tmp_buffer_pos_;
bool abort_on_unimplemented_;
enum {
eax = 0,
ecx = 1,
@ -886,7 +884,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
bool processed = true; // Will be set to false if the current instruction
// is not in 'instructions' table.
const InstructionDesc& idesc = instruction_table_->Get(*data);
const InstructionDesc& idesc = instruction_table.Get(*data);
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
AppendToBuffer(idesc.mnem);

177
deps/v8/src/ia32/full-codegen-ia32.cc

@ -266,10 +266,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY);
EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@ -714,8 +711,6 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
bool binding_needs_init =
mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@ -727,7 +722,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(StackOperand(variable), result_register());
} else if (binding_needs_init) {
} else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
@ -759,7 +754,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (binding_needs_init) {
} else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
@ -772,13 +767,9 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ push(esi);
__ push(Immediate(variable->name()));
// Declaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR ||
mode == CONST ||
mode == CONST_HARMONY ||
mode == LET);
PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
? READ_ONLY : NONE;
// Declaration nodes are always introduced in one of three modes.
ASSERT(mode == VAR || mode == CONST || mode == LET);
PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@ -787,7 +778,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
increment_stack_height(3);
if (function != NULL) {
VisitForStackValue(function);
} else if (binding_needs_init) {
} else if (mode == CONST || mode == LET) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
increment_stack_height();
} else {
@ -929,17 +920,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax);
increment_stack_height();
// Check for proxies.
Label call_runtime;
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
__ j(below_equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
Label next;
Label next, call_runtime;
__ mov(ecx, eax);
__ bind(&next);
@ -1010,17 +995,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register eax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
__ j(above, &non_proxy);
__ mov(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ push(ebx); // Smi
__ push(eax); // Array
__ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
__ push(eax);
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
@ -1037,23 +1014,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(ebx, Operand(esp, 2 * kPointerSize));
__ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
// Get the expected map from the stack or a smi in the
// Get the expected map from the stack or a zero map in the
// permanent slow case into register edx.
__ mov(edx, Operand(esp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
// If not, we have to filter the key.
Label update_each;
__ mov(ecx, Operand(esp, 4 * kPointerSize));
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
ASSERT(Smi::FromInt(0) == 0);
__ test(edx, edx);
__ j(zero, &update_each);
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
@ -1108,7 +1079,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->strict_mode_flag());
FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
__ push(Immediate(info));
__ CallStub(&stub);
} else {
@ -1138,7 +1109,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_non_strict_eval()) {
if (s->calls_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@ -1152,7 +1123,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@ -1197,7 +1168,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_non_strict_eval()) {
if (s->calls_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@ -1235,13 +1206,12 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == CONST ||
local->mode() == CONST_HARMONY ||
local->mode() == LET) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
if (local->mode() == CONST) {
__ mov(eax, isolate()->factory()->undefined_value());
} else { // LET || CONST_HARMONY
} else { // LET
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
@ -1277,7 +1247,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
if (!var->binding_needs_init()) {
if (var->mode() != LET && var->mode() != CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
@ -1285,14 +1255,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
if (var->mode() == LET || var->mode() == CONST_HARMONY) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
if (var->mode() == LET) {
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
ASSERT(var->mode() == CONST);
} else { // CONST
__ mov(eax, isolate()->factory()->undefined_value());
}
__ bind(&done);
@ -1482,18 +1448,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
if (constant_elements_values->map() ==
__ push(Immediate(expr->constant_elements()));
if (expr->constant_elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
ASSERT(expr->depth() == 1);
FastCloneShallowArrayStub stub(
@ -1505,14 +1465,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
__ CallStub(&stub);
}
@ -1538,61 +1492,22 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Store the subexpression value in the array's elements.
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
Label element_done;
Label double_elements;
Label smi_element;
Label slow_elements;
Label fast_elements;
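// Dispatch on the array's elements kind and on the value: FAST_DOUBLE_ELEMENTS
// arrays are handled at double_elements, smi values at smi_element, object
// values in a FAST_ELEMENTS array at fast_elements, and a non-smi value in a
// FAST_SMI_ONLY_ELEMENTS array falls through to slow_elements, which calls the
// runtime to perform the elements transition.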
__ CheckFastElements(edi, &double_elements);
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(result_register(), &smi_element);
__ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
// Storing into the array literal requires an elements transition. Call into
// the runtime.
__ bind(&slow_elements);
__ push(Operand(esp, 0)); // Copy of array literal.
__ push(Immediate(Smi::FromInt(i)));
__ push(result_register());
__ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
__ push(Immediate(Smi::FromInt(strict_mode_flag()))); // Strict mode.
__ CallRuntime(Runtime::kSetProperty, 5);
__ jmp(&element_done);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ mov(ecx, Immediate(Smi::FromInt(i)));
__ StoreNumberToDoubleElements(result_register(),
ebx,
ecx,
edx,
xmm0,
&slow_elements,
false);
__ jmp(&element_done);
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ mov(FieldOperand(ebx, offset), result_register());
Label no_map_change;
__ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store.
__ RecordWriteField(ebx, offset, result_register(), ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ jmp(&element_done);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ mov(FieldOperand(ebx, offset), result_register());
// Fall through
__ bind(&element_done);
__ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
__ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear);
__ push(Operand(esp, 0));
__ CallRuntime(Runtime::kNonSmiElementStored, 1);
__ bind(&no_map_change);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@ -1975,9 +1890,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
} else if (var->mode() != CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
if (FLAG_debug_code && op == Token::INIT_LET) {
@ -2190,7 +2104,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
}
CallFunctionStub stub(arg_count, flags);
__ CallStub(&stub, expr->id());
__ CallStub(&stub);
if (record_call_target) {
// There is a one element cache in the instruction stream.
#ifdef DEBUG
@ -2867,10 +2781,9 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ bind(&heapnumber_allocated);
__ PrepareCallCFunction(1, ebx);
__ mov(eax, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
__ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()),
1);
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
@ -4234,25 +4147,33 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cc = equal;
__ pop(edx);
break;
case Token::LT:
cc = less;
__ pop(edx);
break;
case Token::GT:
cc = greater;
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = less;
__ mov(edx, result_register());
__ pop(eax);
break;
case Token::LTE:
cc = less_equal;
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = greater_equal;
__ mov(edx, result_register());
__ pop(eax);
break;
case Token::GTE:
cc = greater_equal;
__ pop(edx);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
__ pop(edx);
decrement_stack_height();
bool inline_smi_code = ShouldInlineSmiCase(op);

142
deps/v8/src/ia32/ic-ia32.cc

@ -860,10 +860,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Code::ExtraICState extra_state) {
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@ -873,11 +873,11 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
extra_ic_state,
NORMAL,
argc);
Isolate* isolate = masm->isolate();
isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
eax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@ -903,9 +903,9 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Check for boolean.
__ bind(&non_string);
__ cmp(edx, isolate->factory()->true_value());
__ cmp(edx, FACTORY->true_value());
__ j(equal, &boolean);
__ cmp(edx, isolate->factory()->false_value());
__ cmp(edx, FACTORY->false_value());
__ j(not_equal, &miss);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
@ -913,7 +913,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
no_reg);
__ bind(&miss);
}
@ -943,9 +944,8 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
// The generated code falls through if the call should be handled by runtime.
void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@ -969,10 +969,10 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
}
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
Code::ExtraICState extra_state) {
static void GenerateCallMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@ -1029,7 +1029,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
}
// Invoke the function.
CallKind call_kind = CallICBase::Contextual::decode(extra_state)
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@ -1043,7 +1043,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_state) {
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@ -1054,10 +1054,38 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
extra_state);
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
GenerateMiss(masm, argc, extra_state);
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc, Code::kNoExtraICState);
}
void CallIC::GenerateMiss(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
@ -1159,8 +1187,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
Code::kNoExtraICState);
GenerateMonomorphicCacheProbe(masm,
argc,
Code::KEYED_CALL_IC,
Code::kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@ -1223,12 +1253,25 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(ecx, &miss);
Condition cond = masm->IsObjectStringType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
CallICBase::GenerateNormal(masm, argc);
GenerateCallNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
}
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@ -1537,51 +1580,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ebx : target map
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);
}
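// Fall through here when the fast path fails or transition tracing is on:
// re-push the receiver beneath the return address and tail-call the runtime.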
__ pop(ebx);
__ push(edx);
__ push(ebx); // return address
__ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
}
void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ebx : target map
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);
}
__ pop(ebx);
__ push(edx);
__ push(ebx); // return address
__ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
}
#undef __
@ -1593,9 +1591,11 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return less;
case Token::GT:
return greater;
// Reverse left and right operands to obtain ECMA-262 conversion order.
return less;
case Token::LTE:
return less_equal;
// Reverse left and right operands to obtain ECMA-262 conversion order.
return greater_equal;
case Token::GTE:
return greater_equal;
default:

192
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -355,12 +355,6 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
return value->Number();
}
Immediate LCodeGen::ToImmediate(LOperand* op) {
LConstantOperand* const_op = LConstantOperand::cast(op);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
@ -1580,40 +1574,32 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
if (right->IsConstantOperand()) {
__ cmp(ToOperand(left), ToImmediate(right));
} else {
__ cmp(ToRegister(left), ToOperand(right));
}
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
int next_block =
EvalComparison(instr->op(), left_val, right_val) ? true_block
: false_block;
EmitGoto(next_block);
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
} else {
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
} else {
if (right->IsConstantOperand()) {
__ cmp(ToRegister(left), ToImmediate(right));
} else if (left->IsConstantOperand()) {
__ cmp(ToOperand(right), ToImmediate(left));
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
__ cmp(ToRegister(left), ToOperand(right));
}
}
EmitBranch(true_block, false_block, cc);
EmitCmpI(left, right);
}
Condition cc = TokenToCondition(instr->op(), instr->is_double());
EmitBranch(true_block, false_block, cc);
}
@ -2043,6 +2029,9 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
Label true_value, done;
__ test(eax, Operand(eax));
__ j(condition, &true_value, Label::kNear);
@ -2127,18 +2116,12 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
__ mov(FieldOperand(object, offset), value);
// Cells are always in the remembered set.
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteField(object,
offset,
value,
address,
kSaveFPRegs,
OMIT_REMEMBERED_SET,
check_needed);
}
__ RecordWriteField(object,
offset,
value,
address,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
}
@ -2166,19 +2149,10 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ mov(ContextOperand(context, instr->slot_index()), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWriteContextSlot(context,
offset,
value,
temp,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs);
}
}
@ -2199,7 +2173,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
LookupResult lookup(isolate());
LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsProperty() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@ -2640,7 +2614,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
LoadHeapObject(result, instr->hydrogen()->closure());
__ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
@ -3172,36 +3146,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ mov(FieldOperand(object, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
value,
temp,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
__ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(FieldOperand(temp, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWriteField(temp,
offset,
value,
object,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
}
}
}
@ -3300,21 +3259,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key,
FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
__ RecordWrite(elements,
key,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWrite(elements, key, value, kSaveFPRegs);
}
}
@ -3352,48 +3303,6 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register new_map_reg = ToRegister(instr->new_map_reg());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
ElementsKind from_kind = from_map->elements_kind();
ElementsKind to_kind = to_map->elements_kind();
Label not_applicable;
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
__ mov(new_map_reg, to_map);
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register object_reg = ToRegister(instr->object());
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp_reg(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp_reg()), kDontSaveFPRegs);
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
} else {
UNREACHABLE();
}
__ bind(&not_applicable);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
@ -4186,17 +4095,11 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
// Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(instr->hydrogen()->constant_elements()));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@ -4212,9 +4115,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@ -4313,7 +4214,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
FastNewClosureStub stub(shared_info->strict_mode_flag());
FastNewClosureStub stub(
shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ push(Immediate(shared_info));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@ -4345,11 +4247,12 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition final_branch_condition =
EmitTypeofIs(true_label, false_label, input, instr->type_literal());
if (final_branch_condition != no_condition) {
EmitBranch(true_block, false_block, final_branch_condition);
}
Condition final_branch_condition = EmitTypeofIs(true_label,
false_label,
input,
instr->type_literal());
EmitBranch(true_block, false_block, final_branch_condition);
}
@ -4416,8 +4319,11 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = zero;
} else {
final_branch_condition = not_equal;
__ jmp(false_label);
// A dead branch instruction will be generated after this point.
}
return final_branch_condition;
}

12
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -131,8 +131,8 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
StrictModeFlag strict_mode_flag() const {
return info()->strict_mode_flag();
int strict_mode_flag() const {
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
void set_dynamic_frame_alignment(bool value) {
@ -227,7 +227,6 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
@ -262,6 +261,7 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
XMMRegister result,
bool deoptimize_on_undefined,
@ -270,10 +270,8 @@ class LCodeGen BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name);
Condition EmitTypeofIs(Label* true_label, Label* false_label,
Register input, Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to

46
deps/v8/src/ia32/lithium-ia32.cc

@ -452,12 +452,6 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
}
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
@ -1440,11 +1434,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
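// GT and LTE are implemented by swapping the fixed argument registers here and
// reversing the condition after the stub call (see DoCmpT), matching the
// ECMA-262 conversion-order handling in full-codegen.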
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
LCmpT* result = new LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1456,22 +1452,15 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left;
LOperand* right;
if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
left = UseRegisterOrConstantAtStart(instr->left());
right = UseRegisterOrConstantAtStart(instr->right());
} else {
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
}
}
@ -2044,27 +2033,6 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
new LTransitionElementsKind(object, new_map_reg, temp_reg);
return DefineSameAsFirst(result);
} else {
LOperand* object = UseFixed(instr->object(), eax);
LOperand* fixed_object_reg = FixedTemp(edx);
LOperand* new_map_reg = FixedTemp(ebx);
LTransitionElementsKind* result =
new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
return MarkAsCall(DefineFixed(result, eax), instr);
}
}
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();

32
deps/v8/src/ia32/lithium-ia32.h

@ -156,7 +156,6 @@ class LCodeGen;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@ -1296,6 +1295,7 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@ -1312,9 +1312,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@ -1619,6 +1617,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@ -1640,8 +1639,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
bool strict_mode() { return strict_mode_flag() == kStrictMode; }
bool strict_mode() { return hydrogen()->strict_mode(); }
};
@ -1735,30 +1733,6 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
};
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
LOperand* temp_reg) {
inputs_[0] = object;
temps_[0] = new_map_temp;
temps_[1] = temp_reg;
}
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* new_map_reg() { return temps_[0]; }
LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
class LStringAdd: public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {

44
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -352,7 +352,7 @@ void MacroAssembler::SafePush(const Immediate& x) {
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
// see ROOT_ACCESSOR macro in factory.h
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
Handle<Object> value(&isolate()->heap()->roots_address()[index]);
cmp(with, value);
}
@ -1492,19 +1492,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
}
void MacroAssembler::BooleanBitTest(Register object,
int field_offset,
int bit_index) {
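// bit_index is relative to the smi payload; skip the smi tag (and shift) bits
// before selecting which byte and which bit within it to test.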
bit_index += kSmiTagSize + kSmiShiftSize;
ASSERT(IsPowerOf2(kBitsPerByte));
int byte_index = bit_index / kBitsPerByte;
int byte_bit_index = bit_index & (kBitsPerByte - 1);
test_b(FieldOperand(object, field_offset + byte_index),
static_cast<byte>(1 << byte_bit_index));
}
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
@ -1535,8 +1522,7 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss,
bool miss_on_bound_function) {
Label* miss) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
@ -1544,15 +1530,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
CmpObjectType(function, JS_FUNCTION_TYPE, result);
j(not_equal, miss);
if (miss_on_bound_function) {
// If a bound function, go to miss label.
mov(scratch,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
}
// Make sure that the function has an instance prototype.
Label non_instance;
movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
@ -2087,16 +2064,23 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function->is_compiled());
// Get the function and set up the context.
mov(edi, Immediate(Handle<JSFunction>(function)));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
expected, actual, flag, call_wrapper, call_kind);
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
expected, actual, flag, call_wrapper, call_kind);
} else {
Handle<Code> code(function->code());
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
flag, call_wrapper, call_kind);
}
}

6
deps/v8/src/ia32/macro-assembler-ia32.h

@ -594,9 +594,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
// Check a boolean-bit of a Smi field.
void BooleanBitTest(Register object, int field_offset, int bit_index);
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
@ -613,8 +610,7 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss,
bool miss_on_bound_function = false);
Label* miss);
// Generates code for reporting that an illegal operation has
// occurred.

5
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -1141,11 +1141,6 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
} else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
// Subject string might have been a ConsString that underwent
// short-circuiting during GC. That will not change start_address but
// will change pointer inside the subject handle.
frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;

707
deps/v8/src/ia32/stub-cache-ia32.cc

File diff suppressed because it is too large

1087
deps/v8/src/ic.cc

File diff suppressed because it is too large

190
deps/v8/src/ic.h

@ -198,60 +198,47 @@ class CallICBase: public IC {
class Contextual: public BitField<bool, 0, 1> {};
class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
// Returns a JSFunction or a Failure.
protected:
CallICBase(Code::Kind kind, Isolate* isolate)
: IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
public:
MUST_USE_RESULT MaybeObject* LoadFunction(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
protected:
CallICBase(Code::Kind kind, Isolate* isolate)
: IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
Code::Kind kind_;
bool TryUpdateExtraICState(LookupResult* lookup,
Handle<Object> object,
Code::ExtraICState* extra_ic_state);
// Compute a monomorphic stub if possible, otherwise return a null handle.
Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
State state,
Code::ExtraICState extra_state,
Handle<Object> object,
Handle<String> name);
MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
// Update the inline cache and the global stub cache based on the lookup
// result.
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
// Returns a JSFunction if the object can be called as a function, and
// patches the stack to be ready for the call. Otherwise, it returns the
// undefined value.
Handle<Object> TryCallAsFunction(Handle<Object> object);
// Returns a JSFunction if the object can be called as a function,
// and patches the stack to be ready for the call.
// Otherwise, it returns the undefined value.
Object* TryCallAsFunction(Object* object);
void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
static void Clear(Address address, Code* target);
// Platform-specific code generation functions used by both call and
// keyed call.
static void GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
Code::ExtraICState extra_state);
static void GenerateNormal(MacroAssembler* masm, int argc);
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Code::ExtraICState extra_state);
Code::Kind kind_;
friend class IC;
};
@ -265,24 +252,16 @@ class CallIC: public CallICBase {
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_state) {
GenerateMiss(masm, argc, extra_state);
Code::ExtraICState extra_ic_state) {
GenerateMiss(masm, argc, extra_ic_state);
}
static void GenerateMiss(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_state) {
CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
}
Code::ExtraICState extra_ic_state);
static void GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state);
static void GenerateNormal(MacroAssembler* masm, int argc) {
CallICBase::GenerateNormal(masm, argc);
GenerateMiss(masm, argc, Code::kNoExtraICState);
}
static void GenerateNormal(MacroAssembler* masm, int argc);
};
@ -301,12 +280,7 @@ class KeyedCallIC: public CallICBase {
static void GenerateInitialize(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
}
static void GenerateMiss(MacroAssembler* masm, int argc) {
CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
Code::kNoExtraICState);
}
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
@ -347,15 +321,17 @@ class LoadIC: public IC {
Handle<String> name);
// Stub accessors.
Handle<Code> megamorphic_stub() {
return isolate()->builtins()->LoadIC_Megamorphic();
Code* megamorphic_stub() {
return isolate()->builtins()->builtin(
Builtins::kLoadIC_Megamorphic);
}
static Code* initialize_stub() {
return Isolate::Current()->builtins()->builtin(
Builtins::kLoadIC_Initialize);
}
Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->LoadIC_PreMonomorphic();
Code* pre_monomorphic_stub() {
return isolate()->builtins()->builtin(
Builtins::kLoadIC_PreMonomorphic);
}
static void Clear(Address address, Code* target);
@ -376,39 +352,38 @@ class KeyedIC: public IC {
explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
virtual ~KeyedIC() {}
virtual Handle<Code> GetElementStubWithoutMapCheck(
virtual MaybeObject* GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind) = 0;
protected:
virtual Handle<Code> string_stub() {
return Handle<Code>::null();
virtual Code* string_stub() {
return NULL;
}
virtual Code::Kind kind() const = 0;
Handle<Code> ComputeStub(Handle<JSObject> receiver,
MaybeObject* ComputeStub(JSObject* receiver,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> default_stub);
Code* default_stub);
virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
virtual MaybeObject* ComputePolymorphicStub(MapList* receiver_maps,
StrictModeFlag strict_mode) = 0;
Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
Handle<Map> receiver_map,
MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
StrictModeFlag strict_mode);
private:
void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
void GetReceiverMapsForStub(Code* stub, MapList* result);
Handle<Code> ComputeMonomorphicStub(Handle<JSObject> receiver,
MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> default_stub);
Code* default_stub);
Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
StubKind stub_kind);
MaybeObject* ComputeTransitionedMap(JSObject* receiver, StubKind stub_kind);
static bool IsTransitionStubKind(StubKind stub_kind) {
return stub_kind > STORE_NO_TRANSITION;
@ -448,18 +423,20 @@ class KeyedLoadIC: public KeyedIC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
virtual Handle<Code> GetElementStubWithoutMapCheck(
virtual MaybeObject* GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind);
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
StrictModeFlag strict_mode);
virtual MaybeObject* ComputePolymorphicStub(
MapList* receiver_maps,
StrictModeFlag strict_mode);
virtual Handle<Code> string_stub() {
return isolate()->builtins()->KeyedLoadIC_String();
virtual Code* string_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_String);
}
private:
@ -474,20 +451,25 @@ class KeyedLoadIC: public KeyedIC {
return Isolate::Current()->builtins()->builtin(
Builtins::kKeyedLoadIC_Initialize);
}
Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_Generic();
Code* megamorphic_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_Generic);
}
Handle<Code> generic_stub() {
return isolate()->builtins()->KeyedLoadIC_Generic();
Code* generic_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_Generic);
}
Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
Code* pre_monomorphic_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_PreMonomorphic);
}
Handle<Code> indexed_interceptor_stub() {
return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
Code* indexed_interceptor_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_IndexedInterceptor);
}
Handle<Code> non_strict_arguments_stub() {
return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
Code* non_strict_arguments_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_NonStrictArguments);
}
static void Clear(Address address, Code* target);
@ -552,11 +534,13 @@ class StoreIC: public IC {
return Isolate::Current()->builtins()->builtin(
Builtins::kStoreIC_Initialize_Strict);
}
Handle<Code> global_proxy_stub() {
return isolate()->builtins()->StoreIC_GlobalProxy();
Code* global_proxy_stub() {
return isolate()->builtins()->builtin(
Builtins::kStoreIC_GlobalProxy);
}
Handle<Code> global_proxy_stub_strict() {
return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
Code* global_proxy_stub_strict() {
return isolate()->builtins()->builtin(
Builtins::kStoreIC_GlobalProxy_Strict);
}
static void Clear(Address address, Code* target);
@ -588,18 +572,17 @@ class KeyedStoreIC: public KeyedIC {
StrictModeFlag strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
static void GenerateNonStrictArguments(MacroAssembler* masm);
static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
virtual Handle<Code> GetElementStubWithoutMapCheck(
virtual MaybeObject* GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind);
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
StrictModeFlag strict_mode);
virtual MaybeObject* ComputePolymorphicStub(
MapList* receiver_maps,
StrictModeFlag strict_mode);
private:
// Update the inline cache.
@ -622,24 +605,29 @@ class KeyedStoreIC: public KeyedIC {
return Isolate::Current()->builtins()->builtin(
Builtins::kKeyedStoreIC_Initialize);
}
Code* megamorphic_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedStoreIC_Generic);
}
static Code* initialize_stub_strict() {
return Isolate::Current()->builtins()->builtin(
Builtins::kKeyedStoreIC_Initialize_Strict);
}
Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
Handle<Code> megamorphic_stub_strict() {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
Code* megamorphic_stub_strict() {
return isolate()->builtins()->builtin(
Builtins::kKeyedStoreIC_Generic_Strict);
}
Handle<Code> generic_stub() {
return isolate()->builtins()->KeyedStoreIC_Generic();
Code* generic_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedStoreIC_Generic);
}
Handle<Code> generic_stub_strict() {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
Code* generic_stub_strict() {
return isolate()->builtins()->builtin(
Builtins::kKeyedStoreIC_Generic_Strict);
}
Handle<Code> non_strict_arguments_stub() {
return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
Code* non_strict_arguments_stub() {
return isolate()->builtins()->builtin(
Builtins::kKeyedStoreIC_NonStrictArguments);
}
static void Clear(Address address, Code* target);
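Aside: a pattern that repeats through the ic.h hunks above is the stub accessors changing from Handle<Code> back to raw Code* returned by builtins()->builtin(...). The toy below illustrates the general difference under assumed names (HandleSketch, Code, the manual `slot`): a handle reads through a slot the GC can update when it moves objects, while a raw pointer is only a snapshot. This is a sketch of the idea, not V8's handle machinery.

#include <iostream>

// Toy handle: reads through a slot that a moving GC could update.
template <class T>
class HandleSketch {
 public:
  explicit HandleSketch(T** location) : location_(location) {}
  T* operator*() const { return *location_; }
 private:
  T** location_;  // slot owned by a handle scope / root table
};

struct Code { int id; };

int main() {
  Code old_location = { 1 };
  Code new_location = { 1 };          // pretend the GC relocated the object here
  Code* slot = &old_location;         // root slot the GC knows about

  HandleSketch<Code> handle(&slot);   // handle goes through the slot
  Code* raw = slot;                   // raw pointer is a snapshot

  slot = &new_location;               // "GC" updates the tracked slot

  std::cout << (*handle == &new_location) << " "   // 1: handle follows the move
            << (raw == &new_location) << "\n";     // 0: raw pointer is now stale
  return 0;
}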

3
deps/v8/src/incremental-marking-inl.h

@ -143,6 +143,9 @@ void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
ASSERT(obj->Size() >= 2*kPointerSize);
ASSERT(IsMarking());
Marking::WhiteToGrey(mark_bit);
}

26
deps/v8/src/incremental-marking.cc

@ -50,8 +50,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
steps_took_since_last_gc_(0),
should_hurry_(false),
allocation_marking_factor_(0),
allocated_(0),
no_marking_scope_depth_(0) {
allocated_(0) {
}
@ -88,16 +87,6 @@ void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
}
void IncrementalMarking::RecordCodeTargetPatch(Code* host,
Address pc,
HeapObject* value) {
if (IsMarking()) {
RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
RecordWriteIntoCode(host, &rinfo, value);
}
}
void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
if (IsMarking()) {
Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
@ -354,8 +343,7 @@ bool IncrementalMarking::WorthActivating() {
static const intptr_t kActivationThreshold = 0;
#endif
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
return FLAG_incremental_marking &&
!Serializer::enabled() &&
heap_->PromotedSpaceSize() > kActivationThreshold;
}
@ -473,9 +461,7 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
#ifdef DEBUG
// Marking bits are cleared by the sweeper.
if (FLAG_verify_heap) {
heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
}
heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
#endif
heap_->CompletelyClearInstanceofCache();
@ -706,8 +692,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
if (allocated_ < kAllocatedThreshold) return;
if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
double start = 0;
@ -755,8 +739,8 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
}
MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
ASSERT(Marking::IsGrey(obj_mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
Marking::MarkBlack(obj_mark_bit);
MemoryChunk::IncrementLiveBytes(obj->address(), size);
}
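Aside: the incremental-marking.cc hunk above shows Step() returning early until allocated_ crosses kAllocatedThreshold and then sizing its work from allocated_ * allocation_marking_factor_. A small stand-alone sketch of that budgeting logic follows; the class name, threshold, and factor values are assumptions for illustration.

#include <iostream>

// Illustrative budgeting: defer marking until enough bytes were allocated,
// then scale the amount of marking work by a marking factor.
class StepBudgetSketch {
 public:
  StepBudgetSketch() : allocated_(0), allocation_marking_factor_(8) {}

  // Called on each allocation; returns bytes of marking work to do now
  // (0 while still below the threshold).
  long long Step(long long allocated_bytes) {
    static const long long kAllocatedThreshold = 65536;
    allocated_ += allocated_bytes;
    if (allocated_ < kAllocatedThreshold) return 0;
    long long bytes_to_process = allocated_ * allocation_marking_factor_;
    allocated_ = 0;  // budget consumed; start accumulating again
    return bytes_to_process;
  }

 private:
  long long allocated_;
  long long allocation_marking_factor_;
};

int main() {
  StepBudgetSketch budget;
  std::cout << budget.Step(1024) << "\n";    // 0: still below the threshold
  std::cout << budget.Step(70000) << "\n";   // (1024 + 70000) * 8 = 568192
  return 0;
}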

11
deps/v8/src/incremental-marking.h

@ -127,7 +127,6 @@ class IncrementalMarking {
inline void RecordWriteIntoCode(HeapObject* obj,
RelocInfo* rinfo,
Object* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
void RecordWriteOfCodeEntry(JSFunction* host, Object** slot, Code* value);
@ -198,14 +197,6 @@ class IncrementalMarking {
}
}
void EnterNoMarkingScope() {
no_marking_scope_depth_++;
}
void LeaveNoMarkingScope() {
no_marking_scope_depth_--;
}
private:
void set_should_hurry(bool val) {
should_hurry_ = val;
@ -257,8 +248,6 @@ class IncrementalMarking {
int allocation_marking_factor_;
intptr_t allocated_;
int no_marking_scope_depth_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
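Aside: the incremental-marking.h hunk above removes EnterNoMarkingScope()/LeaveNoMarkingScope() and the no_marking_scope_depth_ counter that Step() consults. The sketch below shows how such a counted scope is typically wrapped in an RAII guard so nested scopes stay balanced; all names here are illustrative, not V8's.

#include <cassert>
#include <iostream>

// Illustrative marker with the counted no-marking scope from the hunk above.
class IncrementalMarkerSketch {
 public:
  IncrementalMarkerSketch() : no_marking_scope_depth_(0) {}
  void EnterNoMarkingScope() { no_marking_scope_depth_++; }
  void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
  // Step() in the hunk checks `no_marking_scope_depth_ > 0` and bails out.
  bool MarkingAllowed() const { return no_marking_scope_depth_ == 0; }
 private:
  int no_marking_scope_depth_;  // a depth, so scopes can nest
};

// RAII guard keeping Enter/Leave calls balanced even on early returns.
class NoMarkingScopeSketch {
 public:
  explicit NoMarkingScopeSketch(IncrementalMarkerSketch* marker) : marker_(marker) {
    marker_->EnterNoMarkingScope();
  }
  ~NoMarkingScopeSketch() { marker_->LeaveNoMarkingScope(); }
 private:
  IncrementalMarkerSketch* marker_;
};

int main() {
  IncrementalMarkerSketch marker;
  {
    NoMarkingScopeSketch outer(&marker);
    NoMarkingScopeSketch inner(&marker);  // nesting handled by the counter
    assert(!marker.MarkingAllowed());
  }
  assert(marker.MarkingAllowed());
  std::cout << "scopes balanced\n";
  return 0;
}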

37
deps/v8/src/interpreter-irregexp.cc

@ -33,9 +33,9 @@
#include "utils.h"
#include "ast.h"
#include "bytecodes-irregexp.h"
#include "jsregexp.h"
#include "interpreter-irregexp.h"
namespace v8 {
namespace internal {
@ -187,12 +187,12 @@ class BacktrackStack {
template <typename Char>
static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
const byte* code_base,
Vector<const Char> subject,
int* registers,
int current,
uint32_t current_char) {
static bool RawMatch(Isolate* isolate,
const byte* code_base,
Vector<const Char> subject,
int* registers,
int current,
uint32_t current_char) {
const byte* pc = code_base;
// BacktrackStack ensures that the memory allocated for the backtracking stack
// is returned to the system or cached if there is no stack being cached at
@ -211,24 +211,24 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
switch (insn & BYTECODE_MASK) {
BYTECODE(BREAK)
UNREACHABLE();
return RegExpImpl::RE_FAILURE;
return false;
BYTECODE(PUSH_CP)
if (--backtrack_stack_space < 0) {
return RegExpImpl::RE_EXCEPTION;
return false; // No match on backtrack stack overflow.
}
*backtrack_sp++ = current;
pc += BC_PUSH_CP_LENGTH;
break;
BYTECODE(PUSH_BT)
if (--backtrack_stack_space < 0) {
return RegExpImpl::RE_EXCEPTION;
return false; // No match on backtrack stack overflow.
}
*backtrack_sp++ = Load32Aligned(pc + 4);
pc += BC_PUSH_BT_LENGTH;
break;
BYTECODE(PUSH_REGISTER)
if (--backtrack_stack_space < 0) {
return RegExpImpl::RE_EXCEPTION;
return false; // No match on backtrack stack overflow.
}
*backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
pc += BC_PUSH_REGISTER_LENGTH;
@ -278,9 +278,9 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
pc += BC_POP_REGISTER_LENGTH;
break;
BYTECODE(FAIL)
return RegExpImpl::RE_FAILURE;
return false;
BYTECODE(SUCCEED)
return RegExpImpl::RE_SUCCESS;
return true;
BYTECODE(ADVANCE_CP)
current += insn >> BYTECODE_SHIFT;
pc += BC_ADVANCE_CP_LENGTH;
@ -625,12 +625,11 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
}
RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
Isolate* isolate,
Handle<ByteArray> code_array,
Handle<String> subject,
int* registers,
int start_position) {
bool IrregexpInterpreter::Match(Isolate* isolate,
Handle<ByteArray> code_array,
Handle<String> subject,
int* registers,
int start_position) {
ASSERT(subject->IsFlat());
AssertNoAllocation a;
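Aside: the interpreter-irregexp hunks above revert the interpreter from returning a tri-state RegExpImpl::IrregexpResult back to a plain bool. The sketch below shows what the tri-state form buys the caller (see the jsregexp.cc hunk further down): RE_EXCEPTION lets a backtrack-stack overflow be turned into a real StackOverflow error instead of being reported as "no match". RawMatchSketch and its parameters are invented for illustration.

#include <iostream>

enum IrregexpResultSketch { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };

// Pretend interpreter: reports an exception when the pattern needs more
// backtrack slots than the stack provides, otherwise success or failure.
IrregexpResultSketch RawMatchSketch(int needed_backtrack_slots,
                                    int available_slots,
                                    bool pattern_matches) {
  if (needed_backtrack_slots > available_slots) return RE_EXCEPTION;
  return pattern_matches ? RE_SUCCESS : RE_FAILURE;
}

int main() {
  IrregexpResultSketch r = RawMatchSketch(1000, 10, true);
  if (r == RE_EXCEPTION) {
    std::cout << "raise StackOverflow\n";  // caller can surface a real error
  } else {
    std::cout << (r == RE_SUCCESS ? "match\n" : "no match\n");
  }
  return 0;
}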

12
deps/v8/src/interpreter-irregexp.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -36,11 +36,11 @@ namespace internal {
class IrregexpInterpreter {
public:
static RegExpImpl::IrregexpResult Match(Isolate* isolate,
Handle<ByteArray> code,
Handle<String> subject,
int* captures,
int start_position);
static bool Match(Isolate* isolate,
Handle<ByteArray> code,
Handle<String> subject,
int* captures,
int start_position);
};

14
deps/v8/src/isolate.cc

@ -98,7 +98,6 @@ void ThreadLocalTop::InitializeInternal() {
failed_access_check_callback_ = NULL;
save_context_ = NULL;
catcher_ = NULL;
top_lookup_result_ = NULL;
// These members are re-initialized later after deserialization
// is complete.
@ -481,9 +480,6 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
it.frame()->Iterate(v);
}
// Iterate pointers in live lookup results.
thread->top_lookup_result_->Iterate(v);
}
@ -1072,16 +1068,6 @@ void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
location, HandleVector<Object>(&exception_handle, 1), stack_trace,
stack_trace_object);
} else if (location != NULL && !location->script().is_null()) {
// We are bootstrapping and caught an error where the location is set
// and we have a script for the location.
// In this case we could have an extension (or an internal error
somewhere) and we print out the line number at which the error occurred
// to the console for easier debugging.
int line_number = GetScriptLineNumberSafe(location->script(),
location->start_pos());
OS::PrintError("Extension or internal compilation error at line %d.\n",
line_number);
}
}

11
deps/v8/src/isolate.h

@ -255,9 +255,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
// Whether out of memory exceptions should be ignored.
bool ignore_out_of_memory_;
@ -314,6 +311,7 @@ class HashMap;
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
V(uint32_t, random_seed, 2) \
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@ -997,13 +995,6 @@ class Isolate {
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }
LookupResult* top_lookup_result() {
return thread_local_top_.top_lookup_result_;
}
void SetTopLookupResult(LookupResult* top) {
thread_local_top_.top_lookup_result_ = top;
}
private:
Isolate();
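Aside: the isolate.cc/isolate.h hunks above drop the top_lookup_result_ list that Isolate::Iterate walked so the GC could visit pointers held by live LookupResults. The sketch below models that pattern: stack-allocated results push themselves onto a per-thread list in their constructor and pop in their destructor, and a root visitor walks the list. All type names are illustrative stand-ins.

#include <iostream>

struct LookupResultSketch;

struct ThreadLocalTopSketch {
  ThreadLocalTopSketch() : top_lookup_result(0) {}
  LookupResultSketch* top_lookup_result;  // head of the live-result list
};

struct LookupResultSketch {
  LookupResultSketch(ThreadLocalTopSketch* top, int holder)
      : next(top->top_lookup_result), holder(holder), top_(top) {
    top_->top_lookup_result = this;   // push onto the live list
  }
  ~LookupResultSketch() {
    top_->top_lookup_result = next;   // pop on scope exit
  }
  LookupResultSketch* next;
  int holder;  // stands in for the heap pointer the GC would relocate
 private:
  ThreadLocalTopSketch* top_;
};

// Stand-in for the root visiting loop: touch every live result on the thread.
void IterateLookupResults(ThreadLocalTopSketch* top) {
  for (LookupResultSketch* r = top->top_lookup_result; r != 0; r = r->next) {
    std::cout << "visiting holder " << r->holder << "\n";
  }
}

int main() {
  ThreadLocalTopSketch top;
  LookupResultSketch outer(&top, 1);
  {
    LookupResultSketch inner(&top, 2);
    IterateLookupResults(&top);  // visits 2, then 1
  }
  IterateLookupResults(&top);    // visits only 1
  return 0;
}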

18
deps/v8/src/jsregexp.cc

@ -509,16 +509,14 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
IrregexpResult result = IrregexpInterpreter::Match(isolate,
byte_codes,
subject,
register_vector,
index);
if (result == RE_EXCEPTION) {
ASSERT(!isolate->has_pending_exception());
isolate->StackOverflow();
}
return result;
if (IrregexpInterpreter::Match(isolate,
byte_codes,
subject,
register_vector,
index)) {
return RE_SUCCESS;
}
return RE_FAILURE;
#endif // V8_INTERPRETED_REGEXP
}

5
deps/v8/src/list-inl.h

@ -216,11 +216,11 @@ int SortedListBSearch(
int mid = (low + high) / 2;
T mid_elem = list[mid];
if (cmp(&mid_elem, &elem) > 0) {
if (mid_elem > elem) {
high = mid - 1;
continue;
}
if (cmp(&mid_elem, &elem) < 0) {
if (mid_elem < elem) {
low = mid + 1;
continue;
}
@ -236,7 +236,6 @@ int SortedListBSearch(const List<T>& list, T elem) {
return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
}
} } // namespace v8::internal
#endif // V8_LIST_INL_H_
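Aside: the list-inl.h hunk above switches SortedListBSearch between comparing via a cmp(&a, &b) callback and using operator</operator> directly. A self-contained sketch of the comparator-based form, using std::vector in place of V8's List, is below; SortedListBSearchSketch and IntCompare are illustrative names.

#include <iostream>
#include <vector>

// Comparator-based binary search over a sorted list.
// Returns the index of the element or -1 if it was not found.
template <typename T, typename Cmp>
int SortedListBSearchSketch(const std::vector<T>& list, const T& elem, Cmp cmp) {
  int low = 0;
  int high = static_cast<int>(list.size()) - 1;
  while (low <= high) {
    int mid = (low + high) / 2;
    T mid_elem = list[mid];
    if (cmp(&mid_elem, &elem) > 0) { high = mid - 1; continue; }
    if (cmp(&mid_elem, &elem) < 0) { low = mid + 1; continue; }
    return mid;  // equal
  }
  return -1;
}

// Three-way comparison for ints: negative, zero, or positive.
int IntCompare(const int* a, const int* b) {
  return (*a > *b) - (*a < *b);
}

int main() {
  int values[] = {1, 3, 5, 7, 9};
  std::vector<int> list(values, values + 5);
  std::cout << SortedListBSearchSketch(list, 7, IntCompare) << " "   // 3
            << SortedListBSearchSketch(list, 4, IntCompare) << "\n"; // -1
  return 0;
}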

4
deps/v8/src/list.h

@ -165,11 +165,8 @@ class List {
class Map;
class Code;
template<typename T> class Handle;
typedef List<Map*> MapList;
typedef List<Code*> CodeList;
typedef List<Handle<Map> > MapHandleList;
typedef List<Handle<Code> > CodeHandleList;
// Perform binary search for an element in an already sorted
// list. Returns the index of the element or -1 if it was not found.
@ -179,7 +176,6 @@ int SortedListBSearch(
template <typename T>
int SortedListBSearch(const List<T>& list, T elem);
} } // namespace v8::internal

6
deps/v8/src/liveobjectlist.cc

@ -1085,7 +1085,7 @@ void LiveObjectList::SortAll() {
static int CountHeapObjects() {
int count = 0;
// Iterate over all the heap spaces and count the number of objects.
HeapIterator iterator;
HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
HeapObject* heap_obj = NULL;
while ((heap_obj = iterator.next()) != NULL) {
count++;
@ -1122,7 +1122,7 @@ MaybeObject* LiveObjectList::Capture() {
// allocation, and we need allocate below.
{
// Iterate over all the heap spaces and add the objects.
HeapIterator iterator;
HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
HeapObject* heap_obj = NULL;
bool failed = false;
while (!failed && (heap_obj = iterator.next()) != NULL) {
@ -2513,7 +2513,7 @@ void LiveObjectList::Verify(bool match_heap_exactly) {
OS::Print(" Start verify ...\n");
OS::Print(" Verifying ...");
Flush();
HeapIterator iterator;
HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
HeapObject* heap_obj = NULL;
while ((heap_obj = iterator.next()) != NULL) {
number_of_heap_objects++;

5
deps/v8/src/macros.py

@ -128,11 +128,6 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# we cannot handle those anyway.
macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
const kBoundFunctionIndex = 0;
const kBoundThisIndex = 1;
const kBoundArgumentsStartIndex = 2;
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));

8
deps/v8/src/mark-compact-inl.h

@ -38,7 +38,7 @@ namespace internal {
MarkBit Marking::MarkBitFrom(Address addr) {
MemoryChunk* p = MemoryChunk::FromAddress(addr);
MemoryChunk *p = MemoryChunk::FromAddress(addr);
return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
p->ContainsOnlyData());
}
@ -54,6 +54,9 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
if (!mark_bit.Get()) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
#ifdef DEBUG
UpdateLiveObjectCount(obj);
#endif
ProcessNewlyMarkedObject(obj);
}
}
@ -64,6 +67,9 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
mark_bit.Set();
MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
#ifdef DEBUG
UpdateLiveObjectCount(obj);
#endif
}

73
deps/v8/src/mark-compact.cc

@ -65,6 +65,16 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
collect_maps_(FLAG_collect_maps),
tracer_(NULL),
migration_slots_buffer_(NULL),
#ifdef DEBUG
live_young_objects_size_(0),
live_old_pointer_objects_size_(0),
live_old_data_objects_size_(0),
live_code_objects_size_(0),
live_map_objects_size_(0),
live_cell_objects_size_(0),
live_lo_objects_size_(0),
live_bytes_(0),
#endif
heap_(NULL),
code_flusher_(NULL),
encountered_weak_maps_(NULL) { }
@ -320,7 +330,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
#endif
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
static void ClearMarkbits(PagedSpace* space) {
PageIterator it(space);
while (it.has_next()) {
@ -329,7 +339,7 @@ static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
}
static void ClearMarkbitsInNewSpace(NewSpace* space) {
static void ClearMarkbits(NewSpace* space) {
NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
while (it.has_next()) {
@ -338,15 +348,15 @@ static void ClearMarkbitsInNewSpace(NewSpace* space) {
}
void MarkCompactCollector::ClearMarkbits() {
ClearMarkbitsInPagedSpace(heap_->code_space());
ClearMarkbitsInPagedSpace(heap_->map_space());
ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
ClearMarkbitsInPagedSpace(heap_->old_data_space());
ClearMarkbitsInPagedSpace(heap_->cell_space());
ClearMarkbitsInNewSpace(heap_->new_space());
static void ClearMarkbits(Heap* heap) {
ClearMarkbits(heap->code_space());
ClearMarkbits(heap->map_space());
ClearMarkbits(heap->old_pointer_space());
ClearMarkbits(heap->old_data_space());
ClearMarkbits(heap->cell_space());
ClearMarkbits(heap->new_space());
LargeObjectIterator it(heap_->lo_space());
LargeObjectIterator it(heap->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
mark_bit.Clear();
@ -494,7 +504,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
// Clear marking bits for precise sweeping to collect all garbage.
if (was_marked_incrementally_ && PreciseSweepingRequired()) {
heap()->incremental_marking()->Abort();
ClearMarkbits();
ClearMarkbits(heap_);
AbortCompaction();
was_marked_incrementally_ = false;
}
@ -513,10 +523,21 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
}
#ifdef DEBUG
if (!was_marked_incrementally_ && FLAG_verify_heap) {
if (!was_marked_incrementally_) {
VerifyMarkbitsAreClean();
}
#endif
#ifdef DEBUG
live_bytes_ = 0;
live_young_objects_size_ = 0;
live_old_pointer_objects_size_ = 0;
live_old_data_objects_size_ = 0;
live_code_objects_size_ = 0;
live_map_objects_size_ = 0;
live_cell_objects_size_ = 0;
live_lo_objects_size_ = 0;
#endif
}
@ -2155,6 +2176,32 @@ void MarkCompactCollector::ProcessMapCaches() {
}
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
if (heap()->new_space()->Contains(obj)) {
live_young_objects_size_ += obj->Size();
} else if (heap()->map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_size_ += obj->Size();
} else if (heap()->cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
live_cell_objects_size_ += obj->Size();
} else if (heap()->old_pointer_space()->Contains(obj)) {
live_old_pointer_objects_size_ += obj->Size();
} else if (heap()->old_data_space()->Contains(obj)) {
live_old_data_objects_size_ += obj->Size();
} else if (heap()->code_space()->Contains(obj)) {
live_code_objects_size_ += obj->Size();
} else if (heap()->lo_space()->Contains(obj)) {
live_lo_objects_size_ += obj->Size();
} else {
UNREACHABLE();
}
}
#endif // DEBUG
void MarkCompactCollector::ReattachInitialMaps() {
HeapObjectIterator map_iterator(heap()->map_space());
for (HeapObject* obj = map_iterator.Next();
@ -3602,6 +3649,8 @@ void MarkCompactCollector::SweepSpaces() {
// of the previous ones.
SweepSpace(heap()->map_space(), PRECISE);
ASSERT(live_map_objects_size_ <= heap()->map_space()->Size());
// Deallocate unmarked objects and clear marked bits for marked objects.
heap_->lo_space()->FreeUnmarkedObjects();
}

56
deps/v8/src/mark-compact.h

@ -61,52 +61,68 @@ class Marking {
// Impossible markbits: 01
static const char* kImpossibleBitPattern;
static inline bool IsImpossible(MarkBit mark_bit) {
ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
return !mark_bit.Get() && mark_bit.Next().Get();
}
// Black markbits: 10 - this is required by the sweeper.
static const char* kBlackBitPattern;
static inline bool IsBlack(MarkBit mark_bit) {
ASSERT(strcmp(kBlackBitPattern, "10") == 0);
ASSERT(!IsImpossible(mark_bit));
return mark_bit.Get() && !mark_bit.Next().Get();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
static inline bool IsWhite(MarkBit mark_bit) {
ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
ASSERT(!IsImpossible(mark_bit));
return !mark_bit.Get();
}
// Grey markbits: 11
static const char* kGreyBitPattern;
static inline bool IsGrey(MarkBit mark_bit) {
ASSERT(strcmp(kGreyBitPattern, "11") == 0);
ASSERT(!IsImpossible(mark_bit));
return mark_bit.Get() && mark_bit.Next().Get();
}
static inline void MarkBlack(MarkBit mark_bit) {
mark_bit.Set();
mark_bit.Next().Clear();
ASSERT(Marking::IsBlack(mark_bit));
}
static inline void BlackToGrey(MarkBit markbit) {
ASSERT(IsBlack(markbit));
markbit.Next().Set();
ASSERT(IsGrey(markbit));
}
static inline void WhiteToGrey(MarkBit markbit) {
ASSERT(IsWhite(markbit));
markbit.Set();
markbit.Next().Set();
ASSERT(IsGrey(markbit));
}
static inline void GreyToBlack(MarkBit markbit) {
ASSERT(IsGrey(markbit));
markbit.Next().Clear();
ASSERT(IsBlack(markbit));
}
static inline void BlackToGrey(HeapObject* obj) {
ASSERT(obj->Size() >= 2 * kPointerSize);
BlackToGrey(MarkBitFrom(obj));
}
static inline void AnyToGrey(MarkBit markbit) {
markbit.Set();
markbit.Next().Set();
ASSERT(IsGrey(markbit));
}
// Returns true if the object whose mark is transferred is marked black.
@ -157,6 +173,8 @@ class Marking {
to_mark_bit.Next().Set();
is_black = false; // Was actually gray.
}
ASSERT(Color(from) == Color(to));
ASSERT(is_black == (Color(to) == BLACK_OBJECT));
return is_black;
}
@ -209,6 +227,7 @@ class MarkingDeque {
inline void PushGrey(HeapObject* object) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
SetOverflowed();
} else {
array_[top_] = object;
@ -227,6 +246,7 @@ class MarkingDeque {
inline void UnshiftGrey(HeapObject* object) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
SetOverflowed();
} else {
bottom_ = ((bottom_ - 1) & mask_);
@ -538,8 +558,6 @@ class MarkCompactCollector {
void InvalidateCode(Code* code);
void ClearMarkbits();
private:
MarkCompactCollector();
~MarkCompactCollector();
@ -669,6 +687,10 @@ class MarkCompactCollector {
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
#ifdef DEBUG
void UpdateLiveObjectCount(HeapObject* obj);
#endif
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
void ClearNonLiveTransitions();
@ -715,7 +737,37 @@ class MarkCompactCollector {
void SweepSpace(PagedSpace* space, SweeperType sweeper);
#ifdef DEBUG
// -----------------------------------------------------------------------
// Debugging variables, functions and classes
// Counters used for debugging the marking phase of mark-compact or
// mark-sweep collection.
// Size of live objects in Heap::to_space_.
int live_young_objects_size_;
// Size of live objects in Heap::old_pointer_space_.
int live_old_pointer_objects_size_;
// Size of live objects in Heap::old_data_space_.
int live_old_data_objects_size_;
// Size of live objects in Heap::code_space_.
int live_code_objects_size_;
// Size of live objects in Heap::map_space_.
int live_map_objects_size_;
// Size of live objects in Heap::cell_space_.
int live_cell_objects_size_;
// Size of live objects in Heap::lo_space_.
int live_lo_objects_size_;
// Number of live bytes in this collection.
int live_bytes_;
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
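Aside: the mark-compact.h hunks above document the two-bit object colors the collector relies on: white 00, black 10, grey 11, with 01 impossible. The toy below encodes those color tests and the WhiteToGrey/GreyToBlack transitions on a plain pair of bools; it is a model of the encoding only, not V8's bitmap.

#include <cassert>
#include <iostream>

// Two bits per object: the mark bit and the bit that follows it.
struct MarkBitSketch {
  bool mark;  // first bit
  bool next;  // second bit
};

bool IsWhite(MarkBitSketch b)      { return !b.mark && !b.next; }  // 00
bool IsBlack(MarkBitSketch b)      { return b.mark && !b.next; }   // 10
bool IsGrey(MarkBitSketch b)       { return b.mark && b.next; }    // 11
bool IsImpossible(MarkBitSketch b) { return !b.mark && b.next; }   // 01

void WhiteToGrey(MarkBitSketch* b) { assert(IsWhite(*b)); b->mark = true; b->next = true; }
void GreyToBlack(MarkBitSketch* b) { assert(IsGrey(*b)); b->next = false; }

int main() {
  MarkBitSketch b = {false, false};  // freshly allocated: white
  WhiteToGrey(&b);                   // discovered: grey (pushed on the deque)
  GreyToBlack(&b);                   // body scanned: black
  std::cout << (IsBlack(b) ? "black" : "not black") << "\n";
  return 0;
}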

44
deps/v8/src/messages.js

@ -83,7 +83,7 @@ function IsNativeErrorObject(obj) {
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
if (IsNativeErrorObject(obj)) {
return %_CallFunction(obj, ErrorToString);
return %_CallFunction(obj, errorToString);
} else {
return ToString(obj);
}
@ -185,15 +185,14 @@ function FormatMessage(message) {
"define_disallowed", ["Cannot define property:", "%0", ", object is not extensible."],
"non_extensible_proto", ["%0", " is not extensible"],
"handler_non_object", ["Proxy.", "%0", " called with non-object as handler"],
"proto_non_object", ["Proxy.", "%0", " called with non-object as prototype"],
"trap_function_expected", ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
"trap_function_expected", ["Proxy.", "%0", " called with non-function for ", "%1", " trap"],
"handler_trap_missing", ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
"handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
"handler_returned_false", ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
"handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
"proxy_prop_not_configurable", ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
"proxy_non_object_prop_names", ["Trap '", "%1", "' returned non-object ", "%0"],
"proxy_repeated_prop_name", ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
"handler_returned_false", ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
"handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
"proxy_prop_not_configurable", ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
"proxy_non_object_prop_names", ["Trap ", "%1", " returned non-object ", "%0"],
"proxy_repeated_prop_name", ["Trap ", "%1", " returned repeated property name ", "%2"],
"invalid_weakmap_key", ["Invalid value used as weak map key"],
// RangeError
"invalid_array_length", ["Invalid array length"],
@ -241,7 +240,6 @@ function FormatMessage(message) {
"strict_poison_pill", ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
"strict_caller", ["Illegal access to a strict mode caller function."],
"unprotected_let", ["Illegal let declaration in unprotected statement context."],
"unprotected_const", ["Illegal const declaration in unprotected statement context."],
"cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
"redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
];
@ -1128,7 +1126,6 @@ function SetUpError() {
return new f(m);
}
});
%SetNativeFlag(f);
}
DefineError(function Error() { });
@ -1146,43 +1143,42 @@ $Error.captureStackTrace = captureStackTrace;
%SetProperty($Error.prototype, 'message', '', DONT_ENUM);
// Global list of error objects visited during ErrorToString. This is
// Global list of error objects visited during errorToString. This is
// used to detect cycles in error toString formatting.
const visited_errors = new InternalArray();
const cyclic_error_marker = new $Object();
function ErrorToStringDetectCycle(error) {
function errorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
var type = error.type;
var name = error.name
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
var message = error.message;
var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
if (type && !hasMessage) {
message = FormatMessage(%NewMessageObject(type, error.arguments));
var formatted = FormatMessage(%NewMessageObject(type, error.arguments));
return error.name + ": " + formatted;
}
message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
if (name === "") return message;
if (message === "") return name;
return name + ": " + message;
var message = hasMessage ? (": " + error.message) : "";
return error.name + message;
} finally {
visited_errors.length = visited_errors.length - 1;
}
}
function ErrorToString() {
function errorToString() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Error.prototype.toString"]);
}
// This helper function is needed because access to properties on
// the builtins object do not work inside of a catch clause.
function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
try {
return ErrorToStringDetectCycle(this);
return errorToStringDetectCycle(this);
} catch(e) {
// If this error message was encountered already return the empty
// string for it instead of recursively formatting it.
if (e === cyclic_error_marker) {
if (isCyclicErrorMarker(e)) {
return '';
}
throw e;
@ -1190,7 +1186,7 @@ function ErrorToString() {
}
InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().

15
deps/v8/src/mips/assembler-mips-inl.h

@ -116,10 +116,10 @@ int RelocInfo::target_address_size() {
}
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@ -150,12 +150,10 @@ Object** RelocInfo::target_object_address() {
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
@ -186,12 +184,11 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode) {
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
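Aside: the assembler-mips-inl.h hunk above is one of several reverts around RelocInfo setters that call incremental_marking()->RecordWrite*(...) after storing a heap pointer. The sketch below shows the general write-barrier idea those calls implement: if an already-black host gains a pointer to a white object, that object must go back on the marker's worklist. All types here are invented stand-ins.

#include <iostream>
#include <vector>

struct HeapObjectSketch {
  bool black;  // already fully scanned by the marker
};

class IncrementalMarkingSketch {
 public:
  // RecordWrite queues the value when the host was already scanned.
  void RecordWrite(HeapObjectSketch* host, HeapObjectSketch* value) {
    if (host->black && !value->black) {
      worklist_.push_back(value);  // value must be visited before marking ends
    }
  }
  size_t pending() const { return worklist_.size(); }
 private:
  std::vector<HeapObjectSketch*> worklist_;
};

// Store plus barrier, mirroring the shape of the setters in the hunk.
void SetTargetObject(HeapObjectSketch* host, HeapObjectSketch** slot,
                     HeapObjectSketch* target, IncrementalMarkingSketch* marking) {
  *slot = target;                       // the raw store
  marking->RecordWrite(host, target);   // the write barrier
}

int main() {
  HeapObjectSketch code = {true};   // host already marked black
  HeapObjectSketch obj = {false};   // freshly allocated, still white
  HeapObjectSketch* slot = 0;
  IncrementalMarkingSketch marking;
  SetTargetObject(&code, &slot, &obj, &marking);
  std::cout << "pending objects to mark: " << marking.pending() << "\n";  // 1
  return 0;
}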

2
deps/v8/src/mips/assembler-mips.h

@ -302,7 +302,7 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
static const FPURegister& kDoubleRegZero = f28;
const FPURegister kDoubleRegZero = f28;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.

Some files were not shown because too many files changed in this diff
