Upgrade V8 to 3.0.10

v0.7.4-release
Ryan Dahl 14 years ago
parent commit 4c5e570706
  1. deps/v8/ChangeLog (19)
  2. deps/v8/include/v8.h (10)
  3. deps/v8/src/SConscript (1)
  4. deps/v8/src/api.cc (4)
  5. deps/v8/src/arm/assembler-arm.cc (8)
  6. deps/v8/src/arm/code-stubs-arm.cc (158)
  7. deps/v8/src/arm/codegen-arm.cc (2)
  8. deps/v8/src/arm/constants-arm.h (5)
  9. deps/v8/src/arm/frames-arm.h (3)
  10. deps/v8/src/arm/full-codegen-arm.cc (2)
  11. deps/v8/src/arm/ic-arm.cc (683)
  12. deps/v8/src/arm/lithium-arm.cc (335)
  13. deps/v8/src/arm/lithium-arm.h (1158)
  14. deps/v8/src/arm/lithium-codegen-arm.cc (608)
  15. deps/v8/src/arm/lithium-codegen-arm.h (8)
  16. deps/v8/src/arm/macro-assembler-arm.cc (37)
  17. deps/v8/src/arm/macro-assembler-arm.h (13)
  18. deps/v8/src/arm/simulator-arm.cc (11)
  19. deps/v8/src/arm/stub-cache-arm.cc (702)
  20. deps/v8/src/ast.cc (26)
  21. deps/v8/src/ast.h (6)
  22. deps/v8/src/builtins.cc (76)
  23. deps/v8/src/builtins.h (14)
  24. deps/v8/src/code-stubs.h (13)
  25. deps/v8/src/codegen.cc (2)
  26. deps/v8/src/compiler.cc (2)
  27. deps/v8/src/extensions/experimental/experimental.gyp (5)
  28. deps/v8/src/flag-definitions.h (1)
  29. deps/v8/src/full-codegen.cc (2)
  30. deps/v8/src/heap.h (5)
  31. deps/v8/src/hydrogen-instructions.cc (32)
  32. deps/v8/src/hydrogen-instructions.h (190)
  33. deps/v8/src/hydrogen.cc (168)
  34. deps/v8/src/hydrogen.h (7)
  35. deps/v8/src/ia32/assembler-ia32-inl.h (10)
  36. deps/v8/src/ia32/code-stubs-ia32.cc (7)
  37. deps/v8/src/ia32/codegen-ia32.cc (2)
  38. deps/v8/src/ia32/full-codegen-ia32.cc (2)
  39. deps/v8/src/ia32/ic-ia32.cc (342)
  40. deps/v8/src/ia32/lithium-codegen-ia32.cc (242)
  41. deps/v8/src/ia32/lithium-codegen-ia32.h (2)
  42. deps/v8/src/ia32/lithium-gap-resolver-ia32.h (2)
  43. deps/v8/src/ia32/lithium-ia32.cc (60)
  44. deps/v8/src/ia32/lithium-ia32.h (161)
  45. deps/v8/src/ia32/stub-cache-ia32.cc (358)
  46. deps/v8/src/ic.cc (59)
  47. deps/v8/src/ic.h (15)
  48. deps/v8/src/lithium-allocator.cc (140)
  49. deps/v8/src/lithium-allocator.h (412)
  50. deps/v8/src/lithium.cc (72)
  51. deps/v8/src/lithium.h (350)
  52. deps/v8/src/liveobjectlist-inl.h (36)
  53. deps/v8/src/liveobjectlist.cc (53)
  54. deps/v8/src/liveobjectlist.h (112)
  55. deps/v8/src/messages.js (62)
  56. deps/v8/src/mips/ic-mips.cc (12)
  57. deps/v8/src/mips/stub-cache-mips.cc (14)
  58. deps/v8/src/objects-inl.h (6)
  59. deps/v8/src/objects-printer.cc (1)
  60. deps/v8/src/objects.h (13)
  61. deps/v8/src/parser.cc (100)
  62. deps/v8/src/platform-freebsd.cc (14)
  63. deps/v8/src/platform-linux.cc (14)
  64. deps/v8/src/platform-macos.cc (14)
  65. deps/v8/src/platform-nullos.cc (6)
  66. deps/v8/src/platform-openbsd.cc (14)
  67. deps/v8/src/platform-posix.cc (5)
  68. deps/v8/src/platform-solaris.cc (14)
  69. deps/v8/src/platform-win32.cc (38)
  70. deps/v8/src/platform.h (3)
  71. deps/v8/src/scopes.cc (5)
  72. deps/v8/src/stub-cache.cc (80)
  73. deps/v8/src/stub-cache.h (18)
  74. deps/v8/src/utils.cc (92)
  75. deps/v8/src/v8utils.h (33)
  76. deps/v8/src/version.cc (2)
  77. deps/v8/src/x64/assembler-x64-inl.h (11)
  78. deps/v8/src/x64/assembler-x64.cc (22)
  79. deps/v8/src/x64/assembler-x64.h (6)
  80. deps/v8/src/x64/code-stubs-x64.cc (118)
  81. deps/v8/src/x64/codegen-x64.cc (2)
  82. deps/v8/src/x64/disasm-x64.cc (8)
  83. deps/v8/src/x64/full-codegen-x64.cc (2)
  84. deps/v8/src/x64/ic-x64.cc (268)
  85. deps/v8/src/x64/lithium-codegen-x64.cc (444)
  86. deps/v8/src/x64/lithium-codegen-x64.h (3)
  87. deps/v8/src/x64/lithium-x64.cc (154)
  88. deps/v8/src/x64/lithium-x64.h (159)
  89. deps/v8/src/x64/macro-assembler-x64.cc (41)
  90. deps/v8/src/x64/macro-assembler-x64.h (8)
  91. deps/v8/src/x64/stub-cache-x64.cc (300)
  92. deps/v8/test/cctest/cctest.status (2)
  93. deps/v8/test/cctest/test-api.cc (35)
  94. deps/v8/test/mjsunit/cyclic-error-to-string.js (46)
  95. deps/v8/test/mjsunit/debug-evaluate-locals.js (23)
  96. deps/v8/test/mjsunit/strict-mode.js (117)
  97. deps/v8/test/mjsunit/string-charcodeat.js (11)
  98. deps/v8/test/mozilla/mozilla.status (3)
  99. deps/v8/tools/gyp/v8.gyp (14)
  100. deps/v8/tools/v8.xcodeproj/project.pbxproj (30)

deps/v8/ChangeLog (19)

@@ -1,3 +1,22 @@
+2011-01-24: Version 3.0.10
+
+        Fixed External::Wrap for 64-bit addresses (issue 1037).
+
+        Fixed incorrect .arguments variable proxy handling in the full
+        code generator (issue 1060).
+
+        Introduced partial strict mode support.
+
+        Changed formatting of recursive error messages to match Firefox and
+        Safari (issue http://crbug.com/70334).
+
+        Fixed incorrect rounding for float-to-integer conversions for external
+        array types, which implement the Typed Array spec
+        (issue http://crbug.com/50972).
+
+        Performance improvements on the IA32 platform.
+
+
 2011-01-19: Version 3.0.9

         Added basic GDB JIT Interface integration.

deps/v8/include/v8.h (10)

@@ -3367,7 +3367,7 @@ template <> struct SmiTagging<4> {
   // For 32-bit systems any 2 bytes aligned pointer can be encoded as smi
   // with a plain reinterpret_cast.
-  static const intptr_t kEncodablePointerMask = 0x1;
+  static const uintptr_t kEncodablePointerMask = 0x1;
   static const int kPointerToSmiShift = 0;
 };
@@ -3387,8 +3387,8 @@ template <> struct SmiTagging<8> {
   // It might be not enough to cover stack allocated objects on some platforms.
   static const int kPointerAlignment = 3;
-  static const intptr_t kEncodablePointerMask =
-      ~(intptr_t(0xffffffff) << kPointerAlignment);
+  static const uintptr_t kEncodablePointerMask =
+      ~(uintptr_t(0xffffffff) << kPointerAlignment);
   static const int kPointerToSmiShift =
       kSmiTagSize + kSmiShiftSize - kPointerAlignment;
@@ -3397,7 +3397,7 @@ template <> struct SmiTagging<8> {
 typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
 const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
 const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const intptr_t kEncodablePointerMask =
+const uintptr_t kEncodablePointerMask =
     PlatformSmiTagging::kEncodablePointerMask;
 const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
@@ -3457,7 +3457,7 @@ class Internals {
   }

   static inline void* GetExternalPointerFromSmi(internal::Object* value) {
-    const intptr_t address = reinterpret_cast<intptr_t>(value);
+    const uintptr_t address = reinterpret_cast<uintptr_t>(value);
     return reinterpret_cast<void*>(address >> kPointerToSmiShift);
   }
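
The intptr_t-to-uintptr_t switch above is the substance of the "External::Wrap for 64-bit addresses" fix from the ChangeLog (issue 1037): GetExternalPointerFromSmi decodes with a right shift, and a right shift of a negative signed value sign-extends on typical two's-complement platforms, so any pointer whose encoded form has the top bit set came back corrupted. A minimal standalone sketch of the failure mode; kShift is a stand-in, not the real kPointerToSmiShift value:

    #include <cstdint>
    #include <cstdio>

    const int kShift = 29;  // stand-in for kPointerToSmiShift on a 64-bit build

    int main() {
      // An aligned address whose encoded (shifted-up) form sets the sign bit.
      uint64_t address = 0x400000000ULL;      // 2^34
      uint64_t encoded = address << kShift;   // 2^63: top bit set

      // Signed decode sign-extends: yields 0xfffffffc00000000, not the address.
      intptr_t bad = static_cast<intptr_t>(encoded) >> kShift;
      // Unsigned decode zero-fills and recovers the original address.
      uintptr_t good = static_cast<uintptr_t>(encoded) >> kShift;

      printf("signed: %llx, unsigned: %llx\n",
             (unsigned long long)bad, (unsigned long long)good);
      return 0;
    }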

deps/v8/src/SConscript (1)

@@ -89,6 +89,7 @@ SOURCES = {
     lithium-allocator.cc
     lithium.cc
     liveedit.cc
+    liveobjectlist.cc
     log-utils.cc
     log.cc
     mark-compact.cc

deps/v8/src/api.cc (4)

@@ -3267,14 +3267,14 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {

 static bool CanBeEncodedAsSmi(void* ptr) {
-  const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
   return ((address & i::kEncodablePointerMask) == 0);
 }


 static i::Smi* EncodeAsSmi(void* ptr) {
   ASSERT(CanBeEncodedAsSmi(ptr));
-  const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
   i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
   ASSERT(i::Internals::HasSmiTag(result));
   ASSERT_EQ(result, i::Smi::FromInt(result->value()));

deps/v8/src/arm/assembler-arm.cc (8)

@@ -1656,8 +1656,14 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
   emit(reinterpret_cast<Instr>(msg));
 #else  // def __arm__
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
-  ASSERT(cond == al);
-  bkpt(0);
+  if (cond != al) {
+    Label skip;
+    b(&skip, NegateCondition(cond));
+    bkpt(0);
+    bind(&skip);
+  } else {
+    bkpt(0);
+  }
 #else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
   svc(0x9f0001, cond);
 #endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS

deps/v8/src/arm/code-stubs-arm.cc (158)

@@ -112,10 +112,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
 void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Try to allocate the context in new space.
   Label gc;
-  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
   // Attempt to allocate the context in new space.
-  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+  __ AllocateInNewSpace(FixedArray::SizeFor(slots_),
                         r0,
                         r1,
                         r2,
@@ -128,7 +127,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Setup the object header.
   __ LoadRoot(r2, Heap::kContextMapRootIndex);
   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ mov(r2, Operand(Smi::FromInt(length)));
+  __ mov(r2, Operand(Smi::FromInt(slots_)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

   // Setup the fixed slots.
@@ -144,7 +143,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
-  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
     __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
   }
@@ -2890,18 +2889,33 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 }

-// Uses registers r0 to r4. Expected input is
-// object in r0 (or at sp+1*kPointerSize) and function in
-// r1 (or at sp), depending on whether or not
-// args_in_registers() is true.
+// Uses registers r0 to r4.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: r0 or at sp + 1 * kPointerSize.
+// * function: r1 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register r4.
+// (See LCodeGen::DoInstanceOfKnownGlobal)
 void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Call site inlining and patching implies arguments in registers.
+  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  // ReturnTrueFalse is only implemented for inlined call sites.
+  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
   // Fixed register usage throughout the stub:
   const Register object = r0;  // Object (lhs).
-  const Register map = r3;  // Map of the object.
+  Register map = r3;  // Map of the object.
   const Register function = r1;  // Function (rhs).
   const Register prototype = r4;  // Prototype of the function.
+  const Register inline_site = r9;
   const Register scratch = r2;
+
+  const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
+
   Label slow, loop, is_instance, is_not_instance, not_js_object;
+
   if (!HasArgsInRegisters()) {
     __ ldr(object, MemOperand(sp, 1 * kPointerSize));
     __ ldr(function, MemOperand(sp, 0));
@@ -2911,7 +2925,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   __ BranchOnSmi(object, &not_js_object);
   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

-  // Look up the function and the map in the instanceof cache.
+  // If there is a call site cache don't look in the global cache, but do the
+  // real lookup and update the call site cache.
+  if (!HasCallSiteInlineCheck()) {
     Label miss;
     __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
     __ cmp(function, ip);
@@ -2923,38 +2939,86 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ Ret(HasArgsInRegisters() ? 0 : 2);

     __ bind(&miss);
+  }
+
+  // Get the prototype of the function.
   __ TryGetFunctionPrototype(function, prototype, scratch, &slow);

   // Check that the function prototype is a JS object.
   __ BranchOnSmi(prototype, &slow);
   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
+  if (!HasCallSiteInlineCheck()) {
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+  } else {
+    ASSERT(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in r4 safepoint slot.
+    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+    __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
+    __ sub(inline_site, lr, scratch);
+    // Get the map location in scratch and patch it.
+    __ GetRelocatedValueLocation(inline_site, scratch);
+    __ str(map, MemOperand(scratch));
+  }

   // Register mapping: r3 is object map and r4 is function prototype.
   // Get prototype of object into r2.
   __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));

+  // We don't need map any more. Use it as a scratch register.
+  Register scratch2 = map;
+  map = no_reg;
+
   // Loop through the prototype chain looking for the function prototype.
+  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
   __ bind(&loop);
   __ cmp(scratch, Operand(prototype));
   __ b(eq, &is_instance);
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(scratch, ip);
+  __ cmp(scratch, scratch2);
   __ b(eq, &is_not_instance);
   __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
   __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);

   __ bind(&is_instance);
+  if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(0)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    // Patch the call site to return true.
+    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ GetRelocatedValueLocation(inline_site, scratch);
+    __ str(r0, MemOperand(scratch));
+
+    if (!ReturnTrueFalseObject()) {
+      __ mov(r0, Operand(Smi::FromInt(0)));
+    }
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);

   __ bind(&is_not_instance);
+  if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(1)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    // Patch the call site to return false.
+    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ GetRelocatedValueLocation(inline_site, scratch);
+    __ str(r0, MemOperand(scratch));
+
+    if (!ReturnTrueFalseObject()) {
+      __ mov(r0, Operand(Smi::FromInt(1)));
+    }
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);

   Label object_not_null, object_not_null_or_smi;
@@ -2962,7 +3026,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // Before null, smi and string value checks, check that the rhs is a function
   // as for a non-function rhs an exception needs to be thrown.
   __ BranchOnSmi(function, &slow);
-  __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE);
+  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
   __ b(ne, &slow);

   // Null is not instance of anything.
@@ -2985,13 +3049,30 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // Slow-case.  Tail call builtin.
   __ bind(&slow);
+  if (!ReturnTrueFalseObject()) {
     if (HasArgsInRegisters()) {
       __ Push(r0, r1);
     }
     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+  } else {
+    __ EnterInternalFrame();
+    __ Push(r0, r1);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
+    __ LeaveInternalFrame();
+    __ cmp(r0, Operand(0));
+    __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
+    __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
+    __ Ret(HasArgsInRegisters() ? 0 : 2);
+  }
 }


+Register InstanceofStub::left() { return r0; }
+
+
+Register InstanceofStub::right() { return r1; }
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
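
Stripped of the call-site patching and caching machinery added above, the loop the stub runs is the plain prototype-chain walk its register comments describe. A rough C++ model of just that semantics (the Object type and its prototype field are illustrative stand-ins, not V8 types):

    struct Object {
      Object* prototype;  // chain ends at null
    };

    // True iff function_prototype occurs on object's prototype chain.
    bool IsInstanceOf(Object* object, Object* function_prototype) {
      for (Object* p = object->prototype; p != 0; p = p->prototype) {
        if (p == function_prototype) return true;  // __ b(eq, &is_instance)
      }
      return false;  // reached null: not an instance
    }
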
@@ -3703,7 +3784,6 @@ int CompareStub::MinorKey() {

 // StringCharCodeAtGenerator
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   Label flat_string;
   Label ascii_string;
@@ -4862,6 +4942,56 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 }


+void StringCharAtStub::Generate(MacroAssembler* masm) {
+  // Expects two arguments (object, index) on the stack:
+  //  lr: return address
+  //  sp[0]: index
+  //  sp[4]: object
+  Register object = r1;
+  Register index = r0;
+  Register scratch1 = r2;
+  Register scratch2 = r3;
+  Register result = r0;
+
+  // Get object and index from the stack.
+  __ pop(index);
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm);
+  __ b(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ mov(result, Operand(Smi::FromInt(0)));
+  __ b(&done);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&done);
+  __ Ret();
+}
+
+
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   Label miss;
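
The new StringCharAtStub wires three slow-path exits into StringCharAtGenerator, but the observable behavior is small enough to state directly. A scalar sketch, assuming an already-flattened string with an integer index (the real stub also handles cons strings, two-byte strings, and non-smi indices via the conversion path):

    #include <string>

    // Illustrative model of the stub's result, not V8 API.
    std::string CharAt(const std::string& s, int index) {
      // Out-of-range indices yield the empty string, per the spec.
      if (index < 0 || index >= static_cast<int>(s.size())) return "";
      return std::string(1, s[index]);
    }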

deps/v8/src/arm/codegen-arm.cc (2)

@@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
       frame_->AllocateStackSlots();

       frame_->AssertIsSpilled();
-      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+      int heap_slots = scope()->num_heap_slots();
       if (heap_slots > 0) {
         // Allocate local context.
         // Get outer context and create a new context based on it.

deps/v8/src/arm/constants-arm.h (5)

@@ -352,6 +352,11 @@ class Instr {
           && (Bit(20) == 0)
           && ((Bit(7) == 0)); }

+  // Test for a stop instruction.
+  inline bool IsStop() const {
+    return (TypeField() == 7) && (Bit(24) == 1) && (SvcField() >= stop);
+  }
+
   // Special accessors that test for existence of a value.
   inline bool HasS() const { return SField() == 1; }
   inline bool HasB() const { return BField() == 1; }

deps/v8/src/arm/frames-arm.h (3)

@@ -66,8 +66,7 @@ static const RegList kCalleeSaved =
   1 <<  6 |  //  r6 v3
   1 <<  7 |  //  r7 v4
   1 <<  8 |  //  r8 v5 (cp in JavaScript code)
-  kR9Available
-    <<  9 |  //  r9 v6
+  kR9Available <<  9 |  //  r9 v6
   1 << 10 |  //  r10 v7
   1 << 11;   //  r11 v8 (fp in JavaScript code)

deps/v8/src/arm/full-codegen-arm.cc (2)

@@ -92,7 +92,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
   bool function_in_register = true;

   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = scope()->num_heap_slots();
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is in r1.

deps/v8/src/arm/ic-arm.cc (683)

@@ -1337,311 +1337,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This functions does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
__ mov(loword, Operand(0, RelocInfo::NONE));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
// If least significant bit of biased exponent was not 1 it was corrupted
// by most significant bit of mantissa so we should fix that.
if (!(biased_exponent & 1)) {
__ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label slow, failed_allocation;
Register key = r0;
Register receiver = r1;
// Check that the object isn't a smi
__ BranchOnSmi(receiver, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(key, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(lo, &slow);
// r3: elements array
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
// We are not untagging smi key and instead work with it
// as if it was premultiplied by 2.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
switch (array_type) {
case kExternalByteArray:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalUnsignedByteArray:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalShortArray:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalUnsignedShortArray:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// r2: value
// For floating-point array type
// s0: value (if VFP3 is supported)
// r2: value (if VFP3 is not supported)
if (array_type == kExternalIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ cmp(value, Operand(0xC0000000));
__ b(mi, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't touch r0 or r1 as they are needed if allocation
// fails.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(value, r0, r3);
__ TailCallStub(&stub);
}
} else if (array_type == kExternalUnsignedIntArray) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, value);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(value, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(value, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
Register hiword = value; // r2.
Register loword = r3;
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm, hiword, loword, r4, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm, hiword, loword, r4, 1);
__ bind(&done);
// Integer was converted to double in registers hiword:loword.
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ mov(r0, r4);
__ Ret();
}
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
// r3: heap number for result
// Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
// the slow case from here.
__ and_(r0, value, Operand(kBinary32MantissaMask));
// Extract exponent to r1. OK to clobber r1 now as there are no jumps to
// the slow case from here.
__ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r1, Operand(0x00, RelocInfo::NONE));
__ b(eq, &exponent_rebiased);
__ teq(r1, Operand(0xff));
__ mov(r1, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r1,
r1,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r2, value, Operand(kBinary32SignMask));
value = no_reg;
__ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
__ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
__ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else {
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
GenerateRuntimeGetProperty(masm);
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
@@ -1838,384 +1533,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
// Convert and store int passed in register ival to IEEE 754 single precision
// floating point value at memory location (dst + 4 * wordoffset)
// If VFP3 is available use it for conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
} else {
Label not_special, done;
// Move sign bit from source to destination. This works because the sign
// bit in the exponent word of the double has the same position and polarity
// as the 2's complement sign bit in a Smi.
ASSERT(kBinary32SignMask == 0x80000000u);
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
__ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(ival, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased).
static const uint32_t exponent_word_for_1 =
kBinary32ExponentBias << kBinary32ExponentShift;
__ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
__ b(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
__ CountLeadingZeros(zeros, ival, scratch1);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
zeros,
Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
__ orr(fval,
fval,
Operand(scratch1, LSL, kBinary32ExponentShift));
// Shift up the source chopping the top bit off.
__ add(zeros, zeros, Operand(1));
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
__ mov(ival, Operand(ival, LSL, zeros));
// And the top (top 20 bits).
__ orr(fval,
fval,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
__ str(fval, MemOperand(dst, wordoffset, LSL, 2));
}
}
static bool IsElementTypeSigned(ExternalArrayType array_type) {
switch (array_type) {
case kExternalByteArray:
case kExternalShortArray:
case kExternalIntArray:
return true;
case kExternalUnsignedByteArray:
case kExternalUnsignedShortArray:
case kExternalUnsignedIntArray:
return false;
default:
UNREACHABLE();
return false;
}
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, check_heap_number;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
__ BranchOnNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
// Perform int-to-float conversion and store to memory.
StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
break;
default:
UNREACHABLE();
break;
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
// r3: external array.
// r4: index (integer).
__ bind(&check_heap_number);
__ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (array_type == kExternalFloatArray) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else {
// Need to perform float-to-int conversion.
// Test for NaN or infinity (both give zero).
__ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
// Hoisted load. vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs and Infinities have all-one exponents so they sign extend to -1.
__ cmp(r6, Operand(-1));
__ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
} else {
__ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
}
__ vmov(r5, s0, ne);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
} else {
// VFP3 is not available do manual conversions.
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaInLoWordShift =
kBitsPerInt - kMantissaInHiWordShift;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r9, Operand(r7));
__ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ add(r9,
r9,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r9, Operand(kBinary32MaxExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r9, Operand(kBinary32MinExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
__ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r5, MemOperand(r3, r4, LSL, 2));
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r9, r9, r7);
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
bool is_signed_type = IsElementTypeSigned(array_type);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
__ teq(r9, Operand(r7));
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If exponent is negative than result is 0.
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
__ b(mi, &done);
// If exponent is too big than result is minimal value.
__ cmp(r9, Operand(meaningfull_bits - 1));
__ mov(r5, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r9, r9, Operand(0, RelocInfo::NONE));
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
__ teq(r7, Operand(0, RelocInfo::NONE));
__ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
}
// Slow case: call runtime.
__ bind(&slow);
// Entry registers are intact.
// r0: value
// r1: key
// r2: receiver
GenerateRuntimeSetProperty(masm);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
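
The two removed blocks above are the old hand-written external-array (typed-array) load and store paths; this is where the ChangeLog's "incorrect rounding for float-to-integer conversions" fix lands for ARM (the matching 702-line addition to deps/v8/src/arm/stub-cache-arm.cc suggests the code moved there). What the store-side conversion computes, as a rough scalar model: truncation toward zero, with NaN and +/-Infinity stored as zero for WebGL-style reproducibility. The range guard below keeps the C++ cast well defined; the real stub saturates via the bit manipulation shown above, where out-of-range values collapse to its "minimal value" (0x80000000 for signed element types):

    #include <cmath>
    #include <cstdint>

    int32_t DoubleToSignedElement(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;  // NaN/Inf -> 0
      if (value >= 2147483648.0 || value < -2147483648.0) {
        return INT32_MIN;  // the stub's "minimal value" for signed types
      }
      return static_cast<int32_t>(value);  // truncates toward zero
    }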

deps/v8/src/arm/lithium-arm.cc (335)

@@ -64,12 +64,12 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
 }


-void LInstruction::PrintTo(StringStream* stream) const {
+void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
   if (HasResult()) {
-    result()->PrintTo(stream);
+    PrintOutputOperandTo(stream);
     stream->Add(" ");
   }
   PrintDataTo(stream);
   if (HasEnvironment()) {
@@ -84,7 +84,29 @@ void LInstruction::PrintTo(StringStream* stream) const {
 }


-void LLabel::PrintDataTo(StringStream* stream) const {
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  inputs_.PrintOperandsTo(stream);
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  results_.PrintOperandsTo(stream);
+}
+
+
+template<typename T, int N>
+void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
+  for (int i = 0; i < N; i++) {
+    if (i > 0) stream->Add(" ");
+    elems_[i]->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
   LGap::PrintDataTo(stream);
   LLabel* rep = replacement();
   if (rep != NULL) {
@@ -143,74 +165,65 @@ const char* LArithmeticT::Mnemonic() const {
 }


-void LBinaryOperation::PrintDataTo(StringStream* stream) const {
-  stream->Add("= ");
-  left()->PrintTo(stream);
-  stream->Add(" ");
-  right()->PrintTo(stream);
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) const {
+void LGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", block_id());
 }


-void LBranch::PrintDataTo(StringStream* stream) const {
+void LBranch::PrintDataTo(StringStream* stream) {
   stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
 }


-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
-  left()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(" %s ", Token::String(op()));
-  right()->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }


-void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(is_strict() ? " === null" : " == null");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }


-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_object(");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }


-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }


-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }


-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_cached_array_index(");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }


-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(", \"%o\") then B%d else B%d",
               *hydrogen()->class_name(),
               true_block_id(),
@@ -218,29 +231,29 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
 }


-void LTypeofIs::PrintDataTo(StringStream* stream) const {
-  input()->PrintTo(stream);
+void LTypeofIs::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
   stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
 }


-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if typeof ");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(" == \"%s\" then B%d else B%d",
               *hydrogen()->type_literal()->ToCString(),
               true_block_id(), false_block_id());
 }


-void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
   stream->Add("#%d / ", arity());
 }


-void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
   stream->Add("/%s ", hydrogen()->OpName());
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
 }
@@ -249,48 +262,43 @@ void LLoadContextSlot::PrintDataTo(StringStream* stream) {
 }


-void LCallKeyed::PrintDataTo(StringStream* stream) const {
+void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[r2] #%d / ", arity());
 }


-void LCallNamed::PrintDataTo(StringStream* stream) const {
+void LCallNamed::PrintDataTo(StringStream* stream) {
   SmartPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }


-void LCallGlobal::PrintDataTo(StringStream* stream) const {
+void LCallGlobal::PrintDataTo(StringStream* stream) {
   SmartPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }


-void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
   stream->Add("#%d / ", arity());
 }


-void LCallNew::PrintDataTo(StringStream* stream) const {
-  LUnaryOperation::PrintDataTo(stream);
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
   stream->Add(" #%d / ", arity());
 }


-void LClassOfTest::PrintDataTo(StringStream* stream) const {
+void LClassOfTest::PrintDataTo(StringStream* stream) {
   stream->Add("= class_of_test(");
-  input()->PrintTo(stream);
+  InputAt(0)->PrintTo(stream);
   stream->Add(", \"%o\")", *hydrogen()->class_name());
 }


-void LUnaryOperation::PrintDataTo(StringStream* stream) const {
-  stream->Add("= ");
-  input()->PrintTo(stream);
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
   arguments()->PrintTo(stream);
   stream->Add(" length ");
@@ -301,6 +309,24 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
 }


+void LStoreNamed::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
 LChunk::LChunk(HGraph* graph)
     : spill_slot_count_(0),
       graph_(graph),
@@ -310,11 +336,6 @@ LChunk::LChunk(HGraph* graph)
 }


-void LChunk::Verify() const {
-  // TODO(twuerthinger): Implement verification for chunk.
-}
-
-
 int LChunk::GetNextSpillIndex(bool is_double) {
   // Skip a slot if for a double-width slot.
   if (is_double) spill_slot_count_++;
@@ -369,24 +390,6 @@ void LChunk::MarkEmptyBlocks() {
 }


-void LStoreNamed::PrintDataTo(StringStream* stream) const {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) const {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LGap* gap = new LGap(block);
   int index = -1;
@@ -593,33 +596,52 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
 }


-LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+                                    LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
   return Define(instr, new LUnallocated(LUnallocated::NONE));
 }


-LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateInstruction<1, I, T>* instr) {
   return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }


-LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateInstruction<1, I, T>* instr, int index) {
   return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
 }


-LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateInstruction<1, I, T>* instr) {
   return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }


-LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateInstruction<1, I, T>* instr, Register reg) {
   return Define(instr, ToUnallocated(reg));
 }


-LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
-                                               DoubleRegister reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
   return Define(instr, ToUnallocated(reg));
 }
@ -674,16 +696,15 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
} }
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
ASSERT(!instr->HasPointerMap()); allocator_->MarkAsSaveDoubles();
instr->set_pointer_map(new LPointerMap(position_));
return instr; return instr;
} }
LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) { LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
allocator_->RecordDefinition(current_instruction_, result); ASSERT(!instr->HasPointerMap());
instr->set_result(result); instr->set_pointer_map(new LPointerMap(position_));
return instr; return instr;
} }
@ -795,7 +816,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
ASSERT(right->representation().IsTagged()); ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, r1); LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0); LOperand* right_operand = UseFixed(right, r0);
LInstruction* result = new LArithmeticT(op, left_operand, right_operand); LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr); return MarkAsCall(DefineFixed(result, r0), instr);
} }
@ -876,8 +897,14 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
  if (FLAG_stress_environments && !instr->HasEnvironment()) {
    instr = AssignEnvironment(instr);
  }
-  if (current->IsBranch()) {
-    instr->set_hydrogen_value(HBranch::cast(current)->value());
+  if (current->IsTest() && !instr->IsGoto()) {
+    ASSERT(instr->IsControl());
+    HTest* test = HTest::cast(current);
+    instr->set_hydrogen_value(test->value());
+    HBasicBlock* first = test->FirstSuccessor();
+    HBasicBlock* second = test->SecondSuccessor();
+    ASSERT(first != NULL && second != NULL);
+    instr->SetBranchTargets(first->block_id(), second->block_id());
  } else {
    instr->set_hydrogen_value(current);
  }

@@ -931,23 +958,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
  HValue* v = instr->value();
-  HBasicBlock* first = instr->FirstSuccessor();
-  HBasicBlock* second = instr->SecondSuccessor();
-  ASSERT(first != NULL && second != NULL);
-  int first_id = first->block_id();
-  int second_id = second->block_id();
  if (v->EmitAtUses()) {
    if (v->IsClassOfTest()) {
      HClassOfTest* compare = HClassOfTest::cast(v);
      ASSERT(compare->value()->representation().IsTagged());
      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
-                                       TempRegister(),
-                                       first_id,
-                                       second_id);
+                                       TempRegister());
    } else if (v->IsCompare()) {
      HCompare* compare = HCompare::cast(v);
      Token::Value op = compare->token();

@@ -958,16 +977,12 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
        ASSERT(left->representation().IsInteger32());
        ASSERT(right->representation().IsInteger32());
        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseOrConstantAtStart(right),
-                                   first_id,
-                                   second_id);
+                                   UseOrConstantAtStart(right));
      } else if (r.IsDouble()) {
        ASSERT(left->representation().IsDouble());
        ASSERT(right->representation().IsDouble());
        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right),
-                                   first_id,
-                                   second_id);
+                                   UseRegisterAtStart(right));
      } else {
        ASSERT(left->representation().IsTagged());
        ASSERT(right->representation().IsTagged());

@@ -975,38 +990,30 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
        LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
        LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
        LInstruction* result = new LCmpTAndBranch(left_operand,
-                                                 right_operand,
-                                                 first_id,
-                                                 second_id);
+                                                 right_operand);
        return MarkAsCall(result, instr);
      }
    } else if (v->IsIsSmi()) {
      HIsSmi* compare = HIsSmi::cast(v);
      ASSERT(compare->value()->representation().IsTagged());
-      return new LIsSmiAndBranch(Use(compare->value()),
-                                 first_id,
-                                 second_id);
+      return new LIsSmiAndBranch(Use(compare->value()));
    } else if (v->IsHasInstanceType()) {
      HHasInstanceType* compare = HHasInstanceType::cast(v);
      ASSERT(compare->value()->representation().IsTagged());
-      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
-                                           first_id,
-                                           second_id);
+      return new LHasInstanceTypeAndBranch(
+          UseRegisterAtStart(compare->value()));
    } else if (v->IsHasCachedArrayIndex()) {
      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
      ASSERT(compare->value()->representation().IsTagged());
      return new LHasCachedArrayIndexAndBranch(
-          UseRegisterAtStart(compare->value()), first_id, second_id);
+          UseRegisterAtStart(compare->value()));
    } else if (v->IsIsNull()) {
      HIsNull* compare = HIsNull::cast(v);
      ASSERT(compare->value()->representation().IsTagged());
-      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
-                                  first_id,
-                                  second_id);
+      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
    } else if (v->IsIsObject()) {
      HIsObject* compare = HIsObject::cast(v);
      ASSERT(compare->value()->representation().IsTagged());

@@ -1015,46 +1022,37 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
      LOperand* temp2 = TempRegister();
      return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
                                    temp1,
-                                    temp2,
-                                    first_id,
-                                    second_id);
+                                    temp2);
    } else if (v->IsCompareJSObjectEq()) {
      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
-                                         UseRegisterAtStart(compare->right()),
-                                         first_id,
-                                         second_id);
+                                         UseRegisterAtStart(compare->right()));
    } else if (v->IsInstanceOf()) {
      HInstanceOf* instance_of = HInstanceOf::cast(v);
      LInstruction* result =
          new LInstanceOfAndBranch(Use(instance_of->left()),
-                                   Use(instance_of->right()),
-                                   first_id,
-                                   second_id);
+                                   Use(instance_of->right()));
      return MarkAsCall(result, instr);
    } else if (v->IsTypeofIs()) {
      HTypeofIs* typeof_is = HTypeofIs::cast(v);
-      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
-                                    first_id,
-                                    second_id);
+      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
    } else {
      if (v->IsConstant()) {
        if (HConstant::cast(v)->handle()->IsTrue()) {
-          return new LGoto(first_id);
+          return new LGoto(instr->FirstSuccessor()->block_id());
        } else if (HConstant::cast(v)->handle()->IsFalse()) {
-          return new LGoto(second_id);
+          return new LGoto(instr->SecondSuccessor()->block_id());
        }
      }
      Abort("Undefined compare before branch");
      return NULL;
    }
  }
-  return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+  return new LBranch(UseRegisterAtStart(v));
}

-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
-    HCompareMapAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  LOperand* value = UseRegisterAtStart(instr->value());
  LOperand* temp = TempRegister();

@@ -1073,7 +1071,7 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LInstruction* result =
+  LInstanceOf* result =
      new LInstanceOf(UseFixed(instr->left(), r0),
                      UseFixed(instr->right(), r1));
  return MarkAsCall(DefineFixed(result, r0), instr);

@@ -1082,9 +1080,10 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
    HInstanceOfKnownGlobal* instr) {
-  LInstruction* result =
-      new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0));
-  return MarkAsCall(DefineFixed(result, r0), instr);
+  LInstanceOfKnownGlobal* result =
+      new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
+  MarkAsSaveDoubles(result);
+  return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0)));
}

@@ -1093,7 +1092,7 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
  LOperand* receiver = UseFixed(instr->receiver(), r0);
  LOperand* length = UseRegisterAtStart(instr->length());
  LOperand* elements = UseRegisterAtStart(instr->elements());
-  LInstruction* result = new LApplyArguments(function,
+  LApplyArguments* result = new LApplyArguments(function,
                                             receiver,
                                             length,
                                             elements);

@@ -1129,7 +1128,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
  BuiltinFunctionId op = instr->op();
  LOperand* input = UseRegisterAtStart(instr->value());
  LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
-  LInstruction* result = new LUnaryMathOperation(input, temp);
+  LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
  switch (op) {
    case kMathAbs:
      return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));

@@ -1162,8 +1161,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
  ASSERT(instr->key()->representation().IsTagged());
  argument_count_ -= instr->argument_count();
-  UseFixed(instr->key(), r2);
-  return MarkAsCall(DefineFixed(new LCallKeyed, r0), instr);
+  LOperand* key = UseFixed(instr->key(), r2);
+  return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr);
}

@@ -1188,7 +1187,7 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
  LOperand* constructor = UseFixed(instr->constructor(), r1);
  argument_count_ -= instr->argument_count();
-  LInstruction* result = new LCallNew(constructor);
+  LCallNew* result = new LCallNew(constructor);
  return MarkAsCall(DefineFixed(result, r0), instr);
}

@@ -1378,7 +1377,7 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
    bool reversed = (op == Token::GT || op == Token::LTE);
    LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
    LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
-    LInstruction* result = new LCmpT(left, right);
+    LCmpT* result = new LCmpT(left, right);
    return MarkAsCall(DefineFixed(result, r0), instr);
  }
}

@@ -1388,7 +1387,7 @@ LInstruction* LChunkBuilder::DoCompareJSObjectEq(
    HCompareJSObjectEq* instr) {
  LOperand* left = UseRegisterAtStart(instr->left());
  LOperand* right = UseRegisterAtStart(instr->right());
-  LInstruction* result = new LCmpJSObjectEq(left, right);
+  LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
  return DefineAsRegister(result);
}

@@ -1455,7 +1454,7 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
  LOperand* object = UseRegister(instr->value());
-  LInstruction* result = new LValueOf(object, TempRegister());
+  LValueOf* result = new LValueOf(object, TempRegister());
  return AssignEnvironment(DefineSameAsFirst(result));
}

@@ -1478,7 +1477,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
  if (from.IsTagged()) {
    if (to.IsDouble()) {
      LOperand* value = UseRegister(instr->value());
-      LInstruction* res = new LNumberUntagD(value);
+      LNumberUntagD* res = new LNumberUntagD(value);
      return AssignEnvironment(DefineAsRegister(res));
    } else {
      ASSERT(to.IsInteger32());

@@ -1504,13 +1503,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
      // Make sure that the temp and result_temp registers are
      // different.
      LUnallocated* result_temp = TempRegister();
-      LInstruction* result = new LNumberTagD(value, temp1, temp2);
+      LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
      Define(result, result_temp);
      return AssignPointerMap(result);
    } else {
      ASSERT(to.IsInteger32());
      LOperand* value = UseRegister(instr->value());
-      LInstruction* res = new LDoubleToI(value);
+      LDoubleToI* res = new LDoubleToI(value);
      return AssignEnvironment(DefineAsRegister(res));
    }
  } else if (from.IsInteger32()) {

@@ -1520,7 +1519,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
    if (val->HasRange() && val->range()->IsInSmiRange()) {
      return DefineSameAsFirst(new LSmiTag(value));
    } else {
-      LInstruction* result = new LNumberTagI(value);
+      LNumberTagI* result = new LNumberTagI(value);
      return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
    }
  } else {

@@ -1597,7 +1596,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LInstruction* result = new LLoadGlobal();
+  LLoadGlobal* result = new LLoadGlobal();
  return instr->check_hole_value()
      ? AssignEnvironment(DefineAsRegister(result))
      : DefineAsRegister(result);

@@ -1646,7 +1645,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
  ASSERT(instr->key()->representation().IsInteger32());
  LOperand* obj = UseRegisterAtStart(instr->object());
  LOperand* key = UseRegisterAtStart(instr->key());
-  LInstruction* result = new LLoadKeyedFastElement(obj, key);
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
  return AssignEnvironment(DefineSameAsFirst(result));
}

@@ -1717,6 +1716,20 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}

LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
  LOperand* string = UseRegister(instr->string());
  LOperand* index = UseRegisterOrConstant(instr->index());
  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}

LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
  LOperand* string = UseRegisterAtStart(instr->value());
  return DefineAsRegister(new LStringLength(string));
}

LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
  return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
}

@@ -1740,7 +1753,7 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
  LOperand* object = UseRegisterAtStart(instr->object());
  LOperand* key = UseRegisterAtStart(instr->key());
-  LInstruction* result = new LDeleteProperty(object, key);
+  LDeleteProperty* result = new LDeleteProperty(object, key);
  return MarkAsCall(DefineFixed(result, r0), instr);
}

@@ -1781,13 +1794,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
  LOperand* arguments = UseRegister(instr->arguments());
  LOperand* length = UseTempRegister(instr->length());
  LOperand* index = UseRegister(instr->index());
-  LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
-  return DefineAsRegister(AssignEnvironment(result));
+  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  return AssignEnvironment(DefineAsRegister(result));
}

LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LInstruction* result = new LTypeof(UseRegisterAtStart(instr->value()));
+  LTypeof* result = new LTypeof(UseRegisterAtStart(instr->value()));
  return MarkAsCall(DefineFixed(result, r0), instr);
}

1158
deps/v8/src/arm/lithium-arm.h

File diff suppressed because it is too large

608
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

8
deps/v8/src/arm/lithium-codegen-arm.h

@@ -93,12 +93,17 @@ class LCodeGen BASE_EMBEDDED {
  void FinishCode(Handle<Code> code);

  // Deferred code support.
-  void DoDeferredGenericBinaryStub(LBinaryOperation* instr, Token::Value op);
+  template<int T>
+  void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
+                                   Token::Value op);
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredNumberTagI(LNumberTagI* instr);
  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
  void DoDeferredStackCheck(LGoto* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                        Label* map_check);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);

@@ -212,6 +217,7 @@ class LCodeGen BASE_EMBEDDED {
  MemOperand ToMemOperand(LOperand* op) const;

  // Specific math operations - used from DoUnaryMathOperation.
+  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);
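Both of the hunks above are written against the new LTemplateInstruction<R, I, T> family from lithium-arm.h (whose diff is suppressed above): the counts of result, input, and temp operands become template parameters, so helpers such as Define() can demand a one-result instruction (R == 1) at compile time. A minimal sketch of that shape, for orientation only; the class and member names here are illustrative, not V8's, and std::array stands in for whatever fixed-size container the real class uses:

#include <array>

class LOperand;  // stand-in for V8's operand type

// Sketch only: operand counts as template parameters give fixed-size
// storage, and a 1 in the first slot is what lets Define() set a result.
template <int R, int I, int T>
class TemplateInstructionSketch {
 public:
  void set_result(LOperand* operand) { results_[0] = operand; }
  LOperand* result() const { return results_[0]; }

 private:
  std::array<LOperand*, R> results_;
  std::array<LOperand*, I> inputs_;
  std::array<LOperand*, T> temps_;
};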

37
deps/v8/src/arm/macro-assembler-arm.cc

@@ -485,6 +485,11 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
  PopSafepointRegisters();
}

void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
  str(reg, SafepointRegisterSlot(reg));
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.

@@ -493,6 +498,11 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}

MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}

void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  ASSERT(src.rm().is(no_reg));

@@ -1960,6 +1970,13 @@ void MacroAssembler::AbortIfSmi(Register object) {
}

void MacroAssembler::AbortIfNotSmi(Register object) {
  ASSERT_EQ(0, kSmiTag);
  tst(object, Operand(kSmiTagMask));
  Assert(eq, "Operand is not smi");
}

void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,

@@ -2185,6 +2202,26 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
}

void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  const int32_t kPCRegOffset = 2 * kPointerSize;
  ldr(result, MemOperand(ldr_location));
  if (FLAG_debug_code) {
    // Check that the instruction is a ldr reg, [pc + offset] .
    and_(result, result, Operand(kLdrPCPattern));
    cmp(result, Operand(kLdrPCPattern));
    Check(eq, "The instruction to patch should be a load from pc.");
    // Result was clobbered. Restore it.
    ldr(result, MemOperand(ldr_location));
  }
  // Get the address of the constant.
  and_(result, result, Operand(kLdrOffsetMask));
  add(result, ldr_location, Operand(result));
  add(result, result, Operand(kPCRegOffset));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),

13
deps/v8/src/arm/macro-assembler-arm.h

@@ -234,8 +234,9 @@ class MacroAssembler: public Assembler {
  void PopSafepointRegisters();
  void PushSafepointRegistersAndDoubles();
  void PopSafepointRegistersAndDoubles();
  void StoreToSafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code);
  static MemOperand SafepointRegisterSlot(Register reg);

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,

@@ -740,6 +741,7 @@ class MacroAssembler: public Assembler {
  // Abort execution if argument is a smi. Used in debug code.
  void AbortIfSmi(Register object);
  void AbortIfNotSmi(Register object);

  // ---------------------------------------------------------------------------
  // String utilities

@@ -776,6 +778,15 @@ class MacroAssembler: public Assembler {
                        Label* failure);

  // ---------------------------------------------------------------------------
  // Patching helpers.

  // Get the location of a relocated constant (its address in the constant pool)
  // from its load site.
  void GetRelocatedValueLocation(Register ldr_location,
                                 Register result);

 private:
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

11
deps/v8/src/arm/simulator-arm.cc

@@ -153,7 +153,12 @@ void Debugger::Stop(Instr* instr) {
  if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
    sim_->watched_stops[code].desc = msg;
  }
-  PrintF("Simulator hit %s\n", msg);
+  // Print the stop message and code if it is not the default code.
+  if (code != kMaxStopCode) {
+    PrintF("Simulator hit stop %u: %s\n", code, msg);
+  } else {
+    PrintF("Simulator hit %s\n", msg);
+  }
  sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
  Debug();
}

@@ -450,7 +455,7 @@ void Debugger::Debug() {
        PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
        PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
        PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
-        PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_);
+        PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
      } else if (strcmp(cmd, "stop") == 0) {
        int32_t value;
        intptr_t stop_pc = sim_->get_pc() - 2 * Instr::kInstrSize;

@@ -2902,6 +2907,10 @@ void Simulator::InstructionDecode(Instr* instr) {
        break;
      }
    }
+  // If the instruction is a non taken conditional stop, we need to skip the
+  // inlined message address.
+  } else if (instr->IsStop()) {
+    set_pc(get_pc() + 2 * Instr::kInstrSize);
  }
  if (!pc_modified_) {
    set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);

702
deps/v8/src/arm/stub-cache-arm.cc

@@ -902,6 +902,111 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
}
// Convert and store int passed in register ival to IEEE 754 single precision
// floating point value at memory location (dst + 4 * wordoffset)
// If VFP3 is available use it for conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
} else {
Label not_special, done;
// Move sign bit from source to destination. This works because the sign
// bit in the exponent word of the double has the same position and polarity
// as the 2's complement sign bit in a Smi.
ASSERT(kBinary32SignMask == 0x80000000u);
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
__ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(ival, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased).
static const uint32_t exponent_word_for_1 =
kBinary32ExponentBias << kBinary32ExponentShift;
__ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
__ b(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
__ CountLeadingZeros(zeros, ival, scratch1);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
zeros,
Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
__ orr(fval,
fval,
Operand(scratch1, LSL, kBinary32ExponentShift));
// Shift up the source chopping the top bit off.
__ add(zeros, zeros, Operand(1));
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
__ mov(ival, Operand(ival, LSL, zeros));
// And the top (top 20 bits).
__ orr(fval,
fval,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
__ str(fval, MemOperand(dst, wordoffset, LSL, 2));
}
}
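For readers who don't think in ARM condition codes, here is the non-VFP path above as host-side C++. It is a sketch only: it truncates low mantissa bits exactly as the stub's shifts do, and it uses GCC/Clang's __builtin_clz where the stub uses CountLeadingZeros:

#include <stdint.h>

// Pack a 32-bit signed integer into IEEE 754 binary32 bits, mirroring the
// non-VFP path of StoreIntAsFloat (truncating, not rounding).
uint32_t Int32ToBinary32Bits(int32_t ival) {
  uint32_t sign = static_cast<uint32_t>(ival) & 0x80000000u;
  uint32_t abs = sign ? 0u - static_cast<uint32_t>(ival)
                      : static_cast<uint32_t>(ival);
  if (abs == 0) return 0;                         // 0 -> +0.0f
  if (abs == 1) return sign | (127u << 23);       // +/-1: bare biased exponent
  int zeros = __builtin_clz(abs);                 // abs >= 2, so zeros <= 30
  uint32_t exponent = (31 - zeros) + 127;         // biased binary32 exponent
  uint32_t mantissa = (abs << (zeros + 1)) >> 9;  // drop hidden bit, keep top 23
  return sign | (exponent << 23) | mantissa;
}

The zeros + 1 shift deliberately pushes the leading 1 (the hidden bit) off the top of the word, which is why 1 and -1 need the special case, just as the stub's comment explains.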
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This functions does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
__ mov(loword, Operand(0, RelocInfo::NONE));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
// If least significant bit of biased exponent was not 1 it was corrupted
// by most significant bit of mantissa so we should fix that.
if (!(biased_exponent & 1)) {
__ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
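The same packing as host-side C++, under the stub's stated preconditions (value is nonzero and really has exactly leading_zeroes leading zero bits). Where the stub ORs the hidden bit in and then clears it when it would corrupt the exponent's low bit, this sketch simply masks the hidden bit out; the result is the same:

#include <stdint.h>

// Pack an unsigned 32-bit integer into raw IEEE 754 double bits
// (hiword:loword combined into one uint64_t), mirroring GenerateUInt2Double.
uint64_t UInt2DoubleBits(uint32_t value, int leading_zeroes) {
  int meaningful_bits = 32 - leading_zeroes - 1;      // bits below the leading 1
  uint64_t biased_exponent = 1023 + meaningful_bits;  // double exponent bias
  uint64_t mantissa = static_cast<uint64_t>(value) << (52 - meaningful_bits);
  // Mask out the hidden bit (bit 52); the exponent field occupies it.
  return (biased_exponent << 52) | (mantissa & ((1ull << 52) - 1));
}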
#undef __
#define __ ACCESS_MASM(masm())

@@ -3224,6 +3329,603 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
static bool IsElementTypeSigned(ExternalArrayType array_type) {
switch (array_type) {
case kExternalByteArray:
case kExternalShortArray:
case kExternalIntArray:
return true;
case kExternalUnsignedByteArray:
case kExternalUnsignedShortArray:
case kExternalUnsignedIntArray:
return false;
default:
UNREACHABLE();
return false;
}
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label slow, failed_allocation;
Register key = r0;
Register receiver = r1;
// Check that the object isn't a smi
__ BranchOnSmi(receiver, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(key, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(lo, &slow);
// r3: elements array
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
// We are not untagging smi key and instead work with it
// as if it was premultiplied by 2.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
switch (array_type) {
case kExternalByteArray:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalUnsignedByteArray:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalShortArray:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalUnsignedShortArray:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// r2: value
// For floating-point array type
// s0: value (if VFP3 is supported)
// r2: value (if VFP3 is not supported)
if (array_type == kExternalIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ cmp(value, Operand(0xC0000000));
__ b(mi, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't touch r0 or r1 as they are needed if allocation
// fails.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(value, r0, r3);
__ TailCallStub(&stub);
}
} else if (array_type == kExternalUnsignedIntArray) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, value);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(value, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(value, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
Register hiword = value; // r2.
Register loword = r3;
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm(), hiword, loword, r4, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm(), hiword, loword, r4, 1);
__ bind(&done);
// Integer was converted to double in registers hiword:loword.
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ mov(r0, r4);
__ Ret();
}
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
// r3: heap number for result
// Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
// the slow case from here.
__ and_(r0, value, Operand(kBinary32MantissaMask));
// Extract exponent to r1. OK to clobber r1 now as there are no jumps to
// the slow case from here.
__ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r1, Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(r1, Operand(0xff));
__ mov(r1, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r1,
r1,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r2, value, Operand(kBinary32SignMask));
value = no_reg;
__ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
__ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
__ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else {
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
__ Push(r1, r0);
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
return GetCode(flags);
}
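The two range checks this load stub performs before tagging compress neatly into C++. A sketch, assuming the 32-bit smi encoding of this era (one tag bit, 31-bit signed payload); the helper names are illustrative, not V8 API:

#include <stdint.h>

// 'cmp(value, Operand(0xC0000000)); b(mi, &box_int)' computes
// value + 2^30 and boxes on a negative result, i.e. exactly when value
// falls outside the 31-bit smi payload range [-2^30, 2^30 - 1].
bool FitsInSmi(int32_t value) {
  return ((static_cast<uint32_t>(value) + 0x40000000u) & 0x80000000u) == 0;
}

// Unsigned values must fit the positive half of the payload, so neither
// of the top two bits may be set ('tst(value, Operand(0xC0000000))').
bool UnsignedFitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;
}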
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, check_heap_number;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
__ BranchOnNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
// Perform int-to-float conversion and store to memory.
StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
break;
default:
UNREACHABLE();
break;
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
// r3: external array.
// r4: index (integer).
__ bind(&check_heap_number);
__ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (array_type == kExternalFloatArray) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else {
// Need to perform float-to-int conversion.
// Test for NaN or infinity (both give zero).
__ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
// Hoisted load. vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs and Infinities have all-one exponents so they sign extend to -1.
__ cmp(r6, Operand(-1));
__ mov(r5, Operand(0), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
} else {
__ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
}
__ vmov(r5, s0, ne);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
} else {
// VFP3 is not available do manual conversions.
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaInLoWordShift =
kBitsPerInt - kMantissaInHiWordShift;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r9, Operand(r7));
__ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ add(r9,
r9,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r9, Operand(kBinary32MaxExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r9, Operand(kBinary32MinExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
__ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r5, MemOperand(r3, r4, LSL, 2));
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r9, r9, r7);
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
bool is_signed_type = IsElementTypeSigned(array_type);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
__ teq(r9, Operand(r7));
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If exponent is negative then result is 0.
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
__ b(mi, &done);
// If exponent is too big then result is minimal value.
__ cmp(r9, Operand(meaningfull_bits - 1));
__ mov(r5, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r9, r9, Operand(0, RelocInfo::NONE));
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
__ teq(r7, Operand(0, RelocInfo::NONE));
__ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
}
// Slow case: call runtime.
__ bind(&slow);
// Entry registers are intact.
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
return GetCode(flags);
}
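One detail of the VFP3 store path above is worth spelling out: Sbfx sign-extends the 11-bit exponent field, so NaN and +/-Infinity (all-ones exponent) compare equal to -1 and get stored as zero, matching the WebGL note in the code. The same test in host-side C++, as a sketch:

#include <stdint.h>

// Mirrors the 'Sbfx(r6, r6, kExponentShift, kExponentBits); cmp(r6, -1)'
// sequence: an all-ones exponent field marks NaN or +/-Infinity, and
// sign-extended it reads as -1.
bool IsNaNOrInfinity(uint64_t double_bits) {
  return ((double_bits >> 52) & 0x7FF) == 0x7FF;
}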
#undef __

} }  // namespace v8::internal

26
deps/v8/src/ast.cc

@@ -32,7 +32,6 @@
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
-#include "stub-cache.h"

namespace v8 {
namespace internal {

@@ -560,20 +559,13 @@ void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}

-static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
+static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
  SharedFunctionInfo* info = target->shared();
-  if (target->NeedsArgumentsAdaption()) {
-    // If the number of formal parameters of the target function
-    // does not match the number of arguments we're passing, we
-    // don't want to deal with it.
-    return info->formal_parameter_count() == arity;
-  } else {
-    // If the target doesn't need arguments adaption, we can call
-    // it directly, but we avoid to do so if it has a custom call
-    // generator, because that is likely to generate better code.
-    return !info->HasBuiltinFunctionId() ||
-        !CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
-  }
+  // If the number of formal parameters of the target function does
+  // not match the number of arguments we're passing, we don't want to
+  // deal with it. Otherwise, we can call it directly.
+  return !target->NeedsArgumentsAdaption() ||
+      info->formal_parameter_count() == arity;
}

@@ -589,7 +581,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
    type = Handle<Map>(holder()->map());
  } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
    target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
-    return CallWithoutIC(target_, arguments()->length());
+    return CanCallWithoutIC(target_, arguments()->length());
  } else {
    return false;
  }

@@ -609,8 +601,8 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
  Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
  // If the function is in new space we assume it's more likely to
  // change and thus prefer the general IC code.
-  if (!Heap::InNewSpace(*candidate)
-      && CallWithoutIC(candidate, arguments()->length())) {
+  if (!Heap::InNewSpace(*candidate) &&
+      CanCallWithoutIC(candidate, arguments()->length())) {
    target_ = candidate;
    return true;
  }

6
deps/v8/src/ast.h

@@ -1675,7 +1675,8 @@ class FunctionLiteral: public Expression {
                  int start_position,
                  int end_position,
                  bool is_expression,
-                  bool contains_loops)
+                  bool contains_loops,
+                  bool strict_mode)
      : name_(name),
        scope_(scope),
        body_(body),

@@ -1689,6 +1690,7 @@ class FunctionLiteral: public Expression {
        end_position_(end_position),
        is_expression_(is_expression),
        contains_loops_(contains_loops),
+        strict_mode_(strict_mode),
        function_token_position_(RelocInfo::kNoPosition),
        inferred_name_(Heap::empty_string()),
        try_full_codegen_(false),

@@ -1705,6 +1707,7 @@ class FunctionLiteral: public Expression {
  int end_position() const { return end_position_; }
  bool is_expression() const { return is_expression_; }
  bool contains_loops() const { return contains_loops_; }
+  bool strict_mode() const { return strict_mode_; }

  int materialized_literal_count() { return materialized_literal_count_; }
  int expected_property_count() { return expected_property_count_; }

@@ -1747,6 +1750,7 @@ class FunctionLiteral: public Expression {
  int end_position_;
  bool is_expression_;
  bool contains_loops_;
+  bool strict_mode_;
  int function_token_position_;
  Handle<String> inferred_name_;
  bool try_full_codegen_;

76
deps/v8/src/builtins.cc

@@ -1282,44 +1282,6 @@ static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
}

-static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
-}
-
-static void Generate_KeyedLoadIC_ExternalUnsignedByteArray(
-    MacroAssembler* masm) {
-  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
-}
-
-static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray);
-}
-
-static void Generate_KeyedLoadIC_ExternalUnsignedShortArray(
-    MacroAssembler* masm) {
-  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
-}
-
-static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray);
-}
-
-static void Generate_KeyedLoadIC_ExternalUnsignedIntArray(
-    MacroAssembler* masm) {
-  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
-}
-
-static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray);
-}
-
static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
  KeyedLoadIC::GeneratePreMonomorphic(masm);
}

@@ -1364,44 +1326,6 @@ static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
}

-static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray);
-}
-
-static void Generate_KeyedStoreIC_ExternalUnsignedByteArray(
-    MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
-}
-
-static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray);
-}
-
-static void Generate_KeyedStoreIC_ExternalUnsignedShortArray(
-    MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
-}
-
-static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray);
-}
-
-static void Generate_KeyedStoreIC_ExternalUnsignedIntArray(
-    MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
-}
-
-static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray);
-}
-
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
  KeyedStoreIC::GenerateMiss(masm);
}

14
deps/v8/src/builtins.h

@@ -93,13 +93,6 @@ enum BuiltinExtraArguments {
   V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
   V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
   V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
-  V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
-  V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
-  V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
-  V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
-  V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
-  V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
-  V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC) \
   V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
                                                                 \
   V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
@@ -110,13 +103,6 @@ enum BuiltinExtraArguments {
                                                   \
   V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
   V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
-  V(KeyedStoreIC_ExternalByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
-  V(KeyedStoreIC_ExternalUnsignedByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
-  V(KeyedStoreIC_ExternalShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
-  V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
-  V(KeyedStoreIC_ExternalIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
-  V(KeyedStoreIC_ExternalUnsignedIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
-  V(KeyedStoreIC_ExternalFloatArray, KEYED_STORE_IC, MEGAMORPHIC) \
                                                                   \
   /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
   V(FunctionCall, BUILTIN, UNINITIALIZED) \
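The builtins.h lists above are X-macros: each V(...) row expands into an enum value, a generator hookup, and a table entry, which is why deleting the external-array rows here is the whole deregistration. A standalone sketch of the pattern (simplified, not V8's actual macro set):

    // Standalone sketch of the X-macro ("V-list") pattern used by builtins.h:
    // one list expands into an enum and a name table, so deleting a row (as
    // the external-array IC rows are deleted above) removes it everywhere.
    #include <cstdio>

    #define BUILTIN_LIST(V) \
      V(KeyedLoadIC_Generic) \
      V(KeyedLoadIC_String)

    enum BuiltinId {
    #define DEFINE_ENUM(name) k##name,
      BUILTIN_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
      kBuiltinCount
    };

    static const char* kBuiltinNames[] = {
    #define DEFINE_NAME(name) #name,
      BUILTIN_LIST(DEFINE_NAME)
    #undef DEFINE_NAME
    };

    int main() {
      for (int i = 0; i < kBuiltinCount; ++i) {
        std::printf("%s\n", kBuiltinNames[i]);  // Enum and table stay in sync.
      }
    }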

13
deps/v8/src/code-stubs.h

@@ -273,20 +273,21 @@ class FastNewClosureStub : public CodeStub {
 class FastNewContextStub : public CodeStub {
  public:
-  static const int kMaximumSlots = 64;
+  // We want no more than 64 different stubs.
+  static const int kMaximumSlots = Context::MIN_CONTEXT_SLOTS + 63;

   explicit FastNewContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+    ASSERT(slots_ >= Context::MIN_CONTEXT_SLOTS && slots_ <= kMaximumSlots);
   }

   void Generate(MacroAssembler* masm);

  private:
-  int slots_;
+  virtual const char* GetName() { return "FastNewContextStub"; }
+  virtual Major MajorKey() { return FastNewContext; }
+  virtual int MinorKey() { return slots_; }

-  const char* GetName() { return "FastNewContextStub"; }
-  Major MajorKey() { return FastNewContext; }
-  int MinorKey() { return slots_; }
+  int slots_;
 };
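Context for the code-stubs.h change: MinorKey() feeds the per-stub cache key, so every distinct slots_ value compiles and caches its own stub, and the new kMaximumSlots = Context::MIN_CONTEXT_SLOTS + 63 caps that at 64 variants, as the comment says. A sketch of the keying idea (the encoding and constants are illustrative, not V8's exact layout):

    // Sketch of how a stub cache key might combine major and minor keys.
    // Because MinorKey() is the slot count, each distinct count yields a
    // distinct cached stub; bounding slots_ bounds the number of stubs.
    #include <cstdint>

    constexpr int kMinContextSlots = 5;  // Assumed value, for illustration.
    constexpr int kMajorBits = 6;

    constexpr uint32_t StubKey(uint32_t major, uint32_t minor) {
      return (minor << kMajorBits) | major;  // Minor key in the high bits.
    }

    static_assert(
        StubKey(1, kMinContextSlots) != StubKey(1, kMinContextSlots + 1),
        "each slot count gets its own cached stub");

    int main() { return 0; }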

2
deps/v8/src/codegen.cc

@@ -267,7 +267,7 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) {
     CodeGenerator::PrintCode(code, info);
     info->SetCode(code);  // May be an empty handle.
 #ifdef ENABLE_GDB_JIT_INTERFACE
-    if (!code.is_null()) {
+    if (FLAG_gdbjit && !code.is_null()) {
       GDBJITLineInfo* lineinfo =
           masm.positions_recorder()->DetachGDBJITLineInfo();

2
deps/v8/src/compiler.cc

@@ -37,7 +37,7 @@
 #include "full-codegen.h"
 #include "gdb-jit.h"
 #include "hydrogen.h"
-#include "lithium-allocator.h"
+#include "lithium.h"
 #include "liveedit.h"
 #include "oprofile-agent.h"
 #include "parser.h"

5
deps/v8/src/extensions/experimental/experimental.gyp

@@ -27,7 +27,10 @@
 {
   'variables': {
-    'icu_src_dir%': '',
+    # TODO(cira): Find out how to pass this value for arbitrary embedder.
+    # Chromium sets it in common.gypi and does force include of that file for
+    # all sub projects.
+    'icu_src_dir%': '../../../../third_party/icu',
   },
   'targets': [
     {

1
deps/v8/src/flag-definitions.h

@@ -301,6 +301,7 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")

 // parser.cc
 DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_bool(strict_mode, true, "allow strict mode directives")

 // rewriter.cc
 DEFINE_bool(optimize_ast, true, "optimize the ast")

2
deps/v8/src/full-codegen.cc

@@ -308,7 +308,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // may be an empty handle.
 #ifdef ENABLE_GDB_JIT_INTERFACE
-  if (!code.is_null()) {
+  if (FLAG_gdbjit && !code.is_null()) {
     GDBJITLineInfo* lineinfo =
         masm.positions_recorder()->DetachGDBJITLineInfo();

5
deps/v8/src/heap.h

@@ -203,7 +203,10 @@ namespace internal {
   V(zero_symbol, "0") \
   V(global_eval_symbol, "GlobalEval") \
   V(identity_hash_symbol, "v8::IdentityHash") \
-  V(closure_symbol, "(closure)")
+  V(closure_symbol, "(closure)") \
+  V(use_strict, "use strict") \
+  V(KeyedLoadExternalArray_symbol, "KeyedLoadExternalArray") \
+  V(KeyedStoreExternalArray_symbol, "KeyedStoreExternalArray")

 // Forward declarations.

32
deps/v8/src/hydrogen-instructions.cc

@@ -570,34 +570,29 @@ void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
 }

-void HBranch::PrintDataTo(StringStream* stream) const {
-  int first_id = FirstSuccessor()->block_id();
-  int second_id = SecondSuccessor()->block_id();
-  stream->Add("on ");
-  value()->PrintNameTo(stream);
-  stream->Add(" (B%d, B%d)", first_id, second_id);
-}
-
-
-void HCompareMapAndBranch::PrintDataTo(StringStream* stream) const {
-  stream->Add("on ");
-  value()->PrintNameTo(stream);
-  stream->Add(" (%p)", *map());
-}
-
-
-void HGoto::PrintDataTo(StringStream* stream) const {
-  stream->Add("B%d", FirstSuccessor()->block_id());
+void HControlInstruction::PrintDataTo(StringStream* stream) const {
+  if (FirstSuccessor() != NULL) {
+    int first_id = FirstSuccessor()->block_id();
+    if (SecondSuccessor() == NULL) {
+      stream->Add(" B%d", first_id);
+    } else {
+      int second_id = SecondSuccessor()->block_id();
+      stream->Add(" goto (B%d, B%d)", first_id, second_id);
+    }
+  }
 }

-void HReturn::PrintDataTo(StringStream* stream) const {
+void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const {
   value()->PrintNameTo(stream);
+  HControlInstruction::PrintDataTo(stream);
 }

-void HThrow::PrintDataTo(StringStream* stream) const {
+void HCompareMap::PrintDataTo(StringStream* stream) const {
   value()->PrintNameTo(stream);
+  stream->Add(" (%p)", *map());
+  HControlInstruction::PrintDataTo(stream);
 }

@@ -1255,6 +1250,11 @@ HType HUnaryPredicate::CalculateInferredType() const {
 }

+HType HBitwiseBinaryOperation::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
 HType HArithmeticBinaryOperation::CalculateInferredType() const {
   return HType::TaggedNumber();
 }
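The hydrogen-instructions.cc rewrite above replaces per-class successor printing with one base-class implementation that subclasses invoke after printing their own operands. A simplified sketch of that delegation (std::ostream stands in for V8's StringStream):

    // Simplified sketch of the new printing scheme: the base control
    // instruction prints the successors; subclasses print their own data
    // first, then delegate to the base.
    #include <iostream>

    struct Block { int id; };

    class ControlInstruction {
     public:
      ControlInstruction(Block* a, Block* b) : first_(a), second_(b) {}
      virtual ~ControlInstruction() = default;

      virtual void PrintDataTo(std::ostream& os) const {
        if (first_ == nullptr) return;
        if (second_ == nullptr) {
          os << " B" << first_->id;  // Unconditional edge, e.g. a goto.
        } else {
          os << " goto (B" << first_->id << ", B" << second_->id << ")";
        }
      }

     private:
      Block* first_;
      Block* second_;
    };

    class Test : public ControlInstruction {
     public:
      Test(const char* value, Block* t, Block* f)
          : ControlInstruction(t, f), value_(value) {}

      void PrintDataTo(std::ostream& os) const override {
        os << value_;                         // Print the operand...
        ControlInstruction::PrintDataTo(os);  // ...then the shared part.
      }

     private:
      const char* value_;
    };

    int main() {
      Block b1{1}, b2{2};
      Test t("v7", &b1, &b2);
      t.PrintDataTo(std::cout);  // Prints: v7 goto (B1, B2)
      std::cout << "\n";
    }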

190
deps/v8/src/hydrogen-instructions.h

@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -81,6 +81,7 @@ class LChunkBuilder;
 //       HStoreNamed
 //         HStoreNamedField
 //         HStoreNamedGeneric
+//       HStringCharCodeAt
 //     HBlockEntry
 //     HCall
 //       HCallConstantFunction
@@ -98,9 +99,9 @@ class LChunkBuilder;
 //       HDeoptimize
 //       HGoto
 //       HUnaryControlInstruction
-//         HBranch
-//         HCompareMapAndBranch
+//         HCompareMap
 //         HReturn
+//         HTest
 //         HThrow
 //     HEnterInlined
 //     HFunctionLiteral
@@ -137,6 +138,7 @@ class LChunkBuilder;
 //       HLoadNamedGeneric
 //       HLoadFunctionPrototype
 //       HPushArgument
+//       HStringLength
 //       HTypeof
 //       HUnaryMathOperation
 //       HUnaryPredicate
@@ -181,7 +183,6 @@ class LChunkBuilder;
   V(BitXor) \
   V(BlockEntry) \
   V(BoundsCheck) \
-  V(Branch) \
   V(CallConstantFunction) \
   V(CallFunction) \
   V(CallGlobal) \
@@ -200,7 +201,7 @@ class LChunkBuilder;
   V(CheckSmi) \
   V(Compare) \
   V(CompareJSObjectEq) \
-  V(CompareMapAndBranch) \
+  V(CompareMap) \
   V(Constant) \
   V(DeleteProperty) \
   V(Deoptimize) \
@@ -248,7 +249,10 @@ class LChunkBuilder;
   V(StoreKeyedGeneric) \
   V(StoreNamedField) \
   V(StoreNamedGeneric) \
+  V(StringCharCodeAt) \
+  V(StringLength) \
   V(Sub) \
+  V(Test) \
   V(Throw) \
   V(Typeof) \
   V(TypeofIs) \
@@ -811,44 +815,55 @@ class HBlockEntry: public HInstruction {
 class HControlInstruction: public HInstruction {
  public:
-  virtual HBasicBlock* FirstSuccessor() const { return NULL; }
-  virtual HBasicBlock* SecondSuccessor() const { return NULL; }
+  HControlInstruction(HBasicBlock* first, HBasicBlock* second)
+      : first_successor_(first), second_successor_(second) {
+  }
+
+  HBasicBlock* FirstSuccessor() const { return first_successor_; }
+  HBasicBlock* SecondSuccessor() const { return second_successor_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;

   DECLARE_INSTRUCTION(ControlInstruction)
+
+ private:
+  HBasicBlock* first_successor_;
+  HBasicBlock* second_successor_;
 };


 class HDeoptimize: public HControlInstruction {
  public:
+  HDeoptimize() : HControlInstruction(NULL, NULL) { }
+
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
 };


 class HGoto: public HControlInstruction {
  public:
-  explicit HGoto(HBasicBlock* destination)
-      : destination_(destination),
-        include_stack_check_(false) {}
+  explicit HGoto(HBasicBlock* target)
+      : HControlInstruction(target, NULL), include_stack_check_(false) {
+  }

-  virtual HBasicBlock* FirstSuccessor() const { return destination_; }
   void set_include_stack_check(bool include_stack_check) {
     include_stack_check_ = include_stack_check;
   }
   bool include_stack_check() const { return include_stack_check_; }

-  virtual void PrintDataTo(StringStream* stream) const;
-
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")

  private:
-  HBasicBlock* destination_;
   bool include_stack_check_;
 };


 class HUnaryControlInstruction: public HControlInstruction {
  public:
-  explicit HUnaryControlInstruction(HValue* value) {
+  explicit HUnaryControlInstruction(HValue* value,
+                                    HBasicBlock* true_target,
+                                    HBasicBlock* false_target)
+      : HControlInstruction(true_target, false_target) {
     SetOperandAt(0, value);
   }
@@ -856,6 +871,8 @@ class HUnaryControlInstruction: public HControlInstruction {
     return Representation::Tagged();
   }

+  virtual void PrintDataTo(StringStream* stream) const;
+
   HValue* value() const { return OperandAt(0); }
   virtual int OperandCount() const { return 1; }
   virtual HValue* OperandAt(int index) const { return operands_[index]; }
@@ -872,73 +889,50 @@
 };


-class HBranch: public HUnaryControlInstruction {
+class HTest: public HUnaryControlInstruction {
  public:
-  HBranch(HBasicBlock* true_destination,
-          HBasicBlock* false_destination,
-          HValue* boolean_value)
-      : HUnaryControlInstruction(boolean_value),
-        true_destination_(true_destination),
-        false_destination_(false_destination) {
-    ASSERT(true_destination != NULL && false_destination != NULL);
+  HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
+      : HUnaryControlInstruction(value, true_target, false_target) {
+    ASSERT(true_target != NULL && false_target != NULL);
   }

   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }

-  virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
-  virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
-
-  virtual void PrintDataTo(StringStream* stream) const;
-
-  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-
- private:
-  HBasicBlock* true_destination_;
-  HBasicBlock* false_destination_;
+  DECLARE_CONCRETE_INSTRUCTION(Test, "test")
 };


-class HCompareMapAndBranch: public HUnaryControlInstruction {
+class HCompareMap: public HUnaryControlInstruction {
  public:
-  HCompareMapAndBranch(HValue* result,
-                       Handle<Map> map,
-                       HBasicBlock* true_destination,
-                       HBasicBlock* false_destination)
-      : HUnaryControlInstruction(result),
-        map_(map),
-        true_destination_(true_destination),
-        false_destination_(false_destination) {
-    ASSERT(true_destination != NULL);
-    ASSERT(false_destination != NULL);
+  HCompareMap(HValue* value,
+              Handle<Map> map,
+              HBasicBlock* true_target,
+              HBasicBlock* false_target)
+      : HUnaryControlInstruction(value, true_target, false_target),
+        map_(map) {
+    ASSERT(true_target != NULL);
+    ASSERT(false_target != NULL);
     ASSERT(!map.is_null());
   }

-  virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
-  virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
-
-  HBasicBlock* true_destination() const { return true_destination_; }
-  HBasicBlock* false_destination() const { return false_destination_; }
-
   virtual void PrintDataTo(StringStream* stream) const;

   Handle<Map> map() const { return map_; }

-  DECLARE_CONCRETE_INSTRUCTION(CompareMapAndBranch, "compare_map_and_branch")
+  DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")

  private:
   Handle<Map> map_;
-  HBasicBlock* true_destination_;
-  HBasicBlock* false_destination_;
 };


 class HReturn: public HUnaryControlInstruction {
  public:
-  explicit HReturn(HValue* result) : HUnaryControlInstruction(result) { }
-
-  virtual void PrintDataTo(StringStream* stream) const;
+  explicit HReturn(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) {
+  }

   DECLARE_CONCRETE_INSTRUCTION(Return, "return")
 };
@@ -946,9 +940,8 @@ class HReturn: public HUnaryControlInstruction {
 class HThrow: public HUnaryControlInstruction {
  public:
-  explicit HThrow(HValue* value) : HUnaryControlInstruction(value) { }
-
-  virtual void PrintDataTo(StringStream* stream) const;
+  explicit HThrow(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) { }

   DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
 };
@@ -1579,6 +1572,12 @@ class HCheckInstanceType: public HUnaryOperation {
     ASSERT(first <= last);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
+    if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) ||
+        (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) {
+      // A particular string instance type can change because of GC or
+      // externalization, but the value still remains a string.
+      SetFlag(kDependsOnMaps);
+    }
   }

   virtual bool IsCheckInstruction() const { return true; }
@@ -2033,16 +2032,26 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
  public:
   HBitwiseBinaryOperation(HValue* left, HValue* right)
       : HBinaryOperation(left, right) {
-    // Default to truncating, Integer32, UseGVN.
-    set_representation(Representation::Integer32());
-    SetFlag(kTruncatingToInt32);
-    SetFlag(kUseGVN);
+    set_representation(Representation::Tagged());
+    SetFlag(kFlexibleRepresentation);
+    SetFlagMask(AllSideEffects());
   }

   virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::Integer32();
+    return representation();
   }

+  virtual void RepresentationChanged(Representation to) {
+    if (!to.IsTagged()) {
+      ASSERT(to.IsInteger32());
+      ClearFlagMask(AllSideEffects());
+      SetFlag(kTruncatingToInt32);
+      SetFlag(kUseGVN);
+    }
+  }
+
+  HType CalculateInferredType() const;
+
   DECLARE_INSTRUCTION(BitwiseBinaryOperation)
 };
@@ -2937,6 +2946,61 @@ class HStoreKeyedGeneric: public HStoreKeyed {
 };


+class HStringCharCodeAt: public HBinaryOperation {
+ public:
+  HStringCharCodeAt(HValue* string, HValue* index)
+      : HBinaryOperation(string, index) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The index is supposed to be Integer32.
+    return (index == 1) ? Representation::Integer32()
+                        : Representation::Tagged();
+  }
+
+  virtual bool DataEquals(HValue* other) const { return true; }
+
+  HValue* string() const { return OperandAt(0); }
+  HValue* index() const { return OperandAt(1); }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
+
+ protected:
+  virtual Range* InferRange() {
+    return new Range(0, String::kMaxUC16CharCode);
+  }
+};
+
+
+class HStringLength: public HUnaryOperation {
+ public:
+  explicit HStringLength(HValue* string) : HUnaryOperation(string) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() const {
+    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+    return HType::Smi();
+  }
+
+  virtual bool DataEquals(HValue* other) const { return true; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
+
+ protected:
+  virtual Range* InferRange() {
+    return new Range(0, String::kMaxLength);
+  }
+};
+
+
 class HMaterializedLiteral: public HInstruction {
  public:
   HMaterializedLiteral(int index, int depth)
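One reason the new HStringCharCodeAt and HStringLength instructions override InferRange: advertising a tight result range lets later passes prove comparisons and bounds checks statically redundant. A sketch of the idea (the constants stand in for String::kMaxUC16CharCode and a Smi bound):

    // Sketch: a tight advertised range makes downstream checks foldable.
    #include <cstdio>

    struct Range {
      int lower, upper;
      bool IncludedIn(const Range& other) const {
        return lower >= other.lower && upper <= other.upper;
      }
    };

    int main() {
      Range char_code{0, 0xFFFF};    // HStringCharCodeAt::InferRange()
      Range smi{0, (1 << 30) - 1};   // Range a Smi-typed value fits in.
      // A check like "result fits in a smi" is statically true here.
      std::printf("check removable: %d\n", char_code.IncludedIn(smi));
    }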

168
deps/v8/src/hydrogen.cc

@@ -34,6 +34,7 @@
 #include "lithium-allocator.h"
 #include "parser.h"
 #include "scopes.h"
+#include "stub-cache.h"

 #if V8_TARGET_ARCH_IA32
 #include "ia32/lithium-codegen-ia32.h"
@@ -504,19 +505,15 @@ HConstant* HGraph::GetConstantFalse() {
 void HSubgraph::AppendOptional(HSubgraph* graph,
                                bool on_true_branch,
-                               HValue* boolean_value) {
+                               HValue* value) {
   ASSERT(HasExit() && graph->HasExit());
   HBasicBlock* other_block = graph_->CreateBasicBlock();
   HBasicBlock* join_block = graph_->CreateBasicBlock();

-  HBasicBlock* true_branch = other_block;
-  HBasicBlock* false_branch = graph->entry_block();
-  if (on_true_branch) {
-    true_branch = graph->entry_block();
-    false_branch = other_block;
-  }
-
-  exit_block_->Finish(new HBranch(true_branch, false_branch, boolean_value));
+  HTest* test = on_true_branch
+      ? new HTest(value, graph->entry_block(), other_block)
+      : new HTest(value, other_block, graph->entry_block());
+  exit_block_->Finish(test);
   other_block->Goto(join_block);
   graph->exit_block()->Goto(join_block);
   exit_block_ = join_block;
@@ -934,7 +931,7 @@ class HRangeAnalysis BASE_EMBEDDED {
  private:
   void TraceRange(const char* msg, ...);
   void Analyze(HBasicBlock* block);
-  void InferControlFlowRange(HBranch* branch, HBasicBlock* dest);
+  void InferControlFlowRange(HTest* test, HBasicBlock* dest);
   void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
   void InferPhiRange(HPhi* phi);
   void InferRange(HValue* value);
@@ -970,8 +967,8 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) {
   // Infer range based on control flow.
   if (block->predecessors()->length() == 1) {
     HBasicBlock* pred = block->predecessors()->first();
-    if (pred->end()->IsBranch()) {
-      InferControlFlowRange(HBranch::cast(pred->end()), block);
+    if (pred->end()->IsTest()) {
+      InferControlFlowRange(HTest::cast(pred->end()), block);
     }
   }
@@ -997,14 +994,12 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) {
 }

-void HRangeAnalysis::InferControlFlowRange(HBranch* branch, HBasicBlock* dest) {
-  ASSERT(branch->FirstSuccessor() == dest || branch->SecondSuccessor() == dest);
-  ASSERT(branch->FirstSuccessor() != dest || branch->SecondSuccessor() != dest);
-
-  if (branch->value()->IsCompare()) {
-    HCompare* compare = HCompare::cast(branch->value());
+void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
+  ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
+  if (test->value()->IsCompare()) {
+    HCompare* compare = HCompare::cast(test->value());
     Token::Value op = compare->token();
-    if (branch->SecondSuccessor() == dest) {
+    if (test->SecondSuccessor() == dest) {
       op = Token::NegateCompareOp(op);
     }
     Token::Value inverted_op = Token::InvertCompareOp(op);
@@ -2067,8 +2062,8 @@ void TestContext::BuildBranch(HValue* value) {
   HGraphBuilder* builder = owner();
   HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
-  HBranch* branch = new HBranch(empty_true, empty_false, value);
-  builder->CurrentBlock()->Finish(branch);
+  HTest* test = new HTest(value, empty_true, empty_false);
+  builder->CurrentBlock()->Finish(test);

   HValue* const no_return_value = NULL;
   HBasicBlock* true_target = if_true();
@@ -2596,9 +2591,9 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
       prev_graph->exit_block()->Finish(new HGoto(subgraph->entry_block()));
     } else {
       HBasicBlock* empty = graph()->CreateBasicBlock();
-      prev_graph->exit_block()->Finish(new HBranch(empty,
-                                                   subgraph->entry_block(),
-                                                   prev_compare_inst));
+      prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
+                                                 empty,
+                                                 subgraph->entry_block()));
     }

     // Build instructions for current subgraph.
@@ -2617,9 +2612,9 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
   if (prev_graph != current_subgraph_) {
     last_false_block = graph()->CreateBasicBlock();
     HBasicBlock* empty = graph()->CreateBasicBlock();
-    prev_graph->exit_block()->Finish(new HBranch(empty,
-                                                 last_false_block,
-                                                 prev_compare_inst));
+    prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
+                                               empty,
+                                               last_false_block));
   }

   // If we have a non-smi compare clause, we deoptimize after trying
@@ -2702,8 +2697,8 @@ void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
   HBasicBlock* osr_entry = graph()->CreateBasicBlock();
   HValue* true_value = graph()->GetConstantTrue();
-  HBranch* branch = new HBranch(non_osr_entry, osr_entry, true_value);
-  exit_block()->Finish(branch);
+  HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
+  exit_block()->Finish(test);

   HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
   non_osr_entry->Goto(loop_predecessor);
@@ -3105,11 +3100,11 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
     (i == (maps->length() - 1))
         ? subgraphs->last()
         : map_compare_subgraphs.last();
-    current_subgraph_->exit_block()->Finish(
-        new HCompareMapAndBranch(receiver,
-                                 maps->at(i),
-                                 subgraphs->at(i)->entry_block(),
-                                 else_subgraph->entry_block()));
+    HCompareMap* compare = new HCompareMap(receiver,
+                                           maps->at(i),
+                                           subgraphs->at(i)->entry_block(),
+                                           else_subgraph->entry_block());
+    current_subgraph_->exit_block()->Finish(compare);
     map_compare_subgraphs.Add(subgraph);
   }
@@ -3117,11 +3112,11 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
   AddInstruction(new HCheckNonSmi(receiver));
   HSubgraph* else_subgraph =
       (maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
-  current_subgraph_->exit_block()->Finish(
-      new HCompareMapAndBranch(receiver,
-                               Handle<Map>(maps->first()),
-                               subgraphs->first()->entry_block(),
-                               else_subgraph->entry_block()));
+  HCompareMap* compare = new HCompareMap(receiver,
+                                         Handle<Map>(maps->first()),
+                                         subgraphs->first()->entry_block(),
+                                         else_subgraph->entry_block());
+  current_subgraph_->exit_block()->Finish(compare);

   // Join all the call subgraphs in a new basic block and make
   // this basic block the current basic block.
@@ -4075,9 +4070,8 @@ bool HGraphBuilder::TryInline(Call* expr) {
     // TODO(3168478): refactor to avoid this.
     HBasicBlock* empty_true = graph()->CreateBasicBlock();
     HBasicBlock* empty_false = graph()->CreateBasicBlock();
-    HBranch* branch =
-        new HBranch(empty_true, empty_false, return_value);
-    body->exit_block()->Finish(branch);
+    HTest* test = new HTest(return_value, empty_true, empty_false);
+    body->exit_block()->Finish(test);

     HValue* const no_return_value = NULL;
     empty_true->AddLeaveInlined(no_return_value, test_context->if_true());
@@ -4146,12 +4140,29 @@ void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
 }

-bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
+bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
+                                             HValue* receiver,
+                                             Handle<Map> receiver_map,
+                                             CheckType check_type) {
+  ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
   // Try to inline calls like Math.* as operations in the calling function.
-  if (!expr->target()->shared()->IsBuiltinMathFunction()) return false;
+  if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
   BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
   switch (id) {
+    case kStringCharCodeAt:
+      if (argument_count == 2 && check_type == STRING_CHECK) {
+        HValue* index = Pop();
+        HValue* string = Pop();
+        ASSERT(!expr->holder().is_null());
+        AddInstruction(new HCheckPrototypeMaps(
+            oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
+            expr->holder()));
+        HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
     case kMathRound:
     case kMathFloor:
     case kMathAbs:
@@ -4159,7 +4170,8 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
     case kMathLog:
     case kMathSin:
     case kMathCos:
-      if (argument_count == 2) {
+      if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
+        AddCheckConstantFunction(expr, receiver, receiver_map, true);
         HValue* argument = Pop();
         Drop(1);  // Receiver.
         HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
@@ -4169,7 +4181,8 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
       }
       break;
     case kMathPow:
-      if (argument_count == 3) {
+      if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
+        AddCheckConstantFunction(expr, receiver, receiver_map, true);
         HValue* right = Pop();
         HValue* left = Pop();
         Pop();  // Pop receiver.
@@ -4179,8 +4192,6 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
           double exponent = HConstant::cast(right)->DoubleValue();
           if (exponent == 0.5) {
             result = new HUnaryMathOperation(left, kMathPowHalf);
-            ast_context()->ReturnInstruction(result, expr->id());
-            return true;
           } else if (exponent == -0.5) {
             HConstant* double_one =
                 new HConstant(Handle<Object>(Smi::FromInt(1)),
@@ -4193,22 +4204,18 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
             // an environment simulation here.
             ASSERT(!square_root->HasSideEffects());
             result = new HDiv(double_one, square_root);
-            ast_context()->ReturnInstruction(result, expr->id());
-            return true;
           } else if (exponent == 2.0) {
             result = new HMul(left, left);
-            ast_context()->ReturnInstruction(result, expr->id());
-            return true;
           }
         } else if (right->IsConstant() &&
                    HConstant::cast(right)->HasInteger32Value() &&
                    HConstant::cast(right)->Integer32Value() == 2) {
           result = new HMul(left, left);
-          ast_context()->ReturnInstruction(result, expr->id());
-          return true;
         }

-        result = new HPower(left, right);
+        if (result == NULL) {
+          result = new HPower(left, right);
+        }
         ast_context()->ReturnInstruction(result, expr->id());
         return true;
       }
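The kMathPow hunk above centralizes the special cases so every path funnels into a single ReturnInstruction call, with HPower as the generic fallback when no strength reduction applies. The same logic as plain scalar code (a sketch; JS edge cases such as Math.pow(-0, 0.5) are ignored here):

    // Scalar equivalent of the strength reduction above: special-case the
    // exponents the compiler recognizes, fall back to the generic pow()
    // (the HPower instruction) otherwise.
    #include <cmath>
    #include <cstdio>

    double OptimizedPow(double x, double exponent) {
      if (exponent == 0.5) return std::sqrt(x);         // kMathPowHalf
      if (exponent == -0.5) return 1.0 / std::sqrt(x);  // HDiv(1, sqrt(x))
      if (exponent == 2.0) return x * x;                // HMul(x, x)
      return std::pow(x, exponent);                     // Generic fallback.
    }

    int main() {
      std::printf("%g %g %g %g\n", OptimizedPow(9, 0.5),
                  OptimizedPow(4, -0.5), OptimizedPow(3, 2.0),
                  OptimizedPow(2, 10));
    }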
@@ -4263,6 +4270,13 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
 }

+static bool HasCustomCallGenerator(Handle<JSFunction> function) {
+  SharedFunctionInfo* info = function->shared();
+  return info->HasBuiltinFunctionId() &&
+      CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
+}
+
+
 void HGraphBuilder::VisitCall(Call* expr) {
   Expression* callee = expr->expression();
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
@@ -4309,12 +4323,26 @@ void HGraphBuilder::VisitCall(Call* expr) {
     expr->RecordTypeFeedback(oracle());
     ZoneMapList* types = expr->GetReceiverTypes();

-    if (expr->IsMonomorphic() && expr->check_type() == RECEIVER_MAP_CHECK) {
-      AddCheckConstantFunction(expr, receiver, types->first(), true);
+    if (expr->IsMonomorphic()) {
+      Handle<Map> receiver_map =
+          (types == NULL) ? Handle<Map>::null() : types->first();
+      if (TryInlineBuiltinFunction(expr,
+                                   receiver,
+                                   receiver_map,
+                                   expr->check_type())) {
+        return;
+      }

-      if (TryMathFunctionInline(expr)) {
-        return;
-      } else if (TryInline(expr)) {
+      if (HasCustomCallGenerator(expr->target()) ||
+          expr->check_type() != RECEIVER_MAP_CHECK) {
+        // When the target has a custom call IC generator, use the IC,
+        // because it is likely to generate better code.  Also use the
+        // IC when a primitive receiver check is required.
+        call = new HCallNamed(name, argument_count);
+      } else {
+        AddCheckConstantFunction(expr, receiver, receiver_map, true);
+
+        if (TryInline(expr)) {
           if (subgraph()->HasExit()) {
             HValue* return_value = Pop();
             // If we inlined a function in a test context then we need to emit
@@ -4332,7 +4360,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
           CHECK_BAILOUT;
           call = new HCallConstantFunction(expr->target(), argument_count);
         }
+      }
     } else if (types != NULL && types->length() > 1) {
       ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
       HandlePolymorphicCallNamed(expr, receiver, types, name);
@@ -4720,6 +4748,18 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
 }

+HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
+                                                        HValue* index) {
+  AddInstruction(new HCheckNonSmi(string));
+  AddInstruction(new HCheckInstanceType(
+      string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
+  HStringLength* length = new HStringLength(string);
+  AddInstruction(length);
+  AddInstruction(new HBoundsCheck(index, length));
+  return new HStringCharCodeAt(string, index);
+}
+
+
 HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
                                                   HValue* left,
                                                   HValue* right) {
@@ -4773,7 +4813,12 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
   if (FLAG_trace_representation) {
     PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
   }
-  AssumeRepresentation(instr, ToRepresentation(info));
+  Representation rep = ToRepresentation(info);
+  // We only generate either int32 or generic tagged bitwise operations.
+  if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
+    rep = Representation::Integer32();
+  }
+  AssumeRepresentation(instr, rep);
   return instr;
 }
@@ -4854,7 +4899,8 @@ void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
                              graph_->GetMaximumValueID());
     }
     value->ChangeRepresentation(r);
-    // The representation of the value is dictated by type feedback.
+    // The representation of the value is dictated by type feedback and
+    // will not be changed later.
     value->ClearFlag(HValue::kFlexibleRepresentation);
   } else if (FLAG_trace_representation) {
     PrintF("No representation assumed\n");
@@ -5129,7 +5175,11 @@ void HGraphBuilder::GenerateSetValueOf(int argument_count, int ast_id) {

 // Fast support for charCodeAt(n).
 void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) {
-  BAILOUT("inlined runtime function: StringCharCodeAt");
+  ASSERT(argument_count == 2);
+  HValue* index = Pop();
+  HValue* string = Pop();
+  HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
+  ast_context()->ReturnInstruction(result, ast_id);
 }
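The new BuildStringCharCodeAt helper emits a fixed guard sequence (non-smi check, string instance-type check, length load, bounds check) before the raw code-unit load. The same sequence as plain scalar code, with simplified stand-in types:

    // Scalar analogue of the guards BuildStringCharCodeAt emits: check the
    // receiver is a string, load its length, bounds-check the index, then
    // perform the raw 16-bit load.
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    uint16_t CharCodeAt(const std::u16string* s, int32_t index) {
      // HCheckNonSmi / HCheckInstanceType: receiver must be a string.
      if (s == nullptr) throw std::runtime_error("check failed: non-string");
      // HStringLength: a string's length always fits in a small integer.
      int32_t length = static_cast<int32_t>(s->size());
      // HBoundsCheck: one unsigned compare catches index < 0 and
      // index >= length at once.
      if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(length)) {
        throw std::out_of_range("check failed: index");
      }
      return (*s)[index];  // HStringCharCodeAt: the raw code-unit load.
    }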

7
deps/v8/src/hydrogen.h

@@ -748,7 +748,10 @@ class HGraphBuilder: public AstVisitor {
   bool TryArgumentsAccess(Property* expr);
   bool TryCallApply(Call* expr);
   bool TryInline(Call* expr);
-  bool TryMathFunctionInline(Call* expr);
+  bool TryInlineBuiltinFunction(Call* expr,
+                                HValue* receiver,
+                                Handle<Map> receiver_map,
+                                CheckType check_type);
   void TraceInline(Handle<JSFunction> target, bool result);

   void HandleGlobalVariableAssignment(Variable* var,
@@ -772,6 +775,8 @@ class HGraphBuilder: public AstVisitor {
                                 ZoneMapList* types,
                                 Handle<String> name);

+  HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
+                                           HValue* index);
   HInstruction* BuildBinaryOperation(BinaryOperation* expr,
                                      HValue* left,
                                      HValue* right);

10
deps/v8/src/ia32/assembler-ia32-inl.h

@@ -49,20 +49,24 @@ void RelocInfo::apply(intptr_t delta) {
   if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p -= delta;  // Relocate entry.
+    CPU::FlushICache(p, sizeof(uint32_t));
   } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
     // Special handling of js_return when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
     *p -= delta;  // Relocate entry.
+    CPU::FlushICache(p, sizeof(uint32_t));
   } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
     // Special handling of a debug break slot when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
     *p -= delta;  // Relocate entry.
+    CPU::FlushICache(p, sizeof(uint32_t));
   } else if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p += delta;  // Relocate entry.
+    CPU::FlushICache(p, sizeof(uint32_t));
   }
 }
@@ -111,6 +115,7 @@ Object** RelocInfo::target_object_address() {
 void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
+  CPU::FlushICache(pc_, sizeof(Address));
 }
@@ -141,6 +146,7 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
+  CPU::FlushICache(pc_, sizeof(Address));
 }
@@ -189,12 +195,14 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     visitor->VisitPointer(target_object_address());
+    CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(target_reference_address());
+    CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (Debug::has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
@@ -214,12 +222,14 @@ void RelocInfo::Visit() {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitPointer(target_object_address());
+    CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
+    CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (Debug::has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
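The assembler-ia32-inl.h hunks add an instruction-cache flush after every spot that patches code memory. On ia32 the flush is cheap and largely a formality, but the pattern is what keeps architectures with split caches (notably ARM) from executing stale bytes. A sketch of the patch-then-flush discipline using the GCC/Clang builtin (a real JIT would also manage page permissions):

    // Sketch: always flush the i-cache over the range you just patched.
    #include <cstdint>
    #include <cstring>

    void PatchWord(uint8_t* pc, int32_t new_value) {
      std::memcpy(pc, &new_value, sizeof(new_value));  // Patch the code.
      __builtin___clear_cache(                         // Then flush that
          reinterpret_cast<char*>(pc),                 // range so the CPU
          reinterpret_cast<char*>(pc) + sizeof(new_value));  // refetches it.
    }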

7
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -91,8 +91,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
 void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Try to allocate the context in new space.
   Label gc;
-  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+  __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
                         eax, ebx, ecx, &gc, TAG_OBJECT);

   // Get the function from the stack.
@@ -101,7 +100,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Setup the object header.
   __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
   __ mov(FieldOperand(eax, Context::kLengthOffset),
-         Immediate(Smi::FromInt(length)));
+         Immediate(Smi::FromInt(slots_)));

   // Setup the fixed slots.
   __ Set(ebx, Immediate(0));  // Set to NULL.
@@ -119,7 +118,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Initialize the rest of the slots to undefined.
   __ mov(ebx, Factory::undefined_value());
-  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
     __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
   }
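This stub change works together with the codegen and full-codegen hunks below: slots_ now already includes Context::MIN_CONTEXT_SLOTS, so callers stop subtracting the minimum and the stub stops re-adding it, keeping the allocation size, the stored length, and the fill loop in agreement. The accounting in plain arithmetic (constants are illustrative stand-ins):

    // Sketch of the new slot accounting: the count passed in already
    // includes the minimum context slots, so no re-add is needed anywhere.
    #include <cassert>

    constexpr int kMinContextSlots = 5;  // Stand-in for MIN_CONTEXT_SLOTS.
    constexpr int kPointerSize = 4;      // ia32
    constexpr int kHeaderSize = 8;       // Stand-in for FixedArray header.

    int AllocationSize(int slots) {
      assert(slots >= kMinContextSlots);
      return slots * kPointerSize + kHeaderSize;  // No "+ minimum" re-add.
    }

    int main() {
      // A context with 3 user slots: the caller passes 5 + 3 = 8 directly,
      // and that same number is the stored length and the loop bound.
      return AllocationSize(kMinContextSlots + 3) == 8 * 4 + 8 ? 0 : 1;
    }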

2
deps/v8/src/ia32/codegen-ia32.cc

@@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
     frame_->AllocateStackSlots();

     // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    int heap_slots = scope()->num_heap_slots();
     if (heap_slots > 0) {
       Comment cmnt(masm_, "[ allocate local context");
       // Allocate local context.

2
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -142,7 +142,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
   bool function_in_register = true;

   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = scope()->num_heap_slots();
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is still in edi.

342
deps/v8/src/ia32/ic-ia32.cc

@ -718,160 +718,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
} }
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, failed_allocation;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
__ CmpInstanceType(ecx, JS_OBJECT_TYPE);
__ j(not_equal, &slow, not_taken);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow, not_taken);
// eax: key, known to be a smi.
// edx: receiver, known to be a JSObject.
// ebx: elements object, known to be an external array.
// Check that the index is in range.
__ mov(ecx, eax);
__ SmiUntag(ecx); // Untag the index.
__ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalShortArray:
__ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
case kExternalFloatArray:
__ fld_s(Operand(ebx, ecx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// ecx: value
// For floating-point array type:
// FP(0): value
if (array_type == kExternalIntArray ||
array_type == kExternalUnsignedIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
__ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
__ test(ecx, Immediate(0xC0000000));
__ j(not_zero, &box_int);
}
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (array_type == kExternalIntArray) {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
__ push(ecx);
__ fild_d(Operand(esp, 0));
__ pop(ecx);
__ pop(ecx);
}
// FP(0): value
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
}
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
__ ffree();
__ fincstp();
// Fall through to slow case.
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- eax : key // -- eax : key
@ -1031,194 +877,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
} }
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, check_heap_number;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
// Get the instance type from the map of the receiver.
__ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: value
// edx: receiver, a JSObject
// ecx: key, a smi
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
&slow, true);
// Check that the index is in range.
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ test(eax, Immediate(kSmiTagMask));
__ j(not_equal, &check_heap_number);
// smi case
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
__ fstp_s(Operand(edi, ebx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
__ ret(0); // Return the original value.
__ bind(&check_heap_number);
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// edi: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
} else {
// Need to perform float-to-int conversion.
// Test the top of the FP stack for NaN.
Label is_nan;
__ fucomi(0);
__ j(parity_even, &is_nan);
if (array_type != kExternalUnsignedIntArray) {
__ push(ecx); // Make room on stack
__ fistp_s(Operand(esp, 0));
__ pop(ecx);
} else {
// fistp stores values as signed integers.
// To represent the entire range, we need to store as a 64-bit
// int and discard the high 32 bits.
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ fistp_d(Operand(esp, 0));
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
}
// ecx: untagged integer value
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
// We also need to explicitly check for +/-Infinity. These are
// converted to MIN_INT, but we need to be careful not to
// confuse with legal uses of MIN_INT.
Label not_infinity;
// This test would apparently detect both NaN and Infinity,
// but we've already checked for NaN using the FPU hardware
// above.
__ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6));
__ and_(edx, 0x7FF0);
__ cmp(edx, 0x7FF0);
__ j(not_equal, &not_infinity);
__ mov(ecx, 0);
__ bind(&not_infinity);
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
}
default:
UNREACHABLE();
break;
}
__ ret(0); // Return original value.
__ bind(&is_nan);
__ ffree();
__ fincstp();
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), 0);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ Set(ecx, Immediate(0));
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(edi, ebx, times_4, 0), Immediate(0));
break;
default:
UNREACHABLE();
break;
}
__ ret(0); // Return the original value.
}
// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
}
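For reference, a minimal C++ sketch (not part of the diff; the helper name is hypothetical) of the conversion policy the store path above implements for integer external arrays: NaN and +/-Infinity are stored as zero, per the WebGL-motivated comment in the code, and finite doubles are converted to an integer.

#include <cmath>
#include <cstdint>

// Illustrative only: values far outside the 64-bit range are not handled
// by this sketch.
static int32_t ClampedHeapNumberToInt32(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;  // reproducible behavior
  return static_cast<int32_t>(static_cast<int64_t>(value));
}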
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,

242
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -1285,11 +1285,11 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
  NearLabel done;
  Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
+  __ mov(ToRegister(result), Factory::true_value());
  __ j(cc, &done);
  __ bind(&unordered);
-  __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
+  __ mov(ToRegister(result), Factory::false_value());
  __ bind(&done);
}
@ -1320,10 +1320,10 @@ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
  Register result = ToRegister(instr->result());
  __ cmp(left, Operand(right));
-  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ mov(result, Factory::true_value());
  NearLabel done;
  __ j(equal, &done);
-  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ mov(result, Factory::false_value());
  __ bind(&done);
}
@ -1348,10 +1348,10 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
  __ cmp(reg, Factory::null_value());
  if (instr->is_strict()) {
-    __ mov(result, Handle<Object>(Heap::true_value()));
+    __ mov(result, Factory::true_value());
    NearLabel done;
    __ j(equal, &done);
-    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ mov(result, Factory::false_value());
    __ bind(&done);
  } else {
    NearLabel true_value, false_value, done;
@ -1368,10 +1368,10 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    __ j(not_zero, &true_value);
    __ bind(&false_value);
-    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ mov(result, Factory::false_value());
    __ jmp(&done);
    __ bind(&true_value);
-    __ mov(result, Handle<Object>(Heap::true_value()));
+    __ mov(result, Factory::true_value());
    __ bind(&done);
  }
}
@ -1447,11 +1447,11 @@ void LCodeGen::DoIsObject(LIsObject* instr) {
  __ j(true_cond, &is_true);
  __ bind(&is_false);
-  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ mov(result, Factory::false_value());
  __ jmp(&done);
  __ bind(&is_true);
-  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ mov(result, Factory::true_value());
  __ bind(&done);
}
@ -1479,10 +1479,10 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ test(input, Immediate(kSmiTagMask));
-  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ mov(result, Factory::true_value());
  NearLabel done;
  __ j(zero, &done);
-  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ mov(result, Factory::false_value());
  __ bind(&done);
}
@ -1507,7 +1507,6 @@ static InstanceType TestType(HHasInstanceType* instr) {
}
static Condition BranchCondition(HHasInstanceType* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
@ -1529,10 +1528,10 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
  __ j(zero, &is_false);
  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
-  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ mov(result, Factory::true_value());
  __ jmp(&done);
  __ bind(&is_false);
-  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ mov(result, Factory::false_value());
  __ bind(&done);
}
@ -1559,12 +1558,12 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
  Register result = ToRegister(instr->result());
  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ mov(result, Factory::true_value());
  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  NearLabel done;
  __ j(not_zero, &done);
-  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ mov(result, Factory::false_value());
  __ bind(&done);
}
@ -1653,11 +1652,11 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
  __ j(not_equal, &is_false);
  __ bind(&is_true);
-  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ mov(result, Factory::true_value());
  __ jmp(&done);
  __ bind(&is_false);
-  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ mov(result, Factory::false_value());
  __ bind(&done);
}
@ -2221,11 +2220,12 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Label negative;
  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
-  // Check the sign of the argument. If the argument is positive,
-  // just return it.
+  // Check the sign of the argument. If the argument is positive, just
+  // return it. We do not need to patch the stack since |input| and
+  // |result| are the same register and |input| will be restored
+  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(not_zero, &negative);
-  __ mov(tmp, input_reg);
  __ jmp(&done);
  __ bind(&negative);
@ -2252,14 +2252,25 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
-  __ bind(&done);
  __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
+  __ bind(&done);
  __ PopSafepointRegisters();
}
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
__ test(input_reg, Operand(input_reg));
Label is_positive;
__ j(not_sign, &is_positive);
__ neg(input_reg);
__ test(input_reg, Operand(input_reg));
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
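The deoptimization in EmitIntegerMathAbs guards the one case 32-bit negation cannot handle: negating kMinInt overflows back to kMinInt, so the result is still negative. An illustrative check, not part of the diff:

#include <cstdint>

// Illustrative only: abs() is representable in int32 for every input except
// INT32_MIN, where neg leaves the sign bit set and the code must deopt.
static bool Int32AbsWouldOverflow(int32_t value) {
  return value == INT32_MIN;  // -INT32_MIN == INT32_MIN in two's complement
}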
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
@ -2284,31 +2295,15 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
    __ subsd(scratch, input_reg);
    __ pand(input_reg, scratch);
  } else if (r.IsInteger32()) {
-    Register input_reg = ToRegister(instr->InputAt(0));
-    __ test(input_reg, Operand(input_reg));
-    Label is_positive;
-    __ j(not_sign, &is_positive);
-    __ neg(input_reg);
-    __ test(input_reg, Operand(input_reg));
-    DeoptimizeIf(negative, instr->environment());
-    __ bind(&is_positive);
+    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
-    Label not_smi;
    Register input_reg = ToRegister(instr->InputAt(0));
    // Smi check.
    __ test(input_reg, Immediate(kSmiTagMask));
    __ j(not_zero, deferred->entry());
-    __ test(input_reg, Operand(input_reg));
-    Label is_positive;
-    __ j(not_sign, &is_positive);
-    __ neg(input_reg);
-    __ test(input_reg, Operand(input_reg));
-    DeoptimizeIf(negative, instr->environment());
-    __ bind(&is_positive);
+    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
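The double branch above (subsd into a scratch register, then pand) relies on -x differing from x only in its sign bit, assuming scratch was zeroed just before the lines shown in the hunk. A sketch of the same trick in portable C++, illustrative and not part of the diff:

#include <cstdint>
#include <cstring>

// Illustrative only: fabs via (x & -x) on the raw 64-bit bit patterns. The
// two patterns agree everywhere except the sign bit, so AND clears it and
// keeps exponent and mantissa intact.
static double BitwiseAbs(double x) {
  double negated = 0.0 - x;
  uint64_t a, b;
  std::memcpy(&a, &x, sizeof a);
  std::memcpy(&b, &negated, sizeof b);
  uint64_t bits = a & b;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}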
@ -2651,6 +2646,151 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
private:
LStringCharCodeAt* instr_;
};
Register string = ToRegister(instr->string());
Register index = no_reg;
int const_index = -1;
if (instr->index()->IsConstantOperand()) {
const_index = ToInteger32(LConstantOperand::cast(instr->index()));
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (!Smi::IsValid(const_index)) {
// Guaranteed to be out of bounds because of the assert above.
// So the bounds check that must dominate this instruction must
// have deoptimized already.
if (FLAG_debug_code) {
__ Abort("StringCharCodeAt: out of bounds index.");
}
// No code needs to be generated.
return;
}
} else {
index = ToRegister(instr->index());
}
Register result = ToRegister(instr->result());
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
NearLabel flat_string, ascii_string, done;
// Fetch the instance type of the receiver into result register.
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
// We need special handling for non-flat strings.
STATIC_ASSERT(kSeqStringTag == 0);
__ test(result, Immediate(kStringRepresentationMask));
__ j(zero, &flat_string);
// Handle non-flat strings.
__ test(result, Immediate(kIsConsStringMask));
__ j(zero, deferred->entry());
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ cmp(FieldOperand(string, ConsString::kSecondOffset),
Immediate(Factory::empty_string()));
__ j(not_equal, deferred->entry());
// Get the first of the two strings and load its instance type.
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime.
STATIC_ASSERT(kSeqStringTag == 0);
__ test(result, Immediate(kStringRepresentationMask));
__ j(not_zero, deferred->entry());
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
if (instr->index()->IsConstantOperand()) {
__ movzx_w(result,
FieldOperand(string,
SeqTwoByteString::kHeaderSize + 2 * const_index));
} else {
__ movzx_w(result, FieldOperand(string,
index,
times_2,
SeqTwoByteString::kHeaderSize));
}
__ jmp(&done);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
if (instr->index()->IsConstantOperand()) {
__ movzx_b(result, FieldOperand(string,
SeqAsciiString::kHeaderSize + const_index));
} else {
__ movzx_b(result, FieldOperand(string,
index,
times_1,
SeqAsciiString::kHeaderSize));
}
__ bind(&done);
__ bind(deferred->exit());
}
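A simplified model of the fast-path dispatch implemented above, using hypothetical types purely for illustration (this is not V8's real string API): sequential strings are indexed directly, a cons string whose right child is the empty string is unwrapped once, and anything else defers to the runtime.

// Hypothetical, illustrative types only.
enum Shape { SEQ, CONS, OTHER };
struct Str {
  Shape shape;
  Str* first;             // cons only: left child
  bool second_is_empty;   // cons only: right child is the empty string
};

// Returns the sequential string to index directly, or NULL to take the
// deferred runtime path (the string still needs flattening).
static Str* FlatBacking(Str* s) {
  if (s->shape == SEQ) return s;
  if (s->shape == CONS && s->second_is_empty && s->first->shape == SEQ)
    return s->first;
  return NULL;
}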
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, Immediate(0));
__ PushSafepointRegisters();
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
__ push(Immediate(Smi::FromInt(const_index)));
} else {
Register index = ToRegister(instr->index());
__ SmiTag(index);
__ push(index);
}
__ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
RecordSafepointWithRegisters(
instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
if (FLAG_debug_code) {
__ AbortIfNotSmi(eax);
}
__ SmiUntag(eax);
__ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax);
__ PopSafepointRegisters();
}
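The SmiTag/SmiUntag pair used above is a one-bit shift on ia32 (kSmiTag == 0, kSmiTagSize == 1), which is why any valid string index is guaranteed to survive tagging. Illustrative sketch, not part of the diff:

#include <cstdint>

// ia32 smi encoding, illustrative: 31-bit signed payload, low tag bit 0.
static int32_t SmiTagValue(int32_t value) { return value * 2; }
static int32_t SmiUntagValue(int32_t smi) { return smi >> 1; }  // arithmetic shift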
void LCodeGen::DoStringLength(LStringLength* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
__ mov(result, FieldOperand(string, String::kLengthOffset));
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
@ -3077,13 +3217,19 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  InstanceType last = instr->hydrogen()->last();
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-          static_cast<int8_t>(first));
  // If there is only one type in the interval check for equality.
  if (first == last) {
+    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+            static_cast<int8_t>(first));
    DeoptimizeIf(not_equal, instr->environment());
+  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+    // String has a dedicated bit in instance type.
+    __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
+    DeoptimizeIf(not_zero, instr->environment());
  } else {
+    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+            static_cast<int8_t>(first));
    DeoptimizeIf(below, instr->environment());
    // Omit check for the last type.
    if (last != LAST_TYPE) {
@ -3292,11 +3438,11 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
                                              instr->type_literal());
  __ j(final_branch_condition, &true_label);
  __ bind(&false_label);
-  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ mov(result, Factory::false_value());
  __ jmp(&done);
  __ bind(&true_label);
-  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ mov(result, Factory::true_value());
  __ bind(&done);
}
@ -3341,9 +3487,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
    final_branch_condition = below;
  } else if (type_name->Equals(Heap::boolean_symbol())) {
-    __ cmp(input, Handle<Object>(Heap::true_value()));
+    __ cmp(input, Factory::true_value());
    __ j(equal, true_label);
-    __ cmp(input, Handle<Object>(Heap::false_value()));
+    __ cmp(input, Factory::false_value());
    final_branch_condition = equal;
  } else if (type_name->Equals(Heap::undefined_symbol())) {

2
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -92,6 +92,7 @@ class LCodeGen BASE_EMBEDDED {
  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
  void DoDeferredStackCheck(LGoto* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
@ -186,6 +187,7 @@ class LCodeGen BASE_EMBEDDED {
  int ToInteger32(LConstantOperand* op) const;

  // Specific math operations - used from DoUnaryMathOperation.
+  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);

2
deps/v8/src/ia32/lithium-gap-resolver-ia32.h

@ -30,7 +30,7 @@
 #include "v8.h"
-#include "lithium-allocator.h"
+#include "lithium.h"

 namespace v8 {
 namespace internal {

60
deps/v8/src/ia32/lithium-ia32.cc

@ -162,6 +162,12 @@ const char* LArithmeticT::Mnemonic() const {
    case Token::MUL: return "mul-t";
    case Token::MOD: return "mod-t";
    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::SHL: return "sal-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
    default:
      UNREACHABLE();
      return NULL;
@ -739,18 +745,38 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoBit(Token::Value op,
                                   HBitwiseBinaryOperation* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().IsInteger32());
-  ASSERT(instr->right()->representation().IsInteger32());
-  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-  return DefineSameAsFirst(new LBitI(op, left, right));
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(op, left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+    LOperand* left = UseFixed(instr->left(), edx);
+    LOperand* right = UseFixed(instr->right(), eax);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
}
LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+    LOperand* left = UseFixed(instr->left(), edx);
+    LOperand* right = UseFixed(instr->right(), eax);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
  ASSERT(instr->representation().IsInteger32());
  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
@ -894,15 +920,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
  if (FLAG_stress_environments && !instr->HasEnvironment()) {
    instr = AssignEnvironment(instr);
  }
-  if (current->IsBranch() && !instr->IsGoto()) {
-    // TODO(fschneider): Handle branch instructions uniformly like
-    // other instructions. This requires us to generate the right
-    // branch instruction already at the HIR level.
+  if (current->IsTest() && !instr->IsGoto()) {
    ASSERT(instr->IsControl());
-    HBranch* branch = HBranch::cast(current);
-    instr->set_hydrogen_value(branch->value());
-    HBasicBlock* first = branch->FirstSuccessor();
-    HBasicBlock* second = branch->SecondSuccessor();
+    HTest* test = HTest::cast(current);
+    instr->set_hydrogen_value(test->value());
+    HBasicBlock* first = test->FirstSuccessor();
+    HBasicBlock* second = test->SecondSuccessor();
    ASSERT(first != NULL && second != NULL);
    instr->SetBranchTargets(first->block_id(), second->block_id());
  } else {
@ -959,7 +982,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
  HValue* v = instr->value();
  if (v->EmitAtUses()) {
    if (v->IsClassOfTest()) {
@ -1061,8 +1084,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
}
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
-    HCompareMapAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  LOperand* value = UseRegisterAtStart(instr->value());
  return new LCmpMapAndBranch(value);
@ -1741,6 +1763,20 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LStringLength(string));
}
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
  return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
}

161
deps/v8/src/ia32/lithium-ia32.h

@ -114,6 +114,7 @@ class LCodeGen;
 //   LStoreNamed
 //     LStoreNamedField
 //     LStoreNamedGeneric
+//   LStringCharCodeAt
 //   LBitNotI
 //   LCallNew
 //   LCheckFunction
@ -141,6 +142,7 @@ class LCodeGen;
 //   LReturn
 //   LSmiTag
 //   LStoreGlobal
+//   LStringLength
 //   LTaggedToI
 //   LThrow
 //   LTypeof
@ -253,6 +255,8 @@ class LCodeGen;
  V(StoreKeyedGeneric) \
  V(StoreNamedField) \
  V(StoreNamedGeneric) \
+  V(StringCharCodeAt) \
+  V(StringLength) \
  V(SubI) \
  V(TaggedToI) \
  V(Throw) \
@ -335,33 +339,36 @@ class LInstruction: public ZoneObject {
};

-template<typename T, int N>
+template<typename ElementType, int NumElements>
class OperandContainer {
 public:
  OperandContainer() {
-    for (int i = 0; i < N; i++) elems_[i] = NULL;
+    for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
  }
-  int length() { return N; }
-  T& operator[](int i) {
+  int length() { return NumElements; }
+  ElementType& operator[](int i) {
    ASSERT(i < length());
    return elems_[i];
  }
  void PrintOperandsTo(StringStream* stream);

 private:
-  T elems_[N];
+  ElementType elems_[NumElements];
};

-template<typename T>
-class OperandContainer<T, 0> {
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
 public:
  int length() { return 0; }
  void PrintOperandsTo(StringStream* stream) { }
};

-template<int R, int I, int T = 0>
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
 public:
  // Allow 0 or 1 output operands.
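An illustrative instantiation under the now-mandatory third template argument (not part of the diff; LExampleAdd is hypothetical): one result, two inputs, no temps.

// Illustrative only: with T no longer defaulted, every instruction spells
// out its temp count explicitly.
class LExampleAdd: public LTemplateInstruction<1, 2, 0> {
 public:
  LExampleAdd(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }
};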
@ -512,7 +519,7 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
};

-template<int I, int T = 0>
+template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
 public:
  DECLARE_INSTRUCTION(ControlInstruction)
@ -570,7 +577,7 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
};

-class LArgumentsLength: public LTemplateInstruction<1, 1> {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LArgumentsLength(LOperand* elements) {
    inputs_[0] = elements;
@ -627,7 +634,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};

-class LCmpID: public LTemplateInstruction<1, 2> {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
 public:
  LCmpID(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -644,7 +651,7 @@ class LCmpID: public LTemplateInstruction<1, 2> {
};

-class LCmpIDAndBranch: public LControlInstruction<2> {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
 public:
  LCmpIDAndBranch(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -663,7 +670,7 @@ class LCmpIDAndBranch: public LControlInstruction<2> {
};

-class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LUnaryMathOperation(LOperand* value) {
    inputs_[0] = value;
@ -677,7 +684,7 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
};

-class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
 public:
  LCmpJSObjectEq(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -688,7 +695,7 @@ class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
};

-class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
 public:
  LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -700,7 +707,7 @@ class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
};

-class LIsNull: public LTemplateInstruction<1, 1> {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LIsNull(LOperand* value) {
    inputs_[0] = value;
@ -754,7 +761,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 2> {
};

-class LIsSmi: public LTemplateInstruction<1, 1> {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LIsSmi(LOperand* value) {
    inputs_[0] = value;
@ -765,7 +772,7 @@ class LIsSmi: public LTemplateInstruction<1, 1> {
};

-class LIsSmiAndBranch: public LControlInstruction<1> {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
 public:
  explicit LIsSmiAndBranch(LOperand* value) {
    inputs_[0] = value;
@ -777,7 +784,7 @@ class LIsSmiAndBranch: public LControlInstruction<1> {
};

-class LHasInstanceType: public LTemplateInstruction<1, 1> {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LHasInstanceType(LOperand* value) {
    inputs_[0] = value;
@ -803,7 +810,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
};

-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LHasCachedArrayIndex(LOperand* value) {
    inputs_[0] = value;
@ -814,7 +821,7 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
};

-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
 public:
  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
    inputs_[0] = value;
@ -856,7 +863,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
};

-class LCmpT: public LTemplateInstruction<1, 2> {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
 public:
  LCmpT(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -870,7 +877,7 @@ class LCmpT: public LTemplateInstruction<1, 2> {
};

-class LCmpTAndBranch: public LControlInstruction<2> {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
 public:
  LCmpTAndBranch(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -884,7 +891,7 @@ class LCmpTAndBranch: public LControlInstruction<2> {
};

-class LInstanceOf: public LTemplateInstruction<1, 2> {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
 public:
  LInstanceOf(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -895,7 +902,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2> {
};

-class LInstanceOfAndBranch: public LControlInstruction<2> {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
 public:
  LInstanceOfAndBranch(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -935,7 +942,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};

-class LBitI: public LTemplateInstruction<1, 2> {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
 public:
  LBitI(Token::Value op, LOperand* left, LOperand* right)
      : op_(op) {
@ -952,7 +959,7 @@ class LBitI: public LTemplateInstruction<1, 2> {
};

-class LShiftI: public LTemplateInstruction<1, 2> {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
 public:
  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
      : op_(op), can_deopt_(can_deopt) {
@ -972,7 +979,7 @@ class LShiftI: public LTemplateInstruction<1, 2> {
};

-class LSubI: public LTemplateInstruction<1, 2> {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
 public:
  LSubI(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -1025,7 +1032,7 @@ class LConstantT: public LConstant {
};

-class LBranch: public LControlInstruction<1> {
+class LBranch: public LControlInstruction<1, 0> {
 public:
  explicit LBranch(LOperand* value) {
    inputs_[0] = value;
@ -1038,28 +1045,28 @@ class LBranch: public LControlInstruction<1> {
};

-class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCmpMapAndBranch(LOperand* value) {
    inputs_[0] = value;
  }

  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)

  virtual bool IsControl() const { return true; }

  Handle<Map> map() const { return hydrogen()->map(); }
  int true_block_id() const {
-    return hydrogen()->true_destination()->block_id();
+    return hydrogen()->FirstSuccessor()->block_id();
  }
  int false_block_id() const {
-    return hydrogen()->false_destination()->block_id();
+    return hydrogen()->SecondSuccessor()->block_id();
  }
};

-class LJSArrayLength: public LTemplateInstruction<1, 1> {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LJSArrayLength(LOperand* value) {
    inputs_[0] = value;
@ -1070,7 +1077,7 @@ class LJSArrayLength: public LTemplateInstruction<1, 1> {
};

-class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LFixedArrayLength(LOperand* value) {
    inputs_[0] = value;
@ -1093,7 +1100,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};

-class LThrow: public LTemplateInstruction<0, 1> {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LThrow(LOperand* value) {
    inputs_[0] = value;
@ -1103,7 +1110,7 @@ class LThrow: public LTemplateInstruction<0, 1> {
};

-class LBitNotI: public LTemplateInstruction<1, 1> {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LBitNotI(LOperand* value) {
    inputs_[0] = value;
@ -1113,7 +1120,7 @@ class LBitNotI: public LTemplateInstruction<1, 1> {
};

-class LAddI: public LTemplateInstruction<1, 2> {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
 public:
  LAddI(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -1125,7 +1132,7 @@ class LAddI: public LTemplateInstruction<1, 2> {
};

-class LPower: public LTemplateInstruction<1, 2> {
+class LPower: public LTemplateInstruction<1, 2, 0> {
 public:
  LPower(LOperand* left, LOperand* right) {
    inputs_[0] = left;
@ -1137,7 +1144,7 @@ class LPower: public LTemplateInstruction<1, 2> {
};

-class LArithmeticD: public LTemplateInstruction<1, 2> {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
 public:
  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
      : op_(op) {
@ -1155,7 +1162,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2> {
};

-class LArithmeticT: public LTemplateInstruction<1, 2> {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
 public:
  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
      : op_(op) {
@ -1173,7 +1180,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2> {
};

-class LReturn: public LTemplateInstruction<0, 1> {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LReturn(LOperand* value) {
    inputs_[0] = value;
@ -1183,7 +1190,7 @@ class LReturn: public LTemplateInstruction<0, 1> {
};

-class LLoadNamedField: public LTemplateInstruction<1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LLoadNamedField(LOperand* object) {
    inputs_[0] = object;
@ -1194,7 +1201,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1> {
};

-class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LLoadNamedGeneric(LOperand* object) {
    inputs_[0] = object;
@ -1222,7 +1229,7 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
};

-class LLoadElements: public LTemplateInstruction<1, 1> {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LLoadElements(LOperand* object) {
    inputs_[0] = object;
@ -1232,7 +1239,7 @@ class LLoadElements: public LTemplateInstruction<1, 1> {
};

-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
    inputs_[0] = elements;
@ -1247,7 +1254,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
};

-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
    inputs_[0] = obj;
@ -1268,7 +1275,7 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
};

-class LStoreGlobal: public LTemplateInstruction<0, 1> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LStoreGlobal(LOperand* value) {
    inputs_[0] = value;
@ -1291,7 +1298,7 @@ class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
};

-class LPushArgument: public LTemplateInstruction<0, 1> {
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LPushArgument(LOperand* value) {
    inputs_[0] = value;
@ -1385,7 +1392,7 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
};

-class LCallNew: public LTemplateInstruction<1, 1> {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LCallNew(LOperand* constructor) {
    inputs_[0] = constructor;
@ -1410,7 +1417,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};

-class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LInteger32ToDouble(LOperand* value) {
    inputs_[0] = value;
@ -1420,7 +1427,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
};

-class LNumberTagI: public LTemplateInstruction<1, 1> {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LNumberTagI(LOperand* value) {
    inputs_[0] = value;
@ -1432,7 +1439,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1> {
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
 public:
-  explicit LNumberTagD(LOperand* value, LOperand* temp) {
+  LNumberTagD(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
@ -1471,7 +1478,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};

-class LSmiTag: public LTemplateInstruction<1, 1> {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LSmiTag(LOperand* value) {
    inputs_[0] = value;
@ -1481,7 +1488,7 @@ class LSmiTag: public LTemplateInstruction<1, 1> {
};

-class LNumberUntagD: public LTemplateInstruction<1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LNumberUntagD(LOperand* value) {
    inputs_[0] = value;
@ -1491,7 +1498,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1> {
};

-class LSmiUntag: public LTemplateInstruction<1, 1> {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
 public:
  LSmiUntag(LOperand* value, bool needs_check)
      : needs_check_(needs_check) {
@ -1590,7 +1597,35 @@ class LStoreKeyedGeneric: public LStoreKeyed {
};

-class LCheckFunction: public LTemplateInstruction<0, 1> {
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharCodeAt(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+  LOperand* string() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LStringLength(LOperand* string) {
+    inputs_[0] = string;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+  DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+  LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCheckFunction(LOperand* value) {
    inputs_[0] = value;
@ -1613,7 +1648,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
};

-class LCheckMap: public LTemplateInstruction<0, 1> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCheckMap(LOperand* value) {
    inputs_[0] = value;
@ -1638,7 +1673,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
};

-class LCheckSmi: public LTemplateInstruction<0, 1> {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
 public:
  LCheckSmi(LOperand* value, Condition condition)
      : condition_(condition) {
@ -1687,7 +1722,7 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
};

-class LTypeof: public LTemplateInstruction<1, 1> {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LTypeof(LOperand* value) {
    inputs_[0] = value;
@ -1697,7 +1732,7 @@ class LTypeof: public LTemplateInstruction<1, 1> {
};

-class LTypeofIs: public LTemplateInstruction<1, 1> {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LTypeofIs(LOperand* value) {
    inputs_[0] = value;
@ -1712,7 +1747,7 @@ class LTypeofIs: public LTemplateInstruction<1, 1> {
};

-class LTypeofIsAndBranch: public LControlInstruction<1> {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
 public:
  explicit LTypeofIsAndBranch(LOperand* value) {
    inputs_[0] = value;
@ -1727,7 +1762,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1> {
};

-class LDeleteProperty: public LTemplateInstruction<1, 2> {
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
 public:
  LDeleteProperty(LOperand* obj, LOperand* key) {
    inputs_[0] = obj;

358
deps/v8/src/ia32/stub-cache-ia32.cc

@ -3306,6 +3306,364 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, failed_allocation;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
__ CmpInstanceType(ecx, JS_OBJECT_TYPE);
__ j(not_equal, &slow, not_taken);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow, not_taken);
// eax: key, known to be a smi.
// edx: receiver, known to be a JSObject.
// ebx: elements object, known to be an external array.
// Check that the index is in range.
__ mov(ecx, eax);
__ SmiUntag(ecx); // Untag the index.
__ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalShortArray:
__ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
case kExternalFloatArray:
__ fld_s(Operand(ebx, ecx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// ecx: value
// For floating-point array type:
// FP(0): value
if (array_type == kExternalIntArray ||
array_type == kExternalUnsignedIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
__ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
__ test(ecx, Immediate(0xC0000000));
__ j(not_zero, &box_int);
}
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (array_type == kExternalIntArray) {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
__ push(ecx);
__ fild_d(Operand(esp, 0));
__ pop(ecx);
__ pop(ecx);
}
// FP(0): value
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
}
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
__ ffree();
__ fincstp();
// Fall through to slow case.
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
__ push(edx); // receiver
__ push(eax); // name
__ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
// Return the generated code.
return GetCode(flags);
}
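The two range checks in the load stub above are worth spelling out: a signed 32-bit value fits in an ia32 smi iff it lies in [-2^30, 2^30 - 1], and cmp(ecx, 0xC0000000) sets the sign flag exactly when the value falls outside that range; an unsigned value fits only if neither of its top two bits is set. A sketch, illustrative and not part of the diff:

#include <cstdint>

// Illustrative only: ia32 smis carry a 31-bit signed payload.
static bool FitsSignedSmi(int32_t value) {
  // cmp(value, 0xC0000000) computes value + 2^30 (mod 2^32); the sign flag
  // of the result is clear exactly for values in [-2^30, 2^30 - 1].
  return value >= -0x40000000 && value <= 0x3FFFFFFF;
}

static bool FitsUnsignedSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;  // both top bits must be clear
}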
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, check_heap_number;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
// Get the instance type from the map of the receiver.
__ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: value
// edx: receiver, a JSObject
// ecx: key, a smi
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
&slow, true);
// Check that the index is in range.
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ test(eax, Immediate(kSmiTagMask));
__ j(not_equal, &check_heap_number);
// smi case
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: untagged value
// edi: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
__ fstp_s(Operand(edi, ebx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
__ ret(0); // Return the original value.
__ bind(&check_heap_number);
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// edi: base pointer of external storage
if (array_type == kExternalFloatArray) {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
} else {
// Perform float-to-int conversion with truncation (round-to-zero)
// behavior.
// For the moment we make the slow call to the runtime on
// processors that don't support SSE2. The code in IntegerConvert
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
if (CpuFeatures::IsSupported(SSE2)) {
if (array_type != kExternalIntArray &&
array_type != kExternalUnsignedIntArray) {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
// ecx: untagged integer value
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
default:
UNREACHABLE();
break;
}
} else {
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
// fisttp stores values as signed integers. To represent the
// entire range of int and unsigned int arrays, store as a
// 64-bit int and discard the high 32 bits.
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ fisttp_d(Operand(esp, 0));
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
} else {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
// We can easily implement the correct rounding behavior for the
// range [0, 2^31-1]. For the time being, to keep this code simple,
// make the slow runtime call for values outside this range.
// Note: we could do better for signed int arrays.
__ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// We will need the key if we have to make the slow runtime call.
__ push(ecx);
__ LoadPowerOf2(xmm1, ecx, 31);
__ pop(ecx);
__ ucomisd(xmm1, xmm0);
__ j(above_equal, &slow);
__ cvttsd2si(ecx, Operand(xmm0));
}
// ecx: untagged integer value
__ mov(Operand(edi, ebx, times_4, 0), ecx);
}
__ ret(0); // Return original value.
}
}
// Slow case: call runtime.
__ bind(&slow);
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
__ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
return GetCode(flags);
}
#undef __
} }  // namespace v8::internal
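(Editorial note: the two conversion comments in the stub above lean on x86 semantics that are easy to verify in isolation. The sketch below is plain C++, not V8 code; TruncateToInt32 is a hypothetical stand-in for cvttsd2si/fisttp. It shows why NaN and out-of-range doubles read back as 0 from byte and short external arrays, and why an unsigned bounds compare also rejects negative indices.)

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Out-of-range and NaN inputs map to 0x80000000 ("integer indefinite"),
// mirroring cvttsd2si/fisttp; in-range inputs truncate toward zero.
static int32_t TruncateToInt32(double value) {
  if (std::isnan(value) || value >= 2147483648.0 || value < -2147483648.0) {
    return INT32_MIN;
  }
  return static_cast<int32_t>(value);
}

int main() {
  // Keeping only the low 8/16 bits zeroes out 0x80000000, so NaN/Infinity
  // stored into byte or short external arrays read back as 0.
  printf("%d\n", static_cast<int8_t>(TruncateToInt32(NAN)));       // 0
  printf("%d\n", static_cast<int16_t>(TruncateToInt32(1.0e300)));  // 0
  printf("%d\n", TruncateToInt32(-3.9));                           // -3
  // The "unsigned comparison catches negative values" trick from the
  // bounds check: -1 reinterpreted as unsigned is huge, hence >= length.
  int untagged_index = -1;
  assert(static_cast<uint32_t>(untagged_index) >= 100u);
  return 0;
}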

59
deps/v8/src/ic.cc

@ -367,55 +367,6 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
}
Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray);
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray);
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray);
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedLoadIC_ExternalUnsignedShortArray);
case JSObject::EXTERNAL_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray);
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray);
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray);
default:
UNREACHABLE();
return NULL;
}
}
Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray);
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedStoreIC_ExternalUnsignedByteArray);
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray);
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedStoreIC_ExternalUnsignedShortArray);
case JSObject::EXTERNAL_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray);
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray);
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray);
default:
UNREACHABLE();
return NULL;
}
}
static bool HasInterceptorGetter(JSObject* object) {
return !object->GetNamedInterceptor()->getter()->IsUndefined();
}
@ -1243,7 +1194,10 @@ MaybeObject* KeyedLoadIC::Load(State state,
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
stub = external_array_stub(receiver->GetElementsKind());
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, false);
stub =
probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} else if (state == UNINITIALIZED &&
@ -1636,7 +1590,10 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
stub = external_array_stub(receiver->GetElementsKind());
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
stub =
probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
} else if (state == UNINITIALIZED &&
key->IsSmi() &&
receiver->map()->has_fast_elements()) {

15
deps/v8/src/ic.h

@ -345,12 +345,6 @@ class KeyedLoadIC: public IC {
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
// operating upon external array types but fall back to the runtime
// for all other types.
static void GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type);
static void GenerateIndexedInterceptor(MacroAssembler* masm);
// Clear the use of the inlined version.
@ -386,7 +380,6 @@ class KeyedLoadIC: public IC {
static Code* string_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_String);
}
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static Code* indexed_interceptor_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
@ -470,13 +463,6 @@ class KeyedStoreIC: public IC {
static void GenerateRuntimeSetProperty(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
// operating upon external array types but fall back to the runtime
// for all other types.
static void GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type);
// Clear the inlined version so the IC is always hit.
static void ClearInlinedVersion(Address address);
@ -501,7 +487,6 @@ class KeyedStoreIC: public IC {
static Code* generic_stub() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
}
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static void Clear(Address address, Code* target);

140
deps/v8/src/lithium-allocator.cc

@ -71,73 +71,24 @@ static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
}
void LOperand::PrintTo(StringStream* stream) {
LUnallocated* unalloc = NULL;
switch (kind()) {
case INVALID:
break;
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
stream->Add("v%d", unalloc->virtual_register());
switch (unalloc->policy()) {
case LUnallocated::NONE:
break;
case LUnallocated::FIXED_REGISTER: {
const char* register_name =
Register::AllocationIndexToString(unalloc->fixed_index());
stream->Add("(=%s)", register_name);
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
const char* double_register_name =
DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
stream->Add("(=%s)", double_register_name);
break;
}
case LUnallocated::FIXED_SLOT:
stream->Add("(=%dS)", unalloc->fixed_index());
break;
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;
case LUnallocated::WRITABLE_REGISTER:
stream->Add("(WR)");
break;
case LUnallocated::SAME_AS_FIRST_INPUT:
stream->Add("(1)");
break;
case LUnallocated::ANY:
stream->Add("(-)");
break;
case LUnallocated::IGNORE:
stream->Add("(0)");
break;
}
break;
case CONSTANT_OPERAND:
stream->Add("[constant:%d]", index());
break;
case STACK_SLOT:
stream->Add("[stack:%d]", index());
break;
case DOUBLE_STACK_SLOT:
stream->Add("[double_stack:%d]", index());
break;
case REGISTER:
stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
break;
case DOUBLE_REGISTER:
stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
break;
case ARGUMENT:
stream->Add("[arg:%d]", index());
break;
}
}
int LOperand::VirtualRegister() {
LUnallocated* unalloc = LUnallocated::cast(this);
return unalloc->virtual_register();
}
UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
: operand_(operand),
hint_(NULL),
pos_(pos),
next_(NULL),
requires_reg_(false),
register_beneficial_(true) {
if (operand_ != NULL && operand_->IsUnallocated()) {
LUnallocated* unalloc = LUnallocated::cast(operand_);
requires_reg_ = unalloc->HasRegisterPolicy();
register_beneficial_ = !unalloc->HasAnyPolicy();
}
ASSERT(pos_.IsValid());
}
bool UsePosition::HasHint() const {
return hint_ != NULL && !hint_->IsUnallocated();
}
@ -190,6 +141,53 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
#endif
LiveRange::LiveRange(int id)
: id_(id),
spilled_(false),
assigned_register_(kInvalidAssignment),
assigned_register_kind_(NONE),
last_interval_(NULL),
first_interval_(NULL),
first_pos_(NULL),
parent_(NULL),
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
spill_start_index_(kMaxInt) {
spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
}
void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
assigned_register_kind_ = register_kind;
ConvertOperands();
}
void LiveRange::MakeSpilled() {
ASSERT(!IsSpilled());
ASSERT(TopLevel()->HasAllocatedSpillOperand());
spilled_ = true;
assigned_register_ = kInvalidAssignment;
ConvertOperands();
}
bool LiveRange::HasAllocatedSpillOperand() const {
return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
}
void LiveRange::SetSpillOperand(LOperand* operand) {
ASSERT(!operand->IsUnallocated());
ASSERT(spill_operand_ != NULL);
ASSERT(spill_operand_->IsUnallocated());
spill_operand_->ConvertTo(operand->kind(), operand->index());
}
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
UsePosition* use_pos = last_processed_use_;
if (use_pos == NULL) use_pos = first_pos();
@ -2015,20 +2013,6 @@ bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
}
void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
UsePosition* prev_pos = prev->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
UsePosition* next_pos = next->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
LOperand* prev_operand = prev_pos->operand();
LOperand* next_operand = next_pos->operand();
LGap* gap = chunk_->GetGapAt(pos);
gap->GetOrCreateParallelMove(LGap::START)->
AddMove(prev_operand, next_operand);
next_pos->set_hint(prev_operand);
}
LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());

412
deps/v8/src/lithium-allocator.h

@ -48,6 +48,8 @@ class StringStream;
class LArgument;
class LChunk;
class LOperand;
class LUnallocated;
class LConstantOperand;
class LGap;
class LParallelMove;
@ -149,355 +151,6 @@ enum RegisterKind {
};
class LOperand: public ZoneObject {
public:
enum Kind {
INVALID,
UNALLOCATED,
CONSTANT_OPERAND,
STACK_SLOT,
DOUBLE_STACK_SLOT,
REGISTER,
DOUBLE_REGISTER,
ARGUMENT
};
LOperand() : value_(KindField::encode(INVALID)) { }
Kind kind() const { return KindField::decode(value_); }
int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
bool IsStackSlot() const { return kind() == STACK_SLOT; }
bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
bool IsRegister() const { return kind() == REGISTER; }
bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
bool IsArgument() const { return kind() == ARGUMENT; }
bool IsUnallocated() const { return kind() == UNALLOCATED; }
bool Equals(LOperand* other) const { return value_ == other->value_; }
int VirtualRegister();
void PrintTo(StringStream* stream);
void ConvertTo(Kind kind, int index) {
value_ = KindField::encode(kind);
value_ |= index << kKindFieldWidth;
ASSERT(this->index() == index);
}
protected:
static const int kKindFieldWidth = 3;
class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
LOperand(Kind kind, int index) { ConvertTo(kind, index); }
unsigned value_;
};
class LUnallocated: public LOperand {
public:
enum Policy {
NONE,
ANY,
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT,
IGNORE
};
// Lifetime of operand inside the instruction.
enum Lifetime {
// USED_AT_START operand is guaranteed to be live only at
// instruction start. Register allocator is free to assign the same register
// to some other operand used inside instruction (i.e. temporary or
// output).
USED_AT_START,
// USED_AT_END operand is treated as live until the end of
// instruction. This means that register allocator will not reuse its
// register for any other operand inside instruction.
USED_AT_END
};
explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
Initialize(policy, 0, USED_AT_END);
}
LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
Initialize(policy, fixed_index, USED_AT_END);
}
LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
Initialize(policy, 0, lifetime);
}
// The superclass has a KindField. Some policies have a signed fixed
// index in the upper bits.
static const int kPolicyWidth = 4;
static const int kLifetimeWidth = 1;
static const int kVirtualRegisterWidth = 17;
static const int kPolicyShift = kKindFieldWidth;
static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
static const int kFixedIndexShift =
kVirtualRegisterShift + kVirtualRegisterWidth;
class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
class LifetimeField
: public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
};
class VirtualRegisterField
: public BitField<unsigned,
kVirtualRegisterShift,
kVirtualRegisterWidth> {
};
static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
static const int kMaxFixedIndices = 128;
bool HasIgnorePolicy() const { return policy() == IGNORE; }
bool HasNoPolicy() const { return policy() == NONE; }
bool HasAnyPolicy() const {
return policy() == ANY;
}
bool HasFixedPolicy() const {
return policy() == FIXED_REGISTER ||
policy() == FIXED_DOUBLE_REGISTER ||
policy() == FIXED_SLOT;
}
bool HasRegisterPolicy() const {
return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
}
bool HasSameAsInputPolicy() const {
return policy() == SAME_AS_FIRST_INPUT;
}
Policy policy() const { return PolicyField::decode(value_); }
void set_policy(Policy policy) {
value_ &= ~PolicyField::mask();
value_ |= PolicyField::encode(policy);
}
int fixed_index() const {
return static_cast<int>(value_) >> kFixedIndexShift;
}
unsigned virtual_register() const {
return VirtualRegisterField::decode(value_);
}
void set_virtual_register(unsigned id) {
value_ &= ~VirtualRegisterField::mask();
value_ |= VirtualRegisterField::encode(id);
}
LUnallocated* CopyUnconstrained() {
LUnallocated* result = new LUnallocated(ANY);
result->set_virtual_register(virtual_register());
return result;
}
static LUnallocated* cast(LOperand* op) {
ASSERT(op->IsUnallocated());
return reinterpret_cast<LUnallocated*>(op);
}
bool IsUsedAtStart() {
return LifetimeField::decode(value_) == USED_AT_START;
}
private:
void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
value_ |= PolicyField::encode(policy);
value_ |= LifetimeField::encode(lifetime);
value_ |= fixed_index << kFixedIndexShift;
ASSERT(this->fixed_index() == fixed_index);
}
};
class LMoveOperands BASE_EMBEDDED {
public:
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
}
LOperand* source() const { return source_; }
void set_source(LOperand* operand) { source_ = operand; }
LOperand* destination() const { return destination_; }
void set_destination(LOperand* operand) { destination_ = operand; }
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
bool IsPending() const {
return destination_ == NULL && source_ != NULL;
}
// True if this move is a move into the given destination operand.
bool Blocks(LOperand* operand) const {
return !IsEliminated() && source()->Equals(operand);
}
// A move is redundant if it's been eliminated, if its source and
// destination are the same, or if its destination is unneeded.
bool IsRedundant() const {
return IsEliminated() || source_->Equals(destination_) || IsIgnored();
}
bool IsIgnored() const {
return destination_ != NULL &&
destination_->IsUnallocated() &&
LUnallocated::cast(destination_)->HasIgnorePolicy();
}
// We clear both operands to indicate a move that's been eliminated.
void Eliminate() { source_ = destination_ = NULL; }
bool IsEliminated() const {
ASSERT(source_ != NULL || destination_ == NULL);
return source_ == NULL;
}
private:
LOperand* source_;
LOperand* destination_;
};
class LConstantOperand: public LOperand {
public:
static LConstantOperand* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LConstantOperand(index);
}
static LConstantOperand* cast(LOperand* op) {
ASSERT(op->IsConstantOperand());
return reinterpret_cast<LConstantOperand*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 128;
static LConstantOperand cache[];
LConstantOperand() : LOperand() { }
explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
};
class LArgument: public LOperand {
public:
explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
static LArgument* cast(LOperand* op) {
ASSERT(op->IsArgument());
return reinterpret_cast<LArgument*>(op);
}
};
class LStackSlot: public LOperand {
public:
static LStackSlot* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LStackSlot(index);
}
static LStackSlot* cast(LOperand* op) {
ASSERT(op->IsStackSlot());
return reinterpret_cast<LStackSlot*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 128;
static LStackSlot cache[];
LStackSlot() : LOperand() { }
explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
};
class LDoubleStackSlot: public LOperand {
public:
static LDoubleStackSlot* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LDoubleStackSlot(index);
}
static LDoubleStackSlot* cast(LOperand* op) {
ASSERT(op->IsStackSlot());
return reinterpret_cast<LDoubleStackSlot*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 128;
static LDoubleStackSlot cache[];
LDoubleStackSlot() : LOperand() { }
explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
};
class LRegister: public LOperand {
public:
static LRegister* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LRegister(index);
}
static LRegister* cast(LOperand* op) {
ASSERT(op->IsRegister());
return reinterpret_cast<LRegister*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 16;
static LRegister cache[];
LRegister() : LOperand() { }
explicit LRegister(int index) : LOperand(REGISTER, index) { }
};
class LDoubleRegister: public LOperand {
public:
static LDoubleRegister* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LDoubleRegister(index);
}
static LDoubleRegister* cast(LOperand* op) {
ASSERT(op->IsDoubleRegister());
return reinterpret_cast<LDoubleRegister*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 16;
static LDoubleRegister cache[];
LDoubleRegister() : LOperand() { }
explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
};
// A register-allocator view of a Lithium instruction. It contains the id of
// the output operand and a list of input operand uses.
class InstructionSummary: public ZoneObject {
@ -588,27 +241,14 @@ class UseInterval: public ZoneObject {
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
UsePosition(LifetimePosition pos, LOperand* operand)
: operand_(operand),
hint_(NULL),
pos_(pos),
next_(NULL),
requires_reg_(false),
register_beneficial_(true) {
if (operand_ != NULL && operand_->IsUnallocated()) {
LUnallocated* unalloc = LUnallocated::cast(operand_);
requires_reg_ = unalloc->HasRegisterPolicy();
register_beneficial_ = !unalloc->HasAnyPolicy();
}
ASSERT(pos_.IsValid());
}
UsePosition(LifetimePosition pos, LOperand* operand);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
void set_hint(LOperand* hint) { hint_ = hint; }
bool HasHint() const { return hint_ != NULL && !hint_->IsUnallocated(); }
bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
@ -634,21 +274,7 @@ class LiveRange: public ZoneObject {
public:
static const int kInvalidAssignment = 0x7fffffff;
explicit LiveRange(int id)
: id_(id),
spilled_(false),
assigned_register_(kInvalidAssignment),
assigned_register_kind_(NONE),
last_interval_(NULL),
first_interval_(NULL),
first_pos_(NULL),
parent_(NULL),
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
spill_start_index_(kMaxInt) {
spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
}
explicit LiveRange(int id);
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
@ -663,19 +289,8 @@ class LiveRange: public ZoneObject {
LOperand* CreateAssignedOperand();
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
void set_assigned_register(int reg, RegisterKind register_kind) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
assigned_register_kind_ = register_kind;
ConvertOperands();
}
void MakeSpilled() {
ASSERT(!IsSpilled());
ASSERT(TopLevel()->HasAllocatedSpillOperand());
spilled_ = true;
assigned_register_ = kInvalidAssignment;
ConvertOperands();
}
void set_assigned_register(int reg, RegisterKind register_kind);
void MakeSpilled();
// Returns use position in this live range that follows both start
// and last processed use position.
@ -724,17 +339,9 @@ class LiveRange: public ZoneObject {
return last_interval_->end();
}
bool HasAllocatedSpillOperand() const {
return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
}
bool HasAllocatedSpillOperand() const;
LOperand* GetSpillOperand() const { return spill_operand_; }
void SetSpillOperand(LOperand* operand) {
ASSERT(!operand->IsUnallocated());
ASSERT(spill_operand_ != NULL);
ASSERT(spill_operand_->IsUnallocated());
spill_operand_->ConvertTo(operand->kind(), operand->index());
}
void SetSpillOperand(LOperand* operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
@ -984,7 +591,6 @@ class LAllocator BASE_EMBEDDED {
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
// Helper methods for resolving control flow.
void ResolveControlFlow(LiveRange* range,

72
deps/v8/src/lithium.cc

@ -30,6 +30,78 @@
namespace v8 {
namespace internal {
void LOperand::PrintTo(StringStream* stream) {
LUnallocated* unalloc = NULL;
switch (kind()) {
case INVALID:
break;
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
stream->Add("v%d", unalloc->virtual_register());
switch (unalloc->policy()) {
case LUnallocated::NONE:
break;
case LUnallocated::FIXED_REGISTER: {
const char* register_name =
Register::AllocationIndexToString(unalloc->fixed_index());
stream->Add("(=%s)", register_name);
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
const char* double_register_name =
DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
stream->Add("(=%s)", double_register_name);
break;
}
case LUnallocated::FIXED_SLOT:
stream->Add("(=%dS)", unalloc->fixed_index());
break;
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;
case LUnallocated::WRITABLE_REGISTER:
stream->Add("(WR)");
break;
case LUnallocated::SAME_AS_FIRST_INPUT:
stream->Add("(1)");
break;
case LUnallocated::ANY:
stream->Add("(-)");
break;
case LUnallocated::IGNORE:
stream->Add("(0)");
break;
}
break;
case CONSTANT_OPERAND:
stream->Add("[constant:%d]", index());
break;
case STACK_SLOT:
stream->Add("[stack:%d]", index());
break;
case DOUBLE_STACK_SLOT:
stream->Add("[double_stack:%d]", index());
break;
case REGISTER:
stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
break;
case DOUBLE_REGISTER:
stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
break;
case ARGUMENT:
stream->Add("[arg:%d]", index());
break;
}
}
int LOperand::VirtualRegister() {
LUnallocated* unalloc = LUnallocated::cast(this);
return unalloc->virtual_register();
}
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;

350
deps/v8/src/lithium.h

@ -29,12 +29,360 @@
#define V8_LITHIUM_H_
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
class LOperand: public ZoneObject {
public:
enum Kind {
INVALID,
UNALLOCATED,
CONSTANT_OPERAND,
STACK_SLOT,
DOUBLE_STACK_SLOT,
REGISTER,
DOUBLE_REGISTER,
ARGUMENT
};
LOperand() : value_(KindField::encode(INVALID)) { }
Kind kind() const { return KindField::decode(value_); }
int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
bool IsStackSlot() const { return kind() == STACK_SLOT; }
bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
bool IsRegister() const { return kind() == REGISTER; }
bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
bool IsArgument() const { return kind() == ARGUMENT; }
bool IsUnallocated() const { return kind() == UNALLOCATED; }
bool Equals(LOperand* other) const { return value_ == other->value_; }
int VirtualRegister();
void PrintTo(StringStream* stream);
void ConvertTo(Kind kind, int index) {
value_ = KindField::encode(kind);
value_ |= index << kKindFieldWidth;
ASSERT(this->index() == index);
}
protected:
static const int kKindFieldWidth = 3;
class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
LOperand(Kind kind, int index) { ConvertTo(kind, index); }
unsigned value_;
};
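(Editorial worked example of the encoding above, not part of the diff: the low kKindFieldWidth bits of value_ carry the Kind tag and the remaining bits carry the index, so a STACK_SLOT, Kind value 3, at index 5 packs as (5 << 3) | 3 = 43.)

#include <cassert>

int main() {
  const int kKindFieldWidth = 3;  // Mirrors LOperand::kKindFieldWidth.
  const unsigned kStackSlot = 3;  // STACK_SLOT's position in the Kind enum.
  unsigned value = (5u << kKindFieldWidth) | kStackSlot;
  assert(value == 43u);
  assert((value & ((1u << kKindFieldWidth) - 1)) == kStackSlot);  // kind()
  assert(static_cast<int>(value) >> kKindFieldWidth == 5);        // index()
  return 0;
}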
class LUnallocated: public LOperand {
public:
enum Policy {
NONE,
ANY,
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT,
IGNORE
};
// Lifetime of operand inside the instruction.
enum Lifetime {
// USED_AT_START operand is guaranteed to be live only at
// instruction start. Register allocator is free to assign the same register
// to some other operand used inside instruction (i.e. temporary or
// output).
USED_AT_START,
// USED_AT_END operand is treated as live until the end of
// instruction. This means that register allocator will not reuse it's
// register for any other operand inside instruction.
USED_AT_END
};
explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
Initialize(policy, 0, USED_AT_END);
}
LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
Initialize(policy, fixed_index, USED_AT_END);
}
LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
Initialize(policy, 0, lifetime);
}
// The superclass has a KindField. Some policies have a signed fixed
// index in the upper bits.
static const int kPolicyWidth = 4;
static const int kLifetimeWidth = 1;
static const int kVirtualRegisterWidth = 17;
static const int kPolicyShift = kKindFieldWidth;
static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
static const int kFixedIndexShift =
kVirtualRegisterShift + kVirtualRegisterWidth;
class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
class LifetimeField
: public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
};
class VirtualRegisterField
: public BitField<unsigned,
kVirtualRegisterShift,
kVirtualRegisterWidth> {
};
static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
static const int kMaxFixedIndices = 128;
bool HasIgnorePolicy() const { return policy() == IGNORE; }
bool HasNoPolicy() const { return policy() == NONE; }
bool HasAnyPolicy() const {
return policy() == ANY;
}
bool HasFixedPolicy() const {
return policy() == FIXED_REGISTER ||
policy() == FIXED_DOUBLE_REGISTER ||
policy() == FIXED_SLOT;
}
bool HasRegisterPolicy() const {
return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
}
bool HasSameAsInputPolicy() const {
return policy() == SAME_AS_FIRST_INPUT;
}
Policy policy() const { return PolicyField::decode(value_); }
void set_policy(Policy policy) {
value_ &= ~PolicyField::mask();
value_ |= PolicyField::encode(policy);
}
int fixed_index() const {
return static_cast<int>(value_) >> kFixedIndexShift;
}
unsigned virtual_register() const {
return VirtualRegisterField::decode(value_);
}
void set_virtual_register(unsigned id) {
value_ &= ~VirtualRegisterField::mask();
value_ |= VirtualRegisterField::encode(id);
}
LUnallocated* CopyUnconstrained() {
LUnallocated* result = new LUnallocated(ANY);
result->set_virtual_register(virtual_register());
return result;
}
static LUnallocated* cast(LOperand* op) {
ASSERT(op->IsUnallocated());
return reinterpret_cast<LUnallocated*>(op);
}
bool IsUsedAtStart() {
return LifetimeField::decode(value_) == USED_AT_START;
}
private:
void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
value_ |= PolicyField::encode(policy);
value_ |= LifetimeField::encode(lifetime);
value_ |= fixed_index << kFixedIndexShift;
ASSERT(this->fixed_index() == fixed_index);
}
};
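(Editorial sketch, assumed from the shift constants above rather than taken from the diff: an LUnallocated word is laid out as bits [0,3) kind, [3,7) policy, bit 7 lifetime, [8,25) virtual register, and [25,32) signed fixed index.)

#include <cassert>

int main() {
  const int kKindFieldWidth = 3;
  const int kPolicyShift = kKindFieldWidth;                 // 3
  const int kLifetimeShift = kPolicyShift + 4;              // 7
  const int kVirtualRegisterShift = kLifetimeShift + 1;     // 8
  const int kFixedIndexShift = kVirtualRegisterShift + 17;  // 25
  assert(kFixedIndexShift == 25);
  // An UNALLOCATED operand (kind 1) with FIXED_REGISTER policy (enum
  // value 2) pinned to allocation index 1:
  unsigned value = 1u | (2u << kPolicyShift) | (1u << kFixedIndexShift);
  assert(((value >> kPolicyShift) & 0xFu) == 2u);            // policy()
  assert(static_cast<int>(value) >> kFixedIndexShift == 1);  // fixed_index()
  return 0;
}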
class LMoveOperands BASE_EMBEDDED {
public:
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
}
LOperand* source() const { return source_; }
void set_source(LOperand* operand) { source_ = operand; }
LOperand* destination() const { return destination_; }
void set_destination(LOperand* operand) { destination_ = operand; }
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
bool IsPending() const {
return destination_ == NULL && source_ != NULL;
}
// True if this move is a move into the given destination operand.
bool Blocks(LOperand* operand) const {
return !IsEliminated() && source()->Equals(operand);
}
// A move is redundant if it's been eliminated, if its source and
// destination are the same, or if its destination is unneeded.
bool IsRedundant() const {
return IsEliminated() || source_->Equals(destination_) || IsIgnored();
}
bool IsIgnored() const {
return destination_ != NULL &&
destination_->IsUnallocated() &&
LUnallocated::cast(destination_)->HasIgnorePolicy();
}
// We clear both operands to indicate a move that's been eliminated.
void Eliminate() { source_ = destination_ = NULL; }
bool IsEliminated() const {
ASSERT(source_ != NULL || destination_ == NULL);
return source_ == NULL;
}
private:
LOperand* source_;
LOperand* destination_;
};
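(Editorial model of the move states described in the comments above, using a hypothetical Move type rather than V8 code: the gap resolver clears only the destination to mark a move pending, and clears both operands to eliminate it.)

#include <cassert>
#include <cstddef>

struct Move {
  const int* src;
  const int* dst;
  bool IsPending() const { return dst == NULL && src != NULL; }
  bool IsEliminated() const { return src == NULL; }
  void Eliminate() { src = dst = NULL; }
};

int main() {
  int a = 0, b = 0;
  Move m = { &a, &b };
  assert(!m.IsPending() && !m.IsEliminated());
  m.dst = NULL;   // Resolver marks the move in-progress.
  assert(m.IsPending());
  m.Eliminate();  // Both operands cleared: the move is gone.
  assert(m.IsEliminated() && !m.IsPending());
  return 0;
}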
class LConstantOperand: public LOperand {
public:
static LConstantOperand* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LConstantOperand(index);
}
static LConstantOperand* cast(LOperand* op) {
ASSERT(op->IsConstantOperand());
return reinterpret_cast<LConstantOperand*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 128;
static LConstantOperand cache[];
LConstantOperand() : LOperand() { }
explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
};
class LArgument: public LOperand {
public:
explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
static LArgument* cast(LOperand* op) {
ASSERT(op->IsArgument());
return reinterpret_cast<LArgument*>(op);
}
};
class LStackSlot: public LOperand {
public:
static LStackSlot* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LStackSlot(index);
}
static LStackSlot* cast(LOperand* op) {
ASSERT(op->IsStackSlot());
return reinterpret_cast<LStackSlot*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 128;
static LStackSlot cache[];
LStackSlot() : LOperand() { }
explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
};
class LDoubleStackSlot: public LOperand {
public:
static LDoubleStackSlot* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LDoubleStackSlot(index);
}
static LDoubleStackSlot* cast(LOperand* op) {
ASSERT(op->IsStackSlot());
return reinterpret_cast<LDoubleStackSlot*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 128;
static LDoubleStackSlot cache[];
LDoubleStackSlot() : LOperand() { }
explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
};
class LRegister: public LOperand {
public:
static LRegister* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LRegister(index);
}
static LRegister* cast(LOperand* op) {
ASSERT(op->IsRegister());
return reinterpret_cast<LRegister*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 16;
static LRegister cache[];
LRegister() : LOperand() { }
explicit LRegister(int index) : LOperand(REGISTER, index) { }
};
class LDoubleRegister: public LOperand {
public:
static LDoubleRegister* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LDoubleRegister(index);
}
static LDoubleRegister* cast(LOperand* op) {
ASSERT(op->IsDoubleRegister());
return reinterpret_cast<LDoubleRegister*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 16;
static LDoubleRegister cache[];
LDoubleRegister() : LOperand() { }
explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
};
class LParallelMove : public ZoneObject {
public:
LParallelMove() : move_operands_(4) { }

36
deps/v8/src/liveobjectlist-inl.h

@ -0,0 +1,36 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_LIVEOBJECTLIST_INL_H_
#define V8_LIVEOBJECTLIST_INL_H_
#include "v8.h"
#include "liveobjectlist.h"
#endif // V8_LIVEOBJECTLIST_INL_H_

53
deps/v8/src/liveobjectlist.cc

@ -0,0 +1,53 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef LIVE_OBJECT_LIST
#include <ctype.h>
#include <stdlib.h>
#include "v8.h"
#include "checks.h"
#include "global-handles.h"
#include "heap.h"
#include "inspector.h"
#include "list-inl.h"
#include "liveobjectlist.h"
#include "string-stream.h"
#include "top.h"
#include "v8utils.h"
namespace v8 {
namespace internal {
} } // namespace v8::internal
#endif // LIVE_OBJECT_LIST

112
deps/v8/src/liveobjectlist.h

@ -0,0 +1,112 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_LIVEOBJECTLIST_H_
#define V8_LIVEOBJECTLIST_H_
#include "v8.h"
#include "checks.h"
#include "heap.h"
#include "objects.h"
#include "globals.h"
namespace v8 {
namespace internal {
#ifdef LIVE_OBJECT_LIST
// Temporary stubbed out LiveObjectList implementation.
class LiveObjectList {
public:
inline static void GCEpilogue() {}
inline static void GCPrologue() {}
inline static void IterateElements(ObjectVisitor* v) {}
inline static void ProcessNonLive(HeapObject *obj) {}
inline static void UpdateReferencesForScavengeGC() {}
static MaybeObject* Capture() { return Heap::undefined_value(); }
static bool Delete(int id) { return false; }
static MaybeObject* Dump(int id1,
int id2,
int start_idx,
int dump_limit,
Handle<JSObject> filter_obj) {
return Heap::undefined_value();
}
static MaybeObject* Info(int start_idx, int dump_limit) {
return Heap::undefined_value();
}
static MaybeObject* Summarize(int id1,
int id2,
Handle<JSObject> filter_obj) {
return Heap::undefined_value();
}
static void Reset() {}
static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
static Object* GetObjId(Handle<String> address) {
return Heap::undefined_value();
}
static MaybeObject* GetObjRetainers(int obj_id,
Handle<JSObject> instance_filter,
bool verbose,
int start,
int count,
Handle<JSObject> filter_obj) {
return Heap::undefined_value();
}
static Object* GetPath(int obj_id1,
int obj_id2,
Handle<JSObject> instance_filter) {
return Heap::undefined_value();
}
static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
};
#else // !LIVE_OBJECT_LIST
class LiveObjectList {
public:
static void GCEpilogue() {}
static void GCPrologue() {}
static void IterateElements(ObjectVisitor* v) {}
static void ProcessNonLive(HeapObject *obj) {}
static void UpdateReferencesForScavengeGC() {}
};
#endif // LIVE_OBJECT_LIST
} } // namespace v8::internal
#endif // V8_LIVEOBJECTLIST_H_

62
deps/v8/src/messages.js

@ -90,18 +90,12 @@ function FormatString(format, args) {
}
function ToDetailString(obj) {
if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
var constructor = obj.constructor;
if (!constructor) return ToString(obj);
var constructorName = constructor.name;
if (!constructorName) return ToString(obj);
return "#<" + GetInstanceName(constructorName) + ">";
} else if (obj instanceof $Error) {
// When formatting internally created error messages, do not
// invoke overwritten error toString methods but explicitly use
// the error to string method. This is to avoid leaking error
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
if (obj instanceof $Error) {
return %_CallFunction(obj, errorToString);
} else {
return ToString(obj);
@ -109,6 +103,19 @@ function ToDetailString(obj) {
}
function ToDetailString(obj) {
if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
var constructor = obj.constructor;
if (!constructor) return ToStringCheckErrorObject(obj);
var constructorName = constructor.name;
if (!constructorName) return ToStringCheckErrorObject(obj);
return "#<" + GetInstanceName(constructorName) + ">";
} else {
return ToStringCheckErrorObject(obj);
}
}
function MakeGenericError(constructor, type, args) {
if (IS_UNDEFINED(args)) {
args = [];
@ -202,7 +209,13 @@ function FormatMessage(message) {
array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
object_not_extensible: "Can't add property %0, object is not extensible",
illegal_access: "Illegal access",
invalid_preparser_data: "Invalid preparser data for function %0"
invalid_preparser_data: "Invalid preparser data for function %0",
strict_mode_with: "Strict mode code may not include a with statement",
strict_catch_variable: "Catch variable may not be eval or arguments in strict mode",
strict_param_name: "Parameter name eval or arguments is not allowed in strict mode",
strict_param_dupe: "Strict mode function may not have duplicate parameter names",
strict_var_name: "Variable name may not be eval or arguments in strict mode",
strict_function_name: "Function name may not be eval or arguments in strict mode",
};
}
var format = kMessages[message.type];
@ -1006,19 +1019,44 @@ $Error.captureStackTrace = captureStackTrace;
// Setup extra properties of the Error.prototype object.
$Error.prototype.message = '';
function errorToString() {
// Global list of error objects visited during errorToString. This is
// used to detect cycles in error toString formatting.
var visited_errors = new $Array();
var cyclic_error_marker = new $Object();
function errorToStringDetectCycle() {
if (!%PushIfAbsent(visited_errors, this)) throw cyclic_error_marker;
try {
var type = this.type;
if (type && !this.hasOwnProperty("message")) {
return this.name + ": " + FormatMessage({ type: type, args: this.arguments });
var formatted = FormatMessage({ type: type, args: this.arguments });
return this.name + ": " + formatted;
}
var message = this.hasOwnProperty("message") ? (": " + this.message) : "";
return this.name + message;
} finally {
visited_errors.pop();
}
}
function errorToString() {
// This helper function is needed because access to properties on
// the builtins object does not work inside of a catch clause.
function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
try {
return %_CallFunction(this, errorToStringDetectCycle);
} catch(e) {
// If this error message was encountered already return the empty
// string for it instead of recursively formatting it.
if (isCyclicErrorMarker(e)) return '';
else throw e;
}
}
%FunctionSetName(errorToString, 'toString');
%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);
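(Editorial aside: the cycle detection added above can be modeled in a few lines of C++; this is an analogue with hypothetical types, not the JS implementation. Formatting pushes the current error onto a visited list, returns the empty string when it meets an error twice, and pops on the way out, mirroring the try/finally.)

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct Err {
  std::string name;
  const Err* cause;  // May point back into the chain, forming a cycle.
};

static std::vector<const Err*> visited;  // Plays the role of visited_errors.

static std::string ToString(const Err* e) {
  for (size_t i = 0; i < visited.size(); ++i) {
    if (visited[i] == e) return "";  // Already being formatted: cut the cycle.
  }
  visited.push_back(e);
  std::string result = e->name;
  if (e->cause != NULL) result += ": " + ToString(e->cause);
  visited.pop_back();  // The "finally" in the JS version.
  return result;
}

int main() {
  Err a = { "A", NULL };
  Err b = { "B", &a };
  a.cause = &b;  // A -> B -> A: a cycle.
  assert(ToString(&a) == "A: B: ");
  return 0;
}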
// Boilerplate for exceptions for stack overflows. Used from
// Top::StackOverflow().
const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);

12
deps/v8/src/mips/ic-mips.cc

@ -172,23 +172,11 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}

14
deps/v8/src/mips/stub-cache-mips.cc

@ -397,6 +397,20 @@ Object* ConstructStubCompiler::CompileConstructStub(
}
Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
#undef __
} }  // namespace v8::internal

6
deps/v8/src/objects-inl.h

@ -3079,12 +3079,6 @@ bool SharedFunctionInfo::HasBuiltinFunctionId() {
}
bool SharedFunctionInfo::IsBuiltinMathFunction() {
return HasBuiltinFunctionId() &&
builtin_function_id() >= kFirstMathFunctionId;
}
BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
ASSERT(HasBuiltinFunctionId());
return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());

1
deps/v8/src/objects-printer.cc

@ -395,6 +395,7 @@ static const char* TypeToString(InstanceType type) {
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
case PROXY_TYPE: return "PROXY";
case LAST_STRING_TYPE: return "LAST_STRING_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE

13
deps/v8/src/objects.h

@ -455,6 +455,7 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
// String types.
// FIRST_STRING_TYPE
SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
@ -471,6 +472,7 @@ enum InstanceType {
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
// LAST_STRING_TYPE
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
@ -523,7 +525,8 @@ enum InstanceType {
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE
JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE
JS_FUNCTION_TYPE,
@ -532,6 +535,8 @@ enum InstanceType {
LAST_TYPE = JS_FUNCTION_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
FIRST_STRING_TYPE = FIRST_TYPE,
LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE,
@ -541,7 +546,10 @@ enum InstanceType {
// function objects are not counted as objects, even though they are
// implemented as such; only values whose typeof is "object" are included.
FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE
LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE,
// RegExp objects have [[Class]] "function" because they are callable.
// All types from this type and above are objects with [[Class]] "function".
FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE
};
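(Editorial illustration with hypothetical enum values, not from the diff: because JS_REGEXP_TYPE and JS_FUNCTION_TYPE sit consecutively at the top of the enum, the new FIRST_FUNCTION_CLASS_TYPE boundary turns a [[Class]] "function" test into a single comparison.)

#include <cassert>

enum InstanceType {  // Hypothetical values; only the ordering matters.
  JS_ARRAY = 10,
  JS_REGEXP = 11,
  JS_FUNCTION = 12,
  FIRST_FUNCTION_CLASS = JS_REGEXP
};

static bool HasFunctionClass(InstanceType type) {
  return type >= FIRST_FUNCTION_CLASS;
}

int main() {
  assert(!HasFunctionClass(JS_ARRAY));
  assert(HasFunctionClass(JS_REGEXP));  // Callable, hence "function".
  assert(HasFunctionClass(JS_FUNCTION));
  return 0;
}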
@ -4066,7 +4074,6 @@ class SharedFunctionInfo: public HeapObject {
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
inline bool HasBuiltinFunctionId();
inline bool IsBuiltinMathFunction();
inline BuiltinFunctionId builtin_function_id();
// [script info]: Script from which the function originates.

100
deps/v8/src/parser.cc

@ -283,6 +283,11 @@ class TemporaryScope BASE_EMBEDDED {
void AddLoop() { loop_count_++; }
bool ContainsLoops() const { return loop_count_ > 0; }
bool StrictMode() { return strict_mode_; }
void EnableStrictMode() {
strict_mode_ = FLAG_strict_mode;
}
private:
// Captures the number of literals that need materialization in the
// function. Includes regexp literals, and boilerplate for object
@ -300,6 +305,9 @@ class TemporaryScope BASE_EMBEDDED {
// Captures the number of loops inside the scope. // Captures the number of loops inside the scope.
int loop_count_; int loop_count_;
// Parsing strict mode code.
bool strict_mode_;
  // Bookkeeping
  TemporaryScope** variable_;
  TemporaryScope* parent_;
@@ -314,6 +322,8 @@ TemporaryScope::TemporaryScope(TemporaryScope** variable)
      loop_count_(0),
      variable_(variable),
      parent_(*variable) {
// Inherit the strict mode from the parent scope.
strict_mode_ = (parent_ != NULL) && parent_->strict_mode_;
  *variable = this;
}
@@ -561,7 +571,6 @@ class LexicalScope BASE_EMBEDDED {
  int prev_level_;
};

// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
@@ -669,7 +678,8 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
        0,
        source->length(),
        false,
        temp_scope.ContainsLoops(),
temp_scope.StrictMode());
  } else if (stack_overflow_) {
    Top::StackOverflow();
  }
@@ -1075,9 +1085,46 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
  ASSERT(processor != NULL);
  InitializationBlockFinder block_finder;
  ThisNamedPropertyAssigmentFinder this_property_assignment_finder;
  bool directive_prologue = true;  // Parsing directive prologue.

  while (peek() != end_token) {
if (directive_prologue && peek() != Token::STRING) {
directive_prologue = false;
}
Scanner::Location token_loc = scanner().peek_location();
    Statement* stat = ParseStatement(NULL, CHECK_OK);
if (stat == NULL || stat->IsEmpty()) {
directive_prologue = false; // End of directive prologue.
continue;
}
if (directive_prologue) {
// A shot at a directive.
ExpressionStatement *e_stat;
Literal *literal;
// Still processing directive prologue?
if ((e_stat = stat->AsExpressionStatement()) != NULL &&
(literal = e_stat->expression()->AsLiteral()) != NULL &&
literal->handle()->IsString()) {
Handle<String> directive = Handle<String>::cast(literal->handle());
// Check "use strict" directive (ES5 14.1).
if (!temp_scope_->StrictMode() &&
directive->Equals(Heap::use_strict()) &&
token_loc.end_pos - token_loc.beg_pos ==
Heap::use_strict()->length() + 2) {
temp_scope_->EnableStrictMode();
// "use strict" is the only directive for now.
directive_prologue = false;
}
} else {
// End of the directive prologue.
directive_prologue = false;
}
}
    // We find and mark the initialization blocks on top level code only.
    // This is because the optimization prevents reuse of the map transitions,
    // so it should be used only for code that will only be run once.
@@ -1431,6 +1478,10 @@ Block* Parser::ParseVariableStatement(bool* ok) {
  return result;
}
static bool IsEvalOrArguments(Handle<String> string) {
return string.is_identical_to(Factory::eval_symbol()) ||
string.is_identical_to(Factory::arguments_symbol());
}
// If the variable declaration declares exactly one non-const
// variable, then *var is set to that variable. In all other cases,
@@ -1479,6 +1530,13 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
    Handle<String> name = ParseIdentifier(CHECK_OK);
    if (fni_ != NULL) fni_->PushVariableName(name);
// Strict mode variables may not be named eval or arguments
if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
ReportMessage("strict_var_name", Vector<const char*>::empty());
*ok = false;
return NULL;
}
    // Declare variable.
    // Note that we *always* must treat the initial value via a separate init
    // assignment for variables and constants because the value must be assigned
@@ -1839,6 +1897,13 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
  // 'with' '(' Expression ')' Statement

  Expect(Token::WITH, CHECK_OK);
if (temp_scope_->StrictMode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
*ok = false;
return NULL;
}
  Expect(Token::LPAREN, CHECK_OK);
  Expression* expr = ParseExpression(true, CHECK_OK);
  Expect(Token::RPAREN, CHECK_OK);
@@ -1971,6 +2036,13 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
    Expect(Token::LPAREN, CHECK_OK);
    Handle<String> name = ParseIdentifier(CHECK_OK);
if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
ReportMessage("strict_catch_variable", Vector<const char*>::empty());
*ok = false;
return NULL;
}
    Expect(Token::RPAREN, CHECK_OK);

    if (peek() == Token::LBRACE) {
@@ -3224,11 +3296,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
  //  '(' (Identifier)*[','] ')'
  Expect(Token::LPAREN, CHECK_OK);
  int start_pos = scanner().location().beg_pos;

  bool done = (peek() == Token::RPAREN);
  while (!done) {
    Handle<String> param_name = ParseIdentifier(CHECK_OK);
    Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
    top_scope_->AddParameter(parameter);
    num_parameters++;
    done = (peek() == Token::RPAREN);
    if (!done) Expect(Token::COMMA, CHECK_OK);
// Validate strict mode.
if (temp_scope_->StrictMode()) {
if (IsEvalOrArguments(name)) {
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
: (start_pos > 0 ? start_pos - 1 : start_pos);
ReportMessageAt(Scanner::Location(position, start_pos),
"strict_function_name", Vector<const char*>::empty());
*ok = false;
return NULL;
}
// TODO(mmaly): Check for octal escape sequence here.
}
  FunctionLiteral* function_literal =
      new FunctionLiteral(name,
                          top_scope_,
@@ -3312,7 +3399,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
                          start_pos,
                          end_pos,
                          function_name->length() > 0,
                          temp_scope.ContainsLoops(),
                          temp_scope.StrictMode());
  function_literal->set_function_token_position(function_token_position);
  if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
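The directive check earlier in this file compares the token's source span against Heap::use_strict()->length() + 2 (the two quote characters), so an escaped spelling such as "use\x20strict" occupies more source characters and does not enable strict mode. A standalone sketch of that length test, with hypothetical names outside the parser:

  #include <cstring>

  // Hypothetical helper: a string literal token is the "use strict"
  // directive only if its raw source span is exactly the directive text
  // plus two quotes, i.e. it contains no escapes or line continuations.
  bool IsUseStrictDirective(const char* token_start, int beg_pos, int end_pos) {
    static const char kDirective[] = "use strict";
    int directive_length = static_cast<int>(strlen(kDirective));
    return (end_pos - beg_pos) == directive_length + 2 &&
           strncmp(token_start + 1, kDirective, directive_length) == 0;
  }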

14
deps/v8/src/platform-freebsd.cc

@@ -215,6 +215,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
@@ -222,6 +223,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");

14
deps/v8/src/platform-linux.cc

@@ -318,6 +318,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
@@ -325,6 +326,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");

14
deps/v8/src/platform-macos.cc

@@ -196,6 +196,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
@@ -203,6 +204,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");

6
deps/v8/src/platform-nullos.cc

@@ -242,6 +242,12 @@ void OS::DebugBreak() {
}
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
UNIMPLEMENTED();
return NULL;
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  UNIMPLEMENTED();

14
deps/v8/src/platform-openbsd.cc

@@ -213,6 +213,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
@@ -220,6 +221,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");

5
deps/v8/src/platform-posix.cc

@@ -122,6 +122,11 @@ FILE* OS::FOpen(const char* path, const char* mode) {
}
bool OS::Remove(const char* path) {
return (remove(path) == 0);
}
const char* OS::LogFileOpenMode = "w";

14
deps/v8/src/platform-solaris.cc

@@ -226,6 +226,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
@@ -233,6 +234,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");

38
deps/v8/src/platform-win32.cc

@@ -670,6 +670,11 @@ FILE* OS::FOpen(const char* path, const char* mode) {
}
bool OS::Remove(const char* path) {
return (DeleteFileA(path) != 0);
}
// Open log file in binary mode to avoid \n -> \r\n conversion.
const char* OS::LogFileOpenMode = "wb";

@@ -911,17 +916,44 @@ void OS::DebugBreak() {
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
 public:
  Win32MemoryMappedFile(HANDLE file,
                        HANDLE file_mapping,
                        void* memory,
                        int size)
      : file_(file),
        file_mapping_(file_mapping),
        memory_(memory),
        size_(size) { }
  virtual ~Win32MemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  HANDLE file_;
  HANDLE file_mapping_;
  void* memory_;
  int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
if (file == NULL) return NULL;
int size = static_cast<int>(GetFileSize(file, NULL));
// Create a file mapping for the physical file
HANDLE file_mapping = CreateFileMapping(file, NULL,
PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
if (file_mapping == NULL) return NULL;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  // Open a physical file
@@ -935,7 +967,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
  // Map a view of the file into memory
  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
  if (memory) memmove(memory, initial, size);
  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}

3
deps/v8/src/platform.h

@@ -174,6 +174,7 @@ class OS {
  static int GetLastError();

  static FILE* FOpen(const char* path, const char* mode);
  static bool Remove(const char* path);

  // Log file open mode is platform-dependent due to line ends issues.
  static const char* LogFileOpenMode;
@@ -251,9 +252,11 @@ class OS {

class MemoryMappedFile {
 public:
  static MemoryMappedFile* open(const char* name);
  static MemoryMappedFile* create(const char* name, int size, void* initial);
  virtual ~MemoryMappedFile() { }
  virtual void* memory() = 0;
  virtual int size() = 0;
};

// Safe formatting print. Ensures that str is always null-terminated.
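A minimal usage sketch for the new open()/size() pair (illustrative, not code from the tree); open() returns NULL when the file cannot be opened or mapped, so callers must check before touching the mapping:

  void DumpMappedFileInfo(const char* name) {
    OS::MemoryMappedFile* file = OS::MemoryMappedFile::open(name);
    if (file == NULL) return;  // could not open or map the file
    OS::Print("%s: %d bytes mapped at %p\n",
              name, file->size(), file->memory());
    delete file;  // the platform-specific destructor unmaps and closes
  }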

5
deps/v8/src/scopes.cc

@@ -726,6 +726,7 @@ void Scope::ResolveVariable(Scope* global_scope,
      // Note that we must do a lookup anyway, because if we find one,
      // we must mark that variable as potentially accessed from this
      // inner scope (the property may not be in the 'with' object).
      if (var != NULL) var->set_is_used(true);
      var = NonLocal(proxy->name(), Variable::DYNAMIC);

    } else {
@@ -833,8 +834,8 @@ bool Scope::MustAllocate(Variable* var) {
  // visible name.
  if ((var->is_this() || var->name()->length() > 0) &&
      (var->is_accessed_from_inner_scope() ||
       scope_calls_eval_ ||
       inner_scope_calls_eval_)) {
    var->set_is_used(true);
  }

  // Global variables do not need to be allocated.

80
deps/v8/src/stub-cache.cc

@@ -507,6 +507,74 @@ MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
}
namespace {
ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
switch (kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return kExternalByteArray;
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return kExternalUnsignedByteArray;
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return kExternalShortArray;
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return kExternalUnsignedShortArray;
case JSObject::EXTERNAL_INT_ELEMENTS:
return kExternalIntArray;
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return kExternalUnsignedIntArray;
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return kExternalFloatArray;
default:
UNREACHABLE();
return static_cast<ExternalArrayType>(0);
}
}
} // anonymous namespace
MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
JSObject* receiver,
bool is_store) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(
is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC,
NORMAL);
ExternalArrayType array_type =
ElementsKindToExternalArrayType(receiver->GetElementsKind());
String* name =
is_store ? Heap::KeyedStoreExternalArray_symbol()
: Heap::KeyedLoadExternalArray_symbol();
// Use the global maps for the particular external array types,
// rather than the receiver's map, when looking up the cached code,
// so that we actually canonicalize these stubs.
Map* map = Heap::MapForExternalArrayType(array_type);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
ExternalArrayStubCompiler compiler;
{ MaybeObject* maybe_code =
is_store ? compiler.CompileKeyedStoreStub(array_type, flags) :
compiler.CompileKeyedLoadStub(array_type, flags);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
if (is_store) {
PROFILE(
CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
} else {
PROFILE(
CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
}
Object* result;
{ MaybeObject* maybe_result =
map->UpdateCodeCache(name, Code::cast(code));
if (!maybe_result->ToObject(&result)) return maybe_result;
}
}
return code;
}
MaybeObject* StubCache::ComputeStoreNormal() {
  return Builtins::builtin(Builtins::StoreIC_Normal);
}
@@ -1709,4 +1777,16 @@ void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
}
MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
Object* result;
{ MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
return result;
}
} }  // namespace v8::internal

18
deps/v8/src/stub-cache.h

@@ -167,6 +167,10 @@ class StubCache : public AllStatic {
  MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
      JSObject* receiver);
MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
JSObject* receiver,
bool is_store);
  // ---

  MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
@@ -797,6 +801,20 @@ class CallOptimization BASE_EMBEDDED {
  CallHandlerInfo* api_call_info_;
};
class ExternalArrayStubCompiler: public StubCompiler {
public:
explicit ExternalArrayStubCompiler() {}
MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags);
private:
MaybeObject* GetCode(Code::Flags flags);
};
} }  // namespace v8::internal

#endif  // V8_STUB_CACHE_H_
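ComputeKeyedLoadOrStoreExternalArray in stub-cache.cc keys its code cache on the canonical map for the element type rather than on the receiver's own map, so every receiver with the same external element kind shares one compiled stub. A sketch of just the probe step, using only names that appear in the diff (the wrapper function itself is hypothetical):

  Object* ProbeCanonicalExternalArrayStub(JSObject* receiver,
                                          String* name,
                                          Code::Flags flags) {
    ExternalArrayType array_type =
        ElementsKindToExternalArrayType(receiver->GetElementsKind());
    // One canonical map per external array type -> at most one cached
    // stub per type, regardless of how many receiver maps exist.
    Map* canonical_map = Heap::MapForExternalArrayType(array_type);
    return canonical_map->FindInCodeCache(name, flags);  // Code or undefined
  }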

92
deps/v8/src/utils.cc

@@ -276,4 +276,96 @@ char* StringBuilder::Finalize() {
}
MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
: filename_(NULL),
data_(NULL),
length_(0),
remove_file_on_cleanup_(false) {
Init(filename);
}
MemoryMappedExternalResource::
MemoryMappedExternalResource(const char* filename,
bool remove_file_on_cleanup)
: filename_(NULL),
data_(NULL),
length_(0),
remove_file_on_cleanup_(remove_file_on_cleanup) {
Init(filename);
}
MemoryMappedExternalResource::~MemoryMappedExternalResource() {
// Release the resources if we had successfully acquired them:
if (file_ != NULL) {
delete file_;
if (remove_file_on_cleanup_) {
OS::Remove(filename_);
}
DeleteArray<char>(filename_);
}
}
void MemoryMappedExternalResource::Init(const char* filename) {
file_ = OS::MemoryMappedFile::open(filename);
if (file_ != NULL) {
filename_ = StrDup(filename);
data_ = reinterpret_cast<char*>(file_->memory());
length_ = file_->size();
}
}
bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
bool is_ascii = true;
int line_no = 1;
const char* start_of_line = data_;
const char* end = data_ + length_;
for (const char* p = data_; p < end; p++) {
char c = *p;
if ((c & 0x80) != 0) {
// Non-ascii detected:
is_ascii = false;
// Report the error and abort if appropriate:
if (abort_if_failed) {
int char_no = static_cast<int>(p - start_of_line) - 1;
ASSERT(filename_ != NULL);
PrintF("\n\n\n"
"Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
c, filename_, line_no, char_no);
// Allow for some context up to kNumberOfLeadingContextChars chars
// before the offending non-ascii char to help the user see where
// the offending char is.
const int kNumberOfLeadingContextChars = 10;
const char* err_context = p - kNumberOfLeadingContextChars;
if (err_context < data_) {
err_context = data_;
}
// Compute the length of the error context and print it.
int err_context_length = static_cast<int>(p - err_context);
if (err_context_length != 0) {
PrintF(" after \"%.*s\"", err_context_length, err_context);
}
PrintF(".\n\n\n");
OS::Abort();
}
break; // Non-ascii detected. No need to continue scanning.
}
if (c == '\n') {
start_of_line = p;
line_no++;
}
}
return is_ascii;
}
} }  // namespace v8::internal

33
deps/v8/src/v8utils.h

@@ -316,6 +316,39 @@ static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
  }
}
// A resource for using mmapped files to back external strings that are read
// from files.
class MemoryMappedExternalResource: public
v8::String::ExternalAsciiStringResource {
public:
explicit MemoryMappedExternalResource(const char* filename);
MemoryMappedExternalResource(const char* filename,
bool remove_file_on_cleanup);
virtual ~MemoryMappedExternalResource();
virtual const char* data() const { return data_; }
virtual size_t length() const { return length_; }
bool exists() const { return file_ != NULL; }
bool is_empty() const { return length_ == 0; }
bool EnsureIsAscii(bool abort_if_failed) const;
bool EnsureIsAscii() const { return EnsureIsAscii(true); }
bool IsAscii() const { return EnsureIsAscii(false); }
private:
void Init(const char* filename);
char* filename_;
OS::MemoryMappedFile* file_;
const char* data_;
size_t length_;
bool remove_file_on_cleanup_;
};
} }  // namespace v8::internal

#endif  // V8_V8UTILS_H_
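A hedged usage sketch for the class above: backing an external ASCII string with an mmapped script file. The path is a placeholder, and a real caller needs an active HandleScope:

  void ExternalizeScriptFile() {
    MemoryMappedExternalResource* resource =
        new MemoryMappedExternalResource("script.js" /* placeholder path */);
    if (!resource->exists() || !resource->IsAscii()) {
      delete resource;  // nothing was mapped, or the data is not ASCII
      return;
    }
    // NewExternal takes ownership of the resource; the file stays mapped
    // for the lifetime of the string.
    v8::Local<v8::String> script = v8::String::NewExternal(resource);
    USE(script);
  }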

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 0
#define BUILD_NUMBER 10
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

11
deps/v8/src/x64/assembler-x64-inl.h

@@ -199,8 +199,10 @@ void RelocInfo::apply(intptr_t delta) {
  if (IsInternalReference(rmode_)) {
    // absolute code pointer inside code object moves with the code object.
    Memory::Address_at(pc_) += static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (IsCodeTarget(rmode_)) {
    Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(int32_t));
  }
}
@@ -236,6 +238,7 @@ void RelocInfo::set_target_address(Address target) {
    Assembler::set_target_address_at(pc_, target);
  } else {
    Memory::Address_at(pc_) = target;
    CPU::FlushICache(pc_, sizeof(Address));
  }
}
@@ -271,6 +274,7 @@ Address* RelocInfo::target_reference_address() {
void RelocInfo::set_target_object(Object* target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  *reinterpret_cast<Object**>(pc_) = target;
  CPU::FlushICache(pc_, sizeof(Address));
}
@@ -295,6 +299,7 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
  Memory::Address_at(pc_) = address;
  CPU::FlushICache(pc_, sizeof(Address));
}
@@ -331,6 +336,8 @@ void RelocInfo::set_call_address(Address target) {
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
      target;
  CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                   sizeof(Address));
}
@@ -356,10 +363,12 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitPointer(target_object_address());
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(target_reference_address());
    CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (Debug::has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
@@ -379,10 +388,12 @@ void RelocInfo::Visit() {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitPointer(target_object_address());
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(target_reference_address());
    CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (Debug::has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&

22
deps/v8/src/x64/assembler-x64.cc

@@ -2721,6 +2721,17 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
}
void Assembler::cvttss2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2C);
emit_sse_operand(dst, src);
}
void Assembler::cvttsd2si(Register dst, const Operand& src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
@@ -2732,6 +2743,17 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
}
void Assembler::cvttsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2C);
emit_sse_operand(dst, src);
}
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;

6
deps/v8/src/x64/assembler-x64.h

@@ -707,6 +707,10 @@ class Assembler : public Malloced {
    arithmetic_op_32(0x1b, dst, src);
  }

  void sbbq(Register dst, Register src) {
    arithmetic_op(0x1b, dst, src);
  }

  void cmpb(Register dst, Immediate src) {
    immediate_arithmetic_op_8(0x7, dst, src);
  }
@@ -1205,7 +1209,9 @@ class Assembler : public Malloced {
  void movss(const Operand& dst, XMMRegister src);

  void cvttss2si(Register dst, const Operand& src);
  void cvttss2si(Register dst, XMMRegister src);
  void cvttsd2si(Register dst, const Operand& src);
  void cvttsd2si(Register dst, XMMRegister src);
  void cvttsd2siq(Register dst, XMMRegister src);

  void cvtlsi2sd(XMMRegister dst, const Operand& src);
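The new overloads take the truncation source directly from an XMM register instead of a memory operand. A hypothetical emitter fragment showing the difference (illustrative, not code from the tree):

  void EmitTruncations(Assembler* masm, const Operand& mem_src) {
    masm->cvttsd2si(rax, mem_src);  // old form: double loaded from memory
    masm->cvttsd2si(rax, xmm0);     // new form: double already in xmm0
    masm->cvttss2si(rcx, xmm1);     // likewise for single precision
  }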

118
deps/v8/src/x64/code-stubs-x64.cc

@@ -91,8 +91,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
                        rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function from the stack.
@@ -101,7 +100,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Setup the object header.
  __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(slots_));

  // Setup the fixed slots.
  __ Set(rbx, 0);  // Set to NULL.
@@ -116,7 +115,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Initialize the rest of the slots to undefined.
  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
    __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
  }
@@ -3248,6 +3247,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}

Register InstanceofStub::left() { return rax; }

Register InstanceofStub::right() { return rdx; }

int CompareStub::MinorKey() {
  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
  // stubs the never NaN NaN condition is only taken into account if the
@@ -4272,22 +4277,119 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}

void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
NearLabel miss;
__ JumpIfNotBothSmi(rdx, rax, &miss);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
__ SmiSub(rax, rax, rdx);
} else {
NearLabel done;
__ SmiSub(rdx, rdx, rax);
__ j(no_overflow, &done);
// Correct sign of result in case of overflow.
__ SmiNot(rdx, rdx);
__ bind(&done);
__ movq(rax, rdx);
}
__ ret(0);
__ bind(&miss);
GenerateMiss(masm);
}

void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
NearLabel generic_stub;
NearLabel unordered;
NearLabel miss;
Condition either_smi = masm->CheckEitherSmi(rax, rdx);
__ j(either_smi, &generic_stub);
__ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &miss);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &miss);
// Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
// Compare operands
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered);
// Return a result of -1, 0, or 1, based on EFLAGS.
// Performing mov, because xor would destroy the flag register.
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
__ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
__ bind(&unordered);
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&miss);
GenerateMiss(masm);
}

void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
NearLabel miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss);
__ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &miss, not_taken);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &miss, not_taken);
ASSERT(GetCondition() == equal);
__ subq(rax, rdx);
__ ret(0);
__ bind(&miss);
GenerateMiss(masm);
}

void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  // Save the registers.
__ pop(rcx);
__ push(rdx);
__ push(rax);
__ push(rcx);
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
__ EnterInternalFrame();
__ push(rdx);
__ push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
__ LeaveInternalFrame();
// Compute the entry point of the rewritten stub.
__ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
// Restore registers.
__ pop(rcx);
__ pop(rax);
__ pop(rdx);
__ push(rcx);
// Do a tail call to the rewritten stub.
__ jmp(rdi);
}

#undef __
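The setcc/sbbq sequence in GenerateHeapNumbers composes -1, 0, or +1 out of the flags set by ucomisd without branching. A C++ model of the same arithmetic (illustrative; NaN is already filtered out by the parity check before this point):

  int FlagBasedCompare(double left, double right) {
    int above = (left > right) ? 1 : 0;   // what setcc(above, rax) captures
    int below = (left < right) ? 1 : 0;   // the carry consumed by sbbq
    return above - below;                 // -1 if less, 0 if equal, +1 if greater
  }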

2
deps/v8/src/x64/codegen-x64.cc

@@ -206,7 +206,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
  frame_->AllocateStackSlots();
  // Allocate the local context if needed.
  int heap_slots = scope()->num_heap_slots();
  if (heap_slots > 0) {
    Comment cmnt(masm_, "[ allocate local context");
    // Allocate local context.

8
deps/v8/src/x64/disasm-x64.cc

@@ -1113,9 +1113,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
  } else if (opcode == 0x2C) {
    // CVTTSS2SI:
    // Convert with truncation scalar single-precision FP to dword integer.
    int mod, regop, rm;
    get_modrm(*current, &mod, &regop, &rm);
    AppendToBuffer("cvttss2si%c %s,",
                   operand_size_code(), NameOfCPURegister(regop));
    current += PrintRightXMMOperand(current);
  } else if (opcode == 0x5A) {
    // CVTSS2SD:
    // Convert scalar single-precision FP to scalar double-precision FP.

2
deps/v8/src/x64/full-codegen-x64.cc

@@ -88,7 +88,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
  bool function_in_register = true;

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots();
  if (heap_slots > 0) {
    Comment cmnt(masm_, "[ Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.

268
deps/v8/src/x64/ic-x64.cc

@@ -727,131 +727,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Check that the object is a JS object.
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks. The map is already in rdx.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: index (as a smi)
// rdx: JSObject
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
__ SmiToInteger32(rcx, rax);
__ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// rax: index (as a smi)
// rdx: receiver (JSObject)
// rcx: untagged index
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalShortArray:
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalIntArray:
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalUnsignedIntArray:
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalFloatArray:
__ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// rax: index
// rdx: receiver
// For integer array types:
// rcx: value
// For floating-point array type:
// xmm0: value as double.
ASSERT(kSmiValueSize == 32);
if (array_type == kExternalUnsignedIntArray) {
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
NearLabel box_int;
__ JumpIfUIntNotValidSmiValue(rcx, &box_int);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
// The value is zero-extended since we loaded the value from memory
// with movl.
__ cvtqsi2sd(xmm0, rcx);
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else {
__ Integer32ToSmi(rax, rcx);
__ ret(0);
}
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax     : key
@@ -1023,149 +898,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Get the map from the receiver.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow);
// Check that the object is a JS object.
__ CmpInstanceType(rbx, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
__ SmiToInteger32(rdi, rcx); // Untag the index.
__ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
NearLabel check_heap_number;
__ JumpIfNotSmi(rax, &check_heap_number);
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ cvtlsi2ss(xmm0, rdx);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
break;
default:
UNREACHABLE();
break;
}
__ ret(0);
__ bind(&check_heap_number);
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
__ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
__ j(not_equal, &slow);
// No more branches to slow case on this path.
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rdi: untagged index
// rbx: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ cvtsd2ss(xmm0, xmm0);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
__ ret(0);
} else {
// Need to perform float-to-int conversion.
// Test the value for NaN.
// Convert to int32 and store the low byte/word.
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
// rdx: value (converted to an untagged integer)
// rdi: untagged index
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ cvtsd2si(rdx, xmm0);
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ cvtsd2si(rdx, xmm0);
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
// Convert to int64, so that NaN and infinities become
// 0x8000000000000000, which is zero mod 2^32.
__ cvtsd2siq(rdx, xmm0);
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
}
default:
UNREACHABLE();
break;
}
__ ret(0);
}
// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
}
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,

444
deps/v8/src/x64/lithium-codegen-x64.cc

@@ -930,12 +930,88 @@ int LCodeGen::GetNextEmittedBlock(int block) {
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
right_block = chunk_->LookupDestination(right_block);
left_block = chunk_->LookupDestination(left_block);
if (right_block == left_block) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
} else if (right_block == next_block) {
__ j(cc, chunk_->GetAssemblyLabel(left_block));
} else {
__ j(cc, chunk_->GetAssemblyLabel(left_block));
if (cc != always) {
__ jmp(chunk_->GetAssemblyLabel(right_block));
}
}
}
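EmitBranch exploits block layout: when one successor is the very next block in emission order, the jump to it can be dropped and the code simply falls through. A hypothetical standalone model of the dispatch above:

  void EmitBranchModel(MacroAssembler* masm, Label* left, Label* right,
                       Condition cc, bool left_is_next, bool right_is_next) {
    if (left_is_next) {
      masm->j(NegateCondition(cc), right);  // fall through into the left block
    } else if (right_is_next) {
      masm->j(cc, left);                    // fall through into the right block
    } else {
      masm->j(cc, left);
      masm->jmp(right);                     // neither successor falls through
    }
  }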
void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Representation r = instr->hydrogen()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ testl(reg, reg);
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
__ xorpd(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
HType type = instr->hydrogen()->type();
if (type.IsBoolean()) {
__ Cmp(reg, Factory::true_value());
EmitBranch(true_block, false_block, equal);
} else if (type.IsSmi()) {
__ SmiCompare(reg, Smi::FromInt(0));
EmitBranch(true_block, false_block, not_equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, false_label);
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ j(equal, true_label);
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ j(equal, false_label);
__ SmiCompare(reg, Smi::FromInt(0));
__ j(equal, false_label);
__ JumpIfSmi(reg, true_label);
// Test for double values. Plus/minus zero and NaN are false.
NearLabel call_stub;
__ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_stub);
// HeapNumber => false iff +0, -0, or NaN. These three cases set the
// zero flag when compared to zero using ucomisd.
__ xorpd(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, false_label);
__ jmp(true_label);
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
ToBooleanStub stub;
__ Pushad();
__ push(reg);
__ CallStub(&stub);
__ testq(rax, rax);
__ Popad();
EmitBranch(true_block, false_block, not_zero);
}
}
}

@@ -979,7 +1055,7 @@ void LCodeGen::DoGoto(LGoto* instr) {
}

inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
@@ -1008,17 +1084,64 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
  if (right->IsConstantOperand()) {
int32_t value = ToInteger32(LConstantOperand::cast(right));
if (left->IsRegister()) {
__ cmpl(ToRegister(left), Immediate(value));
} else {
__ cmpl(ToOperand(left), Immediate(value));
}
} else if (right->IsRegister()) {
__ cmpq(ToRegister(left), ToRegister(right));
} else {
__ cmpq(ToRegister(left), ToOperand(right));
}
}

void LCodeGen::DoCmpID(LCmpID* instr) {
  LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
NearLabel unordered;
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the unordered case, which produces a false value.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, &unordered);
} else {
EmitCmpI(left, right);
}
NearLabel done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
__ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
__ j(cc, &done);
__ bind(&unordered);
__ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
__ bind(&done);
}

void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
} else {
EmitCmpI(left, right);
}
Condition cc = TokenToCondition(instr->op(), instr->is_double());
EmitBranch(true_block, false_block, cc);
}

@@ -1028,7 +1151,13 @@ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
__ cmpq(left, right);
EmitBranch(true_block, false_block, equal);
}

@@ -1038,7 +1167,39 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
int false_block = chunk_->LookupDestination(instr->false_block_id());
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
    // If the expression is known to be untagged or a smi, then it is
    // definitely not null and cannot be an undetectable object.
// Jump directly to the false block.
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
__ Cmp(reg, Factory::null_value());
if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
__ Cmp(reg, Factory::undefined_value());
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = ToRegister(instr->TempAt(0));
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
EmitBranch(true_block, false_block, not_zero);
}
} }
@@ -1047,7 +1208,25 @@ Condition LCodeGen::EmitIsObject(Register input,
                                  Register temp2,
                                  Label* is_not_object,
                                  Label* is_object) {
-  Abort("Unimplemented: %s", "EmitIsObject");
+  ASSERT(!input.is(temp1));
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp1.is(temp2));
+
+  __ JumpIfSmi(input, is_not_object);
+
+  __ Cmp(input, Factory::null_value());
+  __ j(equal, is_object);
+
+  __ movq(temp1, FieldOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ testb(FieldOperand(temp1, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, is_not_object);
+
+  __ movzxbl(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
+  __ cmpb(temp2, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ j(below, is_not_object);
+  __ cmpb(temp2, Immediate(LAST_JS_OBJECT_TYPE));
   return below_equal;
 }
@@ -1058,7 +1237,18 @@ void LCodeGen::DoIsObject(LIsObject* instr) {

 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoIsObjectAndBranch");
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+
+  EmitBranch(true_block, false_block, true_cond);
 }
@@ -1068,7 +1258,38 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {

 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoIsSmiAndBranch");
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Condition is_smi;
+  if (instr->InputAt(0)->IsRegister()) {
+    Register input = ToRegister(instr->InputAt(0));
+    is_smi = masm()->CheckSmi(input);
+  } else {
+    Operand input = ToOperand(instr->InputAt(0));
+    is_smi = masm()->CheckSmi(input);
+  }
+  EmitBranch(true_block, false_block, is_smi);
+}
+
+
+static InstanceType TestType(HHasInstanceType* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceType* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
 }
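
The two helpers above collapse an inclusive instance-type interval into a single unsigned compare against one bound. A minimal standalone sketch of that mapping (the enum bounds are stand-ins for V8's FIRST_TYPE/LAST_TYPE, not values taken from this patch):

    #include <cassert>
    #include <cstdio>

    enum Cond { kEqual, kAboveEqual, kBelowEqual };

    const int kFirstType = 0x00;  // stand-in for FIRST_TYPE
    const int kLastType = 0xff;   // stand-in for LAST_TYPE

    // Mirrors TestType/BranchCondition: a check for type in [from, to]
    // becomes one compare against a single bound plus one condition.
    static void Lower(int from, int to, int* bound, Cond* cond) {
      if (from == to) { *bound = from; *cond = kEqual; return; }          // [t, t]
      if (to == kLastType) { *bound = from; *cond = kAboveEqual; return; }  // [t, LAST]
      assert(from == kFirstType);                                         // [FIRST, t]
      *bound = to;
      *cond = kBelowEqual;
    }

    int main() {
      int bound; Cond cond;
      Lower(0x10, kLastType, &bound, &cond);
      printf("cmp type, %#x; j above_equal\n", bound);  // i.e. type >= 0x10
      return 0;
    }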
@@ -1078,7 +1299,17 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {

 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoHasInstanceTypeAndBranch");
+  Register input = ToRegister(instr->InputAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ JumpIfSmi(input, false_label);
+
+  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
+  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
 }
@@ -1089,19 +1320,68 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {

 void LCodeGen::DoHasCachedArrayIndexAndBranch(
     LHasCachedArrayIndexAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoHasCachedArrayIndexAndBranch");
+  Register input = ToRegister(instr->InputAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ testl(FieldOperand(input, String::kHashFieldOffset),
+           Immediate(String::kContainsCachedArrayIndexMask));
+  EmitBranch(true_block, false_block, not_equal);
 }


-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
+// Branches to a label or falls through with the answer in the z flag.
+// Trashes the temp register and possibly input (if it and temp are aliased).
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                                Handle<String> class_name,
                                Register input,
-                               Register temp,
-                               Register temp2) {
-  Abort("Unimplemented: %s", "EmitClassOfTest");
+                               Register temp) {
+  __ JumpIfSmi(input, is_false);
+  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, is_false);
+
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    __ j(equal, is_true);
+  } else {
+    __ j(equal, is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ j(not_equal, is_true);
+  } else {
+    __ j(not_equal, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(temp, FieldOperand(temp,
+                             SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted. This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax. Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+  ASSERT(class_name->IsSymbol());
+  __ Cmp(temp, class_name);
+  // End with the answer in the z flag.
 }
@@ -1111,7 +1391,19 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {

 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoClassOfTestAndBranch");
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp);
+
+  EmitBranch(true_block, false_block, equal);
 }
@@ -1126,7 +1418,13 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {

 void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoInstanceOfAndBranch");
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ testq(rax, rax);
+  EmitBranch(true_block, false_block, zero);
 }
@@ -1142,12 +1440,42 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,

 void LCodeGen::DoCmpT(LCmpT* instr) {
-  Abort("Unimplemented: %s", "DoCmpT");
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = TokenToCondition(op, false);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  NearLabel true_value, done;
+  __ testq(rax, rax);
+  __ j(condition, &true_value);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  __ jmp(&done);
+  __ bind(&true_value);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+  __ bind(&done);
 }


 void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoCmpTAndBranch");
+  Token::Value op = instr->op();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  // The compare stub expects compare condition and the input operands
+  // reversed for GT and LTE.
+  Condition condition = TokenToCondition(op, false);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  __ testq(rax, rax);
+  EmitBranch(true_block, false_block, condition);
 }
@@ -1494,7 +1822,18 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {

 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoTypeofIsAndBranch");
+  Register input = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
 }
@@ -1502,8 +1841,63 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                  Label* false_label,
                                  Register input,
                                  Handle<String> type_name) {
-  Abort("Unimplemented: %s", "EmitTypeofIs");
-  return no_condition;
+  Condition final_branch_condition = no_condition;
+  if (type_name->Equals(Heap::number_symbol())) {
+    __ JumpIfSmi(input, true_label);
+    __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::string_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ testb(FieldOperand(input, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    __ j(not_zero, false_label);
+    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
+    final_branch_condition = below;
+
+  } else if (type_name->Equals(Heap::boolean_symbol())) {
+    __ CompareRoot(input, Heap::kTrueValueRootIndex);
+    __ j(equal, true_label);
+    __ CompareRoot(input, Heap::kFalseValueRootIndex);
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::undefined_symbol())) {
+    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+    __ j(equal, true_label);
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ testb(FieldOperand(input, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    final_branch_condition = not_zero;
+
+  } else if (type_name->Equals(Heap::function_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+    final_branch_condition = above_equal;
+
+  } else if (type_name->Equals(Heap::object_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    __ Cmp(input, Factory::null_value());
+    __ j(equal, true_label);
+    // Check for undetectable objects => false.
+    __ testb(FieldOperand(input, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    __ j(not_zero, false_label);
+    // Check for JS objects that are not RegExp or Function => true.
+    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
+    __ j(below, false_label);
+    __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
+    final_branch_condition = below_equal;
+
+  } else {
+    final_branch_condition = never;
+    __ jmp(false_label);
+  }
+
+  return final_branch_condition;
 }

3 deps/v8/src/x64/lithium-codegen-x64.h

@@ -138,8 +138,7 @@ class LCodeGen BASE_EMBEDDED {
                        Label* if_false,
                        Handle<String> class_name,
                        Register input,
-                       Register temporary,
-                       Register temporary2);
+                       Register temporary);

   int StackSlotCount() const { return chunk()->spill_slot_count(); }
   int ParameterCount() const { return scope()->num_parameters(); }

154 deps/v8/src/x64/lithium-x64.cc

@@ -847,15 +847,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
   if (FLAG_stress_environments && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
-  if (current->IsBranch() && !instr->IsGoto()) {
-    // TODO(fschneider): Handle branch instructions uniformly like
-    // other instructions. This requires us to generate the right
-    // branch instruction already at the HIR level.
+  if (current->IsTest() && !instr->IsGoto()) {
     ASSERT(instr->IsControl());
-    HBranch* branch = HBranch::cast(current);
-    instr->set_hydrogen_value(branch->value());
-    HBasicBlock* first = branch->FirstSuccessor();
-    HBasicBlock* second = branch->SecondSuccessor();
+    HTest* test = HTest::cast(current);
+    instr->set_hydrogen_value(test->value());
+    HBasicBlock* first = test->FirstSuccessor();
+    HBasicBlock* second = test->SecondSuccessor();
     ASSERT(first != NULL && second != NULL);
     instr->SetBranchTargets(first->block_id(), second->block_id());
   } else {
@@ -912,15 +909,109 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
 }


-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  Abort("Unimplemented: %s", "DoBranch");
-  return NULL;
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+  HValue* v = instr->value();
+  if (v->EmitAtUses()) {
+    if (v->IsClassOfTest()) {
+      HClassOfTest* compare = HClassOfTest::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                       TempRegister());
+    } else if (v->IsCompare()) {
+      HCompare* compare = HCompare::cast(v);
+      Token::Value op = compare->token();
+      HValue* left = compare->left();
+      HValue* right = compare->right();
+      Representation r = compare->GetInputRepresentation();
+      if (r.IsInteger32()) {
+        ASSERT(left->representation().IsInteger32());
+        ASSERT(right->representation().IsInteger32());
+        return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                   UseOrConstantAtStart(right));
+      } else if (r.IsDouble()) {
+        ASSERT(left->representation().IsDouble());
+        ASSERT(right->representation().IsDouble());
+        return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                   UseRegisterAtStart(right));
+      } else {
+        ASSERT(left->representation().IsTagged());
+        ASSERT(right->representation().IsTagged());
+        bool reversed = op == Token::GT || op == Token::LTE;
+        LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
+        LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
+        LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
+                                                    right_operand);
+        return MarkAsCall(result, instr);
+      }
+    } else if (v->IsIsSmi()) {
+      HIsSmi* compare = HIsSmi::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+      return new LIsSmiAndBranch(Use(compare->value()));
+    } else if (v->IsHasInstanceType()) {
+      HHasInstanceType* compare = HHasInstanceType::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+      return new LHasInstanceTypeAndBranch(
+          UseRegisterAtStart(compare->value()));
+    } else if (v->IsHasCachedArrayIndex()) {
+      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+      return new LHasCachedArrayIndexAndBranch(
+          UseRegisterAtStart(compare->value()));
+    } else if (v->IsIsNull()) {
+      HIsNull* compare = HIsNull::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+      // We only need a temp register for non-strict compare.
+      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+                                  temp);
+    } else if (v->IsIsObject()) {
+      HIsObject* compare = HIsObject::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
+                                    temp1,
+                                    temp2);
+    } else if (v->IsCompareJSObjectEq()) {
+      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                         UseRegisterAtStart(compare->right()));
+    } else if (v->IsInstanceOf()) {
+      HInstanceOf* instance_of = HInstanceOf::cast(v);
+      LInstanceOfAndBranch* result =
+          new LInstanceOfAndBranch(
+              UseFixed(instance_of->left(), InstanceofStub::left()),
+              UseFixed(instance_of->right(), InstanceofStub::right()));
+      return MarkAsCall(result, instr);
+    } else if (v->IsTypeofIs()) {
+      HTypeofIs* typeof_is = HTypeofIs::cast(v);
+      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+    } else {
+      if (v->IsConstant()) {
+        if (HConstant::cast(v)->handle()->IsTrue()) {
+          return new LGoto(instr->FirstSuccessor()->block_id());
+        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+          return new LGoto(instr->SecondSuccessor()->block_id());
+        }
+      }
+      Abort("Undefined compare before branch");
+      return NULL;
+    }
+  }
+  return new LBranch(UseRegisterAtStart(v));
 }


-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
-    HCompareMapAndBranch* instr) {
-  Abort("Unimplemented: %s", "DoCompareMapAndBranch");
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  Abort("Unimplemented: %s", "DoCompareMap");
   return NULL;
 }
@@ -1124,8 +1215,29 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {

 LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
-  Abort("Unimplemented: %s", "DoCompare");
-  return NULL;
+  Token::Value op = instr->token();
+  Representation r = instr->GetInputRepresentation();
+  if (r.IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(left, right));
+  } else if (r.IsDouble()) {
+    ASSERT(instr->left()->representation().IsDouble());
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(left, right));
+  } else {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+    bool reversed = (op == Token::GT || op == Token::LTE);
+    LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+    LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+    LCmpT* result = new LCmpT(left, right);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
 }
@@ -1349,6 +1461,18 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
 }


+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  Abort("Unimplemented: %s", "DoStringCharCodeAt");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+  Abort("Unimplemented: %s", "DoStringLength");
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
   Abort("Unimplemented: %s", "DoArrayLiteral");
   return NULL;

159 deps/v8/src/x64/lithium-x64.h

@@ -335,33 +335,36 @@ class LInstruction: public ZoneObject {
 };


-template<typename T, int N>
+template<typename ElementType, int NumElements>
 class OperandContainer {
  public:
   OperandContainer() {
-    for (int i = 0; i < N; i++) elems_[i] = NULL;
+    for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
   }
-  int length() { return N; }
-  T& operator[](int i) {
+  int length() { return NumElements; }
+  ElementType& operator[](int i) {
     ASSERT(i < length());
     return elems_[i];
   }
   void PrintOperandsTo(StringStream* stream);

  private:
-  T elems_[N];
+  ElementType elems_[NumElements];
 };


-template<typename T>
-class OperandContainer<T, 0> {
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
  public:
   int length() { return 0; }
   void PrintOperandsTo(StringStream* stream) { }
 };


-template<int R, int I, int T = 0>
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
 class LTemplateInstruction: public LInstruction {
  public:
   // Allow 0 or 1 output operands.
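
The renamed parameters read more clearly, and the <ElementType, 0> specialization is what lets the many instructions below that now spell out an explicit 0 carry no storage for unused operand arrays. A standalone sketch of that effect:

    #include <cstdio>

    template<typename ElementType, int NumElements>
    struct OperandContainer {
      ElementType elems_[NumElements];
    };

    // Zero-length specialization: no array member at all.
    template<typename ElementType>
    struct OperandContainer<ElementType, 0> {};

    int main() {
      // A two-element container costs two pointers; the zero-element one is
      // an empty class (sizeof 1, shareable via the empty-base optimization).
      printf("%zu %zu\n",
             sizeof(OperandContainer<void*, 2>),
             sizeof(OperandContainer<void*, 0>));
      return 0;
    }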
@@ -512,7 +515,7 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
 };


-template<int I, int T = 0>
+template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
   DECLARE_INSTRUCTION(ControlInstruction)
@@ -570,7 +573,7 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
 };


-class LArgumentsLength: public LTemplateInstruction<1, 1> {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArgumentsLength(LOperand* elements) {
     inputs_[0] = elements;
@@ -627,7 +630,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
 };


-class LCmpID: public LTemplateInstruction<1, 2> {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
  public:
   LCmpID(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -644,7 +647,7 @@ class LCmpID: public LTemplateInstruction<1, 2> {
 };


-class LCmpIDAndBranch: public LControlInstruction<2> {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpIDAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -663,7 +666,7 @@ class LCmpIDAndBranch: public LControlInstruction<2> {
 };


-class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LUnaryMathOperation(LOperand* value) {
     inputs_[0] = value;
@@ -677,7 +680,7 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
 };


-class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
  public:
   LCmpJSObjectEq(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -688,7 +691,7 @@ class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
 };


-class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -700,7 +703,7 @@ class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
 };


-class LIsNull: public LTemplateInstruction<1, 1> {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LIsNull(LOperand* value) {
     inputs_[0] = value;
@@ -754,7 +757,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 2> {
 };


-class LIsSmi: public LTemplateInstruction<1, 1> {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LIsSmi(LOperand* value) {
     inputs_[0] = value;
@@ -765,7 +768,7 @@ class LIsSmi: public LTemplateInstruction<1, 1> {
 };


-class LIsSmiAndBranch: public LControlInstruction<1> {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -777,7 +780,7 @@ class LIsSmiAndBranch: public LControlInstruction<1> {
 };


-class LHasInstanceType: public LTemplateInstruction<1, 1> {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LHasInstanceType(LOperand* value) {
     inputs_[0] = value;
@@ -788,11 +791,10 @@ class LHasInstanceType: public LTemplateInstruction<1, 1> {
 };


-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
-  LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
     inputs_[0] = value;
-    temps_[0] = temp;
   }

   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
@@ -803,7 +805,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
 };


-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LHasCachedArrayIndex(LOperand* value) {
     inputs_[0] = value;
@@ -814,7 +816,7 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
 };


-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -840,12 +842,11 @@ class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
 };


-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
  public:
-  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
-    temps_[1] = temp2;
   }

   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -856,7 +857,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
 };


-class LCmpT: public LTemplateInstruction<1, 2> {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
  public:
   LCmpT(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -870,7 +871,7 @@ class LCmpT: public LTemplateInstruction<1, 2> {
 };


-class LCmpTAndBranch: public LControlInstruction<2> {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpTAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -884,7 +885,7 @@ class LCmpTAndBranch: public LControlInstruction<2> {
 };


-class LInstanceOf: public LTemplateInstruction<1, 2> {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
  public:
   LInstanceOf(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -895,7 +896,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2> {
 };


-class LInstanceOfAndBranch: public LControlInstruction<2> {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
  public:
   LInstanceOfAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -935,7 +936,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
 };


-class LBitI: public LTemplateInstruction<1, 2> {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
   LBitI(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -952,7 +953,7 @@ class LBitI: public LTemplateInstruction<1, 2> {
 };


-class LShiftI: public LTemplateInstruction<1, 2> {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
       : op_(op), can_deopt_(can_deopt) {
@@ -972,7 +973,7 @@ class LShiftI: public LTemplateInstruction<1, 2> {
 };


-class LSubI: public LTemplateInstruction<1, 2> {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
  public:
   LSubI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1028,7 +1029,7 @@ class LConstantT: public LConstant<0> {
 };


-class LBranch: public LControlInstruction<1> {
+class LBranch: public LControlInstruction<1, 0> {
  public:
   explicit LBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1041,28 +1042,28 @@ class LBranch: public LControlInstruction<1> {
 };


-class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCmpMapAndBranch(LOperand* value) {
     inputs_[0] = value;
   }

   DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)

   virtual bool IsControl() const { return true; }

   Handle<Map> map() const { return hydrogen()->map(); }
   int true_block_id() const {
-    return hydrogen()->true_destination()->block_id();
+    return hydrogen()->FirstSuccessor()->block_id();
   }
   int false_block_id() const {
-    return hydrogen()->false_destination()->block_id();
+    return hydrogen()->SecondSuccessor()->block_id();
   }
 };


-class LJSArrayLength: public LTemplateInstruction<1, 1> {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LJSArrayLength(LOperand* value) {
     inputs_[0] = value;
@@ -1073,7 +1074,7 @@ class LJSArrayLength: public LTemplateInstruction<1, 1> {
 };


-class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFixedArrayLength(LOperand* value) {
     inputs_[0] = value;
@@ -1096,7 +1097,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
 };


-class LThrow: public LTemplateInstruction<0, 1> {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LThrow(LOperand* value) {
     inputs_[0] = value;
@@ -1106,7 +1107,7 @@ class LThrow: public LTemplateInstruction<0, 1> {
 };


-class LBitNotI: public LTemplateInstruction<1, 1> {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LBitNotI(LOperand* value) {
     inputs_[0] = value;
@@ -1116,7 +1117,7 @@ class LBitNotI: public LTemplateInstruction<1, 1> {
 };


-class LAddI: public LTemplateInstruction<1, 2> {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1128,7 +1129,7 @@ class LAddI: public LTemplateInstruction<1, 2> {
 };


-class LPower: public LTemplateInstruction<1, 2> {
+class LPower: public LTemplateInstruction<1, 2, 0> {
  public:
   LPower(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1140,7 +1141,7 @@ class LPower: public LTemplateInstruction<1, 2> {
 };


-class LArithmeticD: public LTemplateInstruction<1, 2> {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -1158,7 +1159,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2> {
 };


-class LArithmeticT: public LTemplateInstruction<1, 2> {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -1176,7 +1177,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2> {
 };


-class LReturn: public LTemplateInstruction<0, 1> {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LReturn(LOperand* value) {
     inputs_[0] = value;
@@ -1186,7 +1187,7 @@ class LReturn: public LTemplateInstruction<0, 1> {
 };


-class LLoadNamedField: public LTemplateInstruction<1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedField(LOperand* object) {
     inputs_[0] = object;
@@ -1197,7 +1198,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1> {
 };


-class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedGeneric(LOperand* object) {
     inputs_[0] = object;
@@ -1225,7 +1226,7 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
 };


-class LLoadElements: public LTemplateInstruction<1, 1> {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadElements(LOperand* object) {
     inputs_[0] = object;
@@ -1235,7 +1236,7 @@ class LLoadElements: public LTemplateInstruction<1, 1> {
 };


-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
     inputs_[0] = elements;
@@ -1250,7 +1251,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
 };


-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
     inputs_[0] = obj;
@@ -1271,7 +1272,7 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
 };


-class LStoreGlobal: public LTemplateInstruction<0, 1> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStoreGlobal(LOperand* value) {
     inputs_[0] = value;
@@ -1294,7 +1295,7 @@ class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
 };


-class LPushArgument: public LTemplateInstruction<0, 1> {
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
     inputs_[0] = value;
@@ -1328,10 +1329,10 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
 };


-class LCallKeyed: public LTemplateInstruction<1, 0, 1> {
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LCallKeyed(LOperand* temp) {
-    temps_[0] = temp;
+  explicit LCallKeyed(LOperand* key) {
+    inputs_[0] = key;
   }

   DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
@@ -1388,7 +1389,7 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
 };


-class LCallNew: public LTemplateInstruction<1, 1> {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallNew(LOperand* constructor) {
     inputs_[0] = constructor;
@@ -1413,7 +1414,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
 };


-class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInteger32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1423,7 +1424,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
 };


-class LNumberTagI: public LTemplateInstruction<1, 1> {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LNumberTagI(LOperand* value) {
     inputs_[0] = value;
@@ -1474,7 +1475,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
 };


-class LSmiTag: public LTemplateInstruction<1, 1> {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LSmiTag(LOperand* value) {
     inputs_[0] = value;
@@ -1484,7 +1485,7 @@ class LSmiTag: public LTemplateInstruction<1, 1> {
 };


-class LNumberUntagD: public LTemplateInstruction<1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LNumberUntagD(LOperand* value) {
     inputs_[0] = value;
@@ -1494,7 +1495,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1> {
 };


-class LSmiUntag: public LTemplateInstruction<1, 1> {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
  public:
   LSmiUntag(LOperand* value, bool needs_check)
       : needs_check_(needs_check) {
@@ -1593,7 +1594,7 @@ class LStoreKeyedGeneric: public LStoreKeyed {
 };


-class LCheckFunction: public LTemplateInstruction<0, 1> {
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckFunction(LOperand* value) {
     inputs_[0] = value;
@@ -1616,7 +1617,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
 };


-class LCheckMap: public LTemplateInstruction<0, 1> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckMap(LOperand* value) {
     inputs_[0] = value;
@@ -1641,7 +1642,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
 };


-class LCheckSmi: public LTemplateInstruction<0, 1> {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
  public:
   LCheckSmi(LOperand* value, Condition condition)
       : condition_(condition) {
@@ -1690,7 +1691,7 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
 };


-class LTypeof: public LTemplateInstruction<1, 1> {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LTypeof(LOperand* value) {
     inputs_[0] = value;
@@ -1700,7 +1701,7 @@ class LTypeof: public LTemplateInstruction<1, 1> {
 };


-class LTypeofIs: public LTemplateInstruction<1, 1> {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LTypeofIs(LOperand* value) {
     inputs_[0] = value;
@@ -1715,7 +1716,7 @@ class LTypeofIs: public LTemplateInstruction<1, 1> {
 };


-class LTypeofIsAndBranch: public LControlInstruction<1> {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1730,7 +1731,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1> {
 };


-class LDeleteProperty: public LTemplateInstruction<1, 2> {
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
  public:
   LDeleteProperty(LOperand* obj, LOperand* key) {
     inputs_[0] = obj;
@@ -1900,30 +1901,30 @@ class LChunkBuilder BASE_EMBEDDED {
   MUST_USE_RESULT LOperand* UseRegister(HValue* value);
   MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);

-  // A value in a register that may be trashed.
+  // An input operand in a register that may be trashed.
   MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);

-  // An operand value in a register or stack slot.
+  // An input operand in a register or stack slot.
   MUST_USE_RESULT LOperand* Use(HValue* value);
   MUST_USE_RESULT LOperand* UseAtStart(HValue* value);

-  // An operand value in a register, stack slot or a constant operand.
+  // An input operand in a register, stack slot or a constant operand.
   MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
   MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);

-  // An operand value in a register or a constant operand.
+  // An input operand in a register or a constant operand.
   MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
   MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);

+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
   MUST_USE_RESULT LOperand* FixedTemp(Register reg);
   MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);

-  // An operand value in register, stack slot or a constant operand.
-  // Will not be moved to a register even if one is freely available.
-  LOperand* UseAny(HValue* value);
-
   // Methods for setting up define-use relationships.
   // Return the same instruction that they are passed.
   template<int I, int T>

41 deps/v8/src/x64/macro-assembler-x64.cc

@@ -885,6 +885,13 @@ Condition MacroAssembler::CheckSmi(Register src) {
 }


+Condition MacroAssembler::CheckSmi(const Operand& src) {
+  ASSERT_EQ(0, kSmiTag);
+  testb(src, Immediate(kSmiTagMask));
+  return zero;
+}
+
+
 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   // Make mask 0x8000000000000001 and test that both bits are zero.
@@ -1386,6 +1393,40 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
 }


+void MacroAssembler::Pushad() {
+  push(rax);
+  push(rcx);
+  push(rdx);
+  push(rbx);
+  // Not pushing rsp or rbp.
+  push(rsi);
+  push(rdi);
+  push(r8);
+  push(r9);
+  // r10 is kScratchRegister.
+  push(r11);
+  push(r12);
+  // r13 is kRootRegister.
+  push(r14);
+  // r15 is kSmiConstantRegister.
+}
+
+
+void MacroAssembler::Popad() {
+  pop(r14);
+  pop(r12);
+  pop(r11);
+  pop(r9);
+  pop(r8);
+  pop(rdi);
+  pop(rsi);
+  pop(rbx);
+  pop(rdx);
+  pop(rcx);
+  pop(rax);
+}
+
+
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
   // Adjust this code if not the case.
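
The new Pushad/Popad must stay mirror images of each other: every register pushed is popped in exactly reverse order, and rsp/rbp plus the three special-purpose registers (r10/kScratchRegister, r13/kRootRegister, r15/kSmiConstantRegister) are skipped on both sides. A standalone check of that invariant, using the register list from the patch:

    #include <cassert>
    #include <stack>
    #include <string>
    #include <vector>

    int main() {
      // The eleven registers Pushad saves, in push order.
      const std::vector<std::string> pushed = {
          "rax", "rcx", "rdx", "rbx", "rsi", "rdi",
          "r8", "r9", "r11", "r12", "r14"};
      std::stack<std::string> machine_stack;
      for (const auto& r : pushed) machine_stack.push(r);  // Pushad
      // Popad: pop in reverse push order, restoring every register.
      for (auto it = pushed.rbegin(); it != pushed.rend(); ++it) {
        assert(machine_stack.top() == *it);
        machine_stack.pop();
      }
      assert(machine_stack.empty());
      return 0;
    }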

8 deps/v8/src/x64/macro-assembler-x64.h

@@ -272,6 +272,7 @@ class MacroAssembler: public Assembler {
   // Is the value a tagged smi.
   Condition CheckSmi(Register src);
+  Condition CheckSmi(const Operand& src);

   // Is the value a non-negative tagged smi.
   Condition CheckNonNegativeSmi(Register src);
@@ -590,6 +591,13 @@ class MacroAssembler: public Assembler {
   void Call(ExternalReference ext);
   void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

+  // Non-x64 instructions.
+  // Push/pop all general purpose registers.
+  // Does not push rsp/rbp nor any of the assembler's special purpose registers
+  // (kScratchRegister, kSmiConstantRegister, kRootRegister).
+  void Pushad();
+  void Popad();
+
   // Compare object type for heap object.
   // Always use unsigned comparisons: above and below, not less and greater.
   // Incoming register is heap_object and outgoing register is map.

300 deps/v8/src/x64/stub-cache-x64.cc

@@ -3144,6 +3144,306 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
 }


+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+    ExternalArrayType array_type, Code::Flags flags) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label slow;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rdx, &slow);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &slow);
+
+  // Check that the object is a JS object.
+  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
+  __ j(not_equal, &slow);
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.  The map is already in rcx.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // rax: index (as a smi)
+  // rdx: JSObject
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                 Heap::RootIndexForExternalArrayType(array_type));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ SmiToInteger32(rcx, rax);
+  __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // rax: index (as a smi)
+  // rdx: receiver (JSObject)
+  // rcx: untagged index
+  // rbx: elements array
+  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+  // rbx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+      __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
+      break;
+    case kExternalUnsignedByteArray:
+      __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
+      break;
+    case kExternalShortArray:
+      __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
+      break;
+    case kExternalIntArray:
+      __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
+      break;
+    case kExternalUnsignedIntArray:
+      __ movl(rcx, Operand(rbx, rcx, times_4, 0));
+      break;
+    case kExternalFloatArray:
+      __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // rax: index
+  // rdx: receiver
+  // For integer array types:
+  // rcx: value
+  // For floating-point array type:
+  // xmm0: value as double.
+
+  ASSERT(kSmiValueSize == 32);
+  if (array_type == kExternalUnsignedIntArray) {
+    // For the UnsignedInt array type, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    NearLabel box_int;
+
+    __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+
+    __ Integer32ToSmi(rax, rcx);
+    __ ret(0);
+
+    __ bind(&box_int);
+
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    // The value is zero-extended since we loaded the value from memory
+    // with movl.
+    __ cvtqsi2sd(xmm0, rcx);
+
+    __ AllocateHeapNumber(rcx, rbx, &slow);
+    // Set the value.
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+    __ movq(rax, rcx);
+    __ ret(0);
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    __ AllocateHeapNumber(rcx, rbx, &slow);
+    // Set the value.
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+    __ movq(rax, rcx);
+    __ ret(0);
+  } else {
+    __ Integer32ToSmi(rax, rcx);
+    __ ret(0);
+  }
+
+  // Slow case: Jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  __ pop(rbx);
+  __ push(rdx);  // receiver
+  __ push(rax);  // name
+  __ push(rbx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+  // Return the generated code.
+  return GetCode(flags);
+}

+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+    ExternalArrayType array_type, Code::Flags flags) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label slow;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rdx, &slow);
+  // Get the map from the receiver.
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rcx, &slow);
+
+  // Check that the object is a JS object.
+  __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
+  __ j(not_equal, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // rax: value
+  // rcx: key (a smi)
+  // rdx: receiver (a JSObject)
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                 Heap::RootIndexForExternalArrayType(array_type));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ SmiToInteger32(rdi, rcx);  // Untag the index.
+  __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // rax: value
+  // rcx: key (a smi)
+  // rdx: receiver (a JSObject)
+  // rbx: elements array
+  // rdi: untagged key
+  NearLabel check_heap_number;
+  __ JumpIfNotSmi(rax, &check_heap_number);
+  // No more branches to slow case on this path.  Key and receiver not needed.
+  __ SmiToInteger32(rdx, rax);
+  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+  // rbx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+      break;
+    case kExternalFloatArray:
+      // Need to perform int-to-float conversion.
+      __ cvtlsi2ss(xmm0, rdx);
+      __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  __ ret(0);
+
+  __ bind(&check_heap_number);
+  // rax: value
+  // rcx: key (a smi)
+  // rdx: receiver (a JSObject)
+  // rbx: elements array
+  // rdi: untagged key
+  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
+  __ j(not_equal, &slow);
+  // No more branches to slow case on this path.
+
+  // The WebGL specification leaves the behavior of storing NaN and
+  // +/-Infinity into integer arrays basically undefined. For more
+  // reproducible behavior, convert these to zero.
+  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+  // rdi: untagged index
+  // rbx: base pointer of external storage
+  // xmm0: value (double)
+  if (array_type == kExternalFloatArray) {
+    __ cvtsd2ss(xmm0, xmm0);
+    __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+    __ ret(0);
+  } else {
+    // Perform float-to-int conversion with truncation (round-to-zero)
+    // behavior.
+
+    // Convert to int32 and store the low byte/word.
+    // If the value is NaN or +/-infinity, the result is 0x80000000,
+    // which is automatically zero when taken mod 2^n, n < 32.
+    // rdx: value (converted to an untagged integer)
+    // rdi: untagged index
+    // rbx: base pointer of external storage
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ cvttsd2si(rdx, xmm0);
+        __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ cvttsd2si(rdx, xmm0);
+        __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray: {
+        // Convert to int64, so that NaN and infinities become
+        // 0x8000000000000000, which is zero mod 2^32.
+        __ cvttsd2siq(rdx, xmm0);
+        __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ ret(0);
+  }
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  __ pop(rbx);
+  __ push(rdx);  // receiver
+  __ push(rcx);  // key
+  __ push(rax);  // value
+  __ push(rbx);  // return address
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+  return GetCode(flags);
+}
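
The store stub leans on an x86 detail called out in the comments: cvttsd2si(q) yields the "integer indefinite" value 0x80000000 (or 0x8000000000000000 for the 64-bit form) for NaN and out-of-range inputs, and the 64-bit indefinite is 0 mod 2^32. A standalone model of the 64-bit conversion (plain C++ casts of NaN are undefined behavior, so the special case is spelled out explicitly):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Models cvttsd2siq: truncate toward zero; NaN and values outside the
    // int64 range produce the "integer indefinite" 0x8000000000000000.
    static int64_t Cvttsd2siq(double d) {
      const double kTwo63 = 9223372036854775808.0;  // 2^63
      if (std::isnan(d) || d >= kTwo63 || d < -kTwo63) {
        return INT64_MIN;  // 0x8000000000000000
      }
      return static_cast<int64_t>(std::trunc(d));
    }

    int main() {
      // Taken mod 2^32 (the movl store), NaN and +/-Infinity become 0,
      // matching the WebGL-motivated behavior described in the stub.
      printf("%u\n", static_cast<uint32_t>(Cvttsd2siq(NAN)));       // 0
      printf("%u\n", static_cast<uint32_t>(Cvttsd2siq(INFINITY)));  // 0
      printf("%u\n", static_cast<uint32_t>(Cvttsd2siq(10.6)));      // 10
      printf("%u\n", static_cast<uint32_t>(Cvttsd2siq(-10.6)));     // 4294967286 (-10 mod 2^32)
      return 0;
    }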
 #undef __

 } }  // namespace v8::internal

2 deps/v8/test/cctest/cctest.status

@@ -82,7 +82,7 @@ test-serialize/ContextSerialization: SKIP
 test-serialize/ContextDeserialization: SKIP
 test-debug/BreakPointReturn: SKIP
 test-debug/DebugStepLinearMixedICs: SKIP
+test-debug/DebugConditional: SKIP

 ##############################################################################
 [ $arch == arm ]

35
deps/v8/test/cctest/test-api.cc

@@ -874,6 +874,10 @@ THREADED_TEST(ExternalWrap) {
TestExternalPointerWrapping();
#if defined(V8_HOST_ARCH_X64)
// Check a value with a leading 1 bit in x64 Smi encoding.
expected_ptr = reinterpret_cast<void*>(0x400000000);
TestExternalPointerWrapping();
expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef);
TestExternalPointerWrapping();
@@ -2375,6 +2379,10 @@ TEST(APIThrowMessageOverwrittenToString) {
CompileRun("ReferenceError.prototype.toString ="
" function() { return 'Whoops' }");
CompileRun("asdf;");
CompileRun("ReferenceError.prototype.constructor.name = void 0;");
CompileRun("asdf;");
CompileRun("ReferenceError.prototype.constructor = void 0;");
CompileRun("asdf;");
v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
CHECK(string->Equals(v8_str("Whoops")));
v8::V8::RemoveMessageListeners(check_message);
@@ -10583,6 +10591,33 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
CHECK_EQ(0, result->Int32Value());
CHECK_EQ(0,
i::Smi::cast(jsobj->GetElement(5)->ToObjectChecked())->value());
// Check truncation behavior of integral arrays.
const char* unsigned_data =
"var source_data = [0.6, 10.6];"
"var expected_results = [0, 10];";
const char* signed_data =
"var source_data = [0.6, 10.6, -0.6, -10.6];"
"var expected_results = [0, 10, 0, -10];";
bool is_unsigned =
(array_type == v8::kExternalUnsignedByteArray ||
array_type == v8::kExternalUnsignedShortArray ||
array_type == v8::kExternalUnsignedIntArray);
i::OS::SNPrintF(test_buf,
"%s"
"var all_passed = true;"
"for (var i = 0; i < source_data.length; i++) {"
" for (var j = 0; j < 8; j++) {"
" ext_array[j] = source_data[i];"
" }"
" all_passed = all_passed &&"
" (ext_array[5] == expected_results[i]);"
"}"
"all_passed;",
(is_unsigned ? unsigned_data : signed_data));
result = CompileRun(test_buf.start());
CHECK_EQ(true, result->BooleanValue());
}
result = CompileRun("ext_array[3] = 33;"

46
deps/v8/test/mjsunit/cyclic-error-to-string.js

@@ -0,0 +1,46 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test printing of cyclic errors which return the empty string for
// compatibility with Safari and Firefox.
var e = new Error();
assertEquals('Error', e + '');
e = new Error();
e.name = e;
e.message = e;
e.stack = e;
e.arguments = e;
assertEquals(': ', e + '');
e = new Error();
e.name = [ e ];
e.message = [ e ];
e.stack = [ e ];
e.arguments = [ e ];
assertEquals(': ', e + '');
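The assertions above pin down the observable behavior: an error that is already being formatted stringifies to the empty string, so the outer name/message join comes out as ': '. One way to get that behavior, sketched with hypothetical names rather than V8's actual messages.js change:

// Sketch (hypothetical, not V8 source): errors already on the
// formatting stack render as the empty string, breaking the cycle.
var beingFormatted = [];
function cycleSafeErrorToString(e) {
  if (beingFormatted.indexOf(e) >= 0) return '';  // cyclic: empty string
  beingFormatted.push(e);
  try {
    // String(...) may re-enter this function via the toString below.
    var name = e.hasOwnProperty('name') ? String(e.name) : 'Error';
    return e.hasOwnProperty('message') ? name + ': ' + String(e.message)
                                       : name;
  } finally {
    beingFormatted.pop();
  }
}
Error.prototype.toString = function() { return cycleSafeErrorToString(this); };

With this sketch in place, the three cases above evaluate to 'Error', ': ', and ': ' respectively.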

23
deps/v8/test/mjsunit/debug-evaluate-locals.js

@@ -34,18 +34,18 @@ exception = false;
function checkFrame0(name, value) {
assertTrue(name == 'a' || name == 'b', 'frame0 name');
if (name == 'a') {
assertEquals(1, value);
} else if (name == 'b') {
assertEquals(2, value);
}
}
function checkFrame1(name, value) {
assertTrue(name == '.arguments' || name == 'arguments' || name == 'a',
'frame1 name');
if (name == 'a') {
assertEquals(3, value);
}
@@ -53,12 +53,10 @@ function checkFrame1(name, value) {
function checkFrame2(name, value) {
assertTrue(name == 'a' || name == 'b', 'frame2 name');
if (name == 'a') {
assertEquals(5, value);
} else if (name == 'b') {
assertEquals(0, value);
}
}
@@ -73,18 +71,17 @@ function listener(event, exec_state, event_data, data) {
checkFrame0(frame0.localName(0), frame0.localValue(0).value());
checkFrame0(frame0.localName(1), frame0.localValue(1).value());
// Frame 1 has normal variables a and arguments (and the .arguments
// variable).
var frame1 = exec_state.frame(1);
checkFrame1(frame1.localName(0), frame1.localValue(0).value());
checkFrame1(frame1.localName(1), frame1.localValue(1).value());
checkFrame1(frame1.localName(2), frame1.localValue(2).value());
// Frame 2 has normal variables a and b.
var frame2 = exec_state.frame(2);
checkFrame2(frame2.localName(0), frame2.localValue(0).value());
checkFrame2(frame2.localName(1), frame2.localValue(1).value());
// Evaluating a and b on frames 0, 1 and 2 produces 1, 2, 3, 4, 5 and 6.
assertEquals(1, exec_state.frame(0).evaluate('a').value());

117
deps/v8/test/mjsunit/strict-mode.js

@@ -0,0 +1,117 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function CheckStrictMode(code, exception) {
assertDoesNotThrow(code);
assertThrows("'use strict';\n" + code, exception);
assertThrows('"use strict";\n' + code, exception);
assertDoesNotThrow("\
function outer() {\
function inner() {\n"
+ code +
"\n}\
}");
assertThrows("\
function outer() {\
'use strict';\
function inner() {\n"
+ code +
"\n}\
}", exception);
}
// Incorrect 'use strict' directive.
function UseStrictEscape() {
"use\\x20strict";
with ({}) {};
}
// 'use strict' in non-directive position.
function UseStrictNonDirective() {
void(0);
"use strict";
with ({}) {};
}
// Multiple directives, including "use strict".
assertThrows('\
"directive 1";\
"another directive";\
"use strict";\
"directive after strict";\
"and one more";\
with({}) {}', SyntaxError);
// 'with' disallowed in strict mode.
CheckStrictMode("with({}) {}", SyntaxError);
// Function named 'eval'.
CheckStrictMode("function eval() {}", SyntaxError);
// Function named 'arguments'.
CheckStrictMode("function arguments() {}", SyntaxError);
// Function parameter named 'eval'.
//CheckStrictMode("function foo(a, b, eval, c, d) {}", SyntaxError)
// Function parameter named 'arguments'.
//CheckStrictMode("function foo(a, b, arguments, c, d) {}", SyntaxError)
// Property accessor parameter named 'eval'.
//CheckStrictMode("var o = { set foo(eval) {} }", SyntaxError)
// Property accessor parameter named 'arguments'.
//CheckStrictMode("var o = { set foo(arguments) {} }", SyntaxError)
// Duplicate function parameter name.
//CheckStrictMode("function foo(a, b, c, d, b) {}", SyntaxError)
// catch(eval)
CheckStrictMode("try{}catch(eval){};", SyntaxError);
// catch(arguments)
CheckStrictMode("try{}catch(arguments){};", SyntaxError);
// var eval
CheckStrictMode("var eval;", SyntaxError);
// var arguments
CheckStrictMode("var arguments;", SyntaxError);
// Strict mode applies to the function in which the directive is used.
//assertThrows('\
//function foo(eval) {\
// "use strict";\
//}', SyntaxError);
// Strict mode doesn't affect the outer scope of strict code.
function NotStrict(eval) {
function Strict() {
"use strict";
}
with ({}) {};
}
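As a usage note, the CheckStrictMode helper above makes it cheap to pin down further ES5 restrictions as the parser grows support for them. A hypothetical extra case in the same style (not part of this change, and only meaningful once the corresponding check is implemented):

// Hypothetical: ES5 strict mode rejects delete of an unqualified name.
CheckStrictMode("var x; delete x;", SyntaxError);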

11
deps/v8/test/mjsunit/string-charcodeat.js

@@ -153,6 +153,17 @@ TestStringType(Slice16End, true);
TestStringType(Flat16, true);
TestStringType(NotAString16, true);
function ConsNotSmiIndex() {
var str = Cons();
assertTrue(isNaN(str.charCodeAt(0x7fffffff)));
}
for (var i = 0; i < 100000; i++) {
ConsNotSmiIndex();
}
for (var i = 0; i != 10; i++) {
assertEquals(101, Cons16().charCodeAt(1.1));
assertEquals('e', Cons16().charAt(1.1));

3
deps/v8/test/mozilla/mozilla.status

@@ -198,6 +198,9 @@ js1_5/Regress/regress-404755: PASS || FAIL
js1_5/extensions/regress-363258: PASS || FAIL
# Test that assumes specific runtime for a regexp, flaky in debug mode.
ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug
##################### INCOMPATIBLE TESTS #####################

14
deps/v8/tools/gyp/v8.gyp

@@ -32,6 +32,7 @@
'gcc_version%': 'unknown',
'v8_target_arch%': '<(target_arch)',
'v8_use_snapshot%': 'true',
'v8_use_liveobjectlist%': 'false',
},
'conditions': [
['use_system_v8==0', {
@@ -66,6 +67,14 @@
}],
],
}],
['v8_use_liveobjectlist=="true"', {
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
'INSPECTOR',
'OBJECT_PRINT',
'LIVEOBJECTLIST',
],
}],
],
'configurations': {
'Debug': {
@@ -417,6 +426,8 @@
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
'../../src/inspector.cc',
'../../src/inspector.h',
'../../src/interpreter-irregexp.cc',
'../../src/interpreter-irregexp.h',
'../../src/jump-target-inl.h',
@@ -432,6 +443,9 @@
'../../src/lithium-allocator.h',
'../../src/liveedit.cc',
'../../src/liveedit.h',
'../../src/liveobjectlist-inl.h',
'../../src/liveobjectlist.cc',
'../../src/liveobjectlist.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',

30
deps/v8/tools/v8.xcodeproj/project.pbxproj

@@ -319,6 +319,13 @@
89B91BFB12D4F1BB002FF4BC /* libv8-x64.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 895692AA12D4ED240072C313 /* libv8-x64.a */; };
89B933AF0FAA0F9600201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
89B933B00FAA0F9D00201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */; };
89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
89F23C3F0E78D5B2006B2466 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; };
89F23C400E78D5B2006B2466 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; };
89F23C410E78D5B2006B2466 /* api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FA0E719B8F00D62E90 /* api.cc */; };
@@ -886,6 +893,12 @@
89B91B9A12D4EF95002FF4BC /* virtual-frame-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "virtual-frame-x64.h"; path = "x64/virtual-frame-x64.h"; sourceTree = "<group>"; };
89B91BBE12D4F02A002FF4BC /* v8_shell-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
89B91BCE12D4F02A002FF4BC /* d8-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-ia32.cc"; path = "ia32/lithium-gap-resolver-ia32.cc"; sourceTree = "<group>"; };
89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-ia32.h"; path = "ia32/lithium-gap-resolver-ia32.h"; sourceTree = "<group>"; };
89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "gdb-jit.cc"; sourceTree = "<group>"; };
89D7DDD712E8DE09001E2B82 /* gdb-jit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gdb-jit.h"; sourceTree = "<group>"; };
89D7DDD812E8DE09001E2B82 /* inspector.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = inspector.cc; sourceTree = "<group>"; };
89D7DDD912E8DE09001E2B82 /* inspector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inspector.h; sourceTree = "<group>"; };
89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; };
89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-x64.cc"; path = "x64/lithium-codegen-x64.cc"; sourceTree = "<group>"; };
@@ -1101,7 +1114,6 @@
897FF1270E719B8F00D62E90 /* dateparser.h */,
8956B6CD0F5D86570033B5A2 /* debug-agent.cc */,
8956B6CE0F5D86570033B5A2 /* debug-agent.h */,
897FF1280E719B8F00D62E90 /* debug.cc */,
897FF1290E719B8F00D62E90 /* debug.h */,
893E248B12B14B3D0083370F /* deoptimizer.cc */,
@@ -1141,6 +1153,8 @@
9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */,
893E24DA12B14B9F0083370F /* gc-extension.cc */,
893E24DB12B14B9F0083370F /* gc-extension.h */,
89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */,
89D7DDD712E8DE09001E2B82 /* gdb-jit.h */,
897FF13E0E719B8F00D62E90 /* global-handles.cc */,
897FF13F0E719B8F00D62E90 /* global-handles.h */,
897FF1400E719B8F00D62E90 /* globals.h */,
@@ -1162,6 +1176,8 @@
897FF14B0E719B8F00D62E90 /* ic-inl.h */,
897FF14C0E719B8F00D62E90 /* ic.cc */,
897FF14D0E719B8F00D62E90 /* ic.h */,
89D7DDD812E8DE09001E2B82 /* inspector.cc */,
89D7DDD912E8DE09001E2B82 /* inspector.h */,
89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */,
89A15C670EE4665300B48DEB /* interpreter-irregexp.h */,
897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
@@ -1463,6 +1479,8 @@
89B91C0312D4F275002FF4BC /* ia32 */ = {
isa = PBXGroup;
children = (
89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */,
89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */,
897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */,
897FF1010E719B8F00D62E90 /* assembler-ia32.cc */,
897FF1020E719B8F00D62E90 /* assembler-ia32.h */,
@@ -1515,6 +1533,7 @@
896448BC0E9D530500E7C516 /* codegen-arm.h */,
895FA748107FFE73006F39D4 /* constants-arm.cc */,
897FF11B0E719B8F00D62E90 /* constants-arm.h */,
898BD20C0EF6CC850068B00A /* debug-arm.cc */,
893E24C612B14B510083370F /* deoptimizer-arm.cc */,
9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */,
9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */,
@@ -1958,6 +1977,8 @@
8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */,
894A59E912D777E80000766D /* lithium.cc in Sources */,
89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */,
89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2093,6 +2114,9 @@
893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */,
8946827512C26EB700C914BC /* objects-printer.cc in Sources */,
894A59EB12D777E80000766D /* lithium.cc in Sources */,
89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */,
89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2268,6 +2292,8 @@
893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */,
8946827612C26EB700C914BC /* objects-printer.cc in Sources */,
894A59EA12D777E80000766D /* lithium.cc in Sources */,
89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2395,6 +2421,7 @@
V8_ENABLE_CHECKS,
OBJECT_PRINT,
ENABLE_VMSTATE_TRACKING,
ENABLE_DEBUGGER_SUPPORT,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = YES;
@@ -2434,6 +2461,7 @@
GCC_PREPROCESSOR_DEFINITIONS = (
"$(GCC_PREPROCESSOR_DEFINITIONS)",
NDEBUG,
ENABLE_DEBUGGER_SUPPORT,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = NO;
