diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index c9ff2978d5..e2f87c5a1d 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,22 @@ +2011-01-24: Version 3.0.10 + + Fixed External::Wrap for 64-bit addresses (issue 1037). + + Fixed incorrect .arguments variable proxy handling in the full + code generator (issue 1060). + + Introduced partial strict mode support. + + Changed formatting of recursive error messages to match Firefox and Safari + (issue http://crbug.com/70334). + + Fixed incorrect rounding for float-to-integer conversions for external + array types, which implement the Typed Array spec + (issue http://crbug.com/50972). + + Performance improvements on the IA32 platform. + + 2011-01-19: Version 3.0.9 Added basic GDB JIT Interface integration. diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 7d18107d92..5e57bedb9a 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -3367,7 +3367,7 @@ template <> struct SmiTagging<4> { // For 32-bit systems any 2 bytes aligned pointer can be encoded as smi // with a plain reinterpret_cast. - static const intptr_t kEncodablePointerMask = 0x1; + static const uintptr_t kEncodablePointerMask = 0x1; static const int kPointerToSmiShift = 0; }; @@ -3387,8 +3387,8 @@ template <> struct SmiTagging<8> { // It might be not enough to cover stack allocated objects on some platforms. 
static const int kPointerAlignment = 3; - static const intptr_t kEncodablePointerMask = - ~(intptr_t(0xffffffff) << kPointerAlignment); + static const uintptr_t kEncodablePointerMask = + ~(uintptr_t(0xffffffff) << kPointerAlignment); static const int kPointerToSmiShift = kSmiTagSize + kSmiShiftSize - kPointerAlignment; @@ -3397,7 +3397,7 @@ template <> struct SmiTagging<8> { typedef SmiTagging PlatformSmiTagging; const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; -const intptr_t kEncodablePointerMask = +const uintptr_t kEncodablePointerMask = PlatformSmiTagging::kEncodablePointerMask; const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift; @@ -3457,7 +3457,7 @@ class Internals { } static inline void* GetExternalPointerFromSmi(internal::Object* value) { - const intptr_t address = reinterpret_cast(value); + const uintptr_t address = reinterpret_cast(value); return reinterpret_cast(address >> kPointerToSmiShift); } diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 708edeff06..f3b9d43281 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -89,6 +89,7 @@ SOURCES = { lithium-allocator.cc lithium.cc liveedit.cc + liveobjectlist.cc log-utils.cc log.cc mark-compact.cc diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 073306f071..93037822cf 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -3267,14 +3267,14 @@ void v8::Object::SetInternalField(int index, v8::Handle value) { static bool CanBeEncodedAsSmi(void* ptr) { - const intptr_t address = reinterpret_cast(ptr); + const uintptr_t address = reinterpret_cast(ptr); return ((address & i::kEncodablePointerMask) == 0); } static i::Smi* EncodeAsSmi(void* ptr) { ASSERT(CanBeEncodedAsSmi(ptr)); - const intptr_t address = reinterpret_cast(ptr); + const uintptr_t address = reinterpret_cast(ptr); i::Smi* result = reinterpret_cast(address << i::kPointerToSmiShift); 
ASSERT(i::Internals::HasSmiTag(result)); ASSERT_EQ(result, i::Smi::FromInt(result->value())); diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 11a9c3930f..155aef8b8d 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -1656,8 +1656,14 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) { emit(reinterpret_cast(msg)); #else // def __arm__ #ifdef CAN_USE_ARMV5_INSTRUCTIONS - ASSERT(cond == al); - bkpt(0); + if (cond != al) { + Label skip; + b(&skip, NegateCondition(cond)); + bkpt(0); + bind(&skip); + } else { + bkpt(0); + } #else // ndef CAN_USE_ARMV5_INSTRUCTIONS svc(0x9f0001, cond); #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 8589cf0ef9..a446007275 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -112,10 +112,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { void FastNewContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; // Attempt to allocate the context in new space. - __ AllocateInNewSpace(FixedArray::SizeFor(length), + __ AllocateInNewSpace(FixedArray::SizeFor(slots_), r0, r1, r2, @@ -128,7 +127,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Setup the object header. __ LoadRoot(r2, Heap::kContextMapRootIndex); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ mov(r2, Operand(Smi::FromInt(length))); + __ mov(r2, Operand(Smi::FromInt(slots_))); __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); // Setup the fixed slots. @@ -144,7 +143,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Initialize the rest of the slots to undefined. 
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) { __ str(r1, MemOperand(r0, Context::SlotOffset(i))); } @@ -2890,18 +2889,33 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { } -// Uses registers r0 to r4. Expected input is -// object in r0 (or at sp+1*kPointerSize) and function in -// r1 (or at sp), depending on whether or not -// args_in_registers() is true. +// Uses registers r0 to r4. +// Expected input (depending on whether args are in registers or on the stack): +// * object: r0 or at sp + 1 * kPointerSize. +// * function: r1 or at sp. +// +// An inlined call site may have been generated before calling this stub. +// In this case the offset to the inline site to patch is passed on the stack, +// in the safepoint slot for register r4. +// (See LCodeGen::DoInstanceOfKnownGlobal) void InstanceofStub::Generate(MacroAssembler* masm) { + // Call site inlining and patching implies arguments in registers. + ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); + // ReturnTrueFalse is only implemented for inlined call sites. + ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); + // Fixed register usage throughout the stub: const Register object = r0; // Object (lhs). - const Register map = r3; // Map of the object. + Register map = r3; // Map of the object. const Register function = r1; // Function (rhs). const Register prototype = r4; // Prototype of the function. 
+ const Register inline_site = r9; const Register scratch = r2; + + const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize; + Label slow, loop, is_instance, is_not_instance, not_js_object; + if (!HasArgsInRegisters()) { __ ldr(object, MemOperand(sp, 1 * kPointerSize)); __ ldr(function, MemOperand(sp, 0)); @@ -2911,50 +2925,100 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ BranchOnSmi(object, ¬_js_object); __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); - // Look up the function and the map in the instanceof cache. - Label miss; - __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); - __ cmp(function, ip); - __ b(ne, &miss); - __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); - __ cmp(map, ip); - __ b(ne, &miss); - __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); - __ Ret(HasArgsInRegisters() ? 0 : 2); + // If there is a call site cache don't look in the global cache, but do the + // real lookup and update the call site cache. + if (!HasCallSiteInlineCheck()) { + Label miss; + __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); + __ cmp(function, ip); + __ b(ne, &miss); + __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); + __ cmp(map, ip); + __ b(ne, &miss); + __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ Ret(HasArgsInRegisters() ? 0 : 2); + + __ bind(&miss); + } - __ bind(&miss); + // Get the prototype of the function. __ TryGetFunctionPrototype(function, prototype, scratch, &slow); // Check that the function prototype is a JS object. __ BranchOnSmi(prototype, &slow); __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); - __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); - __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); + // Update the global instanceof or call site inlined cache with the current + // map and function. The cached answer will be set when it is known below. 
+ if (!HasCallSiteInlineCheck()) { + __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); + __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); + } else { + ASSERT(HasArgsInRegisters()); + // Patch the (relocated) inlined map check. + + // The offset was stored in r4 safepoint slot. + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) + __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); + __ sub(inline_site, lr, scratch); + // Get the map location in scratch and patch it. + __ GetRelocatedValueLocation(inline_site, scratch); + __ str(map, MemOperand(scratch)); + } // Register mapping: r3 is object map and r4 is function prototype. // Get prototype of object into r2. __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); + // We don't need map any more. Use it as a scratch register. + Register scratch2 = map; + map = no_reg; + // Loop through the prototype chain looking for the function prototype. + __ LoadRoot(scratch2, Heap::kNullValueRootIndex); __ bind(&loop); __ cmp(scratch, Operand(prototype)); __ b(eq, &is_instance); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(scratch, ip); + __ cmp(scratch, scratch2); __ b(eq, &is_not_instance); __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); __ jmp(&loop); __ bind(&is_instance); - __ mov(r0, Operand(Smi::FromInt(0))); - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + if (!HasCallSiteInlineCheck()) { + __ mov(r0, Operand(Smi::FromInt(0))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + } else { + // Patch the call site to return true. + __ LoadRoot(r0, Heap::kTrueValueRootIndex); + __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. 
+ __ GetRelocatedValueLocation(inline_site, scratch); + __ str(r0, MemOperand(scratch)); + + if (!ReturnTrueFalseObject()) { + __ mov(r0, Operand(Smi::FromInt(0))); + } + } __ Ret(HasArgsInRegisters() ? 0 : 2); __ bind(&is_not_instance); - __ mov(r0, Operand(Smi::FromInt(1))); - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + if (!HasCallSiteInlineCheck()) { + __ mov(r0, Operand(Smi::FromInt(1))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + } else { + // Patch the call site to return false. + __ LoadRoot(r0, Heap::kFalseValueRootIndex); + __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. + __ GetRelocatedValueLocation(inline_site, scratch); + __ str(r0, MemOperand(scratch)); + + if (!ReturnTrueFalseObject()) { + __ mov(r0, Operand(Smi::FromInt(1))); + } + } __ Ret(HasArgsInRegisters() ? 0 : 2); Label object_not_null, object_not_null_or_smi; @@ -2962,7 +3026,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // Before null, smi and string value checks, check that the rhs is a function // as for a non-function rhs an exception needs to be thrown. __ BranchOnSmi(function, &slow); - __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE); + __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); __ b(ne, &slow); // Null is not instance of anything. @@ -2985,13 +3049,30 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // Slow-case. Tail call builtin. __ bind(&slow); - if (HasArgsInRegisters()) { + if (!ReturnTrueFalseObject()) { + if (HasArgsInRegisters()) { + __ Push(r0, r1); + } + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); + } else { + __ EnterInternalFrame(); __ Push(r0, r1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS); + __ LeaveInternalFrame(); + __ cmp(r0, Operand(0)); + __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); + __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); + __ Ret(HasArgsInRegisters() ? 
0 : 2); } - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); } +Register InstanceofStub::left() { return r0; } + + +Register InstanceofStub::right() { return r1; } + + void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The displacement is the offset of the last parameter (if any) // relative to the frame pointer. @@ -3703,7 +3784,6 @@ int CompareStub::MinorKey() { // StringCharCodeAtGenerator - void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; Label ascii_string; @@ -4862,6 +4942,56 @@ void StringAddStub::Generate(MacroAssembler* masm) { } +void StringCharAtStub::Generate(MacroAssembler* masm) { + // Expects two arguments (object, index) on the stack: + // lr: return address + // sp[0]: index + // sp[4]: object + Register object = r1; + Register index = r0; + Register scratch1 = r2; + Register scratch2 = r3; + Register result = r0; + + // Get object and index from the stack. + __ pop(index); + __ pop(object); + + Label need_conversion; + Label index_out_of_range; + Label done; + StringCharAtGenerator generator(object, + index, + scratch1, + scratch2, + result, + &need_conversion, + &need_conversion, + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ b(&done); + + __ bind(&index_out_of_range); + // When the index is out of range, the spec requires us to return + // the empty string. + __ LoadRoot(result, Heap::kEmptyStringRootIndex); + __ jmp(&done); + + __ bind(&need_conversion); + // Move smi zero into the result register, which will trigger + // conversion. 
+ __ mov(result, Operand(Smi::FromInt(0))); + __ b(&done); + + StubRuntimeCallHelper call_helper; + generator.GenerateSlow(masm, call_helper); + + __ bind(&done); + __ Ret(); +} + + void ICCompareStub::GenerateSmis(MacroAssembler* masm) { ASSERT(state_ == CompareIC::SMIS); Label miss; diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 4a982f6e5c..2fa0711959 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { frame_->AllocateStackSlots(); frame_->AssertIsSpilled(); - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { // Allocate local context. // Get outer context and create a new context based on it. diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 36f6283c96..ff814476ae 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -352,6 +352,11 @@ class Instr { && (Bit(20) == 0) && ((Bit(7) == 0)); } + // Test for a stop instruction. + inline bool IsStop() const { + return (TypeField() == 7) && (Bit(24) == 1) && (SvcField() >= stop); + } + // Special accessors that test for existence of a value. 
inline bool HasS() const { return SField() == 1; } inline bool HasB() const { return BField() == 1; } diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 00c20efa8c..f1be27f4bf 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -66,8 +66,7 @@ static const RegList kCalleeSaved = 1 << 6 | // r6 v3 1 << 7 | // r7 v4 1 << 8 | // r8 v5 (cp in JavaScript code) - kR9Available - << 9 | // r9 v6 + kR9Available << 9 | // r9 v6 1 << 10 | // r10 v7 1 << 11; // r11 v8 (fp in JavaScript code) diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index d8ca130da4..ddc74e2f73 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -92,7 +92,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { bool function_in_register = true; // Possibly allocate a local context. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate local context"); // Argument to NewContext is the function, which is in r1. diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 6120bba458..51a8149efb 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1337,311 +1337,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { } -// Convert unsigned integer with specified number of leading zeroes in binary -// representation to IEEE 754 double. -// Integer to convert is passed in register hiword. -// Resulting double is returned in registers hiword:loword. -// This functions does not work correctly for 0. 
-static void GenerateUInt2Double(MacroAssembler* masm, - Register hiword, - Register loword, - Register scratch, - int leading_zeroes) { - const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; - const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; - - const int mantissa_shift_for_hi_word = - meaningful_bits - HeapNumber::kMantissaBitsInTopWord; - - const int mantissa_shift_for_lo_word = - kBitsPerInt - mantissa_shift_for_hi_word; - - __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); - if (mantissa_shift_for_hi_word > 0) { - __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); - __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); - } else { - __ mov(loword, Operand(0, RelocInfo::NONE)); - __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); - } - - // If least significant bit of biased exponent was not 1 it was corrupted - // by most significant bit of mantissa so we should fix that. - if (!(biased_exponent & 1)) { - __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); - } -} - - -void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label slow, failed_allocation; - - Register key = r0; - Register receiver = r1; - - // Check that the object isn't a smi - __ BranchOnSmi(receiver, &slow); - - // Check that the key is a smi. - __ BranchOnNotSmi(key, &slow); - - // Check that the object is a JS object. Load map into r2. - __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); - __ b(lt, &slow); - - // Check that the receiver does not require access checks. We need - // to check this explicitly since this generic stub does not perform - // map checks. 
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); - __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); - __ b(ne, &slow); - - // Check that the elements array is the appropriate type of - // ExternalArray. - __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); - __ cmp(r2, ip); - __ b(ne, &slow); - - // Check that the index is in range. - __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); - __ cmp(ip, Operand(key, ASR, kSmiTagSize)); - // Unsigned comparison catches both negative and too-large values. - __ b(lo, &slow); - - // r3: elements array - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); - // r3: base pointer of external storage - - // We are not untagging smi key and instead work with it - // as if it was premultiplied by 2. - ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); - - Register value = r2; - switch (array_type) { - case kExternalByteArray: - __ ldrsb(value, MemOperand(r3, key, LSR, 1)); - break; - case kExternalUnsignedByteArray: - __ ldrb(value, MemOperand(r3, key, LSR, 1)); - break; - case kExternalShortArray: - __ ldrsh(value, MemOperand(r3, key, LSL, 0)); - break; - case kExternalUnsignedShortArray: - __ ldrh(value, MemOperand(r3, key, LSL, 0)); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ ldr(value, MemOperand(r3, key, LSL, 1)); - break; - case kExternalFloatArray: - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ add(r2, r3, Operand(key, LSL, 1)); - __ vldr(s0, r2, 0); - } else { - __ ldr(value, MemOperand(r3, key, LSL, 1)); - } - break; - default: - UNREACHABLE(); - break; - } - - // For integer array types: - // r2: value - // For floating-point array type - // s0: value (if VFP3 is supported) - // r2: value (if VFP3 is not supported) - - if (array_type == kExternalIntArray) { - // For the Int and 
UnsignedInt array types, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. - Label box_int; - __ cmp(value, Operand(0xC0000000)); - __ b(mi, &box_int); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - __ bind(&box_int); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't touch r0 or r1 as they are needed if allocation - // fails. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r5, r3, r4, r6, &slow); - // Now we can use r0 for the result as key is not needed any more. - __ mov(r0, r5); - - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ vmov(s0, value); - __ vcvt_f64_s32(d0, s0); - __ sub(r3, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r3, HeapNumber::kValueOffset); - __ Ret(); - } else { - WriteInt32ToHeapNumberStub stub(value, r0, r3); - __ TailCallStub(&stub); - } - } else if (array_type == kExternalUnsignedIntArray) { - // The test is different for unsigned int values. Since we need - // the value to be in the range of a positive smi, we can't - // handle either of the top two bits being set in the value. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - Label box_int, done; - __ tst(value, Operand(0xC0000000)); - __ b(ne, &box_int); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - __ bind(&box_int); - __ vmov(s0, value); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all - // registers - also when jumping due to exhausted young space. 
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); - - __ vcvt_f64_u32(d0, s0); - __ sub(r1, r2, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); - - __ mov(r0, r2); - __ Ret(); - } else { - // Check whether unsigned integer fits into smi. - Label box_int_0, box_int_1, done; - __ tst(value, Operand(0x80000000)); - __ b(ne, &box_int_0); - __ tst(value, Operand(0x40000000)); - __ b(ne, &box_int_1); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - Register hiword = value; // r2. - Register loword = r3; - - __ bind(&box_int_0); - // Integer does not have leading zeros. - GenerateUInt2Double(masm, hiword, loword, r4, 0); - __ b(&done); - - __ bind(&box_int_1); - // Integer has one leading zero. - GenerateUInt2Double(masm, hiword, loword, r4, 1); - - - __ bind(&done); - // Integer was converted to double in registers hiword:loword. - // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber - // clobbers all registers - also when jumping due to exhausted young - // space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r7, r6, &slow); - - __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); - __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); - - __ mov(r0, r4); - __ Ret(); - } - } else if (array_type == kExternalFloatArray) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. 
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); - __ vcvt_f64_f32(d0, s0); - __ sub(r1, r2, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); - - __ mov(r0, r2); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r3, r4, r5, r6, &slow); - // VFP is not available, do manual single to double conversion. - - // r2: floating point value (binary32) - // r3: heap number for result - - // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to - // the slow case from here. - __ and_(r0, value, Operand(kBinary32MantissaMask)); - - // Extract exponent to r1. OK to clobber r1 now as there are no jumps to - // the slow case from here. - __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); - __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); - - Label exponent_rebiased; - __ teq(r1, Operand(0x00, RelocInfo::NONE)); - __ b(eq, &exponent_rebiased); - - __ teq(r1, Operand(0xff)); - __ mov(r1, Operand(0x7ff), LeaveCC, eq); - __ b(eq, &exponent_rebiased); - - // Rebias exponent. - __ add(r1, - r1, - Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); - - __ bind(&exponent_rebiased); - __ and_(r2, value, Operand(kBinary32SignMask)); - value = no_reg; - __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); - - // Shift mantissa. 
- static const int kMantissaShiftForHiWord = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaShiftForLoWord = - kBitsPerInt - kMantissaShiftForHiWord; - - __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); - __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); - - __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); - __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); - - __ mov(r0, r3); - __ Ret(); - } - - } else { - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - } - - // Slow case, key and receiver still in r0 and r1. - __ bind(&slow); - __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); - GenerateRuntimeGetProperty(masm); -} - - void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address @@ -1838,384 +1533,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { } -// Convert and store int passed in register ival to IEEE 754 single precision -// floating point value at memory location (dst + 4 * wordoffset) -// If VFP3 is available use it for conversion. -static void StoreIntAsFloat(MacroAssembler* masm, - Register dst, - Register wordoffset, - Register ival, - Register fval, - Register scratch1, - Register scratch2) { - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ vmov(s0, ival); - __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); - __ vcvt_f32_s32(s0, s0); - __ vstr(s0, scratch1, 0); - } else { - Label not_special, done; - // Move sign bit from source to destination. This works because the sign - // bit in the exponent word of the double has the same position and polarity - // as the 2's complement sign bit in a Smi. - ASSERT(kBinary32SignMask == 0x80000000u); - - __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); - // Negate value if it is negative. 
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); - - // We have -1, 0 or 1, which we treat specially. Register ival contains - // absolute value: it is either equal to 1 (special case of -1 and 1), - // greater than 1 (not a special case) or less than 1 (special case of 0). - __ cmp(ival, Operand(1)); - __ b(gt, ¬_special); - - // For 1 or -1 we need to or in the 0 exponent (biased). - static const uint32_t exponent_word_for_1 = - kBinary32ExponentBias << kBinary32ExponentShift; - - __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); - __ b(&done); - - __ bind(¬_special); - // Count leading zeros. - // Gets the wrong answer for 0, but we already checked for that case above. - Register zeros = scratch2; - __ CountLeadingZeros(zeros, ival, scratch1); - - // Compute exponent and or it into the exponent register. - __ rsb(scratch1, - zeros, - Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); - - __ orr(fval, - fval, - Operand(scratch1, LSL, kBinary32ExponentShift)); - - // Shift up the source chopping the top bit off. - __ add(zeros, zeros, Operand(1)); - // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. - __ mov(ival, Operand(ival, LSL, zeros)); - // And the top (top 20 bits). 
- __ orr(fval, - fval, - Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); - - __ bind(&done); - __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); - } -} - - -static bool IsElementTypeSigned(ExternalArrayType array_type) { - switch (array_type) { - case kExternalByteArray: - case kExternalShortArray: - case kExternalIntArray: - return true; - - case kExternalUnsignedByteArray: - case kExternalUnsignedShortArray: - case kExternalUnsignedIntArray: - return false; - - default: - UNREACHABLE(); - return false; - } -} - - -void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- - Label slow, check_heap_number; - - // Register usage. - Register value = r0; - Register key = r1; - Register receiver = r2; - // r3 mostly holds the elements array or the destination external array. - - // Check that the object isn't a smi. - __ BranchOnSmi(receiver, &slow); - - // Check that the object is a JS object. Load map into r3. - __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); - __ b(le, &slow); - - // Check that the receiver does not require access checks. We need - // to do this because this generic stub does not perform map checks. - __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); - __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); - __ b(ne, &slow); - - // Check that the key is a smi. - __ BranchOnNotSmi(key, &slow); - - // Check that the elements array is the appropriate type of ExternalArray. - __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); - __ cmp(r4, ip); - __ b(ne, &slow); - - // Check that the index is in range. - __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. 
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); - __ cmp(r4, ip); - // Unsigned comparison catches both negative and too-large values. - __ b(hs, &slow); - - // Handle both smis and HeapNumbers in the fast path. Go to the - // runtime for all other kinds of values. - // r3: external array. - // r4: key (integer). - __ BranchOnNotSmi(value, &check_heap_number); - __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); - - // r3: base pointer of external storage. - // r4: key (integer). - // r5: value (integer). - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ strb(r5, MemOperand(r3, r4, LSL, 0)); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ strh(r5, MemOperand(r3, r4, LSL, 1)); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ str(r5, MemOperand(r3, r4, LSL, 2)); - break; - case kExternalFloatArray: - // Perform int-to-float conversion and store to memory. - StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); - break; - default: - UNREACHABLE(); - break; - } - - // Entry registers are intact, r0 holds the value which is the return value. - __ Ret(); - - - // r3: external array. - // r4: index (integer). - __ bind(&check_heap_number); - __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); - __ b(ne, &slow); - - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); - - // r3: base pointer of external storage. - // r4: key (integer). - - // The WebGL specification leaves the behavior of storing NaN and - // +/-Infinity into integer arrays basically undefined. For more - // reproducible behavior, convert these to zero. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - - - if (array_type == kExternalFloatArray) { - // vldr requires offset to be a multiple of 4 so we can not - // include -kHeapObjectTag into it. 
- __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ add(r5, r3, Operand(r4, LSL, 2)); - __ vcvt_f32_f64(s0, d0); - __ vstr(s0, r5, 0); - } else { - // Need to perform float-to-int conversion. - // Test for NaN or infinity (both give zero). - __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset)); - - // Hoisted load. vldr requires offset to be a multiple of 4 so we can not - // include -kHeapObjectTag into it. - __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - - __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // NaNs and Infinities have all-one exponents so they sign extend to -1. - __ cmp(r6, Operand(-1)); - __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq); - - // Not infinity or NaN simply convert to int. - if (IsElementTypeSigned(array_type)) { - __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); - } else { - __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); - } - __ vmov(r5, s0, ne); - - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ strb(r5, MemOperand(r3, r4, LSL, 0)); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ strh(r5, MemOperand(r3, r4, LSL, 1)); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ str(r5, MemOperand(r3, r4, LSL, 2)); - break; - default: - UNREACHABLE(); - break; - } - } - - // Entry registers are intact, r0 holds the value which is the return value. - __ Ret(); - } else { - // VFP3 is not available do manual conversions. 
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); - __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); - - if (array_type == kExternalFloatArray) { - Label done, nan_or_infinity_or_zero; - static const int kMantissaInHiWordShift = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaInLoWordShift = - kBitsPerInt - kMantissaInHiWordShift; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. - __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ b(eq, &nan_or_infinity_or_zero); - - __ teq(r9, Operand(r7)); - __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); - __ b(eq, &nan_or_infinity_or_zero); - - // Rebias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ add(r9, - r9, - Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); - - __ cmp(r9, Operand(kBinary32MaxExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); - __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); - __ b(gt, &done); - - __ cmp(r9, Operand(kBinary32MinExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); - __ b(lt, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); - __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); - - __ bind(&done); - __ str(r5, MemOperand(r3, r4, LSL, 2)); - // Entry registers are intact, r0 holds the value which is the return - // value. 
- __ Ret(); - - __ bind(&nan_or_infinity_or_zero); - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r9, r9, r7); - __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); - __ b(&done); - } else { - bool is_signed_type = IsElementTypeSigned(array_type); - int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; - int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; - - Label done, sign; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. - __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); - __ b(eq, &done); - - __ teq(r9, Operand(r7)); - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); - __ b(eq, &done); - - // Unbias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); - // If exponent is negative than result is 0. - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); - __ b(mi, &done); - - // If exponent is too big than result is minimal value. 
- __ cmp(r9, Operand(meaningfull_bits - 1)); - __ mov(r5, Operand(min_value), LeaveCC, ge); - __ b(ge, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); - - __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); - __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); - __ b(pl, &sign); - - __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); - __ mov(r5, Operand(r5, LSL, r9)); - __ rsb(r9, r9, Operand(meaningfull_bits)); - __ orr(r5, r5, Operand(r6, LSR, r9)); - - __ bind(&sign); - __ teq(r7, Operand(0, RelocInfo::NONE)); - __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); - - __ bind(&done); - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ strb(r5, MemOperand(r3, r4, LSL, 0)); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ strh(r5, MemOperand(r3, r4, LSL, 1)); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ str(r5, MemOperand(r3, r4, LSL, 2)); - break; - default: - UNREACHABLE(); - break; - } - } - } - - // Slow case: call runtime. - __ bind(&slow); - - // Entry registers are intact. 
- // r0: value - // r1: key - // r2: receiver - GenerateRuntimeSetProperty(masm); -} - - void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : value diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index b51633e706..c484e39edc 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -64,12 +64,12 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index, } -void LInstruction::PrintTo(StringStream* stream) const { +void LInstruction::PrintTo(StringStream* stream) { stream->Add("%s ", this->Mnemonic()); if (HasResult()) { - result()->PrintTo(stream); - stream->Add(" "); + PrintOutputOperandTo(stream); } + PrintDataTo(stream); if (HasEnvironment()) { @@ -84,7 +84,29 @@ void LInstruction::PrintTo(StringStream* stream) const { } -void LLabel::PrintDataTo(StringStream* stream) const { +template +void LTemplateInstruction::PrintDataTo(StringStream* stream) { + stream->Add("= "); + inputs_.PrintOperandsTo(stream); +} + + +template +void LTemplateInstruction::PrintOutputOperandTo(StringStream* stream) { + results_.PrintOperandsTo(stream); +} + + +template +void OperandContainer::PrintOperandsTo(StringStream* stream) { + for (int i = 0; i < N; i++) { + if (i > 0) stream->Add(" "); + elems_[i]->PrintTo(stream); + } +} + + +void LLabel::PrintDataTo(StringStream* stream) { LGap::PrintDataTo(stream); LLabel* rep = replacement(); if (rep != NULL) { @@ -143,74 +165,65 @@ const char* LArithmeticT::Mnemonic() const { } - -void LBinaryOperation::PrintDataTo(StringStream* stream) const { - stream->Add("= "); - left()->PrintTo(stream); - stream->Add(" "); - right()->PrintTo(stream); -} - - -void LGoto::PrintDataTo(StringStream* stream) const { +void LGoto::PrintDataTo(StringStream* stream) { stream->Add("B%d", block_id()); } -void LBranch::PrintDataTo(StringStream* stream) const { +void LBranch::PrintDataTo(StringStream* stream) { stream->Add("B%d | B%d on ", 
true_block_id(), false_block_id()); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); } -void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const { +void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); - left()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); + InputAt(1)->PrintTo(stream); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } -void LIsNullAndBranch::PrintDataTo(StringStream* stream) const { +void LIsNullAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(is_strict() ? " === null" : " == null"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } -void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const { +void LIsObjectAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if is_object("); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const { +void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if is_smi("); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const { +void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if has_instance_type("); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } -void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const { +void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if has_cached_array_index("); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), 
false_block_id()); } -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const { +void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if class_of_test("); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), true_block_id(), @@ -218,29 +231,29 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const { } -void LTypeofIs::PrintDataTo(StringStream* stream) const { - input()->PrintTo(stream); +void LTypeofIs::PrintDataTo(StringStream* stream) { + InputAt(0)->PrintTo(stream); stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString()); } -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const { +void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if typeof "); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(" == \"%s\" then B%d else B%d", *hydrogen()->type_literal()->ToCString(), true_block_id(), false_block_id()); } -void LCallConstantFunction::PrintDataTo(StringStream* stream) const { +void LCallConstantFunction::PrintDataTo(StringStream* stream) { stream->Add("#%d / ", arity()); } -void LUnaryMathOperation::PrintDataTo(StringStream* stream) const { +void LUnaryMathOperation::PrintDataTo(StringStream* stream) { stream->Add("/%s ", hydrogen()->OpName()); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); } @@ -249,48 +262,43 @@ void LLoadContextSlot::PrintDataTo(StringStream* stream) { } -void LCallKeyed::PrintDataTo(StringStream* stream) const { +void LCallKeyed::PrintDataTo(StringStream* stream) { stream->Add("[r2] #%d / ", arity()); } -void LCallNamed::PrintDataTo(StringStream* stream) const { +void LCallNamed::PrintDataTo(StringStream* stream) { SmartPointer name_string = name()->ToCString(); stream->Add("%s #%d / ", *name_string, arity()); } -void LCallGlobal::PrintDataTo(StringStream* stream) const { +void LCallGlobal::PrintDataTo(StringStream* stream) { SmartPointer 
name_string = name()->ToCString(); stream->Add("%s #%d / ", *name_string, arity()); } -void LCallKnownGlobal::PrintDataTo(StringStream* stream) const { +void LCallKnownGlobal::PrintDataTo(StringStream* stream) { stream->Add("#%d / ", arity()); } -void LCallNew::PrintDataTo(StringStream* stream) const { - LUnaryOperation::PrintDataTo(stream); +void LCallNew::PrintDataTo(StringStream* stream) { + stream->Add("= "); + InputAt(0)->PrintTo(stream); stream->Add(" #%d / ", arity()); } -void LClassOfTest::PrintDataTo(StringStream* stream) const { +void LClassOfTest::PrintDataTo(StringStream* stream) { stream->Add("= class_of_test("); - input()->PrintTo(stream); + InputAt(0)->PrintTo(stream); stream->Add(", \"%o\")", *hydrogen()->class_name()); } -void LUnaryOperation::PrintDataTo(StringStream* stream) const { - stream->Add("= "); - input()->PrintTo(stream); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const { +void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); stream->Add(" length "); @@ -301,6 +309,24 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const { } +void LStoreNamed::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyed::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + LChunk::LChunk(HGraph* graph) : spill_slot_count_(0), graph_(graph), @@ -310,11 +336,6 @@ LChunk::LChunk(HGraph* graph) } -void LChunk::Verify() const { - // TODO(twuerthinger): Implement verification for chunk. -} - - int LChunk::GetNextSpillIndex(bool is_double) { // Skip a slot if for a double-width slot. 
if (is_double) spill_slot_count_++; @@ -369,24 +390,6 @@ void LChunk::MarkEmptyBlocks() { } -void LStoreNamed::PrintDataTo(StringStream* stream) const { - object()->PrintTo(stream); - stream->Add("."); - stream->Add(*String::cast(*name())->ToCString()); - stream->Add(" <- "); - value()->PrintTo(stream); -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) const { - object()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - stream->Add("] <- "); - value()->PrintTo(stream); -} - - int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { LGap* gap = new LGap(block); int index = -1; @@ -593,33 +596,52 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { } -LInstruction* LChunkBuilder::Define(LInstruction* instr) { +template +LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr, + LUnallocated* result) { + allocator_->RecordDefinition(current_instruction_, result); + instr->set_result(result); + return instr; +} + + +template +LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) { return Define(instr, new LUnallocated(LUnallocated::NONE)); } -LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) { +template +LInstruction* LChunkBuilder::DefineAsRegister( + LTemplateInstruction<1, I, T>* instr) { return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); } -LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) { +template +LInstruction* LChunkBuilder::DefineAsSpilled( + LTemplateInstruction<1, I, T>* instr, int index) { return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index)); } -LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) { +template +LInstruction* LChunkBuilder::DefineSameAsFirst( + LTemplateInstruction<1, I, T>* instr) { return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); } -LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) { +template 
+LInstruction* LChunkBuilder::DefineFixed( + LTemplateInstruction<1, I, T>* instr, Register reg) { return Define(instr, ToUnallocated(reg)); } -LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr, - DoubleRegister reg) { +template +LInstruction* LChunkBuilder::DefineFixedDouble( + LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) { return Define(instr, ToUnallocated(reg)); } @@ -674,16 +696,15 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, } -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - ASSERT(!instr->HasPointerMap()); - instr->set_pointer_map(new LPointerMap(position_)); +LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) { + allocator_->MarkAsSaveDoubles(); return instr; } -LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) { - allocator_->RecordDefinition(current_instruction_, result); - instr->set_result(result); +LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { + ASSERT(!instr->HasPointerMap()); + instr->set_pointer_map(new LPointerMap(position_)); return instr; } @@ -795,7 +816,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, ASSERT(right->representation().IsTagged()); LOperand* left_operand = UseFixed(left, r1); LOperand* right_operand = UseFixed(right, r0); - LInstruction* result = new LArithmeticT(op, left_operand, right_operand); + LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -876,8 +897,14 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { if (FLAG_stress_environments && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); } - if (current->IsBranch()) { - instr->set_hydrogen_value(HBranch::cast(current)->value()); + if (current->IsTest() && !instr->IsGoto()) { + ASSERT(instr->IsControl()); + HTest* test = HTest::cast(current); + instr->set_hydrogen_value(test->value()); + HBasicBlock* first = 
test->FirstSuccessor(); + HBasicBlock* second = test->SecondSuccessor(); + ASSERT(first != NULL && second != NULL); + instr->SetBranchTargets(first->block_id(), second->block_id()); } else { instr->set_hydrogen_value(current); } @@ -931,23 +958,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { } -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { +LInstruction* LChunkBuilder::DoTest(HTest* instr) { HValue* v = instr->value(); - HBasicBlock* first = instr->FirstSuccessor(); - HBasicBlock* second = instr->SecondSuccessor(); - ASSERT(first != NULL && second != NULL); - int first_id = first->block_id(); - int second_id = second->block_id(); - if (v->EmitAtUses()) { if (v->IsClassOfTest()) { HClassOfTest* compare = HClassOfTest::cast(v); ASSERT(compare->value()->representation().IsTagged()); return new LClassOfTestAndBranch(UseTempRegister(compare->value()), - TempRegister(), - first_id, - second_id); + TempRegister()); } else if (v->IsCompare()) { HCompare* compare = HCompare::cast(v); Token::Value op = compare->token(); @@ -958,16 +977,12 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { ASSERT(left->representation().IsInteger32()); ASSERT(right->representation().IsInteger32()); return new LCmpIDAndBranch(UseRegisterAtStart(left), - UseOrConstantAtStart(right), - first_id, - second_id); + UseOrConstantAtStart(right)); } else if (r.IsDouble()) { ASSERT(left->representation().IsDouble()); ASSERT(right->representation().IsDouble()); return new LCmpIDAndBranch(UseRegisterAtStart(left), - UseRegisterAtStart(right), - first_id, - second_id); + UseRegisterAtStart(right)); } else { ASSERT(left->representation().IsTagged()); ASSERT(right->representation().IsTagged()); @@ -975,38 +990,30 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { LOperand* left_operand = UseFixed(left, reversed ? r0 : r1); LOperand* right_operand = UseFixed(right, reversed ? 
r1 : r0); LInstruction* result = new LCmpTAndBranch(left_operand, - right_operand, - first_id, - second_id); + right_operand); return MarkAsCall(result, instr); } } else if (v->IsIsSmi()) { HIsSmi* compare = HIsSmi::cast(v); ASSERT(compare->value()->representation().IsTagged()); - return new LIsSmiAndBranch(Use(compare->value()), - first_id, - second_id); + return new LIsSmiAndBranch(Use(compare->value())); } else if (v->IsHasInstanceType()) { HHasInstanceType* compare = HHasInstanceType::cast(v); ASSERT(compare->value()->representation().IsTagged()); - - return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()), - first_id, - second_id); + return new LHasInstanceTypeAndBranch( + UseRegisterAtStart(compare->value())); } else if (v->IsHasCachedArrayIndex()) { HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v); ASSERT(compare->value()->representation().IsTagged()); return new LHasCachedArrayIndexAndBranch( - UseRegisterAtStart(compare->value()), first_id, second_id); + UseRegisterAtStart(compare->value())); } else if (v->IsIsNull()) { HIsNull* compare = HIsNull::cast(v); ASSERT(compare->value()->representation().IsTagged()); - return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), - first_id, - second_id); + return new LIsNullAndBranch(UseRegisterAtStart(compare->value())); } else if (v->IsIsObject()) { HIsObject* compare = HIsObject::cast(v); ASSERT(compare->value()->representation().IsTagged()); @@ -1015,46 +1022,37 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { LOperand* temp2 = TempRegister(); return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp1, - temp2, - first_id, - second_id); + temp2); } else if (v->IsCompareJSObjectEq()) { HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v); return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()), - UseRegisterAtStart(compare->right()), - first_id, - second_id); + UseRegisterAtStart(compare->right())); } else if (v->IsInstanceOf()) { 
HInstanceOf* instance_of = HInstanceOf::cast(v); LInstruction* result = new LInstanceOfAndBranch(Use(instance_of->left()), - Use(instance_of->right()), - first_id, - second_id); + Use(instance_of->right())); return MarkAsCall(result, instr); } else if (v->IsTypeofIs()) { HTypeofIs* typeof_is = HTypeofIs::cast(v); - return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()), - first_id, - second_id); + return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value())); } else { if (v->IsConstant()) { if (HConstant::cast(v)->handle()->IsTrue()) { - return new LGoto(first_id); + return new LGoto(instr->FirstSuccessor()->block_id()); } else if (HConstant::cast(v)->handle()->IsFalse()) { - return new LGoto(second_id); + return new LGoto(instr->SecondSuccessor()->block_id()); } } Abort("Undefined compare before branch"); return NULL; } } - return new LBranch(UseRegisterAtStart(v), first_id, second_id); + return new LBranch(UseRegisterAtStart(v)); } -LInstruction* LChunkBuilder::DoCompareMapAndBranch( - HCompareMapAndBranch* instr) { +LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { ASSERT(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); @@ -1073,7 +1071,7 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { - LInstruction* result = + LInstanceOf* result = new LInstanceOf(UseFixed(instr->left(), r0), UseFixed(instr->right(), r1)); return MarkAsCall(DefineFixed(result, r0), instr); @@ -1082,9 +1080,10 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( HInstanceOfKnownGlobal* instr) { - LInstruction* result = - new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0)); - return MarkAsCall(DefineFixed(result, r0), instr); + LInstanceOfKnownGlobal* result = + new LInstanceOfKnownGlobal(UseFixed(instr->value(), 
r0), FixedTemp(r4)); + MarkAsSaveDoubles(result); + return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0))); } @@ -1093,10 +1092,10 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { LOperand* receiver = UseFixed(instr->receiver(), r0); LOperand* length = UseRegisterAtStart(instr->length()); LOperand* elements = UseRegisterAtStart(instr->elements()); - LInstruction* result = new LApplyArguments(function, - receiver, - length, - elements); + LApplyArguments* result = new LApplyArguments(function, + receiver, + length, + elements); return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY); } @@ -1129,7 +1128,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { BuiltinFunctionId op = instr->op(); LOperand* input = UseRegisterAtStart(instr->value()); LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; - LInstruction* result = new LUnaryMathOperation(input, temp); + LUnaryMathOperation* result = new LUnaryMathOperation(input, temp); switch (op) { case kMathAbs: return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); @@ -1162,8 +1161,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { ASSERT(instr->key()->representation().IsTagged()); argument_count_ -= instr->argument_count(); - UseFixed(instr->key(), r2); - return MarkAsCall(DefineFixed(new LCallKeyed, r0), instr); + LOperand* key = UseFixed(instr->key(), r2); + return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr); } @@ -1188,7 +1187,7 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) { LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { LOperand* constructor = UseFixed(instr->constructor(), r1); argument_count_ -= instr->argument_count(); - LInstruction* result = new LCallNew(constructor); + LCallNew* result = new LCallNew(constructor); return MarkAsCall(DefineFixed(result, r0), 
instr); } @@ -1378,7 +1377,7 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) { bool reversed = (op == Token::GT || op == Token::LTE); LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1); LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0); - LInstruction* result = new LCmpT(left, right); + LCmpT* result = new LCmpT(left, right); return MarkAsCall(DefineFixed(result, r0), instr); } } @@ -1388,7 +1387,7 @@ LInstruction* LChunkBuilder::DoCompareJSObjectEq( HCompareJSObjectEq* instr) { LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); - LInstruction* result = new LCmpJSObjectEq(left, right); + LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right); return DefineAsRegister(result); } @@ -1455,7 +1454,7 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) { LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { LOperand* object = UseRegister(instr->value()); - LInstruction* result = new LValueOf(object, TempRegister()); + LValueOf* result = new LValueOf(object, TempRegister()); return AssignEnvironment(DefineSameAsFirst(result)); } @@ -1478,7 +1477,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { if (from.IsTagged()) { if (to.IsDouble()) { LOperand* value = UseRegister(instr->value()); - LInstruction* res = new LNumberUntagD(value); + LNumberUntagD* res = new LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); } else { ASSERT(to.IsInteger32()); @@ -1504,13 +1503,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { // Make sure that the temp and result_temp registers are // different. 
LUnallocated* result_temp = TempRegister(); - LInstruction* result = new LNumberTagD(value, temp1, temp2); + LNumberTagD* result = new LNumberTagD(value, temp1, temp2); Define(result, result_temp); return AssignPointerMap(result); } else { ASSERT(to.IsInteger32()); LOperand* value = UseRegister(instr->value()); - LInstruction* res = new LDoubleToI(value); + LDoubleToI* res = new LDoubleToI(value); return AssignEnvironment(DefineAsRegister(res)); } } else if (from.IsInteger32()) { @@ -1520,7 +1519,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { if (val->HasRange() && val->range()->IsInSmiRange()) { return DefineSameAsFirst(new LSmiTag(value)); } else { - LInstruction* result = new LNumberTagI(value); + LNumberTagI* result = new LNumberTagI(value); return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); } } else { @@ -1597,7 +1596,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) { - LInstruction* result = new LLoadGlobal(); + LLoadGlobal* result = new LLoadGlobal(); return instr->check_hole_value() ? 
AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); @@ -1646,7 +1645,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement( ASSERT(instr->key()->representation().IsInteger32()); LOperand* obj = UseRegisterAtStart(instr->object()); LOperand* key = UseRegisterAtStart(instr->key()); - LInstruction* result = new LLoadKeyedFastElement(obj, key); + LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key); return AssignEnvironment(DefineSameAsFirst(result)); } @@ -1717,6 +1716,20 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { } +LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegisterOrConstant(instr->index()); + LStringCharCodeAt* result = new LStringCharCodeAt(string, index); + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); +} + + +LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { + LOperand* string = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LStringLength(string)); +} + + LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr); } @@ -1740,7 +1753,7 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { LOperand* object = UseRegisterAtStart(instr->object()); LOperand* key = UseRegisterAtStart(instr->key()); - LInstruction* result = new LDeleteProperty(object, key); + LDeleteProperty* result = new LDeleteProperty(object, key); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1781,13 +1794,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { LOperand* arguments = UseRegister(instr->arguments()); LOperand* length = UseTempRegister(instr->length()); LOperand* index = UseRegister(instr->index()); - LInstruction* result = new 
LAccessArgumentsAt(arguments, length, index); - return DefineAsRegister(AssignEnvironment(result)); + LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index); + return AssignEnvironment(DefineAsRegister(result)); } LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LInstruction* result = new LTypeof(UseRegisterAtStart(instr->value())); + LTypeof* result = new LTypeof(UseRegisterAtStart(instr->value())); return MarkAsCall(DefineFixed(result, r0), instr); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index aab408111c..81a0266b2f 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -43,10 +43,22 @@ class LCodeGen; // Type hierarchy: // // LInstruction -// LAccessArgumentsAt -// LArgumentsElements -// LArgumentsLength -// LBinaryOperation +// LTemplateInstruction +// LControlInstruction +// LBranch +// LClassOfTestAndBranch +// LCmpJSObjectEqAndBranch +// LCmpIDAndBranch +// LHasCachedArrayIndexAndBranch +// LHasInstanceTypeAndBranch +// LInstanceOfAndBranch +// LIsNullAndBranch +// LIsObjectAndBranch +// LIsSmiAndBranch +// LTypeofIsAndBranch +// LAccessArgumentsAt +// LArgumentsElements +// LArgumentsLength // LAddI // LApplyArguments // LArithmeticD @@ -54,79 +66,73 @@ class LCodeGen; // LBitI // LBoundsCheck // LCmpID -// LCmpIDAndBranch // LCmpJSObjectEq -// LCmpJSObjectEqAndBranch // LCmpT // LDivI // LInstanceOf -// LInstanceOfAndBranch // LInstanceOfKnownGlobal // LLoadKeyedFastElement // LLoadKeyedGeneric // LModI // LMulI +// LPower // LShiftI // LSubI -// LCallConstantFunction -// LCallFunction -// LCallGlobal -// LCallKeyed -// LCallKnownGlobal -// LCallNamed -// LCallRuntime -// LCallStub -// LCheckPrototypeMaps -// LConstant -// LConstantD -// LConstantI -// LConstantT -// LDeoptimize -// LFunctionLiteral -// LGlobalObject -// LGlobalReceiver -// LLabel -// LLazyBailout -// LLoadContextSlot -// LLoadGlobal -// LMaterializedLiteral +// LCallConstantFunction +// 
LCallFunction +// LCallGlobal +// LCallKeyed +// LCallKnownGlobal +// LCallNamed +// LCallRuntime +// LCallStub +// LConstant +// LConstantD +// LConstantI +// LConstantT +// LDeoptimize +// LFunctionLiteral +// LGap +// LLabel +// LGlobalObject +// LGlobalReceiver +// LGoto +// LLazyBailout +// LLoadGlobal +// LCheckPrototypeMaps +// LLoadContextSlot // LArrayLiteral // LObjectLiteral // LRegExpLiteral -// LOsrEntry -// LParameter -// LStackCheck -// LStoreKeyed -// LStoreKeyedFastElement -// LStoreKeyedGeneric -// LStoreNamed -// LStoreNamedField -// LStoreNamedGeneric -// LUnaryOperation -// LJSArrayLength -// LFixedArrayLength +// LOsrEntry +// LParameter +// LRegExpConstructResult +// LStackCheck +// LStoreKeyed +// LStoreKeyedFastElement +// LStoreKeyedGeneric +// LStoreNamed +// LStoreNamedField +// LStoreNamedGeneric +// LStringCharCodeAt // LBitNotI -// LBranch // LCallNew // LCheckFunction +// LCheckPrototypeMaps // LCheckInstanceType // LCheckMap // LCheckSmi // LClassOfTest -// LClassOfTestAndBranch // LDeleteProperty // LDoubleToI +// LFixedArrayLength // LHasCachedArrayIndex -// LHasCachedArrayIndexAndBranch // LHasInstanceType -// LHasInstanceTypeAndBranch // LInteger32ToDouble // LIsNull -// LIsNullAndBranch // LIsObject -// LIsObjectAndBranch // LIsSmi -// LIsSmiAndBranch +// LJSArrayLength // LLoadNamedField // LLoadNamedGeneric // LLoadFunctionPrototype @@ -136,23 +142,21 @@ class LCodeGen; // LReturn // LSmiTag // LStoreGlobal +// LStringLength // LTaggedToI // LThrow // LTypeof // LTypeofIs -// LTypeofIsAndBranch // LUnaryMathOperation // LValueOf -// LUnknownOSRValue +// LUnknownOSRValue #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ - V(BinaryOperation) \ + V(ControlInstruction) \ V(Constant) \ V(Call) \ - V(MaterializedLiteral) \ V(StoreKeyed) \ V(StoreNamed) \ - V(UnaryOperation) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -251,6 +255,8 @@ class LCodeGen; V(StoreNamedField) \ V(StoreNamedGeneric) \ V(SubI) \ + V(StringCharCodeAt) \ + 
V(StringLength) \ V(TaggedToI) \ V(Throw) \ V(Typeof) \ @@ -289,14 +295,17 @@ class LInstruction: public ZoneObject { virtual void CompileToNative(LCodeGen* generator) = 0; virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream) const; - virtual void PrintDataTo(StringStream* stream) const { } + virtual void PrintTo(StringStream* stream); + virtual void PrintDataTo(StringStream* stream) = 0; + virtual void PrintOutputOperandTo(StringStream* stream) = 0; // Declare virtual type testers. #define DECLARE_DO(type) virtual bool Is##type() const { return false; } LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO + virtual bool IsControl() const { return false; } + virtual void SetBranchTargets(int true_block_id, int false_block_id) { } void set_environment(LEnvironment* env) { environment_.set(env); } LEnvironment* environment() const { return environment_.get(); } @@ -306,9 +315,7 @@ class LInstruction: public ZoneObject { LPointerMap* pointer_map() const { return pointer_map_.get(); } bool HasPointerMap() const { return pointer_map_.is_set(); } - void set_result(LOperand* operand) { result_.set(operand); } - LOperand* result() const { return result_.get(); } - bool HasResult() const { return result_.is_set(); } + virtual bool HasResult() const = 0; void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } @@ -326,13 +333,66 @@ class LInstruction: public ZoneObject { private: SetOncePointer environment_; SetOncePointer pointer_map_; - SetOncePointer result_; HValue* hydrogen_value_; SetOncePointer deoptimization_environment_; }; -class LGap: public LInstruction { +template +class OperandContainer { + public: + OperandContainer() { + for (int i = 0; i < NumElements; i++) elems_[i] = NULL; + } + int length() { return NumElements; } + ElementType& operator[](int i) { + ASSERT(i < length()); + return elems_[i]; + } + void PrintOperandsTo(StringStream* stream); + + 
private: + ElementType elems_[NumElements]; +}; + + +template +class OperandContainer { + public: + int length() { return 0; } + void PrintOperandsTo(StringStream* stream) { } +}; + + +// R = number of result operands (0 or 1). +// I = number of input operands. +// T = number of temporary operands. +template +class LTemplateInstruction: public LInstruction { + public: + // Allow 0 or 1 output operands. + STATIC_ASSERT(R == 0 || R == 1); + virtual bool HasResult() const { return R != 0; } + void set_result(LOperand* operand) { results_[0] = operand; } + LOperand* result() { return results_[0]; } + + int InputCount() { return I; } + LOperand* InputAt(int i) { return inputs_[i]; } + + int TempCount() { return T; } + LOperand* TempAt(int i) { return temps_[i]; } + + virtual void PrintDataTo(StringStream* stream); + virtual void PrintOutputOperandTo(StringStream* stream); + + protected: + OperandContainer results_; + OperandContainer inputs_; + OperandContainer temps_; +}; + + +class LGap: public LTemplateInstruction<0, 0, 0> { public: explicit LGap(HBasicBlock* block) : block_(block) { @@ -373,13 +433,13 @@ class LGap: public LInstruction { }; -class LGoto: public LInstruction { +class LGoto: public LTemplateInstruction<0, 0, 0> { public: LGoto(int block_id, bool include_stack_check = false) : block_id_(block_id), include_stack_check_(include_stack_check) { } DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); virtual bool IsControl() const { return true; } int block_id() const { return block_id_; } @@ -391,7 +451,7 @@ class LGoto: public LInstruction { }; -class LLazyBailout: public LInstruction { +class LLazyBailout: public LTemplateInstruction<0, 0, 0> { public: LLazyBailout() : gap_instructions_size_(0) { } @@ -407,7 +467,7 @@ class LLazyBailout: public LInstruction { }; -class LDeoptimize: public LInstruction { +class LDeoptimize: public LTemplateInstruction<0, 0, 0> { 
public: DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") }; @@ -420,7 +480,7 @@ class LLabel: public LGap { DECLARE_CONCRETE_INSTRUCTION(Label, "label") - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); int block_id() const { return block()->block_id(); } bool is_loop_header() const { return block()->IsLoopHeader(); } @@ -435,13 +495,13 @@ class LLabel: public LGap { }; -class LParameter: public LInstruction { +class LParameter: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") }; -class LCallStub: public LInstruction { +class LCallStub: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub") DECLARE_HYDROGEN_ACCESSOR(CallStub) @@ -452,96 +512,81 @@ class LCallStub: public LInstruction { }; -class LUnknownOSRValue: public LInstruction { +class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") }; -class LUnaryOperation: public LInstruction { - public: - explicit LUnaryOperation(LOperand* input) : input_(input) { } - - DECLARE_INSTRUCTION(UnaryOperation) - - LOperand* input() const { return input_; } - - virtual void PrintDataTo(StringStream* stream) const; - - private: - LOperand* input_; -}; - - -class LBinaryOperation: public LInstruction { +template +class LControlInstruction: public LTemplateInstruction<0, I, T> { public: - LBinaryOperation(LOperand* left, LOperand* right) - : left_(left), right_(right) { } - - DECLARE_INSTRUCTION(BinaryOperation) + DECLARE_INSTRUCTION(ControlInstruction) + virtual bool IsControl() const { return true; } - LOperand* left() const { return left_; } - LOperand* right() const { return right_; } - virtual void PrintDataTo(StringStream* stream) const; + int true_block_id() const { return true_block_id_; } + int false_block_id() const { return false_block_id_; } + void SetBranchTargets(int 
true_block_id, int false_block_id) { + true_block_id_ = true_block_id; + false_block_id_ = false_block_id; + } private: - LOperand* left_; - LOperand* right_; + int true_block_id_; + int false_block_id_; }; -class LApplyArguments: public LBinaryOperation { +class LApplyArguments: public LTemplateInstruction<1, 4, 0> { public: LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length, - LOperand* elements) - : LBinaryOperation(function, receiver), - length_(length), - elements_(elements) { } + LOperand* elements) { + inputs_[0] = function; + inputs_[1] = receiver; + inputs_[2] = length; + inputs_[3] = elements; + } DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - LOperand* function() const { return left(); } - LOperand* receiver() const { return right(); } - LOperand* length() const { return length_; } - LOperand* elements() const { return elements_; } - - private: - LOperand* length_; - LOperand* elements_; + LOperand* function() { return inputs_[0]; } + LOperand* receiver() { return inputs_[1]; } + LOperand* length() { return inputs_[2]; } + LOperand* elements() { return inputs_[3]; } }; -class LAccessArgumentsAt: public LInstruction { +class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> { public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) - : arguments_(arguments), length_(length), index_(index) { } + LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { + inputs_[0] = arguments; + inputs_[1] = length; + inputs_[2] = index; + } DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - LOperand* arguments() const { return arguments_; } - LOperand* length() const { return length_; } - LOperand* index() const { return index_; } + LOperand* arguments() { return inputs_[0]; } + LOperand* length() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } - virtual void PrintDataTo(StringStream* stream) const; - - private: - LOperand* arguments_; - 
LOperand* length_; - LOperand* index_; + virtual void PrintDataTo(StringStream* stream); }; -class LArgumentsLength: public LUnaryOperation { +class LArgumentsLength: public LTemplateInstruction<1, 1, 0> { public: - explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {} + explicit LArgumentsLength(LOperand* elements) { + inputs_[0] = elements; + } DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") }; -class LArgumentsElements: public LInstruction { +class LArgumentsElements: public LTemplateInstruction<1, 0, 0> { public: LArgumentsElements() { } @@ -549,341 +594,274 @@ class LArgumentsElements: public LInstruction { }; -class LModI: public LBinaryOperation { +class LModI: public LTemplateInstruction<1, 2, 0> { public: - LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { } + LModI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") DECLARE_HYDROGEN_ACCESSOR(Mod) }; -class LDivI: public LBinaryOperation { +class LDivI: public LTemplateInstruction<1, 2, 0> { public: - LDivI(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } + LDivI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_HYDROGEN_ACCESSOR(Div) }; -class LMulI: public LBinaryOperation { +class LMulI: public LTemplateInstruction<1, 2, 1> { public: - LMulI(LOperand* left, LOperand* right, LOperand* temp) - : LBinaryOperation(left, right), temp_(temp) { } + LMulI(LOperand* left, LOperand* right, LOperand* temp) { + inputs_[0] = left; + inputs_[1] = right; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") DECLARE_HYDROGEN_ACCESSOR(Mul) - - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; }; -class LCmpID: public LBinaryOperation { +class LCmpID: public LTemplateInstruction<1, 2, 0> { public: - LCmpID(LOperand* left, LOperand* right) - : 
LBinaryOperation(left, right) { } + LCmpID(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id") + DECLARE_HYDROGEN_ACCESSOR(Compare) Token::Value op() const { return hydrogen()->token(); } bool is_double() const { return hydrogen()->GetInputRepresentation().IsDouble(); } - - DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id") - DECLARE_HYDROGEN_ACCESSOR(Compare) }; -class LCmpIDAndBranch: public LCmpID { +class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: - LCmpIDAndBranch(LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id) - : LCmpID(left, right), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + LCmpIDAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } + DECLARE_HYDROGEN_ACCESSOR(Compare) - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } + Token::Value op() const { return hydrogen()->token(); } + bool is_double() const { + return hydrogen()->GetInputRepresentation().IsDouble(); + } - private: - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LUnaryMathOperation: public LUnaryOperation { +class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { public: - explicit LUnaryMathOperation(LOperand* value, LOperand* temp) - : LUnaryOperation(value), temp_(temp) { } + LUnaryMathOperation(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation") DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); BuiltinFunctionId 
op() const { return hydrogen()->op(); } - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; }; -class LCmpJSObjectEq: public LBinaryOperation { +class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> { public: - LCmpJSObjectEq(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) {} + LCmpJSObjectEq(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq") }; -class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq { +class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> { public: - LCmpJSObjectEqAndBranch(LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id) - : LCmpJSObjectEq(left, right), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch, "cmp-jsobject-eq-and-branch") - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; }; -class LIsNull: public LUnaryOperation { +class LIsNull: public LTemplateInstruction<1, 1, 0> { public: - explicit LIsNull(LOperand* value) : LUnaryOperation(value) {} + explicit LIsNull(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null") - DECLARE_HYDROGEN_ACCESSOR(IsNull); + DECLARE_HYDROGEN_ACCESSOR(IsNull) bool is_strict() const { return hydrogen()->is_strict(); } }; - -class LIsNullAndBranch: public LIsNull { +class LIsNullAndBranch: public LControlInstruction<1, 0> { public: - LIsNullAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LIsNull(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + explicit LIsNullAndBranch(LOperand* value) { + inputs_[0] = value; + } 
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } + DECLARE_HYDROGEN_ACCESSOR(IsNull) - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } + bool is_strict() const { return hydrogen()->is_strict(); } - private: - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LIsObject: public LUnaryOperation { +class LIsObject: public LTemplateInstruction<1, 1, 1> { public: - LIsObject(LOperand* value, LOperand* temp) - : LUnaryOperation(value), temp_(temp) {} + LIsObject(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object") - - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; }; -class LIsObjectAndBranch: public LIsObject { +class LIsObjectAndBranch: public LControlInstruction<1, 2> { public: - LIsObjectAndBranch(LOperand* value, - LOperand* temp, - LOperand* temp2, - int true_block_id, - int false_block_id) - : LIsObject(value, temp), - temp2_(temp2), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp; + temps_[1] = temp2; + } DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - LOperand* temp2() const { return temp2_; } - private: - LOperand* temp2_; - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LIsSmi: public LUnaryOperation { +class LIsSmi: public LTemplateInstruction<1, 1, 0> { public: - explicit 
LIsSmi(LOperand* value) : LUnaryOperation(value) {} + explicit LIsSmi(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi") DECLARE_HYDROGEN_ACCESSOR(IsSmi) }; -class LIsSmiAndBranch: public LIsSmi { +class LIsSmiAndBranch: public LControlInstruction<1, 0> { public: - LIsSmiAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LIsSmi(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + explicit LIsSmiAndBranch(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LHasInstanceType: public LUnaryOperation { +class LHasInstanceType: public LTemplateInstruction<1, 1, 0> { public: - explicit LHasInstanceType(LOperand* value) - : LUnaryOperation(value) { } + explicit LHasInstanceType(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type") DECLARE_HYDROGEN_ACCESSOR(HasInstanceType) - - InstanceType TestType(); // The type to test against when generating code. - Condition BranchCondition(); // The branch condition for 'true'. 
}; -class LHasInstanceTypeAndBranch: public LHasInstanceType { +class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> { public: - LHasInstanceTypeAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LHasInstanceType(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + explicit LHasInstanceTypeAndBranch(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, "has-instance-type-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } + DECLARE_HYDROGEN_ACCESSOR(HasInstanceType) - private: - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LHasCachedArrayIndex: public LUnaryOperation { +class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: - explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {} + explicit LHasCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index") DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex) }; -class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex { +class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { public: - LHasCachedArrayIndexAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LHasCachedArrayIndex(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + explicit LHasCachedArrayIndexAndBranch(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch, "has-cached-array-index-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const 
{ return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LClassOfTest: public LUnaryOperation { +class LClassOfTest: public LTemplateInstruction<1, 1, 0> { public: - explicit LClassOfTest(LOperand* value) : LUnaryOperation(value) {} + explicit LClassOfTest(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test") DECLARE_HYDROGEN_ACCESSOR(ClassOfTest) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); }; -class LClassOfTestAndBranch: public LClassOfTest { +class LClassOfTestAndBranch: public LControlInstruction<1, 1> { public: - LClassOfTestAndBranch(LOperand* value, - LOperand* temporary, - int true_block_id, - int false_block_id) - : LClassOfTest(value), - temporary_(temporary), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + LClassOfTestAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - LOperand* temporary() { return temporary_; } + DECLARE_HYDROGEN_ACCESSOR(ClassOfTest) - private: - LOperand* temporary_; - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LCmpT: public LBinaryOperation { +class LCmpT: public LTemplateInstruction<1, 2, 0> { public: - LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {} + LCmpT(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") DECLARE_HYDROGEN_ACCESSOR(Compare) @@ -892,61 +870,48 @@ class LCmpT: public LBinaryOperation { }; -class 
LCmpTAndBranch: public LCmpT { +class LCmpTAndBranch: public LControlInstruction<2, 0> { public: - LCmpTAndBranch(LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id) - : LCmpT(left, right), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + LCmpTAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch") + DECLARE_HYDROGEN_ACCESSOR(Compare) - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; + Token::Value op() const { return hydrogen()->token(); } }; -class LInstanceOf: public LBinaryOperation { +class LInstanceOf: public LTemplateInstruction<1, 2, 0> { public: - LInstanceOf(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } + LInstanceOf(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of") }; -class LInstanceOfAndBranch: public LInstanceOf { +class LInstanceOfAndBranch: public LControlInstruction<2, 0> { public: - LInstanceOfAndBranch(LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id) - : LInstanceOf(left, right), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + LInstanceOfAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch") - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; }; -class LInstanceOfKnownGlobal: public LUnaryOperation { +class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { public: - explicit LInstanceOfKnownGlobal(LOperand* left) - : LUnaryOperation(left) { } + LInstanceOfKnownGlobal(LOperand* 
value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, "instance-of-known-global") @@ -956,22 +921,27 @@ class LInstanceOfKnownGlobal: public LUnaryOperation { }; -class LBoundsCheck: public LBinaryOperation { +class LBoundsCheck: public LTemplateInstruction<0, 2, 0> { public: - LBoundsCheck(LOperand* index, LOperand* length) - : LBinaryOperation(index, length) { } + LBoundsCheck(LOperand* index, LOperand* length) { + inputs_[0] = index; + inputs_[1] = length; + } - LOperand* index() const { return left(); } - LOperand* length() const { return right(); } + LOperand* index() { return inputs_[0]; } + LOperand* length() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") }; -class LBitI: public LBinaryOperation { +class LBitI: public LTemplateInstruction<1, 2, 0> { public: LBitI(Token::Value op, LOperand* left, LOperand* right) - : LBinaryOperation(left, right), op_(op) { } + : op_(op) { + inputs_[0] = left; + inputs_[1] = right; + } Token::Value op() const { return op_; } @@ -982,10 +952,13 @@ class LBitI: public LBinaryOperation { }; -class LShiftI: public LBinaryOperation { +class LShiftI: public LTemplateInstruction<1, 2, 0> { public: LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { } + : op_(op), can_deopt_(can_deopt) { + inputs_[0] = left; + inputs_[1] = right; + } Token::Value op() const { return op_; } @@ -999,17 +972,19 @@ class LShiftI: public LBinaryOperation { }; -class LSubI: public LBinaryOperation { +class LSubI: public LTemplateInstruction<1, 2, 0> { public: - LSubI(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } + LSubI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") DECLARE_HYDROGEN_ACCESSOR(Sub) }; -class LConstant: public LInstruction { +class LConstant: public 
LTemplateInstruction<1, 0, 0> { DECLARE_INSTRUCTION(Constant) }; @@ -1050,115 +1025,114 @@ class LConstantT: public LConstant { }; -class LBranch: public LUnaryOperation { +class LBranch: public LControlInstruction<1, 0> { public: - LBranch(LOperand* input, int true_block_id, int false_block_id) - : LUnaryOperation(input), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + explicit LBranch(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") DECLARE_HYDROGEN_ACCESSOR(Value) - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LCmpMapAndBranch: public LUnaryOperation { +class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> { public: - LCmpMapAndBranch(LOperand* value, LOperand* temp) - : LUnaryOperation(value), temp_(temp) { } + LCmpMapAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch) + DECLARE_HYDROGEN_ACCESSOR(CompareMap) virtual bool IsControl() const { return true; } - LOperand* temp() const { return temp_; } Handle map() const { return hydrogen()->map(); } int true_block_id() const { - return hydrogen()->true_destination()->block_id(); + return hydrogen()->FirstSuccessor()->block_id(); } int false_block_id() const { - return hydrogen()->false_destination()->block_id(); + return hydrogen()->SecondSuccessor()->block_id(); } - - private: - LOperand* temp_; }; -class LJSArrayLength: public LUnaryOperation { +class LJSArrayLength: public LTemplateInstruction<1, 1, 0> { public: - explicit LJSArrayLength(LOperand* input) : LUnaryOperation(input) { } + explicit 
LJSArrayLength(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length") DECLARE_HYDROGEN_ACCESSOR(JSArrayLength) }; -class LFixedArrayLength: public LUnaryOperation { +class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> { public: - explicit LFixedArrayLength(LOperand* input) : LUnaryOperation(input) { } + explicit LFixedArrayLength(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length") DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength) }; -class LValueOf: public LUnaryOperation { +class LValueOf: public LTemplateInstruction<1, 1, 1> { public: - LValueOf(LOperand* input, LOperand* temporary) - : LUnaryOperation(input), temporary_(temporary) { } - - LOperand* temporary() const { return temporary_; } + LValueOf(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of") DECLARE_HYDROGEN_ACCESSOR(ValueOf) - - private: - LOperand* temporary_; }; -class LThrow: public LUnaryOperation { +class LThrow: public LTemplateInstruction<0, 1, 0> { public: - explicit LThrow(LOperand* value) : LUnaryOperation(value) { } + explicit LThrow(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(Throw, "throw") }; -class LBitNotI: public LUnaryOperation { +class LBitNotI: public LTemplateInstruction<1, 1, 0> { public: - explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { } + explicit LBitNotI(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i") }; -class LAddI: public LBinaryOperation { +class LAddI: public LTemplateInstruction<1, 2, 0> { public: - LAddI(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } + LAddI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") DECLARE_HYDROGEN_ACCESSOR(Add) }; -class LArithmeticD: public LBinaryOperation { 
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) - : LBinaryOperation(left, right), op_(op) { } + : op_(op) { + inputs_[0] = left; + inputs_[1] = right; + } Token::Value op() const { return op_; } @@ -1170,10 +1144,13 @@ class LArithmeticD: public LBinaryOperation { }; -class LArithmeticT: public LBinaryOperation { +class LArithmeticT: public LTemplateInstruction<1, 2, 0> { public: LArithmeticT(Token::Value op, LOperand* left, LOperand* right) - : LBinaryOperation(left, right), op_(op) { } + : op_(op) { + inputs_[0] = left; + inputs_[1] = right; + } virtual void CompileToNative(LCodeGen* generator); virtual const char* Mnemonic() const; @@ -1185,166 +1162,186 @@ class LArithmeticT: public LBinaryOperation { }; -class LReturn: public LUnaryOperation { +class LReturn: public LTemplateInstruction<0, 1, 0> { public: - explicit LReturn(LOperand* use) : LUnaryOperation(use) { } + explicit LReturn(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; -class LLoadNamedField: public LUnaryOperation { +class LLoadNamedField: public LTemplateInstruction<1, 1, 0> { public: - explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { } + explicit LLoadNamedField(LOperand* object) { + inputs_[0] = object; + } DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) }; -class LLoadNamedGeneric: public LUnaryOperation { +class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> { public: - explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { } + explicit LLoadNamedGeneric(LOperand* object) { + inputs_[0] = object; + } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) - LOperand* object() const { return input(); } + LOperand* object() { return inputs_[0]; } Handle name() const { return hydrogen()->name(); } }; 
-class LLoadFunctionPrototype: public LUnaryOperation { +class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> { public: - explicit LLoadFunctionPrototype(LOperand* function) - : LUnaryOperation(function) { } + explicit LLoadFunctionPrototype(LOperand* function) { + inputs_[0] = function; + } DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) - LOperand* function() const { return input(); } + LOperand* function() { return inputs_[0]; } }; -class LLoadElements: public LUnaryOperation { +class LLoadElements: public LTemplateInstruction<1, 1, 0> { public: - explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { } + explicit LLoadElements(LOperand* object) { + inputs_[0] = object; + } DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements") }; -class LLoadKeyedFastElement: public LBinaryOperation { +class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedFastElement(LOperand* elements, LOperand* key) - : LBinaryOperation(elements, key) { } + LLoadKeyedFastElement(LOperand* elements, LOperand* key) { + inputs_[0] = elements; + inputs_[1] = key; + } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) - LOperand* elements() const { return left(); } - LOperand* key() const { return right(); } + LOperand* elements() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } }; -class LLoadKeyedGeneric: public LBinaryOperation { +class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedGeneric(LOperand* obj, LOperand* key) - : LBinaryOperation(obj, key) { } + LLoadKeyedGeneric(LOperand* obj, LOperand* key) { + inputs_[0] = obj; + inputs_[1] = key; + } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") - LOperand* object() const { return left(); } - LOperand* key() const { return right(); } + LOperand* object() { return 
inputs_[0]; } + LOperand* key() { return inputs_[1]; } }; -class LLoadGlobal: public LInstruction { +class LLoadGlobal: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global") DECLARE_HYDROGEN_ACCESSOR(LoadGlobal) }; -class LStoreGlobal: public LUnaryOperation { +class LStoreGlobal: public LTemplateInstruction<0, 1, 0> { public: - explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {} + explicit LStoreGlobal(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global") DECLARE_HYDROGEN_ACCESSOR(StoreGlobal) }; -class LLoadContextSlot: public LInstruction { +class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - int context_chain_length() const { - return hydrogen()->context_chain_length(); - } - int slot_index() const { return hydrogen()->slot_index(); } + int context_chain_length() { return hydrogen()->context_chain_length(); } + int slot_index() { return hydrogen()->slot_index(); } virtual void PrintDataTo(StringStream* stream); }; -class LPushArgument: public LUnaryOperation { +class LPushArgument: public LTemplateInstruction<0, 1, 0> { public: - explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {} + explicit LPushArgument(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") }; -class LGlobalObject: public LInstruction { +class LGlobalObject: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") }; -class LGlobalReceiver: public LInstruction { +class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver") }; -class LCallConstantFunction: public LInstruction { +class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> { public: 
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function") DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - Handle function() const { return hydrogen()->function(); } + Handle function() { return hydrogen()->function(); } int arity() const { return hydrogen()->argument_count() - 1; } }; -class LCallKeyed: public LInstruction { +class LCallKeyed: public LTemplateInstruction<1, 1, 0> { public: + explicit LCallKeyed(LOperand* key) { + inputs_[0] = key; + } + DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") DECLARE_HYDROGEN_ACCESSOR(CallKeyed) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } }; -class LCallNamed: public LInstruction { + +class LCallNamed: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named") DECLARE_HYDROGEN_ACCESSOR(CallNamed) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle name() const { return hydrogen()->name(); } int arity() const { return hydrogen()->argument_count() - 1; } }; -class LCallFunction: public LInstruction { +class LCallFunction: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") DECLARE_HYDROGEN_ACCESSOR(CallFunction) @@ -1353,44 +1350,46 @@ class LCallFunction: public LInstruction { }; -class LCallGlobal: public LInstruction { +class LCallGlobal: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global") DECLARE_HYDROGEN_ACCESSOR(CallGlobal) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle name() const {return hydrogen()->name(); } int arity() const { return hydrogen()->argument_count() - 1; } }; -class 
LCallKnownGlobal: public LInstruction { +class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global") DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle target() const { return hydrogen()->target(); } int arity() const { return hydrogen()->argument_count() - 1; } }; -class LCallNew: public LUnaryOperation { +class LCallNew: public LTemplateInstruction<1, 1, 0> { public: - explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { } + explicit LCallNew(LOperand* constructor) { + inputs_[0] = constructor; + } DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new") DECLARE_HYDROGEN_ACCESSOR(CallNew) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } }; -class LCallRuntime: public LInstruction { +class LCallRuntime: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) @@ -1400,42 +1399,44 @@ class LCallRuntime: public LInstruction { }; -class LInteger32ToDouble: public LUnaryOperation { +class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> { public: - explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { } + explicit LInteger32ToDouble(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") }; -class LNumberTagI: public LUnaryOperation { +class LNumberTagI: public LTemplateInstruction<1, 1, 0> { public: - explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { } + explicit LNumberTagI(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") }; -class LNumberTagD: public LUnaryOperation { +class LNumberTagD: public LTemplateInstruction<1, 1, 2> { 
public: - LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) - : LUnaryOperation(value), temp1_(temp1), temp2_(temp2) { } + LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + } DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - - LOperand* temp1() const { return temp1_; } - LOperand* temp2() const { return temp2_; } - - private: - LOperand* temp1_; - LOperand* temp2_; }; // Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI: public LUnaryOperation { +class LDoubleToI: public LTemplateInstruction<1, 1, 0> { public: - explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { } + explicit LDoubleToI(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") DECLARE_HYDROGEN_ACCESSOR(Change) @@ -1445,42 +1446,46 @@ class LDoubleToI: public LUnaryOperation { // Truncating conversion from a tagged value to an int32. -class LTaggedToI: public LUnaryOperation { +class LTaggedToI: public LTemplateInstruction<1, 1, 1> { public: - LTaggedToI(LOperand* value, LOperand* temp) - : LUnaryOperation(value), temp_(temp) { } + LTaggedToI(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") DECLARE_HYDROGEN_ACCESSOR(Change) bool truncating() { return hydrogen()->CanTruncateToInt32(); } - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; }; -class LSmiTag: public LUnaryOperation { +class LSmiTag: public LTemplateInstruction<1, 1, 0> { public: - explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { } + explicit LSmiTag(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") }; -class LNumberUntagD: public LUnaryOperation { +class LNumberUntagD: public LTemplateInstruction<1, 1, 0> { public: - explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { } + 
explicit LNumberUntagD(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") }; -class LSmiUntag: public LUnaryOperation { +class LSmiUntag: public LTemplateInstruction<1, 1, 0> { public: - LSmiUntag(LOperand* use, bool needs_check) - : LUnaryOperation(use), needs_check_(needs_check) { } + LSmiUntag(LOperand* value, bool needs_check) + : needs_check_(needs_check) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") @@ -1491,23 +1496,21 @@ class LSmiUntag: public LUnaryOperation { }; -class LStoreNamed: public LInstruction { +class LStoreNamed: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamed(LOperand* obj, LOperand* val) - : object_(obj), value_(val) { } + LStoreNamed(LOperand* obj, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = val; + } DECLARE_INSTRUCTION(StoreNamed) DECLARE_HYDROGEN_ACCESSOR(StoreNamed) - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - LOperand* object() const { return object_; } + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } Handle name() const { return hydrogen()->name(); } - LOperand* value() const { return value_; } - - private: - LOperand* object_; - LOperand* value_; }; @@ -1522,7 +1525,7 @@ class LStoreNamedField: public LStoreNamed { bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } - Handle transition() { return hydrogen()->transition(); } + Handle transition() const { return hydrogen()->transition(); } }; @@ -1536,23 +1539,21 @@ class LStoreNamedGeneric: public LStoreNamed { }; -class LStoreKeyed: public LInstruction { +class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) - : object_(obj), key_(key), value_(val) { } + LStoreKeyed(LOperand* obj, 
LOperand* key, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = key; + inputs_[2] = val; + } DECLARE_INSTRUCTION(StoreKeyed) - virtual void PrintDataTo(StringStream* stream) const; - - LOperand* object() const { return object_; } - LOperand* key() const { return key_; } - LOperand* value() const { return value_; } + virtual void PrintDataTo(StringStream* stream); - private: - LOperand* object_; - LOperand* key_; - LOperand* value_; + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; @@ -1576,62 +1577,88 @@ class LStoreKeyedGeneric: public LStoreKeyed { }; -class LCheckFunction: public LUnaryOperation { +class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> { + public: + LStringCharCodeAt(LOperand* string, LOperand* index) { + inputs_[0] = string; + inputs_[1] = index; + } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") + DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) + + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } +}; + + +class LStringLength: public LTemplateInstruction<1, 1, 0> { public: - explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { } + explicit LStringLength(LOperand* string) { + inputs_[0] = string; + } + + DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length") + DECLARE_HYDROGEN_ACCESSOR(StringLength) + + LOperand* string() { return inputs_[0]; } +}; + + +class LCheckFunction: public LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckFunction(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function") DECLARE_HYDROGEN_ACCESSOR(CheckFunction) }; -class LCheckInstanceType: public LUnaryOperation { +class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckInstanceType(LOperand* use) : LUnaryOperation(use) { } + explicit LCheckInstanceType(LOperand* value) { + inputs_[0] = value; + } 
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) - - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; }; -class LCheckMap: public LUnaryOperation { +class LCheckMap: public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { } + explicit LCheckMap(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map") DECLARE_HYDROGEN_ACCESSOR(CheckMap) }; -class LCheckPrototypeMaps: public LInstruction { +class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { public: - LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) - : temp1_(temp1), temp2_(temp2) { } + LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) { + temps_[0] = temp1; + temps_[1] = temp2; + } DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps") DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps) Handle prototype() const { return hydrogen()->prototype(); } Handle holder() const { return hydrogen()->holder(); } - - LOperand* temp1() const { return temp1_; } - LOperand* temp2() const { return temp2_; } - - private: - LOperand* temp1_; - LOperand* temp2_; }; -class LCheckSmi: public LUnaryOperation { +class LCheckSmi: public LTemplateInstruction<0, 1, 0> { public: - LCheckSmi(LOperand* use, Condition condition) - : LUnaryOperation(use), condition_(condition) { } + LCheckSmi(LOperand* value, Condition condition) + : condition_(condition) { + inputs_[0] = value; + } Condition condition() const { return condition_; } @@ -1645,34 +1672,28 @@ class LCheckSmi: public LUnaryOperation { }; -class LMaterializedLiteral: public LInstruction { - public: - DECLARE_INSTRUCTION(MaterializedLiteral) -}; - - -class LArrayLiteral: public LMaterializedLiteral { +class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) }; 
-class LObjectLiteral: public LMaterializedLiteral { +class LObjectLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal") DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral) }; -class LRegExpLiteral: public LMaterializedLiteral { +class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal") DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral) }; -class LFunctionLiteral: public LInstruction { +class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) @@ -1681,61 +1702,61 @@ class LFunctionLiteral: public LInstruction { }; -class LTypeof: public LUnaryOperation { +class LTypeof: public LTemplateInstruction<1, 1, 0> { public: - explicit LTypeof(LOperand* input) : LUnaryOperation(input) { } + explicit LTypeof(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") }; -class LTypeofIs: public LUnaryOperation { +class LTypeofIs: public LTemplateInstruction<1, 1, 0> { public: - explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { } - virtual void PrintDataTo(StringStream* stream) const; + explicit LTypeofIs(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is") DECLARE_HYDROGEN_ACCESSOR(TypeofIs) Handle type_literal() { return hydrogen()->type_literal(); } + + virtual void PrintDataTo(StringStream* stream); }; -class LTypeofIsAndBranch: public LTypeofIs { +class LTypeofIsAndBranch: public LControlInstruction<1, 0> { public: - LTypeofIsAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LTypeofIs(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } + explicit LTypeofIsAndBranch(LOperand* value) { + inputs_[0] = value; + } DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") + 
DECLARE_HYDROGEN_ACCESSOR(TypeofIs) - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } + Handle type_literal() { return hydrogen()->type_literal(); } - private: - int true_block_id_; - int false_block_id_; + virtual void PrintDataTo(StringStream* stream); }; -class LDeleteProperty: public LBinaryOperation { +class LDeleteProperty: public LTemplateInstruction<1, 2, 0> { public: - LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {} + LDeleteProperty(LOperand* obj, LOperand* key) { + inputs_[0] = obj; + inputs_[1] = key; + } DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property") - LOperand* object() const { return left(); } - LOperand* key() const { return right(); } + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } }; -class LOsrEntry: public LInstruction { +class LOsrEntry: public LTemplateInstruction<0, 0, 0> { public: LOsrEntry(); @@ -1758,7 +1779,7 @@ class LOsrEntry: public LInstruction { }; -class LStackCheck: public LInstruction { +class LStackCheck: public LTemplateInstruction<0, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") }; @@ -1814,8 +1835,6 @@ class LChunk: public ZoneObject { inlined_closures_.Add(closure); } - void Verify() const; - private: int spill_slot_count_; HGraph* const graph_; @@ -1872,9 +1891,10 @@ class LChunkBuilder BASE_EMBEDDED { LUnallocated* ToUnallocated(DoubleRegister reg); // Methods for setting up define-use relationships. 
- LOperand* Use(HValue* value, LUnallocated* operand); - LOperand* UseFixed(HValue* value, Register fixed_register); - LOperand* UseFixedDouble(HValue* value, DoubleRegister fixed_register); + MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); + MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); + MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, + DoubleRegister fixed_register); // A value that is guaranteed to be allocated to a register. // Operand created by UseRegister is guaranteed to be live until the end of @@ -1884,37 +1904,53 @@ class LChunkBuilder BASE_EMBEDDED { // instruction start. Register allocator is free to assign the same register // to some other operand used inside instruction (i.e. temporary or // output). - LOperand* UseRegister(HValue* value); - LOperand* UseRegisterAtStart(HValue* value); + MUST_USE_RESULT LOperand* UseRegister(HValue* value); + MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); // An input operand in a register that may be trashed. - LOperand* UseTempRegister(HValue* value); + MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); // An input operand in a register or stack slot. - LOperand* Use(HValue* value); - LOperand* UseAtStart(HValue* value); + MUST_USE_RESULT LOperand* Use(HValue* value); + MUST_USE_RESULT LOperand* UseAtStart(HValue* value); // An input operand in a register, stack slot or a constant operand. - LOperand* UseOrConstant(HValue* value); - LOperand* UseOrConstantAtStart(HValue* value); + MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); + MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); // An input operand in a register or a constant operand. 
- LOperand* UseRegisterOrConstant(HValue* value); - LOperand* UseRegisterOrConstantAtStart(HValue* value); + MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); + MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); // An input operand in register, stack slot or a constant operand. // Will not be moved to a register even if one is freely available. - LOperand* UseAny(HValue* value); + MUST_USE_RESULT LOperand* UseAny(HValue* value); + + // Temporary operand that must be in a register. + MUST_USE_RESULT LUnallocated* TempRegister(); + MUST_USE_RESULT LOperand* FixedTemp(Register reg); + MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); // Methods for setting up define-use relationships. // Return the same instruction that they are passed. - LInstruction* Define(LInstruction* instr, LUnallocated* result); - LInstruction* Define(LInstruction* instr); - LInstruction* DefineAsRegister(LInstruction* instr); - LInstruction* DefineAsSpilled(LInstruction* instr, int index); - LInstruction* DefineSameAsFirst(LInstruction* instr); - LInstruction* DefineFixed(LInstruction* instr, Register reg); - LInstruction* DefineFixedDouble(LInstruction* instr, DoubleRegister reg); + template + LInstruction* Define(LTemplateInstruction<1, I, T>* instr, + LUnallocated* result); + template + LInstruction* Define(LTemplateInstruction<1, I, T>* instr); + template + LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr); + template + LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr, + int index); + template + LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr); + template + LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr, + Register reg); + template + LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr, + DoubleRegister reg); LInstruction* AssignEnvironment(LInstruction* instr); LInstruction* AssignPointerMap(LInstruction* instr); @@ -1927,6 +1963,7 @@ class LChunkBuilder 
BASE_EMBEDDED { LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); + LInstruction* MarkAsSaveDoubles(LInstruction* instr); LInstruction* SetInstructionPendingDeoptimizationEnvironment( LInstruction* instr, int ast_id); @@ -1934,11 +1971,6 @@ class LChunkBuilder BASE_EMBEDDED { LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); - // Temporary operand that must be in a register. - LUnallocated* TempRegister(); - LOperand* FixedTemp(Register reg); - LOperand* FixedTemp(DoubleRegister reg); - void VisitInstruction(HInstruction* current); void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 55df8b4cb8..6abb830f83 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -964,7 +964,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) { break; } case CodeStub::StringCharAt: { - Abort("StringCharAtStub unimplemented."); + StringCharAtStub stub; + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::MathPow: { @@ -1015,8 +1016,8 @@ void LCodeGen::DoModI(LModI* instr) { LModI* instr_; }; // These registers hold untagged 32 bit values. - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); + Register left = ToRegister(instr->InputAt(0)); + Register right = ToRegister(instr->InputAt(1)); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -1038,6 +1039,37 @@ void LCodeGen::DoModI(LModI* instr) { __ bind(&ok); } + // Try a few common cases before using the generic stub. + Label call_stub; + const int kUnfolds = 3; + // Skip if either side is negative. + __ cmp(left, Operand(0)); + __ cmp(right, Operand(0), NegateCondition(mi)); + __ b(mi, &call_stub); + // If the right hand side is smaller than the (nonnegative) + // left hand side, it is the result. 
Else try a few subtractions + // of the left hand side. + __ mov(scratch, left); + for (int i = 0; i < kUnfolds; i++) { + // Check if the left hand side is less or equal than the + // the right hand side. + __ cmp(scratch, right); + __ mov(result, scratch, LeaveCC, lt); + __ b(lt, &done); + // If not, reduce the left hand side by the right hand + // side and check again. + if (i < kUnfolds - 1) __ sub(scratch, scratch, right); + } + + // Check for power of two on the right hand side. + __ sub(scratch, right, Operand(1), SetCC); + __ b(mi, &call_stub); + __ tst(scratch, right); + __ b(ne, &call_stub); + // Perform modulo operation. + __ and_(result, scratch, Operand(left)); + + __ bind(&call_stub); // Call the generic stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. DeferredModI* deferred = new DeferredModI(this, instr); @@ -1049,7 +1081,7 @@ void LCodeGen::DoModI(LModI* instr) { // If the result in r0 is a Smi, untag it, else deoptimize. __ BranchOnNotSmi(result, &deoptimize); - __ mov(result, Operand(result, ASR, 1)); + __ SmiUntag(result); __ b(al, &done); __ bind(&deoptimize); @@ -1070,8 +1102,8 @@ void LCodeGen::DoDivI(LDivI* instr) { LDivI* instr_; }; - const Register left = ToRegister(instr->left()); - const Register right = ToRegister(instr->right()); + const Register left = ToRegister(instr->InputAt(0)); + const Register right = ToRegister(instr->InputAt(1)); const Register scratch = scratch0(); const Register result = ToRegister(instr->result()); @@ -1138,10 +1170,11 @@ void LCodeGen::DoDivI(LDivI* instr) { } -void LCodeGen::DoDeferredGenericBinaryStub(LBinaryOperation* instr, +template +void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, Token::Value op) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); + Register left = ToRegister(instr->InputAt(0)); + Register right = ToRegister(instr->InputAt(1)); __ 
PushSafepointRegistersAndDoubles(); GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); @@ -1158,12 +1191,12 @@ void LCodeGen::DoDeferredGenericBinaryStub(LBinaryOperation* instr, void LCodeGen::DoMulI(LMulI* instr) { Register scratch = scratch0(); - Register left = ToRegister(instr->left()); - Register right = EmitLoadRegister(instr->right(), scratch); + Register left = ToRegister(instr->InputAt(0)); + Register right = EmitLoadRegister(instr->InputAt(1), scratch); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) && - !instr->right()->IsConstantOperand()) { - __ orr(ToRegister(instr->temp()), left, right); + !instr->InputAt(1)->IsConstantOperand()) { + __ orr(ToRegister(instr->TempAt(0)), left, right); } if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { @@ -1181,13 +1214,13 @@ void LCodeGen::DoMulI(LMulI* instr) { Label done; __ tst(left, Operand(left)); __ b(ne, &done); - if (instr->right()->IsConstantOperand()) { - if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) { + if (instr->InputAt(1)->IsConstantOperand()) { + if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) { DeoptimizeIf(no_condition, instr->environment()); } } else { // Test the non-zero operand for negative sign. 
- __ cmp(ToRegister(instr->temp()), Operand(0)); + __ cmp(ToRegister(instr->TempAt(0)), Operand(0)); DeoptimizeIf(mi, instr->environment()); } __ bind(&done); @@ -1196,8 +1229,8 @@ void LCodeGen::DoMulI(LMulI* instr) { void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); ASSERT(left->Equals(instr->result())); ASSERT(left->IsRegister()); Register result = ToRegister(left); @@ -1221,8 +1254,8 @@ void LCodeGen::DoBitI(LBitI* instr) { void LCodeGen::DoShiftI(LShiftI* instr) { Register scratch = scratch0(); - LOperand* left = instr->left(); - LOperand* right = instr->right(); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); ASSERT(left->Equals(instr->result())); ASSERT(left->IsRegister()); Register result = ToRegister(left); @@ -1279,9 +1312,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) { void LCodeGen::DoSubI(LSubI* instr) { - Register left = ToRegister(instr->left()); - Register right = EmitLoadRegister(instr->right(), ip); - ASSERT(instr->left()->Equals(instr->result())); + Register left = ToRegister(instr->InputAt(0)); + Register right = EmitLoadRegister(instr->InputAt(1), ip); + ASSERT(instr->InputAt(0)->Equals(instr->result())); __ sub(left, left, right, SetCC); if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { DeoptimizeIf(vs, instr->environment()); @@ -1311,22 +1344,22 @@ void LCodeGen::DoConstantT(LConstantT* instr) { void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { Register result = ToRegister(instr->result()); - Register array = ToRegister(instr->input()); + Register array = ToRegister(instr->InputAt(0)); __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset)); } void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) { Register result = ToRegister(instr->result()); - Register array = ToRegister(instr->input()); + Register array = ToRegister(instr->InputAt(0)); __ ldr(result, 
FieldMemOperand(array, FixedArray::kLengthOffset)); } void LCodeGen::DoValueOf(LValueOf* instr) { - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); - Register map = ToRegister(instr->temporary()); + Register map = ToRegister(instr->TempAt(0)); ASSERT(input.is(result)); Label done; @@ -1344,14 +1377,14 @@ void LCodeGen::DoValueOf(LValueOf* instr) { void LCodeGen::DoBitNotI(LBitNotI* instr) { - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->Equals(instr->result())); __ mvn(ToRegister(input), Operand(ToRegister(input))); } void LCodeGen::DoThrow(LThrow* instr) { - Register input_reg = EmitLoadRegister(instr->input(), ip); + Register input_reg = EmitLoadRegister(instr->InputAt(0), ip); __ push(input_reg); CallRuntime(Runtime::kThrow, 1, instr); @@ -1362,8 +1395,8 @@ void LCodeGen::DoThrow(LThrow* instr) { void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); ASSERT(left->Equals(instr->result())); Register right_reg = EmitLoadRegister(right, ip); @@ -1376,8 +1409,8 @@ void LCodeGen::DoAddI(LAddI* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); + DoubleRegister left = ToDoubleRegister(instr->InputAt(0)); + DoubleRegister right = ToDoubleRegister(instr->InputAt(1)); switch (instr->op()) { case Token::ADD: __ vadd(left, left, right); @@ -1403,8 +1436,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - ASSERT(ToRegister(instr->left()).is(r1)); - ASSERT(ToRegister(instr->right()).is(r0)); + ASSERT(ToRegister(instr->InputAt(0)).is(r1)); + ASSERT(ToRegister(instr->InputAt(1)).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); // 
TODO(regis): Implement TypeRecordingBinaryOpStub and replace current @@ -1448,11 +1481,11 @@ void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->representation(); if (r.IsInteger32()) { - Register reg = ToRegister(instr->input()); + Register reg = ToRegister(instr->InputAt(0)); __ cmp(reg, Operand(0)); EmitBranch(true_block, false_block, nz); } else if (r.IsDouble()) { - DoubleRegister reg = ToDoubleRegister(instr->input()); + DoubleRegister reg = ToDoubleRegister(instr->InputAt(0)); Register scratch = scratch0(); // Test the double value. Zero and NaN are false. @@ -1461,7 +1494,7 @@ void LCodeGen::DoBranch(LBranch* instr) { EmitBranch(true_block, false_block, ne); } else { ASSERT(r.IsTagged()); - Register reg = ToRegister(instr->input()); + Register reg = ToRegister(instr->InputAt(0)); if (instr->hydrogen()->type().IsBoolean()) { __ LoadRoot(ip, Heap::kTrueValueRootIndex); __ cmp(reg, ip); @@ -1604,24 +1637,29 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); + Register left = ToRegister(instr->InputAt(0)); + Register right = ToRegister(instr->InputAt(1)); Register result = ToRegister(instr->result()); __ cmp(left, Operand(right)); __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); - Abort("DoCmpJSObjectEq untested."); } void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) { - Abort("DoCmpJSObjectEqAndBranch unimplemented."); + Register left = ToRegister(instr->InputAt(0)); + Register right = ToRegister(instr->InputAt(1)); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + + __ cmp(left, Operand(right)); + EmitBranch(true_block, false_block, eq); } void LCodeGen::DoIsNull(LIsNull* instr) { - Register reg = 
ToRegister(instr->input()); + Register reg = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); __ LoadRoot(ip, Heap::kNullValueRootIndex); @@ -1656,7 +1694,7 @@ void LCodeGen::DoIsNull(LIsNull* instr) { void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { Register scratch = scratch0(); - Register reg = ToRegister(instr->input()); + Register reg = ToRegister(instr->InputAt(0)); // TODO(fsc): If the expression is known to be a smi, then it's // definitely not null. Jump to the false block. @@ -1710,7 +1748,7 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { void LCodeGen::DoIsSmi(LIsSmi* instr) { ASSERT(instr->hydrogen()->value()->representation().IsTagged()); Register result = ToRegister(instr->result()); - Register input_reg = EmitLoadRegister(instr->input(), ip); + Register input_reg = EmitLoadRegister(instr->InputAt(0), ip); __ tst(input_reg, Operand(kSmiTagMask)); __ LoadRoot(result, Heap::kTrueValueRootIndex); Label done; @@ -1724,24 +1762,24 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - Register input_reg = EmitLoadRegister(instr->input(), ip); + Register input_reg = EmitLoadRegister(instr->InputAt(0), ip); __ tst(input_reg, Operand(kSmiTagMask)); EmitBranch(true_block, false_block, eq); } -InstanceType LHasInstanceType::TestType() { - InstanceType from = hydrogen()->from(); - InstanceType to = hydrogen()->to(); +static InstanceType TestType(HHasInstanceType* instr) { + InstanceType from = instr->from(); + InstanceType to = instr->to(); if (from == FIRST_TYPE) return to; ASSERT(from == to || to == LAST_TYPE); return from; } -Condition LHasInstanceType::BranchCondition() { - InstanceType from = hydrogen()->from(); - InstanceType to = hydrogen()->to(); +static Condition BranchCondition(HHasInstanceType* instr) { + InstanceType from = instr->from(); + 
InstanceType to = instr->to(); if (from == to) return eq; if (to == LAST_TYPE) return hs; if (from == FIRST_TYPE) return ls; @@ -1751,13 +1789,25 @@ Condition LHasInstanceType::BranchCondition() { void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) { - Abort("DoHasInstanceType unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + Label done; + __ tst(input, Operand(kSmiTagMask)); + __ LoadRoot(result, Heap::kFalseValueRootIndex, eq); + __ b(eq, &done); + __ CompareObjectType(input, result, result, TestType(instr->hydrogen())); + Condition cond = BranchCondition(instr->hydrogen()); + __ LoadRoot(result, Heap::kTrueValueRootIndex, cond); + __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond)); + __ bind(&done); } void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { Register scratch = scratch0(); - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -1767,8 +1817,8 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { __ tst(input, Operand(kSmiTagMask)); __ b(eq, false_label); - __ CompareObjectType(input, scratch, scratch, instr->TestType()); - EmitBranch(true_block, false_block, instr->BranchCondition()); + __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); + EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen())); } @@ -1841,7 +1891,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, void LCodeGen::DoClassOfTest(LClassOfTest* instr) { - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); ASSERT(input.is(result)); Handle class_name = 
instr->hydrogen()->class_name(); @@ -1862,9 +1912,9 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) { void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); Register temp = scratch0(); - Register temp2 = ToRegister(instr->temporary()); + Register temp2 = ToRegister(instr->TempAt(0)); Handle class_name = instr->hydrogen()->class_name(); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -1880,8 +1930,8 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->input()); - Register temp = ToRegister(instr->temp()); + Register reg = ToRegister(instr->InputAt(0)); + Register temp = ToRegister(instr->TempAt(0)); int true_block = instr->true_block_id(); int false_block = instr->false_block_id(); @@ -1892,8 +1942,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. - ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. + ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0. + ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1. 
InstanceofStub stub(InstanceofStub::kArgsInRegisters); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); @@ -1911,7 +1961,119 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { - Abort("DoInstanceOfKnownGlobal unimplemented."); + class DeferredInstanceOfKnownGlobal: public LDeferredCode { + public: + DeferredInstanceOfKnownGlobal(LCodeGen* codegen, + LInstanceOfKnownGlobal* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { + codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); + } + + Label* map_check() { return &map_check_; } + + private: + LInstanceOfKnownGlobal* instr_; + Label map_check_; + }; + + DeferredInstanceOfKnownGlobal* deferred; + deferred = new DeferredInstanceOfKnownGlobal(this, instr); + + Label done, false_result; + Register object = ToRegister(instr->InputAt(0)); + Register temp = ToRegister(instr->TempAt(0)); + Register result = ToRegister(instr->result()); + + ASSERT(object.is(r0)); + ASSERT(result.is(r0)); + + // A Smi is not instance of anything. + __ BranchOnSmi(object, &false_result); + + // This is the inlined call site instanceof cache. The two occurences of the + // hole value will be patched to the last map/result pair generated by the + // instanceof stub. + Label cache_miss; + Register map = temp; + __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); + __ bind(deferred->map_check()); // Label for calculating code patching. + // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch with + // the cached map. + __ mov(ip, Operand(Factory::the_hole_value())); + __ cmp(map, Operand(ip)); + __ b(ne, &cache_miss); + // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch + // with true or false. 
+ __ mov(result, Operand(Factory::the_hole_value())); + __ b(&done); + + // The inlined call site cache did not match. Check null and string before + // calling the deferred code. + __ bind(&cache_miss); + // Null is not instance of anything. + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(object, Operand(ip)); + __ b(eq, &false_result); + + // String values is not instance of anything. + Condition is_string = masm_->IsObjectStringType(object, temp); + __ b(is_string, &false_result); + + // Go to the deferred code. + __ b(deferred->entry()); + + __ bind(&false_result); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + + // Here result has either true or false. Deferred code also produces true or + // false object. + __ bind(deferred->exit()); + __ bind(&done); +} + + +void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, + Label* map_check) { + Register result = ToRegister(instr->result()); + ASSERT(result.is(r0)); + + InstanceofStub::Flags flags = InstanceofStub::kNoFlags; + flags = static_cast( + flags | InstanceofStub::kArgsInRegisters); + flags = static_cast( + flags | InstanceofStub::kCallSiteInlineCheck); + flags = static_cast( + flags | InstanceofStub::kReturnTrueFalseObject); + InstanceofStub stub(flags); + + __ PushSafepointRegisters(); + + // Get the temp register reserved by the instruction. This needs to be r4 as + // its slot of the pushing of safepoint registers is used to communicate the + // offset to the location of the map check. 
+ Register temp = ToRegister(instr->TempAt(0)); + ASSERT(temp.is(r4)); + __ mov(InstanceofStub::right(), Operand(instr->function())); + static const int kAdditionalDelta = 4; + int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; + Label before_push_delta; + __ bind(&before_push_delta); + __ BlockConstPoolFor(kAdditionalDelta); + __ mov(temp, Operand(delta * kPointerSize)); + __ StoreToSafepointRegisterSlot(temp); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); + ASSERT_EQ(kAdditionalDelta, + masm_->InstructionsGeneratedSince(&before_push_delta)); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); + // Put the result value into the result register slot and + // restore all registers. + __ StoreToSafepointRegisterSlot(result); + + __ PopSafepointRegisters(); } @@ -1988,7 +2150,7 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) { void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { - Register value = ToRegister(instr->input()); + Register value = ToRegister(instr->InputAt(0)); __ mov(ip, Operand(Handle(instr->hydrogen()->cell()))); __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); } @@ -2003,7 +2165,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - Register object = ToRegister(instr->input()); + Register object = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); if (instr->hydrogen()->is_in_object()) { __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset())); @@ -2070,8 +2232,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { void LCodeGen::DoLoadElements(LLoadElements* instr) { - ASSERT(instr->result()->Equals(instr->input())); - Register reg = ToRegister(instr->input()); + ASSERT(instr->result()->Equals(instr->InputAt(0))); + Register reg = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); __ ldr(reg, FieldMemOperand(reg, 
JSObject::kElementsOffset)); @@ -2152,7 +2314,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elem = ToRegister(instr->input()); + Register elem = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); Label done; @@ -2237,7 +2399,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->input(); + LOperand* argument = instr->InputAt(0); if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { Abort("DoPushArgument not implemented for double type."); } else { @@ -2301,19 +2463,120 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { - Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register scratch = scratch0(); + + // Deoptimize if not a heap number. + __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, Operand(ip)); + DeoptimizeIf(ne, instr->environment()); + + Label done; + + Label negative; + __ ldr(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset)); + // Check the sign of the argument. If the argument is positive, just + // return it. We do not need to patch the stack since |input| and + // |result| are the same register and |input| will be restored + // unchanged by popping safepoint registers. + __ tst(scratch, Operand(HeapNumber::kSignMask)); + __ b(ne, &negative); + __ jmp(&done); + + __ bind(&negative); + // Preserve the value of all registers. + __ PushSafepointRegisters(); + + Register tmp = input.is(r0) ? r1 : r0; + Register tmp2 = input.is(r2) ? r3 : r2; + Register tmp3 = input.is(r4) ? 
r5 : r4; + + Label allocated, slow; + __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(tmp, tmp2, tmp3, scratch, &slow); + __ b(&allocated); + + // Slow case: Call the runtime system to do the number allocation. + __ bind(&slow); + + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); + // Set the pointer to the new heap number in tmp. + if (!tmp.is(r0)) __ mov(tmp, Operand(r0)); + + // Restore input_reg after call to runtime. + MemOperand input_register_slot = masm()->SafepointRegisterSlot(input); + __ ldr(input, input_register_slot); + + __ bind(&allocated); + __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kExponentOffset)); + __ bic(tmp2, tmp2, Operand(HeapNumber::kSignMask)); + __ str(tmp2, FieldMemOperand(tmp, HeapNumber::kExponentOffset)); + __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); + __ str(tmp2, FieldMemOperand(tmp, HeapNumber::kMantissaOffset)); + + __ str(tmp, input_register_slot); + __ PopSafepointRegisters(); + + __ bind(&done); +} + + +void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { + Label is_positive; + uint32_t kSignMask = 0x80000000u; + Register input = ToRegister(instr->InputAt(0)); + __ tst(input, Operand(kSignMask)); + __ b(eq, &is_positive); + __ rsb(input, input, Operand(0), SetCC); + // Deoptimize on overflow. + DeoptimizeIf(vs, instr->environment()); + __ bind(&is_positive); } void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { - Abort("DoMathAbs unimplemented."); + // Class for deferred case. 
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { + public: + DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, + LUnaryMathOperation* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { + codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); + } + private: + LUnaryMathOperation* instr_; + }; + + ASSERT(instr->InputAt(0)->Equals(instr->result())); + Representation r = instr->hydrogen()->value()->representation(); + if (r.IsDouble()) { + DwVfpRegister input = ToDoubleRegister(instr->InputAt(0)); + // __ vabs(input, input); + Abort("Double DoMathAbs unimplemented"); + } else if (r.IsInteger32()) { + EmitIntegerMathAbs(instr); + } else { + // Representation is tagged. + DeferredMathAbsTaggedHeapNumber* deferred = + new DeferredMathAbsTaggedHeapNumber(this, instr); + Register input = ToRegister(instr->InputAt(0)); + // Smi check. + __ BranchOnNotSmi(input, deferred->entry()); + // If smi, handle it directly. + EmitIntegerMathAbs(instr); + __ bind(deferred->exit()); + } } void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->input()); + DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); - Register prev_fpscr = ToRegister(instr->temp()); + Register prev_fpscr = ToRegister(instr->TempAt(0)); SwVfpRegister single_scratch = double_scratch0().low(); Register scratch = scratch0(); @@ -2355,7 +2618,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->input()); + DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); ASSERT(ToDoubleRegister(instr->result()).is(input)); __ vsqrt(input, input); } @@ -2431,7 +2694,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { void LCodeGen::DoCallNew(LCallNew* instr) { - ASSERT(ToRegister(instr->input()).is(r1)); + ASSERT(ToRegister(instr->InputAt(0)).is(r1)); 
ASSERT(ToRegister(instr->result()).is(r0)); Handle builtin(Builtins::builtin(Builtins::JSConstructCall)); @@ -2530,8 +2793,157 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { } +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { + class DeferredStringCharCodeAt: public LDeferredCode { + public: + DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + private: + LStringCharCodeAt* instr_; + }; + + Register scratch = scratch0(); + Register string = ToRegister(instr->string()); + Register index = no_reg; + int const_index = -1; + if (instr->index()->IsConstantOperand()) { + const_index = ToInteger32(LConstantOperand::cast(instr->index())); + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (!Smi::IsValid(const_index)) { + // Guaranteed to be out of bounds because of the assert above. + // So the bounds check that must dominate this instruction must + // have deoptimized already. + if (FLAG_debug_code) { + __ Abort("StringCharCodeAt: out of bounds index."); + } + // No code needs to be generated. + return; + } + } else { + index = ToRegister(instr->index()); + } + Register result = ToRegister(instr->result()); + + DeferredStringCharCodeAt* deferred = + new DeferredStringCharCodeAt(this, instr); + + Label flat_string, ascii_string, done; + + // Fetch the instance type of the receiver into result register. + __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + + // We need special handling for non-flat strings. + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(result, Operand(kStringRepresentationMask)); + __ b(eq, &flat_string); + + // Handle non-flat strings. + __ tst(result, Operand(kIsConsStringMask)); + __ b(eq, deferred->entry()); + + // ConsString. 
+ // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset)); + __ LoadRoot(ip, Heap::kEmptyStringRootIndex); + __ cmp(scratch, ip); + __ b(ne, deferred->entry()); + // Get the first of the two strings and load its instance type. + __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset)); + __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(result, Operand(kStringRepresentationMask)); + __ b(ne, deferred->entry()); + + // Check for 1-byte or 2-byte string. + __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ tst(result, Operand(kStringEncodingMask)); + __ b(ne, &ascii_string); + + // 2-byte string. + // Load the 2-byte character code into the result register. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + if (instr->index()->IsConstantOperand()) { + __ ldrh(result, + FieldMemOperand(string, + SeqTwoByteString::kHeaderSize + 2 * const_index)); + } else { + __ add(scratch, + string, + Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ ldrh(result, MemOperand(scratch, index, LSL, 1)); + } + __ jmp(&done); + + // ASCII string. + // Load the byte into the result register. 
+ __ bind(&ascii_string); + if (instr->index()->IsConstantOperand()) { + __ ldrb(result, FieldMemOperand(string, + SeqAsciiString::kHeaderSize + const_index)); + } else { + __ add(scratch, + string, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ ldrb(result, MemOperand(scratch, index)); + } + __ bind(&done); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ mov(result, Operand(0)); + + __ PushSafepointRegisters(); + __ push(string); + // Push the index as a smi. This is safe because of the checks in + // DoStringCharCodeAt above. + if (instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + __ mov(scratch, Operand(Smi::FromInt(const_index))); + __ push(scratch); + } else { + Register index = ToRegister(instr->index()); + __ SmiTag(index); + __ push(index); + } + __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex); + if (FLAG_debug_code) { + __ AbortIfNotSmi(r0); + } + __ SmiUntag(r0); + MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result); + __ str(r0, result_stack_slot); + __ PopSafepointRegisters(); +} + + +void LCodeGen::DoStringLength(LStringLength* instr) { + Register string = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + __ ldr(result, FieldMemOperand(string, String::kLengthOffset)); +} + + void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister() || 
input->IsStackSlot()); LOperand* output = instr->result(); ASSERT(output->IsDoubleRegister()); @@ -2557,7 +2969,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { LNumberTagI* instr_; }; - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); @@ -2570,7 +2982,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { Label slow; - Register reg = ToRegister(instr->input()); + Register reg = ToRegister(instr->InputAt(0)); DoubleRegister dbl_scratch = d0; SwVfpRegister flt_scratch = s0; @@ -2627,11 +3039,11 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { LNumberTagD* instr_; }; - DoubleRegister input_reg = ToDoubleRegister(instr->input()); + DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0)); Register scratch = scratch0(); Register reg = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); + Register temp1 = ToRegister(instr->TempAt(0)); + Register temp2 = ToRegister(instr->TempAt(1)); DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); if (FLAG_inline_new) { @@ -2664,7 +3076,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { void LCodeGen::DoSmiTag(LSmiTag* instr) { - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister() && input->Equals(instr->result())); ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); __ SmiTag(ToRegister(input)); @@ -2672,7 +3084,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) { void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister() && input->Equals(instr->result())); if (instr->needs_check()) { __ tst(ToRegister(input), Operand(kSmiTagMask)); @@ -2739,11 +3151,11 @@ class DeferredTaggedToI: public LDeferredCode 
{ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Label done; - Register input_reg = ToRegister(instr->input()); + Register input_reg = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); DoubleRegister dbl_scratch = d0; SwVfpRegister flt_scratch = s0; - DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp()); + DoubleRegister dbl_tmp = ToDoubleRegister(instr->TempAt(0)); // Heap number map check. __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); @@ -2800,7 +3212,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -2820,7 +3232,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); LOperand* result = instr->result(); ASSERT(result->IsDoubleRegister()); @@ -2838,7 +3250,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); __ tst(ToRegister(input), Operand(kSmiTagMask)); DeoptimizeIf(instr->condition(), instr->environment()); @@ -2846,7 +3258,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) { void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); InstanceType first = instr->hydrogen()->first(); InstanceType last = instr->hydrogen()->last(); @@ -2870,8 +3282,8 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { - ASSERT(instr->input()->IsRegister()); - Register reg = ToRegister(instr->input()); + 
ASSERT(instr->InputAt(0)->IsRegister()); + Register reg = ToRegister(instr->InputAt(0)); __ cmp(reg, Operand(instr->hydrogen()->target())); DeoptimizeIf(ne, instr->environment()); } @@ -2879,7 +3291,7 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) { void LCodeGen::DoCheckMap(LCheckMap* instr) { Register scratch = scratch0(); - LOperand* input = instr->input(); + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); Register reg = ToRegister(input); __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); @@ -2902,8 +3314,8 @@ void LCodeGen::LoadHeapObject(Register result, void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); + Register temp1 = ToRegister(instr->TempAt(0)); + Register temp2 = ToRegister(instr->TempAt(1)); Handle holder = instr->holder(); Handle current_prototype = instr->prototype(); @@ -3051,14 +3463,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { void LCodeGen::DoTypeof(LTypeof* instr) { - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); __ push(input); CallRuntime(Runtime::kTypeof, 1, instr); } void LCodeGen::DoTypeofIs(LTypeofIs* instr) { - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); Label true_label; Label false_label; @@ -3081,7 +3493,7 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) { void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->input()); + Register input = ToRegister(instr->InputAt(0)); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); Label* true_label = chunk_->GetAssemblyLabel(true_block); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 9eed64b455..3b2ad80c5f 
100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -93,12 +93,17 @@ class LCodeGen BASE_EMBEDDED { void FinishCode(Handle code); // Deferred code support. - void DoDeferredGenericBinaryStub(LBinaryOperation* instr, Token::Value op); + template + void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op); void DoDeferredNumberTagD(LNumberTagD* instr); void DoDeferredNumberTagI(LNumberTagI* instr); void DoDeferredTaggedToI(LTaggedToI* instr); void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr); void DoDeferredStackCheck(LGoto* instr); + void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); + void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, + Label* map_check); // Parallel move support. void DoParallelMove(LParallelMove* move); @@ -212,6 +217,7 @@ class LCodeGen BASE_EMBEDDED { MemOperand ToMemOperand(LOperand* op) const; // Specific math operations - used from DoUnaryMathOperation. + void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); void DoMathFloor(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 1028b0e69f..a78de986e7 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -485,6 +485,11 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() { PopSafepointRegisters(); } +void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) { + str(reg, SafepointRegisterSlot(reg)); +} + + int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { // The registers are pushed starting with the highest encoding, // which means that lowest encodings are closest to the stack pointer. 
@@ -493,6 +498,11 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { } +MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { + return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); +} + + void MacroAssembler::Ldrd(Register dst1, Register dst2, const MemOperand& src, Condition cond) { ASSERT(src.rm().is(no_reg)); @@ -1960,6 +1970,13 @@ void MacroAssembler::AbortIfSmi(Register object) { } +void MacroAssembler::AbortIfNotSmi(Register object) { + ASSERT_EQ(0, kSmiTag); + tst(object, Operand(kSmiTagMask)); + Assert(eq, "Operand is not smi"); +} + + void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( Register first, Register second, @@ -2185,6 +2202,26 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) { } +void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, + Register result) { + const uint32_t kLdrOffsetMask = (1 << 12) - 1; + const int32_t kPCRegOffset = 2 * kPointerSize; + ldr(result, MemOperand(ldr_location)); + if (FLAG_debug_code) { + // Check that the instruction is a ldr reg, [pc + offset] . + and_(result, result, Operand(kLdrPCPattern)); + cmp(result, Operand(kLdrPCPattern)); + Check(eq, "The instruction to patch should be a load from pc."); + // Result was clobbered. Restore it. + ldr(result, MemOperand(ldr_location)); + } + // Get the address of the constant. 
+ and_(result, result, Operand(kLdrOffsetMask)); + add(result, ldr_location, Operand(result)); + add(result, result, Operand(kPCRegOffset)); +} + + #ifdef ENABLE_DEBUGGER_SUPPORT CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 324fbb2dde..7392d36659 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -234,8 +234,9 @@ class MacroAssembler: public Assembler { void PopSafepointRegisters(); void PushSafepointRegistersAndDoubles(); void PopSafepointRegistersAndDoubles(); - + void StoreToSafepointRegisterSlot(Register reg); static int SafepointRegisterStackIndex(int reg_code); + static MemOperand SafepointRegisterSlot(Register reg); // Load two consecutive registers with two consecutive memory locations. void Ldrd(Register dst1, @@ -740,6 +741,7 @@ class MacroAssembler: public Assembler { // Abort execution if argument is a smi. Used in debug code. void AbortIfSmi(Register object); + void AbortIfNotSmi(Register object); // --------------------------------------------------------------------------- // String utilities @@ -776,6 +778,15 @@ class MacroAssembler: public Assembler { Label* failure); + // --------------------------------------------------------------------------- + // Patching helpers. + + // Get the location of a relocated constant (its address in the constant pool) + // from its load site. 
+ void GetRelocatedValueLocation(Register ldr_location, + Register result); + + private: void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 00650576c0..138e8f8955 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -153,7 +153,12 @@ void Debugger::Stop(Instr* instr) { if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) { sim_->watched_stops[code].desc = msg; } - PrintF("Simulator hit %s\n", msg); + // Print the stop message and code if it is not the default code. + if (code != kMaxStopCode) { + PrintF("Simulator hit stop %u: %s\n", code, msg); + } else { + PrintF("Simulator hit %s\n", msg); + } sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize); Debug(); } @@ -450,7 +455,7 @@ void Debugger::Debug() { PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_); PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_); PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_); - PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_); + PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_); } else if (strcmp(cmd, "stop") == 0) { int32_t value; intptr_t stop_pc = sim_->get_pc() - 2 * Instr::kInstrSize; @@ -2902,6 +2907,10 @@ void Simulator::InstructionDecode(Instr* instr) { break; } } + // If the instruction is a non taken conditional stop, we need to skip the + // inlined message address. 
+ } else if (instr->IsStop()) { + set_pc(get_pc() + 2 * Instr::kInstrSize); } if (!pc_modified_) { set_register(pc, reinterpret_cast(instr) + Instr::kInstrSize); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index b7ec5d245a..ce1d854481 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -902,6 +902,111 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells( } +// Convert and store int passed in register ival to IEEE 754 single precision +// floating point value at memory location (dst + 4 * wordoffset) +// If VFP3 is available use it for conversion. +static void StoreIntAsFloat(MacroAssembler* masm, + Register dst, + Register wordoffset, + Register ival, + Register fval, + Register scratch1, + Register scratch2) { + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, ival); + __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); + __ vcvt_f32_s32(s0, s0); + __ vstr(s0, scratch1, 0); + } else { + Label not_special, done; + // Move sign bit from source to destination. This works because the sign + // bit in the exponent word of the double has the same position and polarity + // as the 2's complement sign bit in a Smi. + ASSERT(kBinary32SignMask == 0x80000000u); + + __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); + // Negate value if it is negative. + __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); + + // We have -1, 0 or 1, which we treat specially. Register ival contains + // absolute value: it is either equal to 1 (special case of -1 and 1), + // greater than 1 (not a special case) or less than 1 (special case of 0). + __ cmp(ival, Operand(1)); + __ b(gt, ¬_special); + + // For 1 or -1 we need to or in the 0 exponent (biased). 
+ static const uint32_t exponent_word_for_1 = + kBinary32ExponentBias << kBinary32ExponentShift; + + __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); + __ b(&done); + + __ bind(¬_special); + // Count leading zeros. + // Gets the wrong answer for 0, but we already checked for that case above. + Register zeros = scratch2; + __ CountLeadingZeros(zeros, ival, scratch1); + + // Compute exponent and or it into the exponent register. + __ rsb(scratch1, + zeros, + Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); + + __ orr(fval, + fval, + Operand(scratch1, LSL, kBinary32ExponentShift)); + + // Shift up the source chopping the top bit off. + __ add(zeros, zeros, Operand(1)); + // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. + __ mov(ival, Operand(ival, LSL, zeros)); + // And the top (top 20 bits). + __ orr(fval, + fval, + Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); + + __ bind(&done); + __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); + } +} + + +// Convert unsigned integer with specified number of leading zeroes in binary +// representation to IEEE 754 double. +// Integer to convert is passed in register hiword. +// Resulting double is returned in registers hiword:loword. +// This functions does not work correctly for 0. 
+static void GenerateUInt2Double(MacroAssembler* masm, + Register hiword, + Register loword, + Register scratch, + int leading_zeroes) { + const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; + const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; + + const int mantissa_shift_for_hi_word = + meaningful_bits - HeapNumber::kMantissaBitsInTopWord; + + const int mantissa_shift_for_lo_word = + kBitsPerInt - mantissa_shift_for_hi_word; + + __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); + if (mantissa_shift_for_hi_word > 0) { + __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); + __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); + } else { + __ mov(loword, Operand(0, RelocInfo::NONE)); + __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); + } + + // If least significant bit of biased exponent was not 1 it was corrupted + // by most significant bit of mantissa so we should fix that. 
+ if (!(biased_exponent & 1)) { + __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); + } +} + #undef __ #define __ ACCESS_MASM(masm()) @@ -3224,6 +3329,603 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { } +static bool IsElementTypeSigned(ExternalArrayType array_type) { + switch (array_type) { + case kExternalByteArray: + case kExternalShortArray: + case kExternalIntArray: + return true; + + case kExternalUnsignedByteArray: + case kExternalUnsignedShortArray: + case kExternalUnsignedIntArray: + return false; + + default: + UNREACHABLE(); + return false; + } +} + + +MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub( + ExternalArrayType array_type, Code::Flags flags) { + // ---------- S t a t e -------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + Label slow, failed_allocation; + + Register key = r0; + Register receiver = r1; + + // Check that the object isn't a smi + __ BranchOnSmi(receiver, &slow); + + // Check that the key is a smi. + __ BranchOnNotSmi(key, &slow); + + // Check that the object is a JS object. Load map into r2. + __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow); + + // Check that the receiver does not require access checks. We need + // to check this explicitly since this generic stub does not perform + // map checks. + __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); + __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); + __ b(ne, &slow); + + // Check that the elements array is the appropriate type of + // ExternalArray. + __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); + __ cmp(r2, ip); + __ b(ne, &slow); + + // Check that the index is in range. 
+ __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); + __ cmp(ip, Operand(key, ASR, kSmiTagSize)); + // Unsigned comparison catches both negative and too-large values. + __ b(lo, &slow); + + // r3: elements array + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + // r3: base pointer of external storage + + // We are not untagging smi key and instead work with it + // as if it was premultiplied by 2. + ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); + + Register value = r2; + switch (array_type) { + case kExternalByteArray: + __ ldrsb(value, MemOperand(r3, key, LSR, 1)); + break; + case kExternalUnsignedByteArray: + __ ldrb(value, MemOperand(r3, key, LSR, 1)); + break; + case kExternalShortArray: + __ ldrsh(value, MemOperand(r3, key, LSL, 0)); + break; + case kExternalUnsignedShortArray: + __ ldrh(value, MemOperand(r3, key, LSL, 0)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ ldr(value, MemOperand(r3, key, LSL, 1)); + break; + case kExternalFloatArray: + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ add(r2, r3, Operand(key, LSL, 1)); + __ vldr(s0, r2, 0); + } else { + __ ldr(value, MemOperand(r3, key, LSL, 1)); + } + break; + default: + UNREACHABLE(); + break; + } + + // For integer array types: + // r2: value + // For floating-point array type + // s0: value (if VFP3 is supported) + // r2: value (if VFP3 is not supported) + + if (array_type == kExternalIntArray) { + // For the Int and UnsignedInt array types, we need to see whether + // the value can be represented in a Smi. If not, we need to convert + // it to a HeapNumber. + Label box_int; + __ cmp(value, Operand(0xC0000000)); + __ b(mi, &box_int); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); + __ Ret(); + + __ bind(&box_int); + // Allocate a HeapNumber for the result and perform int-to-double + // conversion. Don't touch r0 or r1 as they are needed if allocation + // fails. 
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r5, r3, r4, r6, &slow); + // Now we can use r0 for the result as key is not needed any more. + __ mov(r0, r5); + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, value); + __ vcvt_f64_s32(d0, s0); + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); + __ Ret(); + } else { + WriteInt32ToHeapNumberStub stub(value, r0, r3); + __ TailCallStub(&stub); + } + } else if (array_type == kExternalUnsignedIntArray) { + // The test is different for unsigned int values. Since we need + // the value to be in the range of a positive smi, we can't + // handle either of the top two bits being set in the value. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + Label box_int, done; + __ tst(value, Operand(0xC0000000)); + __ b(ne, &box_int); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); + __ Ret(); + + __ bind(&box_int); + __ vmov(s0, value); + // Allocate a HeapNumber for the result and perform int-to-double + // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all + // registers - also when jumping due to exhausted young space. + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); + + __ vcvt_f64_u32(d0, s0); + __ sub(r1, r2, Operand(kHeapObjectTag)); + __ vstr(d0, r1, HeapNumber::kValueOffset); + + __ mov(r0, r2); + __ Ret(); + } else { + // Check whether unsigned integer fits into smi. + Label box_int_0, box_int_1, done; + __ tst(value, Operand(0x80000000)); + __ b(ne, &box_int_0); + __ tst(value, Operand(0x40000000)); + __ b(ne, &box_int_1); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); + __ Ret(); + + Register hiword = value; // r2. + Register loword = r3; + + __ bind(&box_int_0); + // Integer does not have leading zeros. 
+ GenerateUInt2Double(masm(), hiword, loword, r4, 0); + __ b(&done); + + __ bind(&box_int_1); + // Integer has one leading zero. + GenerateUInt2Double(masm(), hiword, loword, r4, 1); + + + __ bind(&done); + // Integer was converted to double in registers hiword:loword. + // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber + // clobbers all registers - also when jumping due to exhausted young + // space. + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r4, r5, r7, r6, &slow); + + __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); + __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); + + __ mov(r0, r4); + __ Ret(); + } + } else if (array_type == kExternalFloatArray) { + // For the floating-point array type, we need to always allocate a + // HeapNumber. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Allocate a HeapNumber for the result. Don't use r0 and r1 as + // AllocateHeapNumber clobbers all registers - also when jumping due to + // exhausted young space. + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); + __ vcvt_f64_f32(d0, s0); + __ sub(r1, r2, Operand(kHeapObjectTag)); + __ vstr(d0, r1, HeapNumber::kValueOffset); + + __ mov(r0, r2); + __ Ret(); + } else { + // Allocate a HeapNumber for the result. Don't use r0 and r1 as + // AllocateHeapNumber clobbers all registers - also when jumping due to + // exhausted young space. + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r3, r4, r5, r6, &slow); + // VFP is not available, do manual single to double conversion. + + // r2: floating point value (binary32) + // r3: heap number for result + + // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to + // the slow case from here. + __ and_(r0, value, Operand(kBinary32MantissaMask)); + + // Extract exponent to r1. 
OK to clobber r1 now as there are no jumps to + // the slow case from here. + __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); + __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); + + Label exponent_rebiased; + __ teq(r1, Operand(0x00)); + __ b(eq, &exponent_rebiased); + + __ teq(r1, Operand(0xff)); + __ mov(r1, Operand(0x7ff), LeaveCC, eq); + __ b(eq, &exponent_rebiased); + + // Rebias exponent. + __ add(r1, + r1, + Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); + + __ bind(&exponent_rebiased); + __ and_(r2, value, Operand(kBinary32SignMask)); + value = no_reg; + __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); + + // Shift mantissa. + static const int kMantissaShiftForHiWord = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; + + static const int kMantissaShiftForLoWord = + kBitsPerInt - kMantissaShiftForHiWord; + + __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); + __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); + + __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); + + __ mov(r0, r3); + __ Ret(); + } + + } else { + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); + __ Ret(); + } + + // Slow case, key and receiver still in r0 and r1. 
+ __ bind(&slow); + __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); + + // ---------- S t a t e -------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + + __ Push(r1, r0); + + __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); + + return GetCode(flags); +} + + +MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( + ExternalArrayType array_type, Code::Flags flags) { + // ---------- S t a t e -------------- + // -- r0 : value + // -- r1 : key + // -- r2 : receiver + // -- lr : return address + // ----------------------------------- + Label slow, check_heap_number; + + // Register usage. + Register value = r0; + Register key = r1; + Register receiver = r2; + // r3 mostly holds the elements array or the destination external array. + + // Check that the object isn't a smi. + __ BranchOnSmi(receiver, &slow); + + // Check that the object is a JS object. Load map into r3. + __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); + __ b(le, &slow); + + // Check that the receiver does not require access checks. We need + // to do this because this generic stub does not perform map checks. + __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); + __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); + __ b(ne, &slow); + + // Check that the key is a smi. + __ BranchOnNotSmi(key, &slow); + + // Check that the elements array is the appropriate type of ExternalArray. + __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); + __ cmp(r4, ip); + __ b(ne, &slow); + + // Check that the index is in range. + __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. + __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); + __ cmp(r4, ip); + // Unsigned comparison catches both negative and too-large values. 
+ __ b(hs, &slow); + + // Handle both smis and HeapNumbers in the fast path. Go to the + // runtime for all other kinds of values. + // r3: external array. + // r4: key (integer). + __ BranchOnNotSmi(value, &check_heap_number); + __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + + // r3: base pointer of external storage. + // r4: key (integer). + // r5: value (integer). + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r5, MemOperand(r3, r4, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r5, MemOperand(r3, r4, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r5, MemOperand(r3, r4, LSL, 2)); + break; + case kExternalFloatArray: + // Perform int-to-float conversion and store to memory. + StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9); + break; + default: + UNREACHABLE(); + break; + } + + // Entry registers are intact, r0 holds the value which is the return value. + __ Ret(); + + + // r3: external array. + // r4: index (integer). + __ bind(&check_heap_number); + __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); + __ b(ne, &slow); + + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + + // r3: base pointer of external storage. + // r4: key (integer). + + // The WebGL specification leaves the behavior of storing NaN and + // +/-Infinity into integer arrays basically undefined. For more + // reproducible behavior, convert these to zero. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + + + if (array_type == kExternalFloatArray) { + // vldr requires offset to be a multiple of 4 so we can not + // include -kHeapObjectTag into it. 
+ __ sub(r5, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + __ add(r5, r3, Operand(r4, LSL, 2)); + __ vcvt_f32_f64(s0, d0); + __ vstr(s0, r5, 0); + } else { + // Need to perform float-to-int conversion. + // Test for NaN or infinity (both give zero). + __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset)); + + // Hoisted load. vldr requires offset to be a multiple of 4 so we can not + // include -kHeapObjectTag into it. + __ sub(r5, value, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + + __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs and Infinities have all-one exponents so they sign extend to -1. + __ cmp(r6, Operand(-1)); + __ mov(r5, Operand(0), LeaveCC, eq); + + // Not infinity or NaN simply convert to int. + if (IsElementTypeSigned(array_type)) { + __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); + } else { + __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); + } + __ vmov(r5, s0, ne); + + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r5, MemOperand(r3, r4, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r5, MemOperand(r3, r4, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r5, MemOperand(r3, r4, LSL, 2)); + break; + default: + UNREACHABLE(); + break; + } + } + + // Entry registers are intact, r0 holds the value which is the return value. + __ Ret(); + } else { + // VFP3 is not available do manual conversions. 
+ __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); + __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); + + if (array_type == kExternalFloatArray) { + Label done, nan_or_infinity_or_zero; + static const int kMantissaInHiWordShift = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; + + static const int kMantissaInLoWordShift = + kBitsPerInt - kMantissaInHiWordShift; + + // Test for all special exponent values: zeros, subnormal numbers, NaNs + // and infinities. All these should be converted to 0. + __ mov(r7, Operand(HeapNumber::kExponentMask)); + __ and_(r9, r5, Operand(r7), SetCC); + __ b(eq, &nan_or_infinity_or_zero); + + __ teq(r9, Operand(r7)); + __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); + __ b(eq, &nan_or_infinity_or_zero); + + // Rebias exponent. + __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); + __ add(r9, + r9, + Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); + + __ cmp(r9, Operand(kBinary32MaxExponent)); + __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); + __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); + __ b(gt, &done); + + __ cmp(r9, Operand(kBinary32MinExponent)); + __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); + __ b(lt, &done); + + __ and_(r7, r5, Operand(HeapNumber::kSignMask)); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); + __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); + __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); + + __ bind(&done); + __ str(r5, MemOperand(r3, r4, LSL, 2)); + // Entry registers are intact, r0 holds the value which is the return + // value. 
+ __ Ret(); + + __ bind(&nan_or_infinity_or_zero); + __ and_(r7, r5, Operand(HeapNumber::kSignMask)); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r9, r9, r7); + __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); + __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); + __ b(&done); + } else { + bool is_signed_type = IsElementTypeSigned(array_type); + int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; + int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; + + Label done, sign; + + // Test for all special exponent values: zeros, subnormal numbers, NaNs + // and infinities. All these should be converted to 0. + __ mov(r7, Operand(HeapNumber::kExponentMask)); + __ and_(r9, r5, Operand(r7), SetCC); + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ b(eq, &done); + + __ teq(r9, Operand(r7)); + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ b(eq, &done); + + // Unbias exponent. + __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); + __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); + // If exponent is negative then result is 0. + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); + __ b(mi, &done); + + // If exponent is too big then result is minimal value. 
+ __ cmp(r9, Operand(meaningfull_bits - 1)); + __ mov(r5, Operand(min_value), LeaveCC, ge); + __ b(ge, &done); + + __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); + + __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); + __ b(pl, &sign); + + __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); + __ mov(r5, Operand(r5, LSL, r9)); + __ rsb(r9, r9, Operand(meaningfull_bits)); + __ orr(r5, r5, Operand(r6, LSR, r9)); + + __ bind(&sign); + __ teq(r7, Operand(0, RelocInfo::NONE)); + __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); + + __ bind(&done); + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r5, MemOperand(r3, r4, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r5, MemOperand(r3, r4, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r5, MemOperand(r3, r4, LSL, 2)); + break; + default: + UNREACHABLE(); + break; + } + } + } + + // Slow case: call runtime. + __ bind(&slow); + + // Entry registers are intact. + // ---------- S t a t e -------------- + // -- r0 : value + // -- r1 : key + // -- r2 : receiver + // -- lr : return address + // ----------------------------------- + + // Push receiver, key and value for runtime call. 
+ __ Push(r2, r1, r0); + + __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + + return GetCode(flags); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 4fe89be172..fa01be016c 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -32,7 +32,6 @@ #include "parser.h" #include "scopes.h" #include "string-stream.h" -#include "stub-cache.h" namespace v8 { namespace internal { @@ -560,20 +559,13 @@ void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) { } -static bool CallWithoutIC(Handle target, int arity) { +static bool CanCallWithoutIC(Handle target, int arity) { SharedFunctionInfo* info = target->shared(); - if (target->NeedsArgumentsAdaption()) { - // If the number of formal parameters of the target function - // does not match the number of arguments we're passing, we - // don't want to deal with it. - return info->formal_parameter_count() == arity; - } else { - // If the target doesn't need arguments adaption, we can call - // it directly, but we avoid to do so if it has a custom call - // generator, because that is likely to generate better code. - return !info->HasBuiltinFunctionId() || - !CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id()); - } + // If the number of formal parameters of the target function does + // not match the number of arguments we're passing, we don't want to + // deal with it. Otherwise, we can call it directly. 
+ return !target->NeedsArgumentsAdaption() || + info->formal_parameter_count() == arity; } @@ -589,7 +581,7 @@ bool Call::ComputeTarget(Handle type, Handle name) { type = Handle(holder()->map()); } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) { target_ = Handle(lookup.GetConstantFunctionFromMap(*type)); - return CallWithoutIC(target_, arguments()->length()); + return CanCallWithoutIC(target_, arguments()->length()); } else { return false; } @@ -609,8 +601,8 @@ bool Call::ComputeGlobalTarget(Handle global, Handle candidate(JSFunction::cast(cell_->value())); // If the function is in new space we assume it's more likely to // change and thus prefer the general IC code. - if (!Heap::InNewSpace(*candidate) - && CallWithoutIC(candidate, arguments()->length())) { + if (!Heap::InNewSpace(*candidate) && + CanCallWithoutIC(candidate, arguments()->length())) { target_ = candidate; return true; } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index f55ddcd56b..a897e886dd 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -1675,7 +1675,8 @@ class FunctionLiteral: public Expression { int start_position, int end_position, bool is_expression, - bool contains_loops) + bool contains_loops, + bool strict_mode) : name_(name), scope_(scope), body_(body), @@ -1689,6 +1690,7 @@ class FunctionLiteral: public Expression { end_position_(end_position), is_expression_(is_expression), contains_loops_(contains_loops), + strict_mode_(strict_mode), function_token_position_(RelocInfo::kNoPosition), inferred_name_(Heap::empty_string()), try_full_codegen_(false), @@ -1705,6 +1707,7 @@ class FunctionLiteral: public Expression { int end_position() const { return end_position_; } bool is_expression() const { return is_expression_; } bool contains_loops() const { return contains_loops_; } + bool strict_mode() const { return strict_mode_; } int materialized_literal_count() { return materialized_literal_count_; } int expected_property_count() { return 
expected_property_count_; } @@ -1747,6 +1750,7 @@ class FunctionLiteral: public Expression { int end_position_; bool is_expression_; bool contains_loops_; + bool strict_mode_; int function_token_position_; Handle inferred_name_; bool try_full_codegen_; diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index c4c9fc11c8..7c2c2bca3d 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -1282,44 +1282,6 @@ static void Generate_KeyedLoadIC_String(MacroAssembler* masm) { } -static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) { - KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray); -} - - -static void Generate_KeyedLoadIC_ExternalUnsignedByteArray( - MacroAssembler* masm) { - KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray); -} - - -static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) { - KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray); -} - - -static void Generate_KeyedLoadIC_ExternalUnsignedShortArray( - MacroAssembler* masm) { - KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray); -} - - -static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) { - KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray); -} - - -static void Generate_KeyedLoadIC_ExternalUnsignedIntArray( - MacroAssembler* masm) { - KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray); -} - - -static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) { - KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray); -} - - static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) { KeyedLoadIC::GeneratePreMonomorphic(masm); } @@ -1364,44 +1326,6 @@ static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) { } -static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) { - KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray); -} - - -static void 
Generate_KeyedStoreIC_ExternalUnsignedByteArray( - MacroAssembler* masm) { - KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray); -} - - -static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) { - KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray); -} - - -static void Generate_KeyedStoreIC_ExternalUnsignedShortArray( - MacroAssembler* masm) { - KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray); -} - - -static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) { - KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray); -} - - -static void Generate_KeyedStoreIC_ExternalUnsignedIntArray( - MacroAssembler* masm) { - KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray); -} - - -static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) { - KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray); -} - - static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) { KeyedStoreIC::GenerateMiss(masm); } diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index d2b4be2f78..39f35469ff 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -93,13 +93,6 @@ enum BuiltinExtraArguments { V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \ V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \ V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC) \ V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \ \ V(StoreIC_Initialize, STORE_IC, 
UNINITIALIZED) \ @@ -110,13 +103,6 @@ enum BuiltinExtraArguments { \ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \ V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \ - V(KeyedStoreIC_ExternalByteArray, KEYED_STORE_IC, MEGAMORPHIC) \ - V(KeyedStoreIC_ExternalUnsignedByteArray, KEYED_STORE_IC, MEGAMORPHIC) \ - V(KeyedStoreIC_ExternalShortArray, KEYED_STORE_IC, MEGAMORPHIC) \ - V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC) \ - V(KeyedStoreIC_ExternalIntArray, KEYED_STORE_IC, MEGAMORPHIC) \ - V(KeyedStoreIC_ExternalUnsignedIntArray, KEYED_STORE_IC, MEGAMORPHIC) \ - V(KeyedStoreIC_ExternalFloatArray, KEYED_STORE_IC, MEGAMORPHIC) \ \ /* Uses KeyedLoadIC_Initialize; must be after in list. */ \ V(FunctionCall, BUILTIN, UNINITIALIZED) \ diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 76f29f082f..bc7516a3fe 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -273,20 +273,21 @@ class FastNewClosureStub : public CodeStub { class FastNewContextStub : public CodeStub { public: - static const int kMaximumSlots = 64; + // We want no more than 64 different stubs. 
+ static const int kMaximumSlots = Context::MIN_CONTEXT_SLOTS + 63; explicit FastNewContextStub(int slots) : slots_(slots) { - ASSERT(slots_ > 0 && slots <= kMaximumSlots); + ASSERT(slots_ >= Context::MIN_CONTEXT_SLOTS && slots_ <= kMaximumSlots); } void Generate(MacroAssembler* masm); private: - int slots_; + virtual const char* GetName() { return "FastNewContextStub"; } + virtual Major MajorKey() { return FastNewContext; } + virtual int MinorKey() { return slots_; } - const char* GetName() { return "FastNewContextStub"; } - Major MajorKey() { return FastNewContext; } - int MinorKey() { return slots_; } + int slots_; }; diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index c7e6f1c83f..f9a2453a09 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -267,7 +267,7 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) { CodeGenerator::PrintCode(code, info); info->SetCode(code); // May be an empty handle. #ifdef ENABLE_GDB_JIT_INTERFACE - if (!code.is_null()) { + if (FLAG_gdbjit && !code.is_null()) { GDBJITLineInfo* lineinfo = masm.positions_recorder()->DetachGDBJITLineInfo(); diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index bbe7f2fc9c..5c18c3e53e 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -37,7 +37,7 @@ #include "full-codegen.h" #include "gdb-jit.h" #include "hydrogen.h" -#include "lithium-allocator.h" +#include "lithium.h" #include "liveedit.h" #include "oprofile-agent.h" #include "parser.h" diff --git a/deps/v8/src/extensions/experimental/experimental.gyp b/deps/v8/src/extensions/experimental/experimental.gyp index 73888fc045..4d7a9363bf 100644 --- a/deps/v8/src/extensions/experimental/experimental.gyp +++ b/deps/v8/src/extensions/experimental/experimental.gyp @@ -27,7 +27,10 @@ { 'variables': { - 'icu_src_dir%': '', + # TODO(cira): Find out how to pass this value for arbitrary embedder. + # Chromium sets it in common.gypi and does force include of that file for + # all sub projects. 
+ 'icu_src_dir%': '../../../../third_party/icu', }, 'targets': [ { diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index fb892d6248..b90534c0cc 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -301,6 +301,7 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing") // parser.cc DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") +DEFINE_bool(strict_mode, true, "allow strict mode directives") // rewriter.cc DEFINE_bool(optimize_ast, true, "optimize the ast") diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 9366e427a1..af3ac00bae 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -308,7 +308,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { CodeGenerator::PrintCode(code, info); info->SetCode(code); // may be an empty handle. #ifdef ENABLE_GDB_JIT_INTERFACE - if (!code.is_null()) { + if (FLAG_gdbjit && !code.is_null()) { GDBJITLineInfo* lineinfo = masm.positions_recorder()->DetachGDBJITLineInfo(); diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 0d79081a23..1c86817aa8 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -203,7 +203,10 @@ namespace internal { V(zero_symbol, "0") \ V(global_eval_symbol, "GlobalEval") \ V(identity_hash_symbol, "v8::IdentityHash") \ - V(closure_symbol, "(closure)") + V(closure_symbol, "(closure)") \ + V(use_strict, "use strict") \ + V(KeyedLoadExternalArray_symbol, "KeyedLoadExternalArray") \ + V(KeyedStoreExternalArray_symbol, "KeyedStoreExternalArray") // Forward declarations. 
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index b13bb0c4d8..d1a4782a01 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -570,34 +570,29 @@ void HCallConstantFunction::PrintDataTo(StringStream* stream) const { } -void HBranch::PrintDataTo(StringStream* stream) const { - int first_id = FirstSuccessor()->block_id(); - int second_id = SecondSuccessor()->block_id(); - stream->Add("on "); - value()->PrintNameTo(stream); - stream->Add(" (B%d, B%d)", first_id, second_id); -} - - -void HCompareMapAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("on "); - value()->PrintNameTo(stream); - stream->Add(" (%p)", *map()); -} - - -void HGoto::PrintDataTo(StringStream* stream) const { - stream->Add("B%d", FirstSuccessor()->block_id()); +void HControlInstruction::PrintDataTo(StringStream* stream) const { + if (FirstSuccessor() != NULL) { + int first_id = FirstSuccessor()->block_id(); + if (SecondSuccessor() == NULL) { + stream->Add(" B%d", first_id); + } else { + int second_id = SecondSuccessor()->block_id(); + stream->Add(" goto (B%d, B%d)", first_id, second_id); + } + } } -void HReturn::PrintDataTo(StringStream* stream) const { +void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const { value()->PrintNameTo(stream); + HControlInstruction::PrintDataTo(stream); } -void HThrow::PrintDataTo(StringStream* stream) const { +void HCompareMap::PrintDataTo(StringStream* stream) const { value()->PrintNameTo(stream); + stream->Add(" (%p)", *map()); + HControlInstruction::PrintDataTo(stream); } @@ -1255,6 +1250,11 @@ HType HUnaryPredicate::CalculateInferredType() const { } +HType HBitwiseBinaryOperation::CalculateInferredType() const { + return HType::TaggedNumber(); +} + + HType HArithmeticBinaryOperation::CalculateInferredType() const { return HType::TaggedNumber(); } diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 
eebec5a9e2..d57655ab79 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -81,6 +81,7 @@ class LChunkBuilder; // HStoreNamed // HStoreNamedField // HStoreNamedGeneric +// HStringCharCodeAt // HBlockEntry // HCall // HCallConstantFunction @@ -98,9 +99,9 @@ class LChunkBuilder; // HDeoptimize // HGoto // HUnaryControlInstruction -// HBranch -// HCompareMapAndBranch +// HCompareMap // HReturn +// HTest // HThrow // HEnterInlined // HFunctionLiteral @@ -137,6 +138,7 @@ class LChunkBuilder; // HLoadNamedGeneric // HLoadFunctionPrototype // HPushArgument +// HStringLength // HTypeof // HUnaryMathOperation // HUnaryPredicate @@ -181,7 +183,6 @@ class LChunkBuilder; V(BitXor) \ V(BlockEntry) \ V(BoundsCheck) \ - V(Branch) \ V(CallConstantFunction) \ V(CallFunction) \ V(CallGlobal) \ @@ -200,7 +201,7 @@ class LChunkBuilder; V(CheckSmi) \ V(Compare) \ V(CompareJSObjectEq) \ - V(CompareMapAndBranch) \ + V(CompareMap) \ V(Constant) \ V(DeleteProperty) \ V(Deoptimize) \ @@ -248,7 +249,10 @@ class LChunkBuilder; V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ + V(StringCharCodeAt) \ + V(StringLength) \ V(Sub) \ + V(Test) \ V(Throw) \ V(Typeof) \ V(TypeofIs) \ @@ -811,44 +815,55 @@ class HBlockEntry: public HInstruction { class HControlInstruction: public HInstruction { public: - virtual HBasicBlock* FirstSuccessor() const { return NULL; } - virtual HBasicBlock* SecondSuccessor() const { return NULL; } + HControlInstruction(HBasicBlock* first, HBasicBlock* second) + : first_successor_(first), second_successor_(second) { + } + + HBasicBlock* FirstSuccessor() const { return first_successor_; } + HBasicBlock* SecondSuccessor() 
const { return second_successor_; } + + virtual void PrintDataTo(StringStream* stream) const; DECLARE_INSTRUCTION(ControlInstruction) + + private: + HBasicBlock* first_successor_; + HBasicBlock* second_successor_; }; class HDeoptimize: public HControlInstruction { public: + HDeoptimize() : HControlInstruction(NULL, NULL) { } + DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") }; class HGoto: public HControlInstruction { public: - explicit HGoto(HBasicBlock* destination) - : destination_(destination), - include_stack_check_(false) {} + explicit HGoto(HBasicBlock* target) + : HControlInstruction(target, NULL), include_stack_check_(false) { + } - virtual HBasicBlock* FirstSuccessor() const { return destination_; } void set_include_stack_check(bool include_stack_check) { include_stack_check_ = include_stack_check; } bool include_stack_check() const { return include_stack_check_; } - virtual void PrintDataTo(StringStream* stream) const; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") private: - HBasicBlock* destination_; bool include_stack_check_; }; class HUnaryControlInstruction: public HControlInstruction { public: - explicit HUnaryControlInstruction(HValue* value) { + explicit HUnaryControlInstruction(HValue* value, + HBasicBlock* true_target, + HBasicBlock* false_target) + : HControlInstruction(true_target, false_target) { SetOperandAt(0, value); } @@ -856,6 +871,8 @@ class HUnaryControlInstruction: public HControlInstruction { return Representation::Tagged(); } + virtual void PrintDataTo(StringStream* stream) const; + HValue* value() const { return OperandAt(0); } virtual int OperandCount() const { return 1; } virtual HValue* OperandAt(int index) const { return operands_[index]; } @@ -872,73 +889,50 @@ class HUnaryControlInstruction: public HControlInstruction { }; -class HBranch: public HUnaryControlInstruction { +class HTest: public HUnaryControlInstruction { public: - HBranch(HBasicBlock* true_destination, - HBasicBlock* false_destination, - HValue* 
boolean_value) - : HUnaryControlInstruction(boolean_value), - true_destination_(true_destination), - false_destination_(false_destination) { - ASSERT(true_destination != NULL && false_destination != NULL); + HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target) + : HUnaryControlInstruction(value, true_target, false_target) { + ASSERT(true_target != NULL && false_target != NULL); } virtual Representation RequiredInputRepresentation(int index) const { return Representation::None(); } - virtual HBasicBlock* FirstSuccessor() const { return true_destination_; } - virtual HBasicBlock* SecondSuccessor() const { return false_destination_; } - - virtual void PrintDataTo(StringStream* stream) const; - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - - private: - HBasicBlock* true_destination_; - HBasicBlock* false_destination_; + DECLARE_CONCRETE_INSTRUCTION(Test, "test") }; -class HCompareMapAndBranch: public HUnaryControlInstruction { +class HCompareMap: public HUnaryControlInstruction { public: - HCompareMapAndBranch(HValue* result, - Handle map, - HBasicBlock* true_destination, - HBasicBlock* false_destination) - : HUnaryControlInstruction(result), - map_(map), - true_destination_(true_destination), - false_destination_(false_destination) { - ASSERT(true_destination != NULL); - ASSERT(false_destination != NULL); + HCompareMap(HValue* value, + Handle map, + HBasicBlock* true_target, + HBasicBlock* false_target) + : HUnaryControlInstruction(value, true_target, false_target), + map_(map) { + ASSERT(true_target != NULL); + ASSERT(false_target != NULL); ASSERT(!map.is_null()); } - virtual HBasicBlock* FirstSuccessor() const { return true_destination_; } - virtual HBasicBlock* SecondSuccessor() const { return false_destination_; } - - HBasicBlock* true_destination() const { return true_destination_; } - HBasicBlock* false_destination() const { return false_destination_; } - virtual void PrintDataTo(StringStream* stream) const; Handle map() const { return 
map_; } - DECLARE_CONCRETE_INSTRUCTION(CompareMapAndBranch, "compare_map_and_branch") + DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map") private: Handle map_; - HBasicBlock* true_destination_; - HBasicBlock* false_destination_; }; class HReturn: public HUnaryControlInstruction { public: - explicit HReturn(HValue* result) : HUnaryControlInstruction(result) { } - - virtual void PrintDataTo(StringStream* stream) const; + explicit HReturn(HValue* value) + : HUnaryControlInstruction(value, NULL, NULL) { + } DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -946,9 +940,8 @@ class HReturn: public HUnaryControlInstruction { class HThrow: public HUnaryControlInstruction { public: - explicit HThrow(HValue* value) : HUnaryControlInstruction(value) { } - - virtual void PrintDataTo(StringStream* stream) const; + explicit HThrow(HValue* value) + : HUnaryControlInstruction(value, NULL, NULL) { } DECLARE_CONCRETE_INSTRUCTION(Throw, "throw") }; @@ -1579,6 +1572,12 @@ class HCheckInstanceType: public HUnaryOperation { ASSERT(first <= last); set_representation(Representation::Tagged()); SetFlag(kUseGVN); + if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) || + (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) { + // A particular string instance type can change because of GC or + // externalization, but the value still remains a string. + SetFlag(kDependsOnMaps); + } } virtual bool IsCheckInstruction() const { return true; } @@ -2033,16 +2032,26 @@ class HBitwiseBinaryOperation: public HBinaryOperation { public: HBitwiseBinaryOperation(HValue* left, HValue* right) : HBinaryOperation(left, right) { - // Default to truncating, Integer32, UseGVN. 
- set_representation(Representation::Integer32()); - SetFlag(kTruncatingToInt32); - SetFlag(kUseGVN); + set_representation(Representation::Tagged()); + SetFlag(kFlexibleRepresentation); + SetFlagMask(AllSideEffects()); } virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Integer32(); + return representation(); + } + + virtual void RepresentationChanged(Representation to) { + if (!to.IsTagged()) { + ASSERT(to.IsInteger32()); + ClearFlagMask(AllSideEffects()); + SetFlag(kTruncatingToInt32); + SetFlag(kUseGVN); + } } + HType CalculateInferredType() const; + DECLARE_INSTRUCTION(BitwiseBinaryOperation) }; @@ -2937,6 +2946,61 @@ class HStoreKeyedGeneric: public HStoreKeyed { }; +class HStringCharCodeAt: public HBinaryOperation { + public: + HStringCharCodeAt(HValue* string, HValue* index) + : HBinaryOperation(string, index) { + set_representation(Representation::Integer32()); + SetFlag(kUseGVN); + } + + virtual Representation RequiredInputRepresentation(int index) const { + // The index is supposed to be Integer32. + return (index == 1) ? 
Representation::Integer32() + : Representation::Tagged(); + } + + virtual bool DataEquals(HValue* other) const { return true; } + + HValue* string() const { return OperandAt(0); } + HValue* index() const { return OperandAt(1); } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at") + + protected: + virtual Range* InferRange() { + return new Range(0, String::kMaxUC16CharCode); + } +}; + + +class HStringLength: public HUnaryOperation { + public: + explicit HStringLength(HValue* string) : HUnaryOperation(string) { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + + virtual HType CalculateInferredType() const { + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + return HType::Smi(); + } + + virtual bool DataEquals(HValue* other) const { return true; } + + DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length") + + protected: + virtual Range* InferRange() { + return new Range(0, String::kMaxLength); + } +}; + + class HMaterializedLiteral: public HInstruction { public: HMaterializedLiteral(int index, int depth) diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index da41ef94dc..ae91065d59 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -34,6 +34,7 @@ #include "lithium-allocator.h" #include "parser.h" #include "scopes.h" +#include "stub-cache.h" #if V8_TARGET_ARCH_IA32 #include "ia32/lithium-codegen-ia32.h" @@ -504,19 +505,15 @@ HConstant* HGraph::GetConstantFalse() { void HSubgraph::AppendOptional(HSubgraph* graph, bool on_true_branch, - HValue* boolean_value) { + HValue* value) { ASSERT(HasExit() && graph->HasExit()); HBasicBlock* other_block = graph_->CreateBasicBlock(); HBasicBlock* join_block = graph_->CreateBasicBlock(); - HBasicBlock* true_branch = other_block; - HBasicBlock* false_branch = graph->entry_block(); - if (on_true_branch) { - true_branch = 
graph->entry_block(); - false_branch = other_block; - } - - exit_block_->Finish(new HBranch(true_branch, false_branch, boolean_value)); + HTest* test = on_true_branch + ? new HTest(value, graph->entry_block(), other_block) + : new HTest(value, other_block, graph->entry_block()); + exit_block_->Finish(test); other_block->Goto(join_block); graph->exit_block()->Goto(join_block); exit_block_ = join_block; @@ -934,7 +931,7 @@ class HRangeAnalysis BASE_EMBEDDED { private: void TraceRange(const char* msg, ...); void Analyze(HBasicBlock* block); - void InferControlFlowRange(HBranch* branch, HBasicBlock* dest); + void InferControlFlowRange(HTest* test, HBasicBlock* dest); void InferControlFlowRange(Token::Value op, HValue* value, HValue* other); void InferPhiRange(HPhi* phi); void InferRange(HValue* value); @@ -970,8 +967,8 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) { // Infer range based on control flow. if (block->predecessors()->length() == 1) { HBasicBlock* pred = block->predecessors()->first(); - if (pred->end()->IsBranch()) { - InferControlFlowRange(HBranch::cast(pred->end()), block); + if (pred->end()->IsTest()) { + InferControlFlowRange(HTest::cast(pred->end()), block); } } @@ -997,14 +994,12 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) { } -void HRangeAnalysis::InferControlFlowRange(HBranch* branch, HBasicBlock* dest) { - ASSERT(branch->FirstSuccessor() == dest || branch->SecondSuccessor() == dest); - ASSERT(branch->FirstSuccessor() != dest || branch->SecondSuccessor() != dest); - - if (branch->value()->IsCompare()) { - HCompare* compare = HCompare::cast(branch->value()); +void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) { + ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest)); + if (test->value()->IsCompare()) { + HCompare* compare = HCompare::cast(test->value()); Token::Value op = compare->token(); - if (branch->SecondSuccessor() == dest) { + if (test->SecondSuccessor() == dest) { op = 
Token::NegateCompareOp(op); } Token::Value inverted_op = Token::InvertCompareOp(op); @@ -2067,8 +2062,8 @@ void TestContext::BuildBranch(HValue* value) { HGraphBuilder* builder = owner(); HBasicBlock* empty_true = builder->graph()->CreateBasicBlock(); HBasicBlock* empty_false = builder->graph()->CreateBasicBlock(); - HBranch* branch = new HBranch(empty_true, empty_false, value); - builder->CurrentBlock()->Finish(branch); + HTest* test = new HTest(value, empty_true, empty_false); + builder->CurrentBlock()->Finish(test); HValue* const no_return_value = NULL; HBasicBlock* true_target = if_true(); @@ -2596,9 +2591,9 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { prev_graph->exit_block()->Finish(new HGoto(subgraph->entry_block())); } else { HBasicBlock* empty = graph()->CreateBasicBlock(); - prev_graph->exit_block()->Finish(new HBranch(empty, - subgraph->entry_block(), - prev_compare_inst)); + prev_graph->exit_block()->Finish(new HTest(prev_compare_inst, + empty, + subgraph->entry_block())); } // Build instructions for current subgraph. 
@@ -2617,9 +2612,9 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { if (prev_graph != current_subgraph_) { last_false_block = graph()->CreateBasicBlock(); HBasicBlock* empty = graph()->CreateBasicBlock(); - prev_graph->exit_block()->Finish(new HBranch(empty, - last_false_block, - prev_compare_inst)); + prev_graph->exit_block()->Finish(new HTest(prev_compare_inst, + empty, + last_false_block)); } // If we have a non-smi compare clause, we deoptimize after trying @@ -2702,8 +2697,8 @@ void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) { HBasicBlock* non_osr_entry = graph()->CreateBasicBlock(); HBasicBlock* osr_entry = graph()->CreateBasicBlock(); HValue* true_value = graph()->GetConstantTrue(); - HBranch* branch = new HBranch(non_osr_entry, osr_entry, true_value); - exit_block()->Finish(branch); + HTest* test = new HTest(true_value, non_osr_entry, osr_entry); + exit_block()->Finish(test); HBasicBlock* loop_predecessor = graph()->CreateBasicBlock(); non_osr_entry->Goto(loop_predecessor); @@ -3105,11 +3100,11 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps, (i == (maps->length() - 1)) ? subgraphs->last() : map_compare_subgraphs.last(); - current_subgraph_->exit_block()->Finish( - new HCompareMapAndBranch(receiver, - maps->at(i), - subgraphs->at(i)->entry_block(), - else_subgraph->entry_block())); + HCompareMap* compare = new HCompareMap(receiver, + maps->at(i), + subgraphs->at(i)->entry_block(), + else_subgraph->entry_block()); + current_subgraph_->exit_block()->Finish(compare); map_compare_subgraphs.Add(subgraph); } @@ -3117,11 +3112,11 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps, AddInstruction(new HCheckNonSmi(receiver)); HSubgraph* else_subgraph = (maps->length() == 1) ? 
subgraphs->at(1) : map_compare_subgraphs.last(); - current_subgraph_->exit_block()->Finish( - new HCompareMapAndBranch(receiver, - Handle(maps->first()), - subgraphs->first()->entry_block(), - else_subgraph->entry_block())); + HCompareMap* compare = new HCompareMap(receiver, + Handle(maps->first()), + subgraphs->first()->entry_block(), + else_subgraph->entry_block()); + current_subgraph_->exit_block()->Finish(compare); // Join all the call subgraphs in a new basic block and make // this basic block the current basic block. @@ -4075,9 +4070,8 @@ bool HGraphBuilder::TryInline(Call* expr) { // TODO(3168478): refactor to avoid this. HBasicBlock* empty_true = graph()->CreateBasicBlock(); HBasicBlock* empty_false = graph()->CreateBasicBlock(); - HBranch* branch = - new HBranch(empty_true, empty_false, return_value); - body->exit_block()->Finish(branch); + HTest* test = new HTest(return_value, empty_true, empty_false); + body->exit_block()->Finish(test); HValue* const no_return_value = NULL; empty_true->AddLeaveInlined(no_return_value, test_context->if_true()); @@ -4146,12 +4140,29 @@ void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) { } -bool HGraphBuilder::TryMathFunctionInline(Call* expr) { +bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr, + HValue* receiver, + Handle receiver_map, + CheckType check_type) { + ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null()); // Try to inline calls like Math.* as operations in the calling function. - if (!expr->target()->shared()->IsBuiltinMathFunction()) return false; + if (!expr->target()->shared()->HasBuiltinFunctionId()) return false; BuiltinFunctionId id = expr->target()->shared()->builtin_function_id(); int argument_count = expr->arguments()->length() + 1; // Plus receiver. 
switch (id) { + case kStringCharCodeAt: + if (argument_count == 2 && check_type == STRING_CHECK) { + HValue* index = Pop(); + HValue* string = Pop(); + ASSERT(!expr->holder().is_null()); + AddInstruction(new HCheckPrototypeMaps( + oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK), + expr->holder())); + HStringCharCodeAt* result = BuildStringCharCodeAt(string, index); + ast_context()->ReturnInstruction(result, expr->id()); + return true; + } + break; case kMathRound: case kMathFloor: case kMathAbs: @@ -4159,7 +4170,8 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) { case kMathLog: case kMathSin: case kMathCos: - if (argument_count == 2) { + if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) { + AddCheckConstantFunction(expr, receiver, receiver_map, true); HValue* argument = Pop(); Drop(1); // Receiver. HUnaryMathOperation* op = new HUnaryMathOperation(argument, id); @@ -4169,7 +4181,8 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) { } break; case kMathPow: - if (argument_count == 3) { + if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) { + AddCheckConstantFunction(expr, receiver, receiver_map, true); HValue* right = Pop(); HValue* left = Pop(); Pop(); // Pop receiver. @@ -4179,8 +4192,6 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) { double exponent = HConstant::cast(right)->DoubleValue(); if (exponent == 0.5) { result = new HUnaryMathOperation(left, kMathPowHalf); - ast_context()->ReturnInstruction(result, expr->id()); - return true; } else if (exponent == -0.5) { HConstant* double_one = new HConstant(Handle(Smi::FromInt(1)), @@ -4193,22 +4204,18 @@ bool HGraphBuilder::TryMathFunctionInline(Call* expr) { // an environment simulation here. 
ASSERT(!square_root->HasSideEffects()); result = new HDiv(double_one, square_root); - ast_context()->ReturnInstruction(result, expr->id()); - return true; } else if (exponent == 2.0) { result = new HMul(left, left); - ast_context()->ReturnInstruction(result, expr->id()); - return true; } } else if (right->IsConstant() && - HConstant::cast(right)->HasInteger32Value() && - HConstant::cast(right)->Integer32Value() == 2) { + HConstant::cast(right)->HasInteger32Value() && + HConstant::cast(right)->Integer32Value() == 2) { result = new HMul(left, left); - ast_context()->ReturnInstruction(result, expr->id()); - return true; } - result = new HPower(left, right); + if (result == NULL) { + result = new HPower(left, right); + } ast_context()->ReturnInstruction(result, expr->id()); return true; } @@ -4263,6 +4270,13 @@ bool HGraphBuilder::TryCallApply(Call* expr) { } +static bool HasCustomCallGenerator(Handle function) { + SharedFunctionInfo* info = function->shared(); + return info->HasBuiltinFunctionId() && + CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id()); +} + + void HGraphBuilder::VisitCall(Call* expr) { Expression* callee = expr->expression(); int argument_count = expr->arguments()->length() + 1; // Plus receiver. @@ -4309,30 +4323,44 @@ void HGraphBuilder::VisitCall(Call* expr) { expr->RecordTypeFeedback(oracle()); ZoneMapList* types = expr->GetReceiverTypes(); - if (expr->IsMonomorphic() && expr->check_type() == RECEIVER_MAP_CHECK) { - AddCheckConstantFunction(expr, receiver, types->first(), true); - - if (TryMathFunctionInline(expr)) { - return; - } else if (TryInline(expr)) { - if (subgraph()->HasExit()) { - HValue* return_value = Pop(); - // If we inlined a function in a test context then we need to emit - // a simulate here to shadow the ones at the end of the - // predecessor blocks. Those environments contain the return - // value on top and do not correspond to any actual state of the - // unoptimized code. 
- if (ast_context()->IsEffect()) AddSimulate(expr->id()); - ast_context()->ReturnValue(return_value); - } + if (expr->IsMonomorphic()) { + Handle receiver_map = + (types == NULL) ? Handle::null() : types->first(); + if (TryInlineBuiltinFunction(expr, + receiver, + receiver_map, + expr->check_type())) { return; - } else { - // Check for bailout, as the TryInline call in the if condition above - // might return false due to bailout during hydrogen processing. - CHECK_BAILOUT; - call = new HCallConstantFunction(expr->target(), argument_count); } + if (HasCustomCallGenerator(expr->target()) || + expr->check_type() != RECEIVER_MAP_CHECK) { + // When the target has a custom call IC generator, use the IC, + // because it is likely to generate better code. Also use the + // IC when a primitive receiver check is required. + call = new HCallNamed(name, argument_count); + } else { + AddCheckConstantFunction(expr, receiver, receiver_map, true); + + if (TryInline(expr)) { + if (subgraph()->HasExit()) { + HValue* return_value = Pop(); + // If we inlined a function in a test context then we need to emit + // a simulate here to shadow the ones at the end of the + // predecessor blocks. Those environments contain the return + // value on top and do not correspond to any actual state of the + // unoptimized code. + if (ast_context()->IsEffect()) AddSimulate(expr->id()); + ast_context()->ReturnValue(return_value); + } + return; + } else { + // Check for bailout, as the TryInline call in the if condition above + // might return false due to bailout during hydrogen processing. 
+ CHECK_BAILOUT; + call = new HCallConstantFunction(expr->target(), argument_count); + } + } } else if (types != NULL && types->length() > 1) { ASSERT(expr->check_type() == RECEIVER_MAP_CHECK); HandlePolymorphicCallNamed(expr, receiver, types, name); @@ -4720,6 +4748,18 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { } +HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string, + HValue* index) { + AddInstruction(new HCheckNonSmi(string)); + AddInstruction(new HCheckInstanceType( + string, FIRST_STRING_TYPE, LAST_STRING_TYPE)); + HStringLength* length = new HStringLength(string); + AddInstruction(length); + AddInstruction(new HBoundsCheck(index, length)); + return new HStringCharCodeAt(string, index); +} + + HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr, HValue* left, HValue* right) { @@ -4773,7 +4813,12 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr, if (FLAG_trace_representation) { PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic()); } - AssumeRepresentation(instr, ToRepresentation(info)); + Representation rep = ToRepresentation(info); + // We only generate either int32 or generic tagged bitwise operations. + if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) { + rep = Representation::Integer32(); + } + AssumeRepresentation(instr, rep); return instr; } @@ -4854,7 +4899,8 @@ void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) { graph_->GetMaximumValueID()); } value->ChangeRepresentation(r); - // The representation of the value is dictated by type feedback. + // The representation of the value is dictated by type feedback and + // will not be changed later. value->ClearFlag(HValue::kFlexibleRepresentation); } else if (FLAG_trace_representation) { PrintF("No representation assumed\n"); @@ -5129,7 +5175,11 @@ void HGraphBuilder::GenerateSetValueOf(int argument_count, int ast_id) { // Fast support for charCodeAt(n). 
void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) { - BAILOUT("inlined runtime function: StringCharCodeAt"); + ASSERT(argument_count == 2); + HValue* index = Pop(); + HValue* string = Pop(); + HStringCharCodeAt* result = BuildStringCharCodeAt(string, index); + ast_context()->ReturnInstruction(result, ast_id); } diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index 19f898381f..3524df9495 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -748,7 +748,10 @@ class HGraphBuilder: public AstVisitor { bool TryArgumentsAccess(Property* expr); bool TryCallApply(Call* expr); bool TryInline(Call* expr); - bool TryMathFunctionInline(Call* expr); + bool TryInlineBuiltinFunction(Call* expr, + HValue* receiver, + Handle receiver_map, + CheckType check_type); void TraceInline(Handle target, bool result); void HandleGlobalVariableAssignment(Variable* var, @@ -772,6 +775,8 @@ class HGraphBuilder: public AstVisitor { ZoneMapList* types, Handle name); + HStringCharCodeAt* BuildStringCharCodeAt(HValue* string, + HValue* index); HInstruction* BuildBinaryOperation(BinaryOperation* expr, HValue* left, HValue* right); diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 54cfb5c386..d5fd7b87bb 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -49,20 +49,24 @@ void RelocInfo::apply(intptr_t delta) { if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) { int32_t* p = reinterpret_cast(pc_); *p -= delta; // Relocate entry. + CPU::FlushICache(p, sizeof(uint32_t)); } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) { // Special handling of js_return when a break point is set (call // instruction has been inserted). int32_t* p = reinterpret_cast(pc_ + 1); *p -= delta; // Relocate entry. 
+ CPU::FlushICache(p, sizeof(uint32_t)); } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) { // Special handling of a debug break slot when a break point is set (call // instruction has been inserted). int32_t* p = reinterpret_cast(pc_ + 1); *p -= delta; // Relocate entry. + CPU::FlushICache(p, sizeof(uint32_t)); } else if (IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. int32_t* p = reinterpret_cast(pc_); *p += delta; // Relocate entry. + CPU::FlushICache(p, sizeof(uint32_t)); } } @@ -111,6 +115,7 @@ Object** RelocInfo::target_object_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Memory::Object_at(pc_) = target; + CPU::FlushICache(pc_, sizeof(Address)); } @@ -141,6 +146,7 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; + CPU::FlushICache(pc_, sizeof(Address)); } @@ -189,12 +195,14 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { visitor->VisitPointer(target_object_address()); + CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { visitor->VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(target_reference_address()); + CPU::FlushICache(pc_, sizeof(Address)); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (Debug::has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -214,12 +222,14 @@ void RelocInfo::Visit() { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { StaticVisitor::VisitPointer(target_object_address()); + CPU::FlushICache(pc_, sizeof(Address)); } else if 
(RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { StaticVisitor::VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(target_reference_address()); + CPU::FlushICache(pc_, sizeof(Address)); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (Debug::has_break_points() && ((RelocInfo::IsJSReturn(mode) && diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 72213dc817..2c83662efd 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -91,8 +91,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { void FastNewContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, + __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize, eax, ebx, ecx, &gc, TAG_OBJECT); // Get the function from the stack. @@ -101,7 +100,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Setup the object header. __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map()); __ mov(FieldOperand(eax, Context::kLengthOffset), - Immediate(Smi::FromInt(length))); + Immediate(Smi::FromInt(slots_))); // Setup the fixed slots. __ Set(ebx, Immediate(0)); // Set to NULL. @@ -119,7 +118,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Initialize the rest of the slots to undefined. 
__ mov(ebx, Factory::undefined_value()); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) { __ mov(Operand(eax, Context::SlotOffset(i)), ebx); } diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 1ecfd39ca1..29c8c0e406 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { frame_->AllocateStackSlots(); // Allocate the local context if needed. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { Comment cmnt(masm_, "[ allocate local context"); // Allocate local context. diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index ea947eedaf..772eb8f905 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -142,7 +142,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { bool function_in_register = true; // Possibly allocate a local context. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate local context"); // Argument to NewContext is the function, which is still in edi. diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index f570fe01e9..c234b364ce 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -718,160 +718,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { } -void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - // ----------- S t a t e ------------- - // -- eax : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label slow, failed_allocation; - - // Check that the object isn't a smi. 
- __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &slow, not_taken); - - // Check that the key is a smi. - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &slow, not_taken); - - // Get the map of the receiver. - __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); - // Check that the receiver does not require access checks. We need - // to check this explicitly since this generic stub does not perform - // map checks. - __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), - 1 << Map::kIsAccessCheckNeeded); - __ j(not_zero, &slow, not_taken); - - __ CmpInstanceType(ecx, JS_OBJECT_TYPE); - __ j(not_equal, &slow, not_taken); - - // Check that the elements array is the appropriate type of - // ExternalArray. - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); - Handle map(Heap::MapForExternalArrayType(array_type)); - __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), - Immediate(map)); - __ j(not_equal, &slow, not_taken); - - // eax: key, known to be a smi. - // edx: receiver, known to be a JSObject. - // ebx: elements object, known to be an external array. - // Check that the index is in range. - __ mov(ecx, eax); - __ SmiUntag(ecx); // Untag the index. - __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset)); - // Unsigned comparison catches both negative and too-large values. 
- __ j(above_equal, &slow); - - __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset)); - // ebx: base pointer of external storage - switch (array_type) { - case kExternalByteArray: - __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0)); - break; - case kExternalUnsignedByteArray: - __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0)); - break; - case kExternalShortArray: - __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0)); - break; - case kExternalUnsignedShortArray: - __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0)); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ mov(ecx, Operand(ebx, ecx, times_4, 0)); - break; - case kExternalFloatArray: - __ fld_s(Operand(ebx, ecx, times_4, 0)); - break; - default: - UNREACHABLE(); - break; - } - - // For integer array types: - // ecx: value - // For floating-point array type: - // FP(0): value - - if (array_type == kExternalIntArray || - array_type == kExternalUnsignedIntArray) { - // For the Int and UnsignedInt array types, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. - Label box_int; - if (array_type == kExternalIntArray) { - __ cmp(ecx, 0xC0000000); - __ j(sign, &box_int); - } else { - ASSERT_EQ(array_type, kExternalUnsignedIntArray); - // The test is different for unsigned int values. Since we need - // the value to be in the range of a positive smi, we can't - // handle either of the top two bits being set in the value. - __ test(ecx, Immediate(0xC0000000)); - __ j(not_zero, &box_int); - } - - __ mov(eax, ecx); - __ SmiTag(eax); - __ ret(0); - - __ bind(&box_int); - - // Allocate a HeapNumber for the int and perform int-to-double - // conversion. - if (array_type == kExternalIntArray) { - __ push(ecx); - __ fild_s(Operand(esp, 0)); - __ pop(ecx); - } else { - ASSERT(array_type == kExternalUnsignedIntArray); - // Need to zero-extend the value. 
- // There's no fild variant for unsigned values, so zero-extend - // to a 64-bit int manually. - __ push(Immediate(0)); - __ push(ecx); - __ fild_d(Operand(esp, 0)); - __ pop(ecx); - __ pop(ecx); - } - // FP(0): value - __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation); - // Set the value. - __ mov(eax, ecx); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(0); - } else if (array_type == kExternalFloatArray) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. - __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation); - // Set the value. - __ mov(eax, ecx); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(0); - } else { - __ mov(eax, ecx); - __ SmiTag(eax); - __ ret(0); - } - - // If we fail allocation of the HeapNumber, we still have a value on - // top of the FPU stack. Remove it. - __ bind(&failed_allocation); - __ ffree(); - __ fincstp(); - // Fall through to slow case. - - // Slow case: Jump to runtime. - __ bind(&slow); - __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1); - GenerateRuntimeGetProperty(masm); -} - - void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : key @@ -1031,194 +877,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { } -void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label slow, check_heap_number; - - // Check that the object isn't a smi. - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &slow); - // Get the map from the receiver. - __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); - // Check that the receiver does not require access checks. We need - // to do this because this generic stub does not perform map checks. 
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset), - 1 << Map::kIsAccessCheckNeeded); - __ j(not_zero, &slow); - // Check that the key is a smi. - __ test(ecx, Immediate(kSmiTagMask)); - __ j(not_zero, &slow); - // Get the instance type from the map of the receiver. - __ CmpInstanceType(edi, JS_OBJECT_TYPE); - __ j(not_equal, &slow); - - // Check that the elements array is the appropriate type of - // ExternalArray. - // eax: value - // edx: receiver, a JSObject - // ecx: key, a smi - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - __ CheckMap(edi, Handle(Heap::MapForExternalArrayType(array_type)), - &slow, true); - - // Check that the index is in range. - __ mov(ebx, ecx); - __ SmiUntag(ebx); - __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset)); - // Unsigned comparison catches both negative and too-large values. - __ j(above_equal, &slow); - - // Handle both smis and HeapNumbers in the fast path. Go to the - // runtime for all other kinds of values. - // eax: value - // edx: receiver - // ecx: key - // edi: elements array - // ebx: untagged index - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_equal, &check_heap_number); - // smi case - __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed. - __ SmiUntag(ecx); - __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset)); - // ecx: base pointer of external storage - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ mov_b(Operand(edi, ebx, times_1, 0), ecx); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ mov_w(Operand(edi, ebx, times_2, 0), ecx); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ mov(Operand(edi, ebx, times_4, 0), ecx); - break; - case kExternalFloatArray: - // Need to perform int-to-float conversion. 
- __ push(ecx); - __ fild_s(Operand(esp, 0)); - __ pop(ecx); - __ fstp_s(Operand(edi, ebx, times_4, 0)); - break; - default: - UNREACHABLE(); - break; - } - __ ret(0); // Return the original value. - - __ bind(&check_heap_number); - // eax: value - // edx: receiver - // ecx: key - // edi: elements array - // ebx: untagged index - __ cmp(FieldOperand(eax, HeapObject::kMapOffset), - Immediate(Factory::heap_number_map())); - __ j(not_equal, &slow); - - // The WebGL specification leaves the behavior of storing NaN and - // +/-Infinity into integer arrays basically undefined. For more - // reproducible behavior, convert these to zero. - __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset)); - // ebx: untagged index - // edi: base pointer of external storage - // top of FPU stack: value - if (array_type == kExternalFloatArray) { - __ fstp_s(Operand(edi, ebx, times_4, 0)); - __ ret(0); - } else { - // Need to perform float-to-int conversion. - // Test the top of the FP stack for NaN. - Label is_nan; - __ fucomi(0); - __ j(parity_even, &is_nan); - - if (array_type != kExternalUnsignedIntArray) { - __ push(ecx); // Make room on stack - __ fistp_s(Operand(esp, 0)); - __ pop(ecx); - } else { - // fistp stores values as signed integers. - // To represent the entire range, we need to store as a 64-bit - // int and discard the high 32 bits. - __ sub(Operand(esp), Immediate(2 * kPointerSize)); - __ fistp_d(Operand(esp, 0)); - __ pop(ecx); - __ add(Operand(esp), Immediate(kPointerSize)); - } - // ecx: untagged integer value - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ mov_b(Operand(edi, ebx, times_1, 0), ecx); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ mov_w(Operand(edi, ebx, times_2, 0), ecx); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: { - // We also need to explicitly check for +/-Infinity. 
These are - // converted to MIN_INT, but we need to be careful not to - // confuse with legal uses of MIN_INT. - Label not_infinity; - // This test would apparently detect both NaN and Infinity, - // but we've already checked for NaN using the FPU hardware - // above. - __ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6)); - __ and_(edx, 0x7FF0); - __ cmp(edx, 0x7FF0); - __ j(not_equal, ¬_infinity); - __ mov(ecx, 0); - __ bind(¬_infinity); - __ mov(Operand(edi, ebx, times_4, 0), ecx); - break; - } - default: - UNREACHABLE(); - break; - } - __ ret(0); // Return original value. - - __ bind(&is_nan); - __ ffree(); - __ fincstp(); - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ mov_b(Operand(edi, ebx, times_1, 0), 0); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ Set(ecx, Immediate(0)); - __ mov_w(Operand(edi, ebx, times_2, 0), ecx); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ mov(Operand(edi, ebx, times_4, 0), Immediate(0)); - break; - default: - UNREACHABLE(); - break; - } - __ ret(0); // Return the original value. - } - - // Slow case: call runtime. - __ bind(&slow); - GenerateRuntimeSetProperty(masm); -} - - // The generated code does not accept smi keys. // The generated code falls through if both probes miss. 
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 2d3eac1480..3bfb10f80e 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -1285,11 +1285,11 @@ void LCodeGen::DoCmpID(LCmpID* instr) { NearLabel done; Condition cc = TokenToCondition(instr->op(), instr->is_double()); - __ mov(ToRegister(result), Handle(Heap::true_value())); + __ mov(ToRegister(result), Factory::true_value()); __ j(cc, &done); __ bind(&unordered); - __ mov(ToRegister(result), Handle(Heap::false_value())); + __ mov(ToRegister(result), Factory::false_value()); __ bind(&done); } @@ -1320,10 +1320,10 @@ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) { Register result = ToRegister(instr->result()); __ cmp(left, Operand(right)); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); NearLabel done; __ j(equal, &done); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ bind(&done); } @@ -1348,10 +1348,10 @@ void LCodeGen::DoIsNull(LIsNull* instr) { __ cmp(reg, Factory::null_value()); if (instr->is_strict()) { - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); NearLabel done; __ j(equal, &done); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ bind(&done); } else { NearLabel true_value, false_value, done; @@ -1368,10 +1368,10 @@ void LCodeGen::DoIsNull(LIsNull* instr) { __ test(scratch, Immediate(1 << Map::kIsUndetectable)); __ j(not_zero, &true_value); __ bind(&false_value); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ jmp(&done); __ bind(&true_value); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); __ bind(&done); } } @@ -1447,11 +1447,11 @@ void LCodeGen::DoIsObject(LIsObject* instr) { __ 
j(true_cond, &is_true); __ bind(&is_false); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ jmp(&done); __ bind(&is_true); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); __ bind(&done); } @@ -1479,10 +1479,10 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) { ASSERT(instr->hydrogen()->value()->representation().IsTagged()); __ test(input, Immediate(kSmiTagMask)); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); NearLabel done; __ j(zero, &done); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ bind(&done); } @@ -1507,7 +1507,6 @@ static InstanceType TestType(HHasInstanceType* instr) { } - static Condition BranchCondition(HHasInstanceType* instr) { InstanceType from = instr->from(); InstanceType to = instr->to(); @@ -1529,10 +1528,10 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) { __ j(zero, &is_false); __ CmpObjectType(input, TestType(instr->hydrogen()), result); __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); __ jmp(&done); __ bind(&is_false); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ bind(&done); } @@ -1559,12 +1558,12 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { Register result = ToRegister(instr->result()); ASSERT(instr->hydrogen()->value()->representation().IsTagged()); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); __ test(FieldOperand(input, String::kHashFieldOffset), Immediate(String::kContainsCachedArrayIndexMask)); NearLabel done; __ j(not_zero, &done); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ bind(&done); } @@ -1653,11 +1652,11 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) { __ j(not_equal, &is_false); __ 
bind(&is_true); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); __ jmp(&done); __ bind(&is_false); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ bind(&done); } @@ -2221,11 +2220,12 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { Label negative; __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - // Check the sign of the argument. If the argument is positive, - // just return it. + // Check the sign of the argument. If the argument is positive, just + // return it. We do not need to patch the stack since |input| and + // |result| are the same register and |input| will be restored + // unchanged by popping safepoint registers. __ test(tmp, Immediate(HeapNumber::kSignMask)); __ j(not_zero, &negative); - __ mov(tmp, input_reg); __ jmp(&done); __ bind(&negative); @@ -2252,14 +2252,25 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); - - __ bind(&done); __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp); + __ bind(&done); __ PopSafepointRegisters(); } +void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { + Register input_reg = ToRegister(instr->InputAt(0)); + __ test(input_reg, Operand(input_reg)); + Label is_positive; + __ j(not_sign, &is_positive); + __ neg(input_reg); + __ test(input_reg, Operand(input_reg)); + DeoptimizeIf(negative, instr->environment()); + __ bind(&is_positive); +} + + void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { // Class for deferred case. 
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { @@ -2284,31 +2295,15 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { __ subsd(scratch, input_reg); __ pand(input_reg, scratch); } else if (r.IsInteger32()) { - Register input_reg = ToRegister(instr->InputAt(0)); - __ test(input_reg, Operand(input_reg)); - Label is_positive; - __ j(not_sign, &is_positive); - __ neg(input_reg); - __ test(input_reg, Operand(input_reg)); - DeoptimizeIf(negative, instr->environment()); - __ bind(&is_positive); + EmitIntegerMathAbs(instr); } else { // Tagged case. DeferredMathAbsTaggedHeapNumber* deferred = new DeferredMathAbsTaggedHeapNumber(this, instr); - Label not_smi; Register input_reg = ToRegister(instr->InputAt(0)); // Smi check. __ test(input_reg, Immediate(kSmiTagMask)); __ j(not_zero, deferred->entry()); - __ test(input_reg, Operand(input_reg)); - Label is_positive; - __ j(not_sign, &is_positive); - __ neg(input_reg); - - __ test(input_reg, Operand(input_reg)); - DeoptimizeIf(negative, instr->environment()); - - __ bind(&is_positive); + EmitIntegerMathAbs(instr); __ bind(deferred->exit()); } } @@ -2651,6 +2646,151 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { } +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { + class DeferredStringCharCodeAt: public LDeferredCode { + public: + DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + private: + LStringCharCodeAt* instr_; + }; + + Register string = ToRegister(instr->string()); + Register index = no_reg; + int const_index = -1; + if (instr->index()->IsConstantOperand()) { + const_index = ToInteger32(LConstantOperand::cast(instr->index())); + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (!Smi::IsValid(const_index)) { + // Guaranteed to be out of bounds because of the assert above. 
+ // So the bounds check that must dominate this instruction must + // have deoptimized already. + if (FLAG_debug_code) { + __ Abort("StringCharCodeAt: out of bounds index."); + } + // No code needs to be generated. + return; + } + } else { + index = ToRegister(instr->index()); + } + Register result = ToRegister(instr->result()); + + DeferredStringCharCodeAt* deferred = + new DeferredStringCharCodeAt(this, instr); + + NearLabel flat_string, ascii_string, done; + + // Fetch the instance type of the receiver into result register. + __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); + + // We need special handling for non-flat strings. + STATIC_ASSERT(kSeqStringTag == 0); + __ test(result, Immediate(kStringRepresentationMask)); + __ j(zero, &flat_string); + + // Handle non-flat strings. + __ test(result, Immediate(kIsConsStringMask)); + __ j(zero, deferred->entry()); + + // ConsString. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ cmp(FieldOperand(string, ConsString::kSecondOffset), + Immediate(Factory::empty_string())); + __ j(not_equal, deferred->entry()); + // Get the first of the two strings and load its instance type. + __ mov(string, FieldOperand(string, ConsString::kFirstOffset)); + __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ test(result, Immediate(kStringRepresentationMask)); + __ j(not_zero, deferred->entry()); + + // Check for 1-byte or 2-byte string. 
+ __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ test(result, Immediate(kStringEncodingMask)); + __ j(not_zero, &ascii_string); + + // 2-byte string. + // Load the 2-byte character code into the result register. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + if (instr->index()->IsConstantOperand()) { + __ movzx_w(result, + FieldOperand(string, + SeqTwoByteString::kHeaderSize + 2 * const_index)); + } else { + __ movzx_w(result, FieldOperand(string, + index, + times_2, + SeqTwoByteString::kHeaderSize)); + } + __ jmp(&done); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + if (instr->index()->IsConstantOperand()) { + __ movzx_b(result, FieldOperand(string, + SeqAsciiString::kHeaderSize + const_index)); + } else { + __ movzx_b(result, FieldOperand(string, + index, + times_1, + SeqAsciiString::kHeaderSize)); + } + __ bind(&done); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ Set(result, Immediate(0)); + + __ PushSafepointRegisters(); + __ push(string); + // Push the index as a smi. This is safe because of the checks in + // DoStringCharCodeAt above. 
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + __ push(Immediate(Smi::FromInt(const_index))); + } else { + Register index = ToRegister(instr->index()); + __ SmiTag(index); + __ push(index); + } + __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex); + if (FLAG_debug_code) { + __ AbortIfNotSmi(eax); + } + __ SmiUntag(eax); + __ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax); + __ PopSafepointRegisters(); +} + + +void LCodeGen::DoStringLength(LStringLength* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + __ mov(result, FieldOperand(string, String::kLengthOffset)); +} + + void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister() || input->IsStackSlot()); @@ -3077,13 +3217,19 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { InstanceType last = instr->hydrogen()->last(); __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); - __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), - static_cast(first)); // If there is only one type in the interval check for equality. if (first == last) { + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), + static_cast(first)); DeoptimizeIf(not_equal, instr->environment()); - } else { + } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) { + // String has a dedicated bit in instance type. + __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask); + DeoptimizeIf(not_zero, instr->environment()); + } else { + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), + static_cast(first)); DeoptimizeIf(below, instr->environment()); // Omit check for the last type. 
if (last != LAST_TYPE) { @@ -3292,11 +3438,11 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) { instr->type_literal()); __ j(final_branch_condition, &true_label); __ bind(&false_label); - __ mov(result, Handle(Heap::false_value())); + __ mov(result, Factory::false_value()); __ jmp(&done); __ bind(&true_label); - __ mov(result, Handle(Heap::true_value())); + __ mov(result, Factory::true_value()); __ bind(&done); } @@ -3341,9 +3487,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = below; } else if (type_name->Equals(Heap::boolean_symbol())) { - __ cmp(input, Handle(Heap::true_value())); + __ cmp(input, Factory::true_value()); __ j(equal, true_label); - __ cmp(input, Handle(Heap::false_value())); + __ cmp(input, Factory::false_value()); final_branch_condition = equal; } else if (type_name->Equals(Heap::undefined_symbol())) { diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index ab62e6fe92..780525a590 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -92,6 +92,7 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredTaggedToI(LTaggedToI* instr); void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr); void DoDeferredStackCheck(LGoto* instr); + void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); @@ -186,6 +187,7 @@ class LCodeGen BASE_EMBEDDED { int ToInteger32(LConstantOperand* op) const; // Specific math operations - used from DoUnaryMathOperation. 
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); void DoMathFloor(LUnaryMathOperation* instr); void DoMathRound(LUnaryMathOperation* instr); diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h index f0bd260aad..0c81d72ee3 100644 --- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h +++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h @@ -30,7 +30,7 @@ #include "v8.h" -#include "lithium-allocator.h" +#include "lithium.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index cca07c8c8b..f422514235 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -162,6 +162,12 @@ const char* LArithmeticT::Mnemonic() const { case Token::MUL: return "mul-t"; case Token::MOD: return "mod-t"; case Token::DIV: return "div-t"; + case Token::BIT_AND: return "bit-and-t"; + case Token::BIT_OR: return "bit-or-t"; + case Token::BIT_XOR: return "bit-xor-t"; + case Token::SHL: return "sal-t"; + case Token::SAR: return "sar-t"; + case Token::SHR: return "shr-t"; default: UNREACHABLE(); return NULL; @@ -739,18 +745,38 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { LInstruction* LChunkBuilder::DoBit(Token::Value op, HBitwiseBinaryOperation* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); + if (instr->representation().IsInteger32()) { + ASSERT(instr->left()->representation().IsInteger32()); + ASSERT(instr->right()->representation().IsInteger32()); - LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); - LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); - return DefineSameAsFirst(new LBitI(op, left, right)); + LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); + LOperand* right = 
UseOrConstantAtStart(instr->MostConstantOperand()); + return DefineSameAsFirst(new LBitI(op, left, right)); + } else { + ASSERT(instr->representation().IsTagged()); + ASSERT(instr->left()->representation().IsTagged()); + ASSERT(instr->right()->representation().IsTagged()); + + LOperand* left = UseFixed(instr->left(), edx); + LOperand* right = UseFixed(instr->right(), eax); + LArithmeticT* result = new LArithmeticT(op, left, right); + return MarkAsCall(DefineFixed(result, eax), instr); + } } LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { + if (instr->representation().IsTagged()) { + ASSERT(instr->left()->representation().IsTagged()); + ASSERT(instr->right()->representation().IsTagged()); + + LOperand* left = UseFixed(instr->left(), edx); + LOperand* right = UseFixed(instr->right(), eax); + LArithmeticT* result = new LArithmeticT(op, left, right); + return MarkAsCall(DefineFixed(result, eax), instr); + } + ASSERT(instr->representation().IsInteger32()); ASSERT(instr->OperandAt(0)->representation().IsInteger32()); ASSERT(instr->OperandAt(1)->representation().IsInteger32()); @@ -894,15 +920,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { if (FLAG_stress_environments && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); } - if (current->IsBranch() && !instr->IsGoto()) { - // TODO(fschneider): Handle branch instructions uniformly like - // other instructions. This requires us to generate the right - // branch instruction already at the HIR level. 
+ if (current->IsTest() && !instr->IsGoto()) { ASSERT(instr->IsControl()); - HBranch* branch = HBranch::cast(current); - instr->set_hydrogen_value(branch->value()); - HBasicBlock* first = branch->FirstSuccessor(); - HBasicBlock* second = branch->SecondSuccessor(); + HTest* test = HTest::cast(current); + instr->set_hydrogen_value(test->value()); + HBasicBlock* first = test->FirstSuccessor(); + HBasicBlock* second = test->SecondSuccessor(); ASSERT(first != NULL && second != NULL); instr->SetBranchTargets(first->block_id(), second->block_id()); } else { @@ -959,7 +982,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { } -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { +LInstruction* LChunkBuilder::DoTest(HTest* instr) { HValue* v = instr->value(); if (v->EmitAtUses()) { if (v->IsClassOfTest()) { @@ -1061,8 +1084,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { } -LInstruction* LChunkBuilder::DoCompareMapAndBranch( - HCompareMapAndBranch* instr) { +LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { ASSERT(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new LCmpMapAndBranch(value); @@ -1741,6 +1763,20 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { } +LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegisterOrConstant(instr->index()); + LStringCharCodeAt* result = new LStringCharCodeAt(string, index); + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); +} + + +LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { + LOperand* string = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LStringLength(string)); +} + + LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr); } diff --git a/deps/v8/src/ia32/lithium-ia32.h 
b/deps/v8/src/ia32/lithium-ia32.h index 67f87518a9..1cdd31ede8 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -114,6 +114,7 @@ class LCodeGen; // LStoreNamed // LStoreNamedField // LStoreNamedGeneric +// LStringCharCodeAt // LBitNotI // LCallNew // LCheckFunction @@ -141,6 +142,7 @@ class LCodeGen; // LReturn // LSmiTag // LStoreGlobal +// LStringLength // LTaggedToI // LThrow // LTypeof @@ -253,6 +255,8 @@ class LCodeGen; V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ + V(StringCharCodeAt) \ + V(StringLength) \ V(SubI) \ V(TaggedToI) \ V(Throw) \ @@ -335,33 +339,36 @@ class LInstruction: public ZoneObject { }; -template +template class OperandContainer { public: OperandContainer() { - for (int i = 0; i < N; i++) elems_[i] = NULL; + for (int i = 0; i < NumElements; i++) elems_[i] = NULL; } - int length() { return N; } - T& operator[](int i) { + int length() { return NumElements; } + ElementType& operator[](int i) { ASSERT(i < length()); return elems_[i]; } void PrintOperandsTo(StringStream* stream); private: - T elems_[N]; + ElementType elems_[NumElements]; }; -template -class OperandContainer { +template +class OperandContainer { public: int length() { return 0; } void PrintOperandsTo(StringStream* stream) { } }; -template +// R = number of result operands (0 or 1). +// I = number of input operands. +// T = number of temporary operands. +template class LTemplateInstruction: public LInstruction { public: // Allow 0 or 1 output operands. 
@@ -512,7 +519,7 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { }; -template +template class LControlInstruction: public LTemplateInstruction<0, I, T> { public: DECLARE_INSTRUCTION(ControlInstruction) @@ -570,7 +577,7 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> { }; -class LArgumentsLength: public LTemplateInstruction<1, 1> { +class LArgumentsLength: public LTemplateInstruction<1, 1, 0> { public: explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; @@ -627,7 +634,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> { }; -class LCmpID: public LTemplateInstruction<1, 2> { +class LCmpID: public LTemplateInstruction<1, 2, 0> { public: LCmpID(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -644,7 +651,7 @@ class LCmpID: public LTemplateInstruction<1, 2> { }; -class LCmpIDAndBranch: public LControlInstruction<2> { +class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -663,7 +670,7 @@ class LCmpIDAndBranch: public LControlInstruction<2> { }; -class LUnaryMathOperation: public LTemplateInstruction<1, 1> { +class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> { public: explicit LUnaryMathOperation(LOperand* value) { inputs_[0] = value; @@ -677,7 +684,7 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1> { }; -class LCmpJSObjectEq: public LTemplateInstruction<1, 2> { +class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> { public: LCmpJSObjectEq(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -688,7 +695,7 @@ class LCmpJSObjectEq: public LTemplateInstruction<1, 2> { }; -class LCmpJSObjectEqAndBranch: public LControlInstruction<2> { +class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -700,7 +707,7 @@ class LCmpJSObjectEqAndBranch: public LControlInstruction<2> { }; -class LIsNull: 
public LTemplateInstruction<1, 1> { +class LIsNull: public LTemplateInstruction<1, 1, 0> { public: explicit LIsNull(LOperand* value) { inputs_[0] = value; @@ -754,7 +761,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 2> { }; -class LIsSmi: public LTemplateInstruction<1, 1> { +class LIsSmi: public LTemplateInstruction<1, 1, 0> { public: explicit LIsSmi(LOperand* value) { inputs_[0] = value; @@ -765,7 +772,7 @@ class LIsSmi: public LTemplateInstruction<1, 1> { }; -class LIsSmiAndBranch: public LControlInstruction<1> { +class LIsSmiAndBranch: public LControlInstruction<1, 0> { public: explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; @@ -777,7 +784,7 @@ class LIsSmiAndBranch: public LControlInstruction<1> { }; -class LHasInstanceType: public LTemplateInstruction<1, 1> { +class LHasInstanceType: public LTemplateInstruction<1, 1, 0> { public: explicit LHasInstanceType(LOperand* value) { inputs_[0] = value; @@ -803,7 +810,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> { }; -class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> { +class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: explicit LHasCachedArrayIndex(LOperand* value) { inputs_[0] = value; @@ -814,7 +821,7 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> { }; -class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> { +class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { public: explicit LHasCachedArrayIndexAndBranch(LOperand* value) { inputs_[0] = value; @@ -856,7 +863,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> { }; -class LCmpT: public LTemplateInstruction<1, 2> { +class LCmpT: public LTemplateInstruction<1, 2, 0> { public: LCmpT(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -870,7 +877,7 @@ class LCmpT: public LTemplateInstruction<1, 2> { }; -class LCmpTAndBranch: public LControlInstruction<2> { +class LCmpTAndBranch: public LControlInstruction<2, 
0> { public: LCmpTAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -884,7 +891,7 @@ class LCmpTAndBranch: public LControlInstruction<2> { }; -class LInstanceOf: public LTemplateInstruction<1, 2> { +class LInstanceOf: public LTemplateInstruction<1, 2, 0> { public: LInstanceOf(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -895,7 +902,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2> { }; -class LInstanceOfAndBranch: public LControlInstruction<2> { +class LInstanceOfAndBranch: public LControlInstruction<2, 0> { public: LInstanceOfAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -935,7 +942,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> { }; -class LBitI: public LTemplateInstruction<1, 2> { +class LBitI: public LTemplateInstruction<1, 2, 0> { public: LBitI(Token::Value op, LOperand* left, LOperand* right) : op_(op) { @@ -952,7 +959,7 @@ class LBitI: public LTemplateInstruction<1, 2> { }; -class LShiftI: public LTemplateInstruction<1, 2> { +class LShiftI: public LTemplateInstruction<1, 2, 0> { public: LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) : op_(op), can_deopt_(can_deopt) { @@ -972,7 +979,7 @@ class LShiftI: public LTemplateInstruction<1, 2> { }; -class LSubI: public LTemplateInstruction<1, 2> { +class LSubI: public LTemplateInstruction<1, 2, 0> { public: LSubI(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -1025,7 +1032,7 @@ class LConstantT: public LConstant { }; -class LBranch: public LControlInstruction<1> { +class LBranch: public LControlInstruction<1, 0> { public: explicit LBranch(LOperand* value) { inputs_[0] = value; @@ -1038,28 +1045,28 @@ class LBranch: public LControlInstruction<1> { }; -class LCmpMapAndBranch: public LTemplateInstruction<0, 1> { +class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> { public: explicit LCmpMapAndBranch(LOperand* value) { inputs_[0] = value; } DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, 
"cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch) + DECLARE_HYDROGEN_ACCESSOR(CompareMap) virtual bool IsControl() const { return true; } Handle map() const { return hydrogen()->map(); } int true_block_id() const { - return hydrogen()->true_destination()->block_id(); + return hydrogen()->FirstSuccessor()->block_id(); } int false_block_id() const { - return hydrogen()->false_destination()->block_id(); + return hydrogen()->SecondSuccessor()->block_id(); } }; -class LJSArrayLength: public LTemplateInstruction<1, 1> { +class LJSArrayLength: public LTemplateInstruction<1, 1, 0> { public: explicit LJSArrayLength(LOperand* value) { inputs_[0] = value; @@ -1070,7 +1077,7 @@ class LJSArrayLength: public LTemplateInstruction<1, 1> { }; -class LFixedArrayLength: public LTemplateInstruction<1, 1> { +class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> { public: explicit LFixedArrayLength(LOperand* value) { inputs_[0] = value; @@ -1093,7 +1100,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> { }; -class LThrow: public LTemplateInstruction<0, 1> { +class LThrow: public LTemplateInstruction<0, 1, 0> { public: explicit LThrow(LOperand* value) { inputs_[0] = value; @@ -1103,7 +1110,7 @@ class LThrow: public LTemplateInstruction<0, 1> { }; -class LBitNotI: public LTemplateInstruction<1, 1> { +class LBitNotI: public LTemplateInstruction<1, 1, 0> { public: explicit LBitNotI(LOperand* value) { inputs_[0] = value; @@ -1113,7 +1120,7 @@ class LBitNotI: public LTemplateInstruction<1, 1> { }; -class LAddI: public LTemplateInstruction<1, 2> { +class LAddI: public LTemplateInstruction<1, 2, 0> { public: LAddI(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -1125,7 +1132,7 @@ class LAddI: public LTemplateInstruction<1, 2> { }; -class LPower: public LTemplateInstruction<1, 2> { +class LPower: public LTemplateInstruction<1, 2, 0> { public: LPower(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -1137,7 +1144,7 @@ class LPower: public 
LTemplateInstruction<1, 2> { }; -class LArithmeticD: public LTemplateInstruction<1, 2> { +class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) { @@ -1155,7 +1162,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2> { }; -class LArithmeticT: public LTemplateInstruction<1, 2> { +class LArithmeticT: public LTemplateInstruction<1, 2, 0> { public: LArithmeticT(Token::Value op, LOperand* left, LOperand* right) : op_(op) { @@ -1173,7 +1180,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2> { }; -class LReturn: public LTemplateInstruction<0, 1> { +class LReturn: public LTemplateInstruction<0, 1, 0> { public: explicit LReturn(LOperand* value) { inputs_[0] = value; @@ -1183,7 +1190,7 @@ class LReturn: public LTemplateInstruction<0, 1> { }; -class LLoadNamedField: public LTemplateInstruction<1, 1> { +class LLoadNamedField: public LTemplateInstruction<1, 1, 0> { public: explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; @@ -1194,7 +1201,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1> { }; -class LLoadNamedGeneric: public LTemplateInstruction<1, 1> { +class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> { public: explicit LLoadNamedGeneric(LOperand* object) { inputs_[0] = object; @@ -1222,7 +1229,7 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> { }; -class LLoadElements: public LTemplateInstruction<1, 1> { +class LLoadElements: public LTemplateInstruction<1, 1, 0> { public: explicit LLoadElements(LOperand* object) { inputs_[0] = object; @@ -1232,7 +1239,7 @@ class LLoadElements: public LTemplateInstruction<1, 1> { }; -class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> { +class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { public: LLoadKeyedFastElement(LOperand* elements, LOperand* key) { inputs_[0] = elements; @@ -1247,7 +1254,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 
2> { }; -class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> { +class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> { public: LLoadKeyedGeneric(LOperand* obj, LOperand* key) { inputs_[0] = obj; @@ -1268,7 +1275,7 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> { }; -class LStoreGlobal: public LTemplateInstruction<0, 1> { +class LStoreGlobal: public LTemplateInstruction<0, 1, 0> { public: explicit LStoreGlobal(LOperand* value) { inputs_[0] = value; @@ -1291,7 +1298,7 @@ class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> { }; -class LPushArgument: public LTemplateInstruction<0, 1> { +class LPushArgument: public LTemplateInstruction<0, 1, 0> { public: explicit LPushArgument(LOperand* value) { inputs_[0] = value; @@ -1385,7 +1392,7 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> { }; -class LCallNew: public LTemplateInstruction<1, 1> { +class LCallNew: public LTemplateInstruction<1, 1, 0> { public: explicit LCallNew(LOperand* constructor) { inputs_[0] = constructor; @@ -1410,7 +1417,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> { }; -class LInteger32ToDouble: public LTemplateInstruction<1, 1> { +class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> { public: explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; @@ -1420,7 +1427,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1> { }; -class LNumberTagI: public LTemplateInstruction<1, 1> { +class LNumberTagI: public LTemplateInstruction<1, 1, 0> { public: explicit LNumberTagI(LOperand* value) { inputs_[0] = value; @@ -1432,7 +1439,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1> { class LNumberTagD: public LTemplateInstruction<1, 1, 1> { public: - explicit LNumberTagD(LOperand* value, LOperand* temp) { + LNumberTagD(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } @@ -1471,7 +1478,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> { }; -class LSmiTag: public 
LTemplateInstruction<1, 1> { +class LSmiTag: public LTemplateInstruction<1, 1, 0> { public: explicit LSmiTag(LOperand* value) { inputs_[0] = value; @@ -1481,7 +1488,7 @@ class LSmiTag: public LTemplateInstruction<1, 1> { }; -class LNumberUntagD: public LTemplateInstruction<1, 1> { +class LNumberUntagD: public LTemplateInstruction<1, 1, 0> { public: explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; @@ -1491,7 +1498,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1> { }; -class LSmiUntag: public LTemplateInstruction<1, 1> { +class LSmiUntag: public LTemplateInstruction<1, 1, 0> { public: LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) { @@ -1590,7 +1597,35 @@ class LStoreKeyedGeneric: public LStoreKeyed { }; -class LCheckFunction: public LTemplateInstruction<0, 1> { +class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> { + public: + LStringCharCodeAt(LOperand* string, LOperand* index) { + inputs_[0] = string; + inputs_[1] = index; + } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") + DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) + + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } +}; + + +class LStringLength: public LTemplateInstruction<1, 1, 0> { + public: + explicit LStringLength(LOperand* string) { + inputs_[0] = string; + } + + DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length") + DECLARE_HYDROGEN_ACCESSOR(StringLength) + + LOperand* string() { return inputs_[0]; } +}; + + +class LCheckFunction: public LTemplateInstruction<0, 1, 0> { public: explicit LCheckFunction(LOperand* value) { inputs_[0] = value; @@ -1613,7 +1648,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> { }; -class LCheckMap: public LTemplateInstruction<0, 1> { +class LCheckMap: public LTemplateInstruction<0, 1, 0> { public: explicit LCheckMap(LOperand* value) { inputs_[0] = value; @@ -1638,7 +1673,7 @@ class LCheckPrototypeMaps: public 
LTemplateInstruction<0, 0, 1> { }; -class LCheckSmi: public LTemplateInstruction<0, 1> { +class LCheckSmi: public LTemplateInstruction<0, 1, 0> { public: LCheckSmi(LOperand* value, Condition condition) : condition_(condition) { @@ -1687,7 +1722,7 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> { }; -class LTypeof: public LTemplateInstruction<1, 1> { +class LTypeof: public LTemplateInstruction<1, 1, 0> { public: explicit LTypeof(LOperand* value) { inputs_[0] = value; @@ -1697,7 +1732,7 @@ class LTypeof: public LTemplateInstruction<1, 1> { }; -class LTypeofIs: public LTemplateInstruction<1, 1> { +class LTypeofIs: public LTemplateInstruction<1, 1, 0> { public: explicit LTypeofIs(LOperand* value) { inputs_[0] = value; @@ -1712,7 +1747,7 @@ class LTypeofIs: public LTemplateInstruction<1, 1> { }; -class LTypeofIsAndBranch: public LControlInstruction<1> { +class LTypeofIsAndBranch: public LControlInstruction<1, 0> { public: explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; @@ -1727,7 +1762,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1> { }; -class LDeleteProperty: public LTemplateInstruction<1, 2> { +class LDeleteProperty: public LTemplateInstruction<1, 2, 0> { public: LDeleteProperty(LOperand* obj, LOperand* key) { inputs_[0] = obj; diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 45d63c5a0e..6d353c2670 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -3306,6 +3306,364 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { } +MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub( + ExternalArrayType array_type, Code::Flags flags) { + // ----------- S t a t e ------------- + // -- eax : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + Label slow, failed_allocation; + + // Check that the object isn't a smi. 
+ __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &slow, not_taken); + + // Check that the key is a smi. + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &slow, not_taken); + + // Get the map of the receiver. + __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); + // Check that the receiver does not require access checks. We need + // to check this explicitly since this generic stub does not perform + // map checks. + __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), + 1 << Map::kIsAccessCheckNeeded); + __ j(not_zero, &slow, not_taken); + + __ CmpInstanceType(ecx, JS_OBJECT_TYPE); + __ j(not_equal, &slow, not_taken); + + // Check that the elements array is the appropriate type of + // ExternalArray. + __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + Handle map(Heap::MapForExternalArrayType(array_type)); + __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), + Immediate(map)); + __ j(not_equal, &slow, not_taken); + + // eax: key, known to be a smi. + // edx: receiver, known to be a JSObject. + // ebx: elements object, known to be an external array. + // Check that the index is in range. + __ mov(ecx, eax); + __ SmiUntag(ecx); // Untag the index. + __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset)); + // Unsigned comparison catches both negative and too-large values. 
+ __ j(above_equal, &slow); + + __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset)); + // ebx: base pointer of external storage + switch (array_type) { + case kExternalByteArray: + __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0)); + break; + case kExternalUnsignedByteArray: + __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0)); + break; + case kExternalShortArray: + __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0)); + break; + case kExternalUnsignedShortArray: + __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ mov(ecx, Operand(ebx, ecx, times_4, 0)); + break; + case kExternalFloatArray: + __ fld_s(Operand(ebx, ecx, times_4, 0)); + break; + default: + UNREACHABLE(); + break; + } + + // For integer array types: + // ecx: value + // For floating-point array type: + // FP(0): value + + if (array_type == kExternalIntArray || + array_type == kExternalUnsignedIntArray) { + // For the Int and UnsignedInt array types, we need to see whether + // the value can be represented in a Smi. If not, we need to convert + // it to a HeapNumber. + Label box_int; + if (array_type == kExternalIntArray) { + __ cmp(ecx, 0xC0000000); + __ j(sign, &box_int); + } else { + ASSERT_EQ(array_type, kExternalUnsignedIntArray); + // The test is different for unsigned int values. Since we need + // the value to be in the range of a positive smi, we can't + // handle either of the top two bits being set in the value. + __ test(ecx, Immediate(0xC0000000)); + __ j(not_zero, &box_int); + } + + __ mov(eax, ecx); + __ SmiTag(eax); + __ ret(0); + + __ bind(&box_int); + + // Allocate a HeapNumber for the int and perform int-to-double + // conversion. + if (array_type == kExternalIntArray) { + __ push(ecx); + __ fild_s(Operand(esp, 0)); + __ pop(ecx); + } else { + ASSERT(array_type == kExternalUnsignedIntArray); + // Need to zero-extend the value. 
+ // There's no fild variant for unsigned values, so zero-extend + // to a 64-bit int manually. + __ push(Immediate(0)); + __ push(ecx); + __ fild_d(Operand(esp, 0)); + __ pop(ecx); + __ pop(ecx); + } + // FP(0): value + __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation); + // Set the value. + __ mov(eax, ecx); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ ret(0); + } else if (array_type == kExternalFloatArray) { + // For the floating-point array type, we need to always allocate a + // HeapNumber. + __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation); + // Set the value. + __ mov(eax, ecx); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ ret(0); + } else { + __ mov(eax, ecx); + __ SmiTag(eax); + __ ret(0); + } + + // If we fail allocation of the HeapNumber, we still have a value on + // top of the FPU stack. Remove it. + __ bind(&failed_allocation); + __ ffree(); + __ fincstp(); + // Fall through to slow case. + + // Slow case: Jump to runtime. + __ bind(&slow); + __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1); + // ----------- S t a t e ------------- + // -- eax : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + + __ pop(ebx); + __ push(edx); // receiver + __ push(eax); // name + __ push(ebx); // return address + + // Perform tail call to the entry. + __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); + + // Return the generated code. + return GetCode(flags); +} + + +MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( + ExternalArrayType array_type, Code::Flags flags) { + // ----------- S t a t e ------------- + // -- eax : value + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + Label slow, check_heap_number; + + // Check that the object isn't a smi. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &slow); + // Get the map from the receiver. 
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + // Check that the receiver does not require access checks. We need + // to do this because this generic stub does not perform map checks. + __ test_b(FieldOperand(edi, Map::kBitFieldOffset), + 1 << Map::kIsAccessCheckNeeded); + __ j(not_zero, &slow); + // Check that the key is a smi. + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow); + // Get the instance type from the map of the receiver. + __ CmpInstanceType(edi, JS_OBJECT_TYPE); + __ j(not_equal, &slow); + + // Check that the elements array is the appropriate type of + // ExternalArray. + // eax: value + // edx: receiver, a JSObject + // ecx: key, a smi + __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); + __ CheckMap(edi, Handle(Heap::MapForExternalArrayType(array_type)), + &slow, true); + + // Check that the index is in range. + __ mov(ebx, ecx); + __ SmiUntag(ebx); + __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset)); + // Unsigned comparison catches both negative and too-large values. + __ j(above_equal, &slow); + + // Handle both smis and HeapNumbers in the fast path. Go to the + // runtime for all other kinds of values. + // eax: value + // edx: receiver + // ecx: key + // edi: elements array + // ebx: untagged index + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_equal, &check_heap_number); + // smi case + __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed. 
+ __ SmiUntag(ecx); + __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset)); + // ecx: base pointer of external storage + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ mov_b(Operand(edi, ebx, times_1, 0), ecx); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ mov_w(Operand(edi, ebx, times_2, 0), ecx); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ mov(Operand(edi, ebx, times_4, 0), ecx); + break; + case kExternalFloatArray: + // Need to perform int-to-float conversion. + __ push(ecx); + __ fild_s(Operand(esp, 0)); + __ pop(ecx); + __ fstp_s(Operand(edi, ebx, times_4, 0)); + break; + default: + UNREACHABLE(); + break; + } + __ ret(0); // Return the original value. + + __ bind(&check_heap_number); + // eax: value + // edx: receiver + // ecx: key + // edi: elements array + // ebx: untagged index + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + Immediate(Factory::heap_number_map())); + __ j(not_equal, &slow); + + // The WebGL specification leaves the behavior of storing NaN and + // +/-Infinity into integer arrays basically undefined. For more + // reproducible behavior, convert these to zero. + __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset)); + // ebx: untagged index + // edi: base pointer of external storage + if (array_type == kExternalFloatArray) { + __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ fstp_s(Operand(edi, ebx, times_4, 0)); + __ ret(0); + } else { + // Perform float-to-int conversion with truncation (round-to-zero) + // behavior. + + // For the moment we make the slow call to the runtime on + // processors that don't support SSE2. The code in IntegerConvert + // (code-stubs-ia32.cc) is roughly what is needed here though the + // conversion failure case does not need to be handled. 
+ if (CpuFeatures::IsSupported(SSE2)) { + if (array_type != kExternalIntArray && + array_type != kExternalUnsignedIntArray) { + ASSERT(CpuFeatures::IsSupported(SSE2)); + CpuFeatures::Scope scope(SSE2); + __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset)); + // ecx: untagged integer value + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ mov_b(Operand(edi, ebx, times_1, 0), ecx); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ mov_w(Operand(edi, ebx, times_2, 0), ecx); + break; + default: + UNREACHABLE(); + break; + } + } else { + if (CpuFeatures::IsSupported(SSE3)) { + CpuFeatures::Scope scope(SSE3); + // fisttp stores values as signed integers. To represent the + // entire range of int and unsigned int arrays, store as a + // 64-bit int and discard the high 32 bits. + // If the value is NaN or +/-infinity, the result is 0x80000000, + // which is automatically zero when taken mod 2^n, n < 32. + __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ fisttp_d(Operand(esp, 0)); + __ pop(ecx); + __ add(Operand(esp), Immediate(kPointerSize)); + } else { + ASSERT(CpuFeatures::IsSupported(SSE2)); + CpuFeatures::Scope scope(SSE2); + // We can easily implement the correct rounding behavior for the + // range [0, 2^31-1]. For the time being, to keep this code simple, + // make the slow runtime call for values outside this range. + // Note: we could do better for signed int arrays. + __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset)); + // We will need the key if we have to make the slow runtime call. + __ push(ecx); + __ LoadPowerOf2(xmm1, ecx, 31); + __ pop(ecx); + __ ucomisd(xmm1, xmm0); + __ j(above_equal, &slow); + __ cvttsd2si(ecx, Operand(xmm0)); + } + // ecx: untagged integer value + __ mov(Operand(edi, ebx, times_4, 0), ecx); + } + __ ret(0); // Return original value. + } + } + + // Slow case: call runtime. 
+ __ bind(&slow); + // ----------- S t a t e ------------- + // -- eax : value + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + + __ pop(ebx); + __ push(edx); + __ push(ecx); + __ push(eax); + __ push(ebx); + + // Do tail-call to runtime routine. + __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + + return GetCode(flags); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index afae323536..555ce3f139 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -367,55 +367,6 @@ void KeyedStoreIC::Clear(Address address, Code* target) { } -Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) { - switch (elements_kind) { - case JSObject::EXTERNAL_BYTE_ELEMENTS: - return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray); - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray); - case JSObject::EXTERNAL_SHORT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray); - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - return Builtins::builtin( - Builtins::KeyedLoadIC_ExternalUnsignedShortArray); - case JSObject::EXTERNAL_INT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray); - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray); - case JSObject::EXTERNAL_FLOAT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray); - default: - UNREACHABLE(); - return NULL; - } -} - - -Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) { - switch (elements_kind) { - case JSObject::EXTERNAL_BYTE_ELEMENTS: - return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray); - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - return Builtins::builtin( - Builtins::KeyedStoreIC_ExternalUnsignedByteArray); - case 
JSObject::EXTERNAL_SHORT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray); - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - return Builtins::builtin( - Builtins::KeyedStoreIC_ExternalUnsignedShortArray); - case JSObject::EXTERNAL_INT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray); - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray); - case JSObject::EXTERNAL_FLOAT_ELEMENTS: - return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray); - default: - UNREACHABLE(); - return NULL; - } -} - - static bool HasInterceptorGetter(JSObject* object) { return !object->GetNamedInterceptor()->getter()->IsUndefined(); } @@ -1243,7 +1194,10 @@ MaybeObject* KeyedLoadIC::Load(State state, } else if (object->IsJSObject()) { Handle receiver = Handle::cast(object); if (receiver->HasExternalArrayElements()) { - stub = external_array_stub(receiver->GetElementsKind()); + MaybeObject* probe = + StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, false); + stub = + probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked()); } else if (receiver->HasIndexedInterceptor()) { stub = indexed_interceptor_stub(); } else if (state == UNINITIALIZED && @@ -1636,7 +1590,10 @@ MaybeObject* KeyedStoreIC::Store(State state, if (object->IsJSObject()) { Handle receiver = Handle::cast(object); if (receiver->HasExternalArrayElements()) { - stub = external_array_stub(receiver->GetElementsKind()); + MaybeObject* probe = + StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true); + stub = + probe->IsFailure() ? 
NULL : Code::cast(probe->ToObjectUnchecked()); } else if (state == UNINITIALIZED && key->IsSmi() && receiver->map()->has_fast_elements()) { diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 9996affa74..55cb34a1cd 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -345,12 +345,6 @@ class KeyedLoadIC: public IC { static void GenerateGeneric(MacroAssembler* masm); static void GenerateString(MacroAssembler* masm); - // Generators for external array types. See objects.h. - // These are similar to the generic IC; they optimize the case of - // operating upon external array types but fall back to the runtime - // for all other types. - static void GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type); static void GenerateIndexedInterceptor(MacroAssembler* masm); // Clear the use of the inlined version. @@ -386,7 +380,6 @@ class KeyedLoadIC: public IC { static Code* string_stub() { return Builtins::builtin(Builtins::KeyedLoadIC_String); } - static Code* external_array_stub(JSObject::ElementsKind elements_kind); static Code* indexed_interceptor_stub() { return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor); @@ -470,13 +463,6 @@ class KeyedStoreIC: public IC { static void GenerateRuntimeSetProperty(MacroAssembler* masm); static void GenerateGeneric(MacroAssembler* masm); - // Generators for external array types. See objects.h. - // These are similar to the generic IC; they optimize the case of - // operating upon external array types but fall back to the runtime - // for all other types. - static void GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type); - // Clear the inlined version so the IC is always hit. 
static void ClearInlinedVersion(Address address); @@ -501,7 +487,6 @@ class KeyedStoreIC: public IC { static Code* generic_stub() { return Builtins::builtin(Builtins::KeyedStoreIC_Generic); } - static Code* external_array_stub(JSObject::ElementsKind elements_kind); static void Clear(Address address, Code* target); diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index 2bbc6b6527..cac7d65e98 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -71,73 +71,24 @@ static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) { } -void LOperand::PrintTo(StringStream* stream) { - LUnallocated* unalloc = NULL; - switch (kind()) { - case INVALID: - break; - case UNALLOCATED: - unalloc = LUnallocated::cast(this); - stream->Add("v%d", unalloc->virtual_register()); - switch (unalloc->policy()) { - case LUnallocated::NONE: - break; - case LUnallocated::FIXED_REGISTER: { - const char* register_name = - Register::AllocationIndexToString(unalloc->fixed_index()); - stream->Add("(=%s)", register_name); - break; - } - case LUnallocated::FIXED_DOUBLE_REGISTER: { - const char* double_register_name = - DoubleRegister::AllocationIndexToString(unalloc->fixed_index()); - stream->Add("(=%s)", double_register_name); - break; - } - case LUnallocated::FIXED_SLOT: - stream->Add("(=%dS)", unalloc->fixed_index()); - break; - case LUnallocated::MUST_HAVE_REGISTER: - stream->Add("(R)"); - break; - case LUnallocated::WRITABLE_REGISTER: - stream->Add("(WR)"); - break; - case LUnallocated::SAME_AS_FIRST_INPUT: - stream->Add("(1)"); - break; - case LUnallocated::ANY: - stream->Add("(-)"); - break; - case LUnallocated::IGNORE: - stream->Add("(0)"); - break; - } - break; - case CONSTANT_OPERAND: - stream->Add("[constant:%d]", index()); - break; - case STACK_SLOT: - stream->Add("[stack:%d]", index()); - break; - case DOUBLE_STACK_SLOT: - stream->Add("[double_stack:%d]", index()); - break; - case REGISTER: - 
stream->Add("[%s|R]", Register::AllocationIndexToString(index())); - break; - case DOUBLE_REGISTER: - stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index())); - break; - case ARGUMENT: - stream->Add("[arg:%d]", index()); - break; +UsePosition::UsePosition(LifetimePosition pos, LOperand* operand) + : operand_(operand), + hint_(NULL), + pos_(pos), + next_(NULL), + requires_reg_(false), + register_beneficial_(true) { + if (operand_ != NULL && operand_->IsUnallocated()) { + LUnallocated* unalloc = LUnallocated::cast(operand_); + requires_reg_ = unalloc->HasRegisterPolicy(); + register_beneficial_ = !unalloc->HasAnyPolicy(); } + ASSERT(pos_.IsValid()); } -int LOperand::VirtualRegister() { - LUnallocated* unalloc = LUnallocated::cast(this); - return unalloc->virtual_register(); + +bool UsePosition::HasHint() const { + return hint_ != NULL && !hint_->IsUnallocated(); } @@ -190,6 +141,53 @@ bool LiveRange::HasOverlap(UseInterval* target) const { #endif +LiveRange::LiveRange(int id) + : id_(id), + spilled_(false), + assigned_register_(kInvalidAssignment), + assigned_register_kind_(NONE), + last_interval_(NULL), + first_interval_(NULL), + first_pos_(NULL), + parent_(NULL), + next_(NULL), + current_interval_(NULL), + last_processed_use_(NULL), + spill_start_index_(kMaxInt) { + spill_operand_ = new LUnallocated(LUnallocated::IGNORE); +} + + +void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) { + ASSERT(!HasRegisterAssigned() && !IsSpilled()); + assigned_register_ = reg; + assigned_register_kind_ = register_kind; + ConvertOperands(); +} + + +void LiveRange::MakeSpilled() { + ASSERT(!IsSpilled()); + ASSERT(TopLevel()->HasAllocatedSpillOperand()); + spilled_ = true; + assigned_register_ = kInvalidAssignment; + ConvertOperands(); +} + + +bool LiveRange::HasAllocatedSpillOperand() const { + return spill_operand_ != NULL && !spill_operand_->IsUnallocated(); +} + + +void LiveRange::SetSpillOperand(LOperand* operand) { + 
ASSERT(!operand->IsUnallocated()); + ASSERT(spill_operand_ != NULL); + ASSERT(spill_operand_->IsUnallocated()); + spill_operand_->ConvertTo(operand->kind(), operand->index()); +} + + UsePosition* LiveRange::NextUsePosition(LifetimePosition start) { UsePosition* use_pos = last_processed_use_; if (use_pos == NULL) use_pos = first_pos(); @@ -2015,20 +2013,6 @@ bool LAllocator::IsBlockBoundary(LifetimePosition pos) { } -void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) { - UsePosition* prev_pos = prev->AddUsePosition( - LifetimePosition::FromInstructionIndex(pos)); - UsePosition* next_pos = next->AddUsePosition( - LifetimePosition::FromInstructionIndex(pos)); - LOperand* prev_operand = prev_pos->operand(); - LOperand* next_operand = next_pos->operand(); - LGap* gap = chunk_->GetGapAt(pos); - gap->GetOrCreateParallelMove(LGap::START)-> - AddMove(prev_operand, next_operand); - next_pos->set_hint(prev_operand); -} - - LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) { ASSERT(!range->IsFixed()); TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value()); diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h index 48c65631df..83f5583e3e 100644 --- a/deps/v8/src/lithium-allocator.h +++ b/deps/v8/src/lithium-allocator.h @@ -48,6 +48,8 @@ class StringStream; class LArgument; class LChunk; +class LOperand; +class LUnallocated; class LConstantOperand; class LGap; class LParallelMove; @@ -149,355 +151,6 @@ enum RegisterKind { }; -class LOperand: public ZoneObject { - public: - enum Kind { - INVALID, - UNALLOCATED, - CONSTANT_OPERAND, - STACK_SLOT, - DOUBLE_STACK_SLOT, - REGISTER, - DOUBLE_REGISTER, - ARGUMENT - }; - - LOperand() : value_(KindField::encode(INVALID)) { } - - Kind kind() const { return KindField::decode(value_); } - int index() const { return static_cast(value_) >> kKindFieldWidth; } - bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; } - bool IsStackSlot() const { 
return kind() == STACK_SLOT; } - bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; } - bool IsRegister() const { return kind() == REGISTER; } - bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; } - bool IsArgument() const { return kind() == ARGUMENT; } - bool IsUnallocated() const { return kind() == UNALLOCATED; } - bool Equals(LOperand* other) const { return value_ == other->value_; } - int VirtualRegister(); - - void PrintTo(StringStream* stream); - void ConvertTo(Kind kind, int index) { - value_ = KindField::encode(kind); - value_ |= index << kKindFieldWidth; - ASSERT(this->index() == index); - } - - protected: - static const int kKindFieldWidth = 3; - class KindField : public BitField { }; - - LOperand(Kind kind, int index) { ConvertTo(kind, index); } - - unsigned value_; -}; - - -class LUnallocated: public LOperand { - public: - enum Policy { - NONE, - ANY, - FIXED_REGISTER, - FIXED_DOUBLE_REGISTER, - FIXED_SLOT, - MUST_HAVE_REGISTER, - WRITABLE_REGISTER, - SAME_AS_FIRST_INPUT, - IGNORE - }; - - // Lifetime of operand inside the instruction. - enum Lifetime { - // USED_AT_START operand is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - USED_AT_START, - - // USED_AT_END operand is treated as live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - USED_AT_END - }; - - explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) { - Initialize(policy, 0, USED_AT_END); - } - - LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) { - Initialize(policy, fixed_index, USED_AT_END); - } - - LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) { - Initialize(policy, 0, lifetime); - } - - // The superclass has a KindField. 
Some policies have a signed fixed - // index in the upper bits. - static const int kPolicyWidth = 4; - static const int kLifetimeWidth = 1; - static const int kVirtualRegisterWidth = 17; - - static const int kPolicyShift = kKindFieldWidth; - static const int kLifetimeShift = kPolicyShift + kPolicyWidth; - static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth; - static const int kFixedIndexShift = - kVirtualRegisterShift + kVirtualRegisterWidth; - - class PolicyField : public BitField { }; - - class LifetimeField - : public BitField { - }; - - class VirtualRegisterField - : public BitField { - }; - - static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1); - static const int kMaxFixedIndices = 128; - - bool HasIgnorePolicy() const { return policy() == IGNORE; } - bool HasNoPolicy() const { return policy() == NONE; } - bool HasAnyPolicy() const { - return policy() == ANY; - } - bool HasFixedPolicy() const { - return policy() == FIXED_REGISTER || - policy() == FIXED_DOUBLE_REGISTER || - policy() == FIXED_SLOT; - } - bool HasRegisterPolicy() const { - return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER; - } - bool HasSameAsInputPolicy() const { - return policy() == SAME_AS_FIRST_INPUT; - } - Policy policy() const { return PolicyField::decode(value_); } - void set_policy(Policy policy) { - value_ &= ~PolicyField::mask(); - value_ |= PolicyField::encode(policy); - } - int fixed_index() const { - return static_cast(value_) >> kFixedIndexShift; - } - - unsigned virtual_register() const { - return VirtualRegisterField::decode(value_); - } - - void set_virtual_register(unsigned id) { - value_ &= ~VirtualRegisterField::mask(); - value_ |= VirtualRegisterField::encode(id); - } - - LUnallocated* CopyUnconstrained() { - LUnallocated* result = new LUnallocated(ANY); - result->set_virtual_register(virtual_register()); - return result; - } - - static LUnallocated* cast(LOperand* op) { - ASSERT(op->IsUnallocated()); - return 
reinterpret_cast(op); - } - - bool IsUsedAtStart() { - return LifetimeField::decode(value_) == USED_AT_START; - } - - private: - void Initialize(Policy policy, int fixed_index, Lifetime lifetime) { - value_ |= PolicyField::encode(policy); - value_ |= LifetimeField::encode(lifetime); - value_ |= fixed_index << kFixedIndexShift; - ASSERT(this->fixed_index() == fixed_index); - } -}; - - -class LMoveOperands BASE_EMBEDDED { - public: - LMoveOperands(LOperand* source, LOperand* destination) - : source_(source), destination_(destination) { - } - - LOperand* source() const { return source_; } - void set_source(LOperand* operand) { source_ = operand; } - - LOperand* destination() const { return destination_; } - void set_destination(LOperand* operand) { destination_ = operand; } - - // The gap resolver marks moves as "in-progress" by clearing the - // destination (but not the source). - bool IsPending() const { - return destination_ == NULL && source_ != NULL; - } - - // True if this move a move into the given destination operand. - bool Blocks(LOperand* operand) const { - return !IsEliminated() && source()->Equals(operand); - } - - // A move is redundant if it's been eliminated, if its source and - // destination are the same, or if its destination is unneeded. - bool IsRedundant() const { - return IsEliminated() || source_->Equals(destination_) || IsIgnored(); - } - - bool IsIgnored() const { - return destination_ != NULL && - destination_->IsUnallocated() && - LUnallocated::cast(destination_)->HasIgnorePolicy(); - } - - // We clear both operands to indicate move that's been eliminated. 
- void Eliminate() { source_ = destination_ = NULL; } - bool IsEliminated() const { - ASSERT(source_ != NULL || destination_ == NULL); - return source_ == NULL; - } - - private: - LOperand* source_; - LOperand* destination_; -}; - - -class LConstantOperand: public LOperand { - public: - static LConstantOperand* Create(int index) { - ASSERT(index >= 0); - if (index < kNumCachedOperands) return &cache[index]; - return new LConstantOperand(index); - } - - static LConstantOperand* cast(LOperand* op) { - ASSERT(op->IsConstantOperand()); - return reinterpret_cast(op); - } - - static void SetupCache(); - - private: - static const int kNumCachedOperands = 128; - static LConstantOperand cache[]; - - LConstantOperand() : LOperand() { } - explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { } -}; - - -class LArgument: public LOperand { - public: - explicit LArgument(int index) : LOperand(ARGUMENT, index) { } - - static LArgument* cast(LOperand* op) { - ASSERT(op->IsArgument()); - return reinterpret_cast(op); - } -}; - - -class LStackSlot: public LOperand { - public: - static LStackSlot* Create(int index) { - ASSERT(index >= 0); - if (index < kNumCachedOperands) return &cache[index]; - return new LStackSlot(index); - } - - static LStackSlot* cast(LOperand* op) { - ASSERT(op->IsStackSlot()); - return reinterpret_cast(op); - } - - static void SetupCache(); - - private: - static const int kNumCachedOperands = 128; - static LStackSlot cache[]; - - LStackSlot() : LOperand() { } - explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { } -}; - - -class LDoubleStackSlot: public LOperand { - public: - static LDoubleStackSlot* Create(int index) { - ASSERT(index >= 0); - if (index < kNumCachedOperands) return &cache[index]; - return new LDoubleStackSlot(index); - } - - static LDoubleStackSlot* cast(LOperand* op) { - ASSERT(op->IsStackSlot()); - return reinterpret_cast(op); - } - - static void SetupCache(); - - private: - static const int kNumCachedOperands = 
128; - static LDoubleStackSlot cache[]; - - LDoubleStackSlot() : LOperand() { } - explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { } -}; - - -class LRegister: public LOperand { - public: - static LRegister* Create(int index) { - ASSERT(index >= 0); - if (index < kNumCachedOperands) return &cache[index]; - return new LRegister(index); - } - - static LRegister* cast(LOperand* op) { - ASSERT(op->IsRegister()); - return reinterpret_cast(op); - } - - static void SetupCache(); - - private: - static const int kNumCachedOperands = 16; - static LRegister cache[]; - - LRegister() : LOperand() { } - explicit LRegister(int index) : LOperand(REGISTER, index) { } -}; - - -class LDoubleRegister: public LOperand { - public: - static LDoubleRegister* Create(int index) { - ASSERT(index >= 0); - if (index < kNumCachedOperands) return &cache[index]; - return new LDoubleRegister(index); - } - - static LDoubleRegister* cast(LOperand* op) { - ASSERT(op->IsDoubleRegister()); - return reinterpret_cast(op); - } - - static void SetupCache(); - - private: - static const int kNumCachedOperands = 16; - static LDoubleRegister cache[]; - - LDoubleRegister() : LOperand() { } - explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { } -}; - - // A register-allocator view of a Lithium instruction. It contains the id of // the output operand and a list of input operand uses. class InstructionSummary: public ZoneObject { @@ -588,27 +241,14 @@ class UseInterval: public ZoneObject { // Representation of a use position. 
class UsePosition: public ZoneObject { public: - UsePosition(LifetimePosition pos, LOperand* operand) - : operand_(operand), - hint_(NULL), - pos_(pos), - next_(NULL), - requires_reg_(false), - register_beneficial_(true) { - if (operand_ != NULL && operand_->IsUnallocated()) { - LUnallocated* unalloc = LUnallocated::cast(operand_); - requires_reg_ = unalloc->HasRegisterPolicy(); - register_beneficial_ = !unalloc->HasAnyPolicy(); - } - ASSERT(pos_.IsValid()); - } + UsePosition(LifetimePosition pos, LOperand* operand); LOperand* operand() const { return operand_; } bool HasOperand() const { return operand_ != NULL; } LOperand* hint() const { return hint_; } void set_hint(LOperand* hint) { hint_ = hint; } - bool HasHint() const { return hint_ != NULL && !hint_->IsUnallocated(); } + bool HasHint() const; bool RequiresRegister() const; bool RegisterIsBeneficial() const; @@ -634,21 +274,7 @@ class LiveRange: public ZoneObject { public: static const int kInvalidAssignment = 0x7fffffff; - explicit LiveRange(int id) - : id_(id), - spilled_(false), - assigned_register_(kInvalidAssignment), - assigned_register_kind_(NONE), - last_interval_(NULL), - first_interval_(NULL), - first_pos_(NULL), - parent_(NULL), - next_(NULL), - current_interval_(NULL), - last_processed_use_(NULL), - spill_start_index_(kMaxInt) { - spill_operand_ = new LUnallocated(LUnallocated::IGNORE); - } + explicit LiveRange(int id); UseInterval* first_interval() const { return first_interval_; } UsePosition* first_pos() const { return first_pos_; } @@ -663,19 +289,8 @@ class LiveRange: public ZoneObject { LOperand* CreateAssignedOperand(); int assigned_register() const { return assigned_register_; } int spill_start_index() const { return spill_start_index_; } - void set_assigned_register(int reg, RegisterKind register_kind) { - ASSERT(!HasRegisterAssigned() && !IsSpilled()); - assigned_register_ = reg; - assigned_register_kind_ = register_kind; - ConvertOperands(); - } - void MakeSpilled() { - 
ASSERT(!IsSpilled()); - ASSERT(TopLevel()->HasAllocatedSpillOperand()); - spilled_ = true; - assigned_register_ = kInvalidAssignment; - ConvertOperands(); - } + void set_assigned_register(int reg, RegisterKind register_kind); + void MakeSpilled(); // Returns use position in this live range that follows both start // and last processed use position. @@ -724,17 +339,9 @@ class LiveRange: public ZoneObject { return last_interval_->end(); } - bool HasAllocatedSpillOperand() const { - return spill_operand_ != NULL && !spill_operand_->IsUnallocated(); - } - + bool HasAllocatedSpillOperand() const; LOperand* GetSpillOperand() const { return spill_operand_; } - void SetSpillOperand(LOperand* operand) { - ASSERT(!operand->IsUnallocated()); - ASSERT(spill_operand_ != NULL); - ASSERT(spill_operand_->IsUnallocated()); - spill_operand_->ConvertTo(operand->kind(), operand->index()); - } + void SetSpillOperand(LOperand* operand); void SetSpillStartIndex(int start) { spill_start_index_ = Min(start, spill_start_index_); @@ -984,7 +591,6 @@ class LAllocator BASE_EMBEDDED { void Spill(LiveRange* range); bool IsBlockBoundary(LifetimePosition pos); - void AddGapMove(int pos, LiveRange* prev, LiveRange* next); // Helper methods for resolving control flow. 
void ResolveControlFlow(LiveRange* range, diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc index d6cff25653..e829f2f049 100644 --- a/deps/v8/src/lithium.cc +++ b/deps/v8/src/lithium.cc @@ -30,6 +30,78 @@ namespace v8 { namespace internal { + +void LOperand::PrintTo(StringStream* stream) { + LUnallocated* unalloc = NULL; + switch (kind()) { + case INVALID: + break; + case UNALLOCATED: + unalloc = LUnallocated::cast(this); + stream->Add("v%d", unalloc->virtual_register()); + switch (unalloc->policy()) { + case LUnallocated::NONE: + break; + case LUnallocated::FIXED_REGISTER: { + const char* register_name = + Register::AllocationIndexToString(unalloc->fixed_index()); + stream->Add("(=%s)", register_name); + break; + } + case LUnallocated::FIXED_DOUBLE_REGISTER: { + const char* double_register_name = + DoubleRegister::AllocationIndexToString(unalloc->fixed_index()); + stream->Add("(=%s)", double_register_name); + break; + } + case LUnallocated::FIXED_SLOT: + stream->Add("(=%dS)", unalloc->fixed_index()); + break; + case LUnallocated::MUST_HAVE_REGISTER: + stream->Add("(R)"); + break; + case LUnallocated::WRITABLE_REGISTER: + stream->Add("(WR)"); + break; + case LUnallocated::SAME_AS_FIRST_INPUT: + stream->Add("(1)"); + break; + case LUnallocated::ANY: + stream->Add("(-)"); + break; + case LUnallocated::IGNORE: + stream->Add("(0)"); + break; + } + break; + case CONSTANT_OPERAND: + stream->Add("[constant:%d]", index()); + break; + case STACK_SLOT: + stream->Add("[stack:%d]", index()); + break; + case DOUBLE_STACK_SLOT: + stream->Add("[double_stack:%d]", index()); + break; + case REGISTER: + stream->Add("[%s|R]", Register::AllocationIndexToString(index())); + break; + case DOUBLE_REGISTER: + stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index())); + break; + case ARGUMENT: + stream->Add("[arg:%d]", index()); + break; + } +} + + +int LOperand::VirtualRegister() { + LUnallocated* unalloc = LUnallocated::cast(this); + return 
unalloc->virtual_register(); +} + + bool LParallelMove::IsRedundant() const { for (int i = 0; i < move_operands_.length(); ++i) { if (!move_operands_[i].IsRedundant()) return false; diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h index 5f7c92fce4..e1b6fc0256 100644 --- a/deps/v8/src/lithium.h +++ b/deps/v8/src/lithium.h @@ -29,12 +29,360 @@ #define V8_LITHIUM_H_ #include "hydrogen.h" -#include "lithium-allocator.h" #include "safepoint-table.h" namespace v8 { namespace internal { +class LOperand: public ZoneObject { + public: + enum Kind { + INVALID, + UNALLOCATED, + CONSTANT_OPERAND, + STACK_SLOT, + DOUBLE_STACK_SLOT, + REGISTER, + DOUBLE_REGISTER, + ARGUMENT + }; + + LOperand() : value_(KindField::encode(INVALID)) { } + + Kind kind() const { return KindField::decode(value_); } + int index() const { return static_cast(value_) >> kKindFieldWidth; } + bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; } + bool IsStackSlot() const { return kind() == STACK_SLOT; } + bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; } + bool IsRegister() const { return kind() == REGISTER; } + bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; } + bool IsArgument() const { return kind() == ARGUMENT; } + bool IsUnallocated() const { return kind() == UNALLOCATED; } + bool Equals(LOperand* other) const { return value_ == other->value_; } + int VirtualRegister(); + + void PrintTo(StringStream* stream); + void ConvertTo(Kind kind, int index) { + value_ = KindField::encode(kind); + value_ |= index << kKindFieldWidth; + ASSERT(this->index() == index); + } + + protected: + static const int kKindFieldWidth = 3; + class KindField : public BitField { }; + + LOperand(Kind kind, int index) { ConvertTo(kind, index); } + + unsigned value_; +}; + + +class LUnallocated: public LOperand { + public: + enum Policy { + NONE, + ANY, + FIXED_REGISTER, + FIXED_DOUBLE_REGISTER, + FIXED_SLOT, + MUST_HAVE_REGISTER, + WRITABLE_REGISTER, + 
SAME_AS_FIRST_INPUT, + IGNORE + }; + + // Lifetime of operand inside the instruction. + enum Lifetime { + // USED_AT_START operand is guaranteed to be live only at + // instruction start. Register allocator is free to assign the same register + // to some other operand used inside instruction (i.e. temporary or + // output). + USED_AT_START, + + // USED_AT_END operand is treated as live until the end of + // instruction. This means that register allocator will not reuse it's + // register for any other operand inside instruction. + USED_AT_END + }; + + explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) { + Initialize(policy, 0, USED_AT_END); + } + + LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) { + Initialize(policy, fixed_index, USED_AT_END); + } + + LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) { + Initialize(policy, 0, lifetime); + } + + // The superclass has a KindField. Some policies have a signed fixed + // index in the upper bits. 
+ static const int kPolicyWidth = 4; + static const int kLifetimeWidth = 1; + static const int kVirtualRegisterWidth = 17; + + static const int kPolicyShift = kKindFieldWidth; + static const int kLifetimeShift = kPolicyShift + kPolicyWidth; + static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth; + static const int kFixedIndexShift = + kVirtualRegisterShift + kVirtualRegisterWidth; + + class PolicyField : public BitField { }; + + class LifetimeField + : public BitField { + }; + + class VirtualRegisterField + : public BitField { + }; + + static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1); + static const int kMaxFixedIndices = 128; + + bool HasIgnorePolicy() const { return policy() == IGNORE; } + bool HasNoPolicy() const { return policy() == NONE; } + bool HasAnyPolicy() const { + return policy() == ANY; + } + bool HasFixedPolicy() const { + return policy() == FIXED_REGISTER || + policy() == FIXED_DOUBLE_REGISTER || + policy() == FIXED_SLOT; + } + bool HasRegisterPolicy() const { + return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER; + } + bool HasSameAsInputPolicy() const { + return policy() == SAME_AS_FIRST_INPUT; + } + Policy policy() const { return PolicyField::decode(value_); } + void set_policy(Policy policy) { + value_ &= ~PolicyField::mask(); + value_ |= PolicyField::encode(policy); + } + int fixed_index() const { + return static_cast(value_) >> kFixedIndexShift; + } + + unsigned virtual_register() const { + return VirtualRegisterField::decode(value_); + } + + void set_virtual_register(unsigned id) { + value_ &= ~VirtualRegisterField::mask(); + value_ |= VirtualRegisterField::encode(id); + } + + LUnallocated* CopyUnconstrained() { + LUnallocated* result = new LUnallocated(ANY); + result->set_virtual_register(virtual_register()); + return result; + } + + static LUnallocated* cast(LOperand* op) { + ASSERT(op->IsUnallocated()); + return reinterpret_cast(op); + } + + bool IsUsedAtStart() { + return 
LifetimeField::decode(value_) == USED_AT_START; + } + + private: + void Initialize(Policy policy, int fixed_index, Lifetime lifetime) { + value_ |= PolicyField::encode(policy); + value_ |= LifetimeField::encode(lifetime); + value_ |= fixed_index << kFixedIndexShift; + ASSERT(this->fixed_index() == fixed_index); + } +}; + + +class LMoveOperands BASE_EMBEDDED { + public: + LMoveOperands(LOperand* source, LOperand* destination) + : source_(source), destination_(destination) { + } + + LOperand* source() const { return source_; } + void set_source(LOperand* operand) { source_ = operand; } + + LOperand* destination() const { return destination_; } + void set_destination(LOperand* operand) { destination_ = operand; } + + // The gap resolver marks moves as "in-progress" by clearing the + // destination (but not the source). + bool IsPending() const { + return destination_ == NULL && source_ != NULL; + } + + // True if this move a move into the given destination operand. + bool Blocks(LOperand* operand) const { + return !IsEliminated() && source()->Equals(operand); + } + + // A move is redundant if it's been eliminated, if its source and + // destination are the same, or if its destination is unneeded. + bool IsRedundant() const { + return IsEliminated() || source_->Equals(destination_) || IsIgnored(); + } + + bool IsIgnored() const { + return destination_ != NULL && + destination_->IsUnallocated() && + LUnallocated::cast(destination_)->HasIgnorePolicy(); + } + + // We clear both operands to indicate move that's been eliminated. 
+ void Eliminate() { source_ = destination_ = NULL; } + bool IsEliminated() const { + ASSERT(source_ != NULL || destination_ == NULL); + return source_ == NULL; + } + + private: + LOperand* source_; + LOperand* destination_; +}; + + +class LConstantOperand: public LOperand { + public: + static LConstantOperand* Create(int index) { + ASSERT(index >= 0); + if (index < kNumCachedOperands) return &cache[index]; + return new LConstantOperand(index); + } + + static LConstantOperand* cast(LOperand* op) { + ASSERT(op->IsConstantOperand()); + return reinterpret_cast(op); + } + + static void SetupCache(); + + private: + static const int kNumCachedOperands = 128; + static LConstantOperand cache[]; + + LConstantOperand() : LOperand() { } + explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { } +}; + + +class LArgument: public LOperand { + public: + explicit LArgument(int index) : LOperand(ARGUMENT, index) { } + + static LArgument* cast(LOperand* op) { + ASSERT(op->IsArgument()); + return reinterpret_cast(op); + } +}; + + +class LStackSlot: public LOperand { + public: + static LStackSlot* Create(int index) { + ASSERT(index >= 0); + if (index < kNumCachedOperands) return &cache[index]; + return new LStackSlot(index); + } + + static LStackSlot* cast(LOperand* op) { + ASSERT(op->IsStackSlot()); + return reinterpret_cast(op); + } + + static void SetupCache(); + + private: + static const int kNumCachedOperands = 128; + static LStackSlot cache[]; + + LStackSlot() : LOperand() { } + explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { } +}; + + +class LDoubleStackSlot: public LOperand { + public: + static LDoubleStackSlot* Create(int index) { + ASSERT(index >= 0); + if (index < kNumCachedOperands) return &cache[index]; + return new LDoubleStackSlot(index); + } + + static LDoubleStackSlot* cast(LOperand* op) { + ASSERT(op->IsStackSlot()); + return reinterpret_cast(op); + } + + static void SetupCache(); + + private: + static const int kNumCachedOperands = 
128; + static LDoubleStackSlot cache[]; + + LDoubleStackSlot() : LOperand() { } + explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { } +}; + + +class LRegister: public LOperand { + public: + static LRegister* Create(int index) { + ASSERT(index >= 0); + if (index < kNumCachedOperands) return &cache[index]; + return new LRegister(index); + } + + static LRegister* cast(LOperand* op) { + ASSERT(op->IsRegister()); + return reinterpret_cast(op); + } + + static void SetupCache(); + + private: + static const int kNumCachedOperands = 16; + static LRegister cache[]; + + LRegister() : LOperand() { } + explicit LRegister(int index) : LOperand(REGISTER, index) { } +}; + + +class LDoubleRegister: public LOperand { + public: + static LDoubleRegister* Create(int index) { + ASSERT(index >= 0); + if (index < kNumCachedOperands) return &cache[index]; + return new LDoubleRegister(index); + } + + static LDoubleRegister* cast(LOperand* op) { + ASSERT(op->IsDoubleRegister()); + return reinterpret_cast(op); + } + + static void SetupCache(); + + private: + static const int kNumCachedOperands = 16; + static LDoubleRegister cache[]; + + LDoubleRegister() : LOperand() { } + explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { } +}; + + class LParallelMove : public ZoneObject { public: LParallelMove() : move_operands_(4) { } diff --git a/deps/v8/src/liveobjectlist-inl.h b/deps/v8/src/liveobjectlist-inl.h new file mode 100644 index 0000000000..997da4ee95 --- /dev/null +++ b/deps/v8/src/liveobjectlist-inl.h @@ -0,0 +1,36 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_LIVEOBJECTLIST_INL_H_ +#define V8_LIVEOBJECTLIST_INL_H_ + +#include "v8.h" + +#include "liveobjectlist.h" + +#endif // V8_LIVEOBJECTLIST_INL_H_ + diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc new file mode 100644 index 0000000000..28a3d6d6ec --- /dev/null +++ b/deps/v8/src/liveobjectlist.cc @@ -0,0 +1,53 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifdef LIVE_OBJECT_LIST + +#include +#include + +#include "v8.h" + +#include "checks.h" +#include "global-handles.h" +#include "heap.h" +#include "inspector.h" +#include "list-inl.h" +#include "liveobjectlist.h" +#include "string-stream.h" +#include "top.h" +#include "v8utils.h" + +namespace v8 { +namespace internal { + + + +} } // namespace v8::internal + +#endif // LIVE_OBJECT_LIST + diff --git a/deps/v8/src/liveobjectlist.h b/deps/v8/src/liveobjectlist.h new file mode 100644 index 0000000000..11f5c45178 --- /dev/null +++ b/deps/v8/src/liveobjectlist.h @@ -0,0 +1,112 @@ +// Copyright 2011 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_LIVEOBJECTLIST_H_ +#define V8_LIVEOBJECTLIST_H_ + +#include "v8.h" + +#include "checks.h" +#include "heap.h" +#include "objects.h" +#include "globals.h" + +namespace v8 { +namespace internal { + +#ifdef LIVE_OBJECT_LIST + + +// Temporary stubbed out LiveObjectList implementation. 
+class LiveObjectList { + public: + inline static void GCEpilogue() {} + inline static void GCPrologue() {} + inline static void IterateElements(ObjectVisitor* v) {} + inline static void ProcessNonLive(HeapObject *obj) {} + inline static void UpdateReferencesForScavengeGC() {} + + static MaybeObject* Capture() { return Heap::undefined_value(); } + static bool Delete(int id) { return false; } + static MaybeObject* Dump(int id1, + int id2, + int start_idx, + int dump_limit, + Handle filter_obj) { + return Heap::undefined_value(); + } + static MaybeObject* Info(int start_idx, int dump_limit) { + return Heap::undefined_value(); + } + static MaybeObject* Summarize(int id1, + int id2, + Handle filter_obj) { + return Heap::undefined_value(); + } + + static void Reset() {} + static Object* GetObj(int obj_id) { return Heap::undefined_value(); } + static Object* GetObjId(Handle address) { + return Heap::undefined_value(); + } + static MaybeObject* GetObjRetainers(int obj_id, + Handle instance_filter, + bool verbose, + int start, + int count, + Handle filter_obj) { + return Heap::undefined_value(); + } + + static Object* GetPath(int obj_id1, + int obj_id2, + Handle instance_filter) { + return Heap::undefined_value(); + } + static Object* PrintObj(int obj_id) { return Heap::undefined_value(); } +}; + + +#else // !LIVE_OBJECT_LIST + + +class LiveObjectList { + public: + static void GCEpilogue() {} + static void GCPrologue() {} + static void IterateElements(ObjectVisitor* v) {} + static void ProcessNonLive(HeapObject *obj) {} + static void UpdateReferencesForScavengeGC() {} +}; + + +#endif // LIVE_OBJECT_LIST + +} } // namespace v8::internal + +#endif // V8_LIVEOBJECTLIST_H_ + diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index a30ef8a914..932d64d2ea 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -90,21 +90,28 @@ function FormatString(format, args) { } +// When formatting internally created error messages, do not +// invoke overwritten 
error toString methods but explicitly use +// the error to string method. This is to avoid leaking error +// objects between script tags in a browser setting. +function ToStringCheckErrorObject(obj) { + if (obj instanceof $Error) { + return %_CallFunction(obj, errorToString); + } else { + return ToString(obj); + } +} + + function ToDetailString(obj) { if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) { var constructor = obj.constructor; - if (!constructor) return ToString(obj); + if (!constructor) return ToStringCheckErrorObject(obj); var constructorName = constructor.name; - if (!constructorName) return ToString(obj); + if (!constructorName) return ToStringCheckErrorObject(obj); return "#<" + GetInstanceName(constructorName) + ">"; - } else if (obj instanceof $Error) { - // When formatting internally created error messages, do not - // invoke overwritten error toString methods but explicitly use - // the error to string method. This is to avoid leaking error - // objects between script tags in a browser setting. 
- return %_CallFunction(obj, errorToString); } else { - return ToString(obj); + return ToStringCheckErrorObject(obj); } } @@ -202,7 +209,13 @@ function FormatMessage(message) { array_indexof_not_defined: "Array.getIndexOf: Argument undefined", object_not_extensible: "Can't add property %0, object is not extensible", illegal_access: "Illegal access", - invalid_preparser_data: "Invalid preparser data for function %0" + invalid_preparser_data: "Invalid preparser data for function %0", + strict_mode_with: "Strict mode code may not include a with statement", + strict_catch_variable: "Catch variable may not be eval or arguments in strict mode", + strict_param_name: "Parameter name eval or arguments is not allowed in strict mode", + strict_param_dupe: "Strict mode function may not have duplicate parameter names", + strict_var_name: "Variable name may not be eval or arguments in strict mode", + strict_function_name: "Function name may not be eval or arguments in strict mode", }; } var format = kMessages[message.type]; @@ -1006,19 +1019,44 @@ $Error.captureStackTrace = captureStackTrace; // Setup extra properties of the Error.prototype object. $Error.prototype.message = ''; +// Global list of error objects visited during errorToString. This is +// used to detect cycles in error toString formatting. +var visited_errors = new $Array(); +var cyclic_error_marker = new $Object(); + +function errorToStringDetectCycle() { + if (!%PushIfAbsent(visited_errors, this)) throw cyclic_error_marker; + try { + var type = this.type; + if (type && !this.hasOwnProperty("message")) { + var formatted = FormatMessage({ type: type, args: this.arguments }); + return this.name + ": " + formatted; + } + var message = this.hasOwnProperty("message") ? 
(": " + this.message) : ""; + return this.name + message; + } finally { + visited_errors.pop(); + } +} + function errorToString() { - var type = this.type; - if (type && !this.hasOwnProperty("message")) { - return this.name + ": " + FormatMessage({ type: type, args: this.arguments }); + // This helper function is needed because access to properties on + // the builtins object do not work inside of a catch clause. + function isCyclicErrorMarker(o) { return o === cyclic_error_marker; } + + try { + return %_CallFunction(this, errorToStringDetectCycle); + } catch(e) { + // If this error message was encountered already return the empty + // string for it instead of recursively formatting it. + if (isCyclicErrorMarker(e)) return ''; + else throw e; } - var message = this.hasOwnProperty("message") ? (": " + this.message) : ""; - return this.name + message; } %FunctionSetName(errorToString, 'toString'); %SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM); - // Boilerplate for exceptions for stack overflows. Used from // Top::StackOverflow(). 
const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []); diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index 519fe624b9..e5c2ad80c3 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -172,23 +172,11 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { } -void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - UNIMPLEMENTED_MIPS(); -} - - void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { UNIMPLEMENTED_MIPS(); } -void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - UNIMPLEMENTED_MIPS(); -} - - void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { UNIMPLEMENTED_MIPS(); } diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index 91dec1757b..683b8626e5 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -397,6 +397,20 @@ Object* ConstructStubCompiler::CompileConstructStub( } +Object* ExternalArrayStubCompiler::CompileKeyedLoadStub( + ExternalArrayType array_type, Code::Flags flags) { + UNIMPLEMENTED_MIPS(); + return reinterpret_cast(NULL); // UNIMPLEMENTED RETURN +} + + +Object* ExternalArrayStubCompiler::CompileKeyedStoreStub( + ExternalArrayType array_type, Code::Flags flags) { + UNIMPLEMENTED_MIPS(); + return reinterpret_cast(NULL); // UNIMPLEMENTED RETURN +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index df44674a1a..21e318bfce 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -3079,12 +3079,6 @@ bool SharedFunctionInfo::HasBuiltinFunctionId() { } -bool SharedFunctionInfo::IsBuiltinMathFunction() { - return HasBuiltinFunctionId() && - builtin_function_id() >= kFirstMathFunctionId; -} - - BuiltinFunctionId SharedFunctionInfo::builtin_function_id() { ASSERT(HasBuiltinFunctionId()); return 
static_cast(Smi::cast(function_data())->value()); diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc index 9879da25d6..9f05b0f62d 100644 --- a/deps/v8/src/objects-printer.cc +++ b/deps/v8/src/objects-printer.cc @@ -395,6 +395,7 @@ static const char* TypeToString(InstanceType type) { case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT"; case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY"; case PROXY_TYPE: return "PROXY"; + case LAST_STRING_TYPE: return "LAST_STRING_TYPE"; #define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME; STRUCT_LIST(MAKE_STRUCT_CASE) #undef MAKE_STRUCT_CASE diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index f9cab45fb1..8c63022db8 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -455,6 +455,7 @@ const uint32_t kShortcutTypeTag = kConsStringTag; enum InstanceType { // String types. + // FIRST_STRING_TYPE SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag, ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag, CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag, @@ -471,6 +472,7 @@ enum InstanceType { EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag, EXTERNAL_STRING_WITH_ASCII_DATA_TYPE = kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag, + // LAST_STRING_TYPE EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag, PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE, @@ -523,7 +525,8 @@ enum InstanceType { JS_BUILTINS_OBJECT_TYPE, JS_GLOBAL_PROXY_TYPE, JS_ARRAY_TYPE, - JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE + + JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE JS_FUNCTION_TYPE, @@ -532,6 +535,8 @@ enum InstanceType { LAST_TYPE = JS_FUNCTION_TYPE, INVALID_TYPE = FIRST_TYPE - 1, FIRST_NONSTRING_TYPE = MAP_TYPE, + FIRST_STRING_TYPE = FIRST_TYPE, + LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1, // Boundaries for testing for an external array. 
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE, LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE, @@ -541,7 +546,10 @@ enum InstanceType { // function objects are not counted as objects, even though they are // implemented as such; only values whose typeof is "object" are included. FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE, - LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE + LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE, + // RegExp objects have [[Class]] "function" because they are callable. + // All types from this type and above are objects with [[Class]] "function". + FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE }; @@ -4066,7 +4074,6 @@ class SharedFunctionInfo: public HeapObject { inline bool IsApiFunction(); inline FunctionTemplateInfo* get_api_func_data(); inline bool HasBuiltinFunctionId(); - inline bool IsBuiltinMathFunction(); inline BuiltinFunctionId builtin_function_id(); // [script info]: Script from which the function originates. diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 6ad9ab3162..2637281f08 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -283,6 +283,11 @@ class TemporaryScope BASE_EMBEDDED { void AddLoop() { loop_count_++; } bool ContainsLoops() const { return loop_count_ > 0; } + bool StrictMode() { return strict_mode_; } + void EnableStrictMode() { + strict_mode_ = FLAG_strict_mode; + } + private: // Captures the number of literals that need materialization in the // function. Includes regexp literals, and boilerplate for object @@ -300,6 +305,9 @@ class TemporaryScope BASE_EMBEDDED { // Captures the number of loops inside the scope. int loop_count_; + // Parsing strict mode code. + bool strict_mode_; + // Bookkeeping TemporaryScope** variable_; TemporaryScope* parent_; @@ -314,6 +322,8 @@ TemporaryScope::TemporaryScope(TemporaryScope** variable) loop_count_(0), variable_(variable), parent_(*variable) { + // Inherit the strict mode from the parent scope. 
+ strict_mode_ = (parent_ != NULL) && parent_->strict_mode_; *variable = this; } @@ -561,7 +571,6 @@ class LexicalScope BASE_EMBEDDED { int prev_level_; }; - // ---------------------------------------------------------------------------- // The CHECK_OK macro is a convenient macro to enforce error // handling for functions that may fail (by returning !*ok). @@ -669,7 +678,8 @@ FunctionLiteral* Parser::DoParseProgram(Handle source, 0, source->length(), false, - temp_scope.ContainsLoops()); + temp_scope.ContainsLoops(), + temp_scope.StrictMode()); } else if (stack_overflow_) { Top::StackOverflow(); } @@ -1075,9 +1085,46 @@ void* Parser::ParseSourceElements(ZoneList* processor, ASSERT(processor != NULL); InitializationBlockFinder block_finder; ThisNamedPropertyAssigmentFinder this_property_assignment_finder; + bool directive_prologue = true; // Parsing directive prologue. + while (peek() != end_token) { + if (directive_prologue && peek() != Token::STRING) { + directive_prologue = false; + } + + Scanner::Location token_loc = scanner().peek_location(); Statement* stat = ParseStatement(NULL, CHECK_OK); - if (stat == NULL || stat->IsEmpty()) continue; + + if (stat == NULL || stat->IsEmpty()) { + directive_prologue = false; // End of directive prologue. + continue; + } + + if (directive_prologue) { + // A shot at a directive. + ExpressionStatement *e_stat; + Literal *literal; + // Still processing directive prologue? + if ((e_stat = stat->AsExpressionStatement()) != NULL && + (literal = e_stat->expression()->AsLiteral()) != NULL && + literal->handle()->IsString()) { + Handle directive = Handle::cast(literal->handle()); + + // Check "use strict" directive (ES5 14.1). + if (!temp_scope_->StrictMode() && + directive->Equals(Heap::use_strict()) && + token_loc.end_pos - token_loc.beg_pos == + Heap::use_strict()->length() + 2) { + temp_scope_->EnableStrictMode(); + // "use strict" is the only directive for now. 
+ directive_prologue = false; + } + } else { + // End of the directive prologue. + directive_prologue = false; + } + } + // We find and mark the initialization blocks on top level code only. // This is because the optimization prevents reuse of the map transitions, // so it should be used only for code that will only be run once. @@ -1431,6 +1478,10 @@ Block* Parser::ParseVariableStatement(bool* ok) { return result; } +static bool IsEvalOrArguments(Handle string) { + return string.is_identical_to(Factory::eval_symbol()) || + string.is_identical_to(Factory::arguments_symbol()); +} // If the variable declaration declares exactly one non-const // variable, then *var is set to that variable. In all other cases, @@ -1479,6 +1530,13 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN, Handle name = ParseIdentifier(CHECK_OK); if (fni_ != NULL) fni_->PushVariableName(name); + // Strict mode variables may not be named eval or arguments + if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) { + ReportMessage("strict_var_name", Vector::empty()); + *ok = false; + return NULL; + } + // Declare variable. 
// Note that we *always* must treat the initial value via a separate init // assignment for variables and constants because the value must be assigned @@ -1839,6 +1897,13 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) { // 'with' '(' Expression ')' Statement Expect(Token::WITH, CHECK_OK); + + if (temp_scope_->StrictMode()) { + ReportMessage("strict_mode_with", Vector::empty()); + *ok = false; + return NULL; + } + Expect(Token::LPAREN, CHECK_OK); Expression* expr = ParseExpression(true, CHECK_OK); Expect(Token::RPAREN, CHECK_OK); @@ -1971,6 +2036,13 @@ TryStatement* Parser::ParseTryStatement(bool* ok) { Expect(Token::LPAREN, CHECK_OK); Handle name = ParseIdentifier(CHECK_OK); + + if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) { + ReportMessage("strict_catch_variable", Vector::empty()); + *ok = false; + return NULL; + } + Expect(Token::RPAREN, CHECK_OK); if (peek() == Token::LBRACE) { @@ -3224,11 +3296,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle var_name, // '(' (Identifier)*[','] ')' Expect(Token::LPAREN, CHECK_OK); int start_pos = scanner().location().beg_pos; + bool done = (peek() == Token::RPAREN); while (!done) { Handle param_name = ParseIdentifier(CHECK_OK); - top_scope_->AddParameter(top_scope_->DeclareLocal(param_name, - Variable::VAR)); + Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR); + top_scope_->AddParameter(parameter); num_parameters++; done = (peek() == Token::RPAREN); if (!done) Expect(Token::COMMA, CHECK_OK); @@ -3300,6 +3373,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle var_name, end_pos = scanner().location().end_pos; } + // Validate strict mode. + if (temp_scope_->StrictMode()) { + if (IsEvalOrArguments(name)) { + int position = function_token_position != RelocInfo::kNoPosition + ? function_token_position + : (start_pos > 0 ? 
start_pos - 1 : start_pos); + ReportMessageAt(Scanner::Location(position, start_pos), + "strict_function_name", Vector::empty()); + *ok = false; + return NULL; + } + // TODO(mmaly): Check for octal escape sequence here. + } + FunctionLiteral* function_literal = new FunctionLiteral(name, top_scope_, @@ -3312,7 +3399,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle var_name, start_pos, end_pos, function_name->length() > 0, - temp_scope.ContainsLoops()); + temp_scope.ContainsLoops(), + temp_scope.StrictMode()); function_literal->set_function_token_position(function_token_position); if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal); diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index ad1e499ac5..7174c7f9cb 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -215,6 +215,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { : file_(file), memory_(memory), size_(size) { } virtual ~PosixMemoryMappedFile(); virtual void* memory() { return memory_; } + virtual int size() { return size_; } private: FILE* file_; void* memory_; @@ -222,6 +223,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { }; +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, void* initial) { FILE* file = fopen(name, "w+"); diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index 755e8cdaf6..761ff7e207 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -318,6 +318,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { : file_(file), memory_(memory), size_(size) 
{ } virtual ~PosixMemoryMappedFile(); virtual void* memory() { return memory_; } + virtual int size() { return size_; } private: FILE* file_; void* memory_; @@ -325,6 +326,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { }; +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, void* initial) { FILE* file = fopen(name, "w+"); diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index ce53305173..ea35c1b130 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -196,6 +196,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { : file_(file), memory_(memory), size_(size) { } virtual ~PosixMemoryMappedFile(); virtual void* memory() { return memory_; } + virtual int size() { return size_; } private: FILE* file_; void* memory_; @@ -203,6 +204,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { }; +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, void* initial) { FILE* file = fopen(name, "w+"); diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc index f1b7695981..49d3dd988d 100644 --- a/deps/v8/src/platform-nullos.cc +++ b/deps/v8/src/platform-nullos.cc @@ -242,6 +242,12 @@ void OS::DebugBreak() { } +OS::MemoryMappedFile* 
OS::MemoryMappedFile::open(const char* name) { + UNIMPLEMENTED(); + return NULL; +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, void* initial) { UNIMPLEMENTED(); diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index 5de6081907..0002dd7620 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -213,6 +213,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { : file_(file), memory_(memory), size_(size) { } virtual ~PosixMemoryMappedFile(); virtual void* memory() { return memory_; } + virtual int size() { return size_; } private: FILE* file_; void* memory_; @@ -220,6 +221,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { }; +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, void* initial) { FILE* file = fopen(name, "w+"); diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index ab5c0a3768..256dc75f6b 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -122,6 +122,11 @@ FILE* OS::FOpen(const char* path, const char* mode) { } +bool OS::Remove(const char* path) { + return (remove(path) == 0); +} + + const char* OS::LogFileOpenMode = "w"; diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index dc4493aab9..556e26be21 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -226,6 +226,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { : file_(file), memory_(memory), size_(size) { } virtual ~PosixMemoryMappedFile(); virtual void* memory() { return memory_; } 
+ virtual int size() { return size_; } private: FILE* file_; void* memory_; @@ -233,6 +234,19 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { }; +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, void* initial) { FILE* file = fopen(name, "w+"); diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index bf1737a54b..b5a85f6689 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -670,6 +670,11 @@ FILE* OS::FOpen(const char* path, const char* mode) { } +bool OS::Remove(const char* path) { + return (DeleteFileA(path) != 0); +} + + // Open log file in binary mode to avoid /n -> /r/n conversion. 
const char* OS::LogFileOpenMode = "wb"; @@ -911,17 +916,44 @@ void OS::DebugBreak() { class Win32MemoryMappedFile : public OS::MemoryMappedFile { public: - Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory) - : file_(file), file_mapping_(file_mapping), memory_(memory) { } + Win32MemoryMappedFile(HANDLE file, + HANDLE file_mapping, + void* memory, + int size) + : file_(file), + file_mapping_(file_mapping), + memory_(memory), + size_(size) { } virtual ~Win32MemoryMappedFile(); virtual void* memory() { return memory_; } + virtual int size() { return size_; } private: HANDLE file_; HANDLE file_mapping_; void* memory_; + int size_; }; +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + // Open a physical file + HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); + if (file == NULL) return NULL; + + int size = static_cast(GetFileSize(file, NULL)); + + // Create a file mapping for the physical file + HANDLE file_mapping = CreateFileMapping(file, NULL, + PAGE_READWRITE, 0, static_cast(size), NULL); + if (file_mapping == NULL) return NULL; + + // Map a view of the file into memory + void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size); + return new Win32MemoryMappedFile(file, file_mapping, memory, size); +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, void* initial) { // Open a physical file @@ -935,7 +967,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, // Map a view of the file into memory void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size); if (memory) memmove(memory, initial, size); - return new Win32MemoryMappedFile(file, file_mapping, memory); + return new Win32MemoryMappedFile(file, file_mapping, memory, size); } diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index 7b17067477..0d7d2e9cb8 100644 --- 
a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -174,6 +174,7 @@ class OS { static int GetLastError(); static FILE* FOpen(const char* path, const char* mode); + static bool Remove(const char* path); // Log file open mode is platform-dependent due to line ends issues. static const char* LogFileOpenMode; @@ -251,9 +252,11 @@ class OS { class MemoryMappedFile { public: + static MemoryMappedFile* open(const char* name); static MemoryMappedFile* create(const char* name, int size, void* initial); virtual ~MemoryMappedFile() { } virtual void* memory() = 0; + virtual int size() = 0; }; // Safe formatting print. Ensures that str is always null-terminated. diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index d3f54ad3f2..50da1faf91 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -726,6 +726,7 @@ void Scope::ResolveVariable(Scope* global_scope, // Note that we must do a lookup anyway, because if we find one, // we must mark that variable as potentially accessed from this // inner scope (the property may not be in the 'with' object). + if (var != NULL) var->set_is_used(true); var = NonLocal(proxy->name(), Variable::DYNAMIC); } else { @@ -833,8 +834,8 @@ bool Scope::MustAllocate(Variable* var) { // visible name. if ((var->is_this() || var->name()->length() > 0) && (var->is_accessed_from_inner_scope() || - scope_calls_eval_ || inner_scope_calls_eval_ || - scope_contains_with_)) { + scope_calls_eval_ || + inner_scope_calls_eval_)) { var->set_is_used(true); } // Global variables do not need to be allocated. 
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index 295cc4a60b..15f128da4c 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -507,6 +507,74 @@ MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) { } +namespace { + +ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) { + switch (kind) { + case JSObject::EXTERNAL_BYTE_ELEMENTS: + return kExternalByteArray; + case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + return kExternalUnsignedByteArray; + case JSObject::EXTERNAL_SHORT_ELEMENTS: + return kExternalShortArray; + case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + return kExternalUnsignedShortArray; + case JSObject::EXTERNAL_INT_ELEMENTS: + return kExternalIntArray; + case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: + return kExternalUnsignedIntArray; + case JSObject::EXTERNAL_FLOAT_ELEMENTS: + return kExternalFloatArray; + default: + UNREACHABLE(); + return static_cast(0); + } +} + +} // anonymous namespace + + +MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray( + JSObject* receiver, + bool is_store) { + Code::Flags flags = + Code::ComputeMonomorphicFlags( + is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC, + NORMAL); + ExternalArrayType array_type = + ElementsKindToExternalArrayType(receiver->GetElementsKind()); + String* name = + is_store ? Heap::KeyedStoreExternalArray_symbol() + : Heap::KeyedLoadExternalArray_symbol(); + // Use the global maps for the particular external array types, + // rather than the receiver's map, when looking up the cached code, + // so that we actually canonicalize these stubs. + Map* map = Heap::MapForExternalArrayType(array_type); + Object* code = map->FindInCodeCache(name, flags); + if (code->IsUndefined()) { + ExternalArrayStubCompiler compiler; + { MaybeObject* maybe_code = + is_store ? 
compiler.CompileKeyedStoreStub(array_type, flags) : + compiler.CompileKeyedLoadStub(array_type, flags); + if (!maybe_code->ToObject(&code)) return maybe_code; + } + if (is_store) { + PROFILE( + CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0)); + } else { + PROFILE( + CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0)); + } + Object* result; + { MaybeObject* maybe_result = + map->UpdateCodeCache(name, Code::cast(code)); + if (!maybe_result->ToObject(&result)) return maybe_result; + } + } + return code; +} + + MaybeObject* StubCache::ComputeStoreNormal() { return Builtins::builtin(Builtins::StoreIC_Normal); } @@ -1709,4 +1777,16 @@ void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) { } +MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) { + Object* result; + { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub"); + if (!maybe_result->ToObject(&result)) return maybe_result; + } + Code* code = Code::cast(result); + USE(code); + PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub")); + return result; +} + + } } // namespace v8::internal diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 85dd5f6aae..1f534d9aad 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -167,6 +167,10 @@ class StubCache : public AllStatic { MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized( JSObject* receiver); + MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray( + JSObject* receiver, + bool is_store); + // --- MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc, @@ -797,6 +801,20 @@ class CallOptimization BASE_EMBEDDED { CallHandlerInfo* api_call_info_; }; +class ExternalArrayStubCompiler: public StubCompiler { + public: + explicit ExternalArrayStubCompiler() {} + + MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub( + ExternalArrayType array_type, Code::Flags flags); + + MUST_USE_RESULT MaybeObject* 
CompileKeyedStoreStub( + ExternalArrayType array_type, Code::Flags flags); + + private: + MaybeObject* GetCode(Code::Flags flags); +}; + } } // namespace v8::internal #endif // V8_STUB_CACHE_H_ diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc index d0ec4ef528..b466301ca5 100644 --- a/deps/v8/src/utils.cc +++ b/deps/v8/src/utils.cc @@ -276,4 +276,96 @@ char* StringBuilder::Finalize() { } +MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename) + : filename_(NULL), + data_(NULL), + length_(0), + remove_file_on_cleanup_(false) { + Init(filename); +} + + +MemoryMappedExternalResource:: + MemoryMappedExternalResource(const char* filename, + bool remove_file_on_cleanup) + : filename_(NULL), + data_(NULL), + length_(0), + remove_file_on_cleanup_(remove_file_on_cleanup) { + Init(filename); +} + + +MemoryMappedExternalResource::~MemoryMappedExternalResource() { + // Release the resources if we had successfully acquired them: + if (file_ != NULL) { + delete file_; + if (remove_file_on_cleanup_) { + OS::Remove(filename_); + } + DeleteArray(filename_); + } +} + + +void MemoryMappedExternalResource::Init(const char* filename) { + file_ = OS::MemoryMappedFile::open(filename); + if (file_ != NULL) { + filename_ = StrDup(filename); + data_ = reinterpret_cast(file_->memory()); + length_ = file_->size(); + } +} + + +bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const { + bool is_ascii = true; + + int line_no = 1; + const char* start_of_line = data_; + const char* end = data_ + length_; + for (const char* p = data_; p < end; p++) { + char c = *p; + if ((c & 0x80) != 0) { + // Non-ascii detected: + is_ascii = false; + + // Report the error and abort if appropriate: + if (abort_if_failed) { + int char_no = static_cast(p - start_of_line) - 1; + + ASSERT(filename_ != NULL); + PrintF("\n\n\n" + "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d", + c, filename_, line_no, char_no); + + // Allow for some context up to 
kNumberOfLeadingContextChars chars + // before the offending non-ascii char to help the user see where + // the offending char is. + const int kNumberOfLeadingContextChars = 10; + const char* err_context = p - kNumberOfLeadingContextChars; + if (err_context < data_) { + err_context = data_; + } + // Compute the length of the error context and print it. + int err_context_length = static_cast(p - err_context); + if (err_context_length != 0) { + PrintF(" after \"%.*s\"", err_context_length, err_context); + } + PrintF(".\n\n\n"); + OS::Abort(); + } + + break; // Non-ascii detected. No need to continue scanning. + } + if (c == '\n') { + start_of_line = p; + line_no++; + } + } + + return is_ascii; +} + + } } // namespace v8::internal diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h index e9623be62e..0aa53cac59 100644 --- a/deps/v8/src/v8utils.h +++ b/deps/v8/src/v8utils.h @@ -316,6 +316,39 @@ static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) { } } + +// A resource for using mmapped files to back external strings that are read +// from files. 
+class MemoryMappedExternalResource: public + v8::String::ExternalAsciiStringResource { + public: + explicit MemoryMappedExternalResource(const char* filename); + MemoryMappedExternalResource(const char* filename, + bool remove_file_on_cleanup); + virtual ~MemoryMappedExternalResource(); + + virtual const char* data() const { return data_; } + virtual size_t length() const { return length_; } + + bool exists() const { return file_ != NULL; } + bool is_empty() const { return length_ == 0; } + + bool EnsureIsAscii(bool abort_if_failed) const; + bool EnsureIsAscii() const { return EnsureIsAscii(true); } + bool IsAscii() const { return EnsureIsAscii(false); } + + private: + void Init(const char* filename); + + char* filename_; + OS::MemoryMappedFile* file_; + + const char* data_; + size_t length_; + bool remove_file_on_cleanup_; +}; + + } } // namespace v8::internal #endif // V8_V8UTILS_H_ diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 495de31404..a2659a1c33 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 #define MINOR_VERSION 0 -#define BUILD_NUMBER 9 +#define BUILD_NUMBER 10 #define PATCH_LEVEL 0 #define CANDIDATE_VERSION false diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h index 1fe9eed4da..70b40e07c8 100644 --- a/deps/v8/src/x64/assembler-x64-inl.h +++ b/deps/v8/src/x64/assembler-x64-inl.h @@ -199,8 +199,10 @@ void RelocInfo::apply(intptr_t delta) { if (IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. 
Memory::Address_at(pc_) += static_cast(delta); + CPU::FlushICache(pc_, sizeof(Address)); } else if (IsCodeTarget(rmode_)) { Memory::int32_at(pc_) -= static_cast(delta); + CPU::FlushICache(pc_, sizeof(int32_t)); } } @@ -236,6 +238,7 @@ void RelocInfo::set_target_address(Address target) { Assembler::set_target_address_at(pc_, target); } else { Memory::Address_at(pc_) = target; + CPU::FlushICache(pc_, sizeof(Address)); } } @@ -271,6 +274,7 @@ Address* RelocInfo::target_reference_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); *reinterpret_cast(pc_) = target; + CPU::FlushICache(pc_, sizeof(Address)); } @@ -295,6 +299,7 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; + CPU::FlushICache(pc_, sizeof(Address)); } @@ -331,6 +336,8 @@ void RelocInfo::set_call_address(Address target) { (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) = target; + CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset, + sizeof(Address)); } @@ -356,10 +363,12 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { visitor->VisitPointer(target_object_address()); + CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(target_reference_address()); + CPU::FlushICache(pc_, sizeof(Address)); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (Debug::has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -379,10 +388,12 @@ void RelocInfo::Visit() { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { 
StaticVisitor::VisitPointer(target_object_address()); + CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(target_reference_address()); + CPU::FlushICache(pc_, sizeof(Address)); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (Debug::has_break_points() && ((RelocInfo::IsJSReturn(mode) && diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index de01cfa3c1..0f866a4b99 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -2721,6 +2721,17 @@ void Assembler::cvttss2si(Register dst, const Operand& src) { } +void Assembler::cvttss2si(Register dst, XMMRegister src) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit(0xF3); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0x2C); + emit_sse_operand(dst, src); +} + + void Assembler::cvttsd2si(Register dst, const Operand& src) { EnsureSpace ensure_space(this); last_pc_ = pc_; @@ -2732,6 +2743,17 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) { } +void Assembler::cvttsd2si(Register dst, XMMRegister src) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit(0xF2); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0x2C); + emit_sse_operand(dst, src); +} + + void Assembler::cvttsd2siq(Register dst, XMMRegister src) { EnsureSpace ensure_space(this); last_pc_ = pc_; diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index be837f0448..890cd8ac5b 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -707,6 +707,10 @@ class Assembler : public Malloced { arithmetic_op_32(0x1b, dst, src); } + void sbbq(Register dst, Register src) { + arithmetic_op(0x1b, dst, src); + } + void cmpb(Register dst, Immediate src) { immediate_arithmetic_op_8(0x7, dst, src); } @@ -1205,7 +1209,9 @@ class Assembler : public Malloced { void 
movss(const Operand& dst, XMMRegister src); void cvttss2si(Register dst, const Operand& src); + void cvttss2si(Register dst, XMMRegister src); void cvttsd2si(Register dst, const Operand& src); + void cvttsd2si(Register dst, XMMRegister src); void cvttsd2siq(Register dst, XMMRegister src); void cvtlsi2sd(XMMRegister dst, const Operand& src); diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index 59522d22f9..a261b9d086 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -91,8 +91,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { void FastNewContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, + __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize, rax, rbx, rcx, &gc, TAG_OBJECT); // Get the function from the stack. @@ -101,7 +100,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Setup the object header. __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); - __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); + __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(slots_)); // Setup the fixed slots. __ Set(rbx, 0); // Set to NULL. @@ -116,7 +115,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Initialize the rest of the slots to undefined. 
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) { __ movq(Operand(rax, Context::SlotOffset(i)), rbx); } @@ -3248,6 +3247,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } +Register InstanceofStub::left() { return rax; } + + +Register InstanceofStub::right() { return rdx; } + + int CompareStub::MinorKey() { // Encode the three parameters in a unique 16 bit value. To avoid duplicate // stubs the never NaN NaN condition is only taken into account if the @@ -4272,22 +4277,119 @@ void StringCompareStub::Generate(MacroAssembler* masm) { } void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - UNIMPLEMENTED(); + ASSERT(state_ == CompareIC::SMIS); + NearLabel miss; + __ JumpIfNotBothSmi(rdx, rax, &miss); + + if (GetCondition() == equal) { + // For equality we do not care about the sign of the result. + __ SmiSub(rax, rax, rdx); + } else { + NearLabel done; + __ SmiSub(rdx, rdx, rax); + __ j(no_overflow, &done); + // Correct sign of result in case of overflow. + __ SmiNot(rdx, rdx); + __ bind(&done); + __ movq(rax, rdx); + } + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); } void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - UNIMPLEMENTED(); + ASSERT(state_ == CompareIC::HEAP_NUMBERS); + + NearLabel generic_stub; + NearLabel unordered; + NearLabel miss; + Condition either_smi = masm->CheckEitherSmi(rax, rdx); + __ j(either_smi, &generic_stub); + + __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx); + __ j(not_equal, &miss); + __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx); + __ j(not_equal, &miss); + + // Load left and right operand + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); + + // Compare operands + __ ucomisd(xmm0, xmm1); + + // Don't base result on EFLAGS when a NaN is involved. 
+ __ j(parity_even, &unordered); + + // Return a result of -1, 0, or 1, based on EFLAGS. + // Performing mov, because xor would destroy the flag register. + __ movl(rax, Immediate(0)); + __ movl(rcx, Immediate(0)); + __ setcc(above, rax); // Add one to zero if carry clear and not equal. + __ sbbq(rax, rcx); // Subtract one if below (aka. carry set). + __ ret(0); + + __ bind(&unordered); + + CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS); + __ bind(&generic_stub); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); + + __ bind(&miss); + GenerateMiss(masm); } void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - UNIMPLEMENTED(); + ASSERT(state_ == CompareIC::OBJECTS); + NearLabel miss; + Condition either_smi = masm->CheckEitherSmi(rdx, rax); + __ j(either_smi, &miss); + + __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx); + __ j(not_equal, &miss, not_taken); + __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx); + __ j(not_equal, &miss, not_taken); + + ASSERT(GetCondition() == equal); + __ subq(rax, rdx); + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); } void ICCompareStub::GenerateMiss(MacroAssembler* masm) { - UNIMPLEMENTED(); + // Save the registers. + __ pop(rcx); + __ push(rdx); + __ push(rax); + __ push(rcx); + + // Call the runtime system in a fresh internal frame. + ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss)); + __ EnterInternalFrame(); + __ push(rdx); + __ push(rax); + __ Push(Smi::FromInt(op_)); + __ CallExternalReference(miss, 3); + __ LeaveInternalFrame(); + + // Compute the entry point of the rewritten stub. + __ lea(rdi, FieldOperand(rax, Code::kHeaderSize)); + + // Restore registers. + __ pop(rcx); + __ pop(rax); + __ pop(rdx); + __ push(rcx); + + // Do a tail call to the rewritten stub. 
+ __ jmp(rdi); } #undef __ diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index a543a50487..57720a8019 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -206,7 +206,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { frame_->AllocateStackSlots(); // Allocate the local context if needed. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { Comment cmnt(masm_, "[ allocate local context"); // Allocate local context. diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc index 7502d6182c..8fdf20b7be 100644 --- a/deps/v8/src/x64/disasm-x64.cc +++ b/deps/v8/src/x64/disasm-x64.cc @@ -1113,9 +1113,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { } else if (opcode == 0x2C) { // CVTTSS2SI: // Convert with truncation scalar single-precision FP to dword integer. - // Assert that mod is not 3, so source is memory, not an XMM register. - ASSERT_NE(0xC0, *current & 0xC0); - current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current); + int mod, regop, rm; + get_modrm(*current, &mod, ®op, &rm); + AppendToBuffer("cvttss2si%c %s,", + operand_size_code(), NameOfCPURegister(regop)); + current += PrintRightXMMOperand(current); } else if (opcode == 0x5A) { // CVTSS2SD: // Convert scalar single-precision FP to scalar double-precision FP. diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index b6e81b0f14..896e53da60 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -88,7 +88,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { bool function_in_register = true; // Possibly allocate a local context. 
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate local context"); // Argument to NewContext is the function, which is still in rdi. diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index b54aeb977d..e31a341dc3 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -727,131 +727,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { } -void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label slow; - - // Check that the object isn't a smi. - __ JumpIfSmi(rdx, &slow); - - // Check that the key is a smi. - __ JumpIfNotSmi(rax, &slow); - - // Check that the object is a JS object. - __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx); - __ j(not_equal, &slow); - // Check that the receiver does not require access checks. We need - // to check this explicitly since this generic stub does not perform - // map checks. The map is already in rdx. - __ testb(FieldOperand(rcx, Map::kBitFieldOffset), - Immediate(1 << Map::kIsAccessCheckNeeded)); - __ j(not_zero, &slow); - - // Check that the elements array is the appropriate type of - // ExternalArray. - // rax: index (as a smi) - // rdx: JSObject - __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); - __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), - Heap::RootIndexForExternalArrayType(array_type)); - __ j(not_equal, &slow); - - // Check that the index is in range. - __ SmiToInteger32(rcx, rax); - __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset)); - // Unsigned comparison catches both negative and too-large values. 
- __ j(above_equal, &slow); - - // rax: index (as a smi) - // rdx: receiver (JSObject) - // rcx: untagged index - // rbx: elements array - __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); - // rbx: base pointer of external storage - switch (array_type) { - case kExternalByteArray: - __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0)); - break; - case kExternalUnsignedByteArray: - __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0)); - break; - case kExternalShortArray: - __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0)); - break; - case kExternalUnsignedShortArray: - __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0)); - break; - case kExternalIntArray: - __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0)); - break; - case kExternalUnsignedIntArray: - __ movl(rcx, Operand(rbx, rcx, times_4, 0)); - break; - case kExternalFloatArray: - __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0)); - break; - default: - UNREACHABLE(); - break; - } - - // rax: index - // rdx: receiver - // For integer array types: - // rcx: value - // For floating-point array type: - // xmm0: value as double. - - ASSERT(kSmiValueSize == 32); - if (array_type == kExternalUnsignedIntArray) { - // For the UnsignedInt array type, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. - NearLabel box_int; - - __ JumpIfUIntNotValidSmiValue(rcx, &box_int); - - __ Integer32ToSmi(rax, rcx); - __ ret(0); - - __ bind(&box_int); - - // Allocate a HeapNumber for the int and perform int-to-double - // conversion. - // The value is zero-extended since we loaded the value from memory - // with movl. - __ cvtqsi2sd(xmm0, rcx); - - __ AllocateHeapNumber(rcx, rbx, &slow); - // Set the value. - __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); - __ movq(rax, rcx); - __ ret(0); - } else if (array_type == kExternalFloatArray) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. 
- __ AllocateHeapNumber(rcx, rbx, &slow); - // Set the value. - __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); - __ movq(rax, rcx); - __ ret(0); - } else { - __ Integer32ToSmi(rax, rcx); - __ ret(0); - } - - // Slow case: Jump to runtime. - __ bind(&slow); - __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1); - GenerateRuntimeGetProperty(masm); -} - - void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- rax : key @@ -1023,149 +898,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { } -void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, - ExternalArrayType array_type) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label slow; - - // Check that the object isn't a smi. - __ JumpIfSmi(rdx, &slow); - // Get the map from the receiver. - __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); - // Check that the receiver does not require access checks. We need - // to do this because this generic stub does not perform map checks. - __ testb(FieldOperand(rbx, Map::kBitFieldOffset), - Immediate(1 << Map::kIsAccessCheckNeeded)); - __ j(not_zero, &slow); - // Check that the key is a smi. - __ JumpIfNotSmi(rcx, &slow); - - // Check that the object is a JS object. - __ CmpInstanceType(rbx, JS_OBJECT_TYPE); - __ j(not_equal, &slow); - - // Check that the elements array is the appropriate type of - // ExternalArray. - // rax: value - // rcx: key (a smi) - // rdx: receiver (a JSObject) - __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); - __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), - Heap::RootIndexForExternalArrayType(array_type)); - __ j(not_equal, &slow); - - // Check that the index is in range. - __ SmiToInteger32(rdi, rcx); // Untag the index. 
- __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset)); - // Unsigned comparison catches both negative and too-large values. - __ j(above_equal, &slow); - - // Handle both smis and HeapNumbers in the fast path. Go to the - // runtime for all other kinds of values. - // rax: value - // rcx: key (a smi) - // rdx: receiver (a JSObject) - // rbx: elements array - // rdi: untagged key - NearLabel check_heap_number; - __ JumpIfNotSmi(rax, &check_heap_number); - // No more branches to slow case on this path. Key and receiver not needed. - __ SmiToInteger32(rdx, rax); - __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); - // rbx: base pointer of external storage - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ movb(Operand(rbx, rdi, times_1, 0), rdx); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ movw(Operand(rbx, rdi, times_2, 0), rdx); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: - __ movl(Operand(rbx, rdi, times_4, 0), rdx); - break; - case kExternalFloatArray: - // Need to perform int-to-float conversion. - __ cvtlsi2ss(xmm0, rdx); - __ movss(Operand(rbx, rdi, times_4, 0), xmm0); - break; - default: - UNREACHABLE(); - break; - } - __ ret(0); - - __ bind(&check_heap_number); - // rax: value - // rcx: key (a smi) - // rdx: receiver (a JSObject) - // rbx: elements array - // rdi: untagged key - __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister); - __ j(not_equal, &slow); - // No more branches to slow case on this path. - - // The WebGL specification leaves the behavior of storing NaN and - // +/-Infinity into integer arrays basically undefined. For more - // reproducible behavior, convert these to zero. 
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset)); - __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); - // rdi: untagged index - // rbx: base pointer of external storage - // top of FPU stack: value - if (array_type == kExternalFloatArray) { - __ cvtsd2ss(xmm0, xmm0); - __ movss(Operand(rbx, rdi, times_4, 0), xmm0); - __ ret(0); - } else { - // Need to perform float-to-int conversion. - // Test the value for NaN. - - // Convert to int32 and store the low byte/word. - // If the value is NaN or +/-infinity, the result is 0x80000000, - // which is automatically zero when taken mod 2^n, n < 32. - // rdx: value (converted to an untagged integer) - // rdi: untagged index - // rbx: base pointer of external storage - switch (array_type) { - case kExternalByteArray: - case kExternalUnsignedByteArray: - __ cvtsd2si(rdx, xmm0); - __ movb(Operand(rbx, rdi, times_1, 0), rdx); - break; - case kExternalShortArray: - case kExternalUnsignedShortArray: - __ cvtsd2si(rdx, xmm0); - __ movw(Operand(rbx, rdi, times_2, 0), rdx); - break; - case kExternalIntArray: - case kExternalUnsignedIntArray: { - // Convert to int64, so that NaN and infinities become - // 0x8000000000000000, which is zero mod 2^32. - __ cvtsd2siq(rdx, xmm0); - __ movl(Operand(rbx, rdi, times_4, 0), rdx); - break; - } - default: - UNREACHABLE(); - break; - } - __ ret(0); - } - - // Slow case: call runtime. - __ bind(&slow); - GenerateRuntimeSetProperty(masm); -} - - // The generated code does not accept smi keys. // The generated code falls through if both probes miss. 
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 151fad7362..dc988b1a65 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -930,12 +930,88 @@ int LCodeGen::GetNextEmittedBlock(int block) { void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { - Abort("Unimplemented: %s", "EmitBranch"); + int next_block = GetNextEmittedBlock(current_block_); + right_block = chunk_->LookupDestination(right_block); + left_block = chunk_->LookupDestination(left_block); + + if (right_block == left_block) { + EmitGoto(left_block); + } else if (left_block == next_block) { + __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); + } else if (right_block == next_block) { + __ j(cc, chunk_->GetAssemblyLabel(left_block)); + } else { + __ j(cc, chunk_->GetAssemblyLabel(left_block)); + if (cc != always) { + __ jmp(chunk_->GetAssemblyLabel(right_block)); + } + } } void LCodeGen::DoBranch(LBranch* instr) { - Abort("Unimplemented: %s", "DoBranch"); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + Representation r = instr->hydrogen()->representation(); + if (r.IsInteger32()) { + Register reg = ToRegister(instr->InputAt(0)); + __ testl(reg, reg); + EmitBranch(true_block, false_block, not_zero); + } else if (r.IsDouble()) { + XMMRegister reg = ToDoubleRegister(instr->InputAt(0)); + __ xorpd(xmm0, xmm0); + __ ucomisd(reg, xmm0); + EmitBranch(true_block, false_block, not_equal); + } else { + ASSERT(r.IsTagged()); + Register reg = ToRegister(instr->InputAt(0)); + HType type = instr->hydrogen()->type(); + if (type.IsBoolean()) { + __ Cmp(reg, Factory::true_value()); + EmitBranch(true_block, false_block, equal); + } else if (type.IsSmi()) { + __ SmiCompare(reg, Smi::FromInt(0)); + EmitBranch(true_block, false_block, 
not_equal); + } else { + Label* true_label = chunk_->GetAssemblyLabel(true_block); + Label* false_label = chunk_->GetAssemblyLabel(false_block); + + __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); + __ j(equal, false_label); + __ CompareRoot(reg, Heap::kTrueValueRootIndex); + __ j(equal, true_label); + __ CompareRoot(reg, Heap::kFalseValueRootIndex); + __ j(equal, false_label); + __ SmiCompare(reg, Smi::FromInt(0)); + __ j(equal, false_label); + __ JumpIfSmi(reg, true_label); + + // Test for double values. Plus/minus zero and NaN are false. + NearLabel call_stub; + __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + __ j(not_equal, &call_stub); + + // HeapNumber => false iff +0, -0, or NaN. These three cases set the + // zero flag when compared to zero using ucomisd. + __ xorpd(xmm0, xmm0); + __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); + __ j(zero, false_label); + __ jmp(true_label); + + // The conversion stub doesn't cause garbage collections so it's + // safe to not record a safepoint after the call. 
+ __ bind(&call_stub); + ToBooleanStub stub; + __ Pushad(); + __ push(reg); + __ CallStub(&stub); + __ testq(rax, rax); + __ Popad(); + EmitBranch(true_block, false_block, not_zero); + } + } } @@ -979,7 +1055,7 @@ void LCodeGen::DoGoto(LGoto* instr) { } -Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { +inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { Condition cond = no_condition; switch (op) { case Token::EQ: @@ -1008,17 +1084,64 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { - Abort("Unimplemented: %s", "EmitCmpI"); + if (right->IsConstantOperand()) { + int32_t value = ToInteger32(LConstantOperand::cast(right)); + if (left->IsRegister()) { + __ cmpl(ToRegister(left), Immediate(value)); + } else { + __ cmpl(ToOperand(left), Immediate(value)); + } + } else if (right->IsRegister()) { + __ cmpq(ToRegister(left), ToRegister(right)); + } else { + __ cmpq(ToRegister(left), ToOperand(right)); + } } void LCodeGen::DoCmpID(LCmpID* instr) { - Abort("Unimplemented: %s", "DoCmpID"); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + LOperand* result = instr->result(); + + NearLabel unordered; + if (instr->is_double()) { + // Don't base result on EFLAGS when a NaN is involved. Instead + // jump to the unordered case, which produces a false value. 
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); + __ j(parity_even, &unordered); + } else { + EmitCmpI(left, right); + } + + NearLabel done; + Condition cc = TokenToCondition(instr->op(), instr->is_double()); + __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex); + __ j(cc, &done); + + __ bind(&unordered); + __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex); + __ bind(&done); } void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { - Abort("Unimplemented: %s", "DoCmpIDAndBranch"); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + + if (instr->is_double()) { + // Don't base result on EFLAGS when a NaN is involved. Instead + // jump to the false block. + __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); + __ j(parity_even, chunk_->GetAssemblyLabel(false_block)); + } else { + EmitCmpI(left, right); + } + + Condition cc = TokenToCondition(instr->op(), instr->is_double()); + EmitBranch(true_block, false_block, cc); } @@ -1028,7 +1151,13 @@ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) { void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) { - Abort("Unimplemented: %s", "DoCmpJSObjectAndBranch"); + Register left = ToRegister(instr->InputAt(0)); + Register right = ToRegister(instr->InputAt(1)); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + + __ cmpq(left, right); + EmitBranch(true_block, false_block, equal); } @@ -1038,7 +1167,39 @@ void LCodeGen::DoIsNull(LIsNull* instr) { void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { - Abort("Unimplemented: %s", "DoIsNullAndBranch"); + Register reg = ToRegister(instr->InputAt(0)); + + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + if 
(instr->hydrogen()->representation().IsSpecialization() || + instr->hydrogen()->type().IsSmi()) { + // If the expression is known to untagged or smi, then it's definitely + // not null, and it can't be a an undetectable object. + // Jump directly to the false block. + EmitGoto(false_block); + return; + } + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + + __ Cmp(reg, Factory::null_value()); + if (instr->is_strict()) { + EmitBranch(true_block, false_block, equal); + } else { + Label* true_label = chunk_->GetAssemblyLabel(true_block); + Label* false_label = chunk_->GetAssemblyLabel(false_block); + __ j(equal, true_label); + __ Cmp(reg, Factory::undefined_value()); + __ j(equal, true_label); + __ JumpIfSmi(reg, false_label); + // Check for undetectable objects by looking in the bit field in + // the map. The object has already been smi checked. + Register scratch = ToRegister(instr->TempAt(0)); + __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset)); + __ testb(FieldOperand(scratch, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + EmitBranch(true_block, false_block, not_zero); + } } @@ -1047,7 +1208,25 @@ Condition LCodeGen::EmitIsObject(Register input, Register temp2, Label* is_not_object, Label* is_object) { - Abort("Unimplemented: %s", "EmitIsObject"); + ASSERT(!input.is(temp1)); + ASSERT(!input.is(temp2)); + ASSERT(!temp1.is(temp2)); + + __ JumpIfSmi(input, is_not_object); + + __ Cmp(input, Factory::null_value()); + __ j(equal, is_object); + + __ movq(temp1, FieldOperand(input, HeapObject::kMapOffset)); + // Undetectable objects behave like undefined. 
+ __ testb(FieldOperand(temp1, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, is_not_object); + + __ movzxbl(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset)); + __ cmpb(temp2, Immediate(FIRST_JS_OBJECT_TYPE)); + __ j(below, is_not_object); + __ cmpb(temp2, Immediate(LAST_JS_OBJECT_TYPE)); return below_equal; } @@ -1058,7 +1237,18 @@ void LCodeGen::DoIsObject(LIsObject* instr) { void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { - Abort("Unimplemented: %s", "DoIsObjectAndBranch"); + Register reg = ToRegister(instr->InputAt(0)); + Register temp = ToRegister(instr->TempAt(0)); + Register temp2 = ToRegister(instr->TempAt(1)); + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + Label* true_label = chunk_->GetAssemblyLabel(true_block); + Label* false_label = chunk_->GetAssemblyLabel(false_block); + + Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label); + + EmitBranch(true_block, false_block, true_cond); } @@ -1068,7 +1258,38 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) { void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Abort("Unimplemented: %s", "DoIsSmiAndBranch"); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + Condition is_smi; + if (instr->InputAt(0)->IsRegister()) { + Register input = ToRegister(instr->InputAt(0)); + is_smi = masm()->CheckSmi(input); + } else { + Operand input = ToOperand(instr->InputAt(0)); + is_smi = masm()->CheckSmi(input); + } + EmitBranch(true_block, false_block, is_smi); +} + + +static InstanceType TestType(HHasInstanceType* instr) { + InstanceType from = instr->from(); + InstanceType to = instr->to(); + if (from == FIRST_TYPE) return to; + ASSERT(from == to || to == LAST_TYPE); + return from; +} + + +static Condition BranchCondition(HHasInstanceType* instr) { + 
InstanceType from = instr->from(); + InstanceType to = instr->to(); + if (from == to) return equal; + if (to == LAST_TYPE) return above_equal; + if (from == FIRST_TYPE) return below_equal; + UNREACHABLE(); + return equal; } @@ -1078,7 +1299,17 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) { void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Abort("Unimplemented: %s", "DoHasInstanceTypeAndBranch"); + Register input = ToRegister(instr->InputAt(0)); + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + Label* false_label = chunk_->GetAssemblyLabel(false_block); + + __ JumpIfSmi(input, false_label); + + __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister); + EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen())); } @@ -1089,19 +1320,68 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { void LCodeGen::DoHasCachedArrayIndexAndBranch( LHasCachedArrayIndexAndBranch* instr) { - Abort("Unimplemented: %s", "DoHasCachedArrayIndexAndBranch"); + Register input = ToRegister(instr->InputAt(0)); + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + __ testl(FieldOperand(input, String::kHashFieldOffset), + Immediate(String::kContainsCachedArrayIndexMask)); + EmitBranch(true_block, false_block, not_equal); } -// Branches to a label or falls through with the answer in the z flag. Trashes -// the temp registers, but not the input. Only input and temp2 may alias. +// Branches to a label or falls through with the answer in the z flag. +// Trashes the temp register and possibly input (if it and temp are aliased). 
void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handleclass_name, + Handle class_name, Register input, - Register temp, - Register temp2) { - Abort("Unimplemented: %s", "EmitClassOfTest"); + Register temp) { + __ JumpIfSmi(input, is_false); + __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp); + __ j(below, is_false); + + // Map is now in temp. + // Functions have class 'Function'. + __ CmpInstanceType(temp, JS_FUNCTION_TYPE); + if (class_name->IsEqualTo(CStrVector("Function"))) { + __ j(equal, is_true); + } else { + __ j(equal, is_false); + } + + // Check if the constructor in the map is a function. + __ movq(temp, FieldOperand(temp, Map::kConstructorOffset)); + + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + + // Objects with a non-function constructor have class 'Object'. + __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister); + if (class_name->IsEqualTo(CStrVector("Object"))) { + __ j(not_equal, is_true); + } else { + __ j(not_equal, is_false); + } + + // temp now contains the constructor function. Grab the + // instance class name from there. + __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); + __ movq(temp, FieldOperand(temp, + SharedFunctionInfo::kInstanceClassNameOffset)); + // The class name we are testing against is a symbol because it's a literal. + // The name in the constructor is a symbol because of the way the context is + // booted. This routine isn't expected to work for random API-created + // classes and it doesn't have to because you can't access it with natives + // syntax. Since both sides are symbols it is sufficient to use an identity + // comparison. + ASSERT(class_name->IsSymbol()); + __ Cmp(temp, class_name); + // End with the answer in the z flag. 
} @@ -1111,7 +1391,19 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) { void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Abort("Unimplemented: %s", "DoClassOfTestAndBranch"); + Register input = ToRegister(instr->InputAt(0)); + Register temp = ToRegister(instr->TempAt(0)); + Handle class_name = instr->hydrogen()->class_name(); + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + Label* true_label = chunk_->GetAssemblyLabel(true_block); + Label* false_label = chunk_->GetAssemblyLabel(false_block); + + EmitClassOfTest(true_label, false_label, class_name, input, temp); + + EmitBranch(true_block, false_block, equal); } @@ -1126,7 +1418,13 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { - Abort("Unimplemented: %s", "DoInstanceOfAndBranch"); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + InstanceofStub stub(InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ testq(rax, rax); + EmitBranch(true_block, false_block, zero); } @@ -1142,12 +1440,42 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, void LCodeGen::DoCmpT(LCmpT* instr) { - Abort("Unimplemented: %s", "DoCmpT"); + Token::Value op = instr->op(); + + Handle ic = CompareIC::GetUninitialized(op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + Condition condition = TokenToCondition(op, false); + if (op == Token::GT || op == Token::LTE) { + condition = ReverseCondition(condition); + } + NearLabel true_value, done; + __ testq(rax, rax); + __ j(condition, &true_value); + __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); + __ jmp(&done); + __ bind(&true_value); + __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); + __ 
bind(&done); } void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) { - Abort("Unimplemented: %s", "DoCmpTAndBranch"); + Token::Value op = instr->op(); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + Handle ic = CompareIC::GetUninitialized(op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + // The compare stub expects compare condition and the input operands + // reversed for GT and LTE. + Condition condition = TokenToCondition(op, false); + if (op == Token::GT || op == Token::LTE) { + condition = ReverseCondition(condition); + } + __ testq(rax, rax); + EmitBranch(true_block, false_block, condition); } @@ -1494,7 +1822,18 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) { void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Abort("Unimplemented: %s", "DoTypeofIsAndBranch"); + Register input = ToRegister(instr->InputAt(0)); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + Label* true_label = chunk_->GetAssemblyLabel(true_block); + Label* false_label = chunk_->GetAssemblyLabel(false_block); + + Condition final_branch_condition = EmitTypeofIs(true_label, + false_label, + input, + instr->type_literal()); + + EmitBranch(true_block, false_block, final_branch_condition); } @@ -1502,8 +1841,63 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label, Register input, Handle type_name) { - Abort("Unimplemented: %s", "EmitTypeofIs"); - return no_condition; + Condition final_branch_condition = no_condition; + if (type_name->Equals(Heap::number_symbol())) { + __ JumpIfSmi(input, true_label); + __ Cmp(FieldOperand(input, HeapObject::kMapOffset), + Factory::heap_number_map()); + final_branch_condition = equal; + + } else if (type_name->Equals(Heap::string_symbol())) { + __ JumpIfSmi(input, false_label); + __ movq(input, FieldOperand(input, 
HeapObject::kMapOffset)); + __ testb(FieldOperand(input, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, false_label); + __ CmpInstanceType(input, FIRST_NONSTRING_TYPE); + final_branch_condition = below; + + } else if (type_name->Equals(Heap::boolean_symbol())) { + __ CompareRoot(input, Heap::kTrueValueRootIndex); + __ j(equal, true_label); + __ CompareRoot(input, Heap::kFalseValueRootIndex); + final_branch_condition = equal; + + } else if (type_name->Equals(Heap::undefined_symbol())) { + __ CompareRoot(input, Heap::kUndefinedValueRootIndex); + __ j(equal, true_label); + __ JumpIfSmi(input, false_label); + // Check for undetectable objects => true. + __ movq(input, FieldOperand(input, HeapObject::kMapOffset)); + __ testb(FieldOperand(input, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + final_branch_condition = not_zero; + + } else if (type_name->Equals(Heap::function_symbol())) { + __ JumpIfSmi(input, false_label); + __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input); + final_branch_condition = above_equal; + + } else if (type_name->Equals(Heap::object_symbol())) { + __ JumpIfSmi(input, false_label); + __ Cmp(input, Factory::null_value()); + __ j(equal, true_label); + // Check for undetectable objects => false. + __ testb(FieldOperand(input, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, false_label); + // Check for JS objects that are not RegExp or Function => true. 
+ __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE); + __ j(below, false_label); + __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE); + final_branch_condition = below_equal; + + } else { + final_branch_condition = never; + __ jmp(false_label); + } + + return final_branch_condition; } diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index 8d1c5c4ed2..4ce7004625 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -138,8 +138,7 @@ class LCodeGen BASE_EMBEDDED { Label* if_false, Handle class_name, Register input, - Register temporary, - Register temporary2); + Register temporary); int StackSlotCount() const { return chunk()->spill_slot_count(); } int ParameterCount() const { return scope()->num_parameters(); } diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index 5ef6eb7534..eff6d51e83 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -847,15 +847,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { if (FLAG_stress_environments && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); } - if (current->IsBranch() && !instr->IsGoto()) { - // TODO(fschneider): Handle branch instructions uniformly like - // other instructions. This requires us to generate the right - // branch instruction already at the HIR level. 
+ if (current->IsTest() && !instr->IsGoto()) { ASSERT(instr->IsControl()); - HBranch* branch = HBranch::cast(current); - instr->set_hydrogen_value(branch->value()); - HBasicBlock* first = branch->FirstSuccessor(); - HBasicBlock* second = branch->SecondSuccessor(); + HTest* test = HTest::cast(current); + instr->set_hydrogen_value(test->value()); + HBasicBlock* first = test->FirstSuccessor(); + HBasicBlock* second = test->SecondSuccessor(); ASSERT(first != NULL && second != NULL); instr->SetBranchTargets(first->block_id(), second->block_id()); } else { @@ -912,15 +909,109 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { } -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - Abort("Unimplemented: %s", "DoBranch"); - return NULL; +LInstruction* LChunkBuilder::DoTest(HTest* instr) { + HValue* v = instr->value(); + if (v->EmitAtUses()) { + if (v->IsClassOfTest()) { + HClassOfTest* compare = HClassOfTest::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LClassOfTestAndBranch(UseTempRegister(compare->value()), + TempRegister()); + } else if (v->IsCompare()) { + HCompare* compare = HCompare::cast(v); + Token::Value op = compare->token(); + HValue* left = compare->left(); + HValue* right = compare->right(); + Representation r = compare->GetInputRepresentation(); + if (r.IsInteger32()) { + ASSERT(left->representation().IsInteger32()); + ASSERT(right->representation().IsInteger32()); + + return new LCmpIDAndBranch(UseRegisterAtStart(left), + UseOrConstantAtStart(right)); + } else if (r.IsDouble()) { + ASSERT(left->representation().IsDouble()); + ASSERT(right->representation().IsDouble()); + + return new LCmpIDAndBranch(UseRegisterAtStart(left), + UseRegisterAtStart(right)); + } else { + ASSERT(left->representation().IsTagged()); + ASSERT(right->representation().IsTagged()); + bool reversed = op == Token::GT || op == Token::LTE; + LOperand* left_operand = UseFixed(left, reversed ? 
rax : rdx); + LOperand* right_operand = UseFixed(right, reversed ? rdx : rax); + LCmpTAndBranch* result = new LCmpTAndBranch(left_operand, + right_operand); + return MarkAsCall(result, instr); + } + } else if (v->IsIsSmi()) { + HIsSmi* compare = HIsSmi::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LIsSmiAndBranch(Use(compare->value())); + } else if (v->IsHasInstanceType()) { + HHasInstanceType* compare = HHasInstanceType::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LHasInstanceTypeAndBranch( + UseRegisterAtStart(compare->value())); + } else if (v->IsHasCachedArrayIndex()) { + HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LHasCachedArrayIndexAndBranch( + UseRegisterAtStart(compare->value())); + } else if (v->IsIsNull()) { + HIsNull* compare = HIsNull::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + // We only need a temp register for non-strict compare. + LOperand* temp = compare->is_strict() ? 
NULL : TempRegister(); + return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), + temp); + } else if (v->IsIsObject()) { + HIsObject* compare = HIsObject::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), + temp1, + temp2); + } else if (v->IsCompareJSObjectEq()) { + HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v); + return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()), + UseRegisterAtStart(compare->right())); + } else if (v->IsInstanceOf()) { + HInstanceOf* instance_of = HInstanceOf::cast(v); + LInstanceOfAndBranch* result = + new LInstanceOfAndBranch( + UseFixed(instance_of->left(), InstanceofStub::left()), + UseFixed(instance_of->right(), InstanceofStub::right())); + return MarkAsCall(result, instr); + } else if (v->IsTypeofIs()) { + HTypeofIs* typeof_is = HTypeofIs::cast(v); + return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value())); + } else { + if (v->IsConstant()) { + if (HConstant::cast(v)->handle()->IsTrue()) { + return new LGoto(instr->FirstSuccessor()->block_id()); + } else if (HConstant::cast(v)->handle()->IsFalse()) { + return new LGoto(instr->SecondSuccessor()->block_id()); + } + } + Abort("Undefined compare before branch"); + return NULL; + } + } + return new LBranch(UseRegisterAtStart(v)); } -LInstruction* LChunkBuilder::DoCompareMapAndBranch( - HCompareMapAndBranch* instr) { - Abort("Unimplemented: %s", "DoCompareMapAndBranch"); +LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { + Abort("Unimplemented: %s", "DoCompareMap"); return NULL; } @@ -1124,8 +1215,29 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { LInstruction* LChunkBuilder::DoCompare(HCompare* instr) { - Abort("Unimplemented: %s", "DoCompare"); - return NULL; + Token::Value op = instr->token(); + Representation r = instr->GetInputRepresentation(); + if 
(r.IsInteger32()) { + ASSERT(instr->left()->representation().IsInteger32()); + ASSERT(instr->right()->representation().IsInteger32()); + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseOrConstantAtStart(instr->right()); + return DefineAsRegister(new LCmpID(left, right)); + } else if (r.IsDouble()) { + ASSERT(instr->left()->representation().IsDouble()); + ASSERT(instr->right()->representation().IsDouble()); + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseRegisterAtStart(instr->right()); + return DefineAsRegister(new LCmpID(left, right)); + } else { + ASSERT(instr->left()->representation().IsTagged()); + ASSERT(instr->right()->representation().IsTagged()); + bool reversed = (op == Token::GT || op == Token::LTE); + LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx); + LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax); + LCmpT* result = new LCmpT(left, right); + return MarkAsCall(DefineFixed(result, rax), instr); + } } @@ -1349,6 +1461,18 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { } +LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { + Abort("Unimplemented: %s", "DoStringCharCodeAt"); + return NULL; +} + + +LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { + Abort("Unimplemented: %s", "DoStringLength"); + return NULL; +} + + LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { Abort("Unimplemented: %s", "DoArrayLiteral"); return NULL; diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index 17d9dda108..9b7d56827e 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -335,33 +335,36 @@ class LInstruction: public ZoneObject { }; -template +template class OperandContainer { public: OperandContainer() { - for (int i = 0; i < N; i++) elems_[i] = NULL; + for (int i = 0; i < NumElements; i++) elems_[i] = NULL; } - int length() { return N; } - T& 
operator[](int i) { + int length() { return NumElements; } + ElementType& operator[](int i) { ASSERT(i < length()); return elems_[i]; } void PrintOperandsTo(StringStream* stream); private: - T elems_[N]; + ElementType elems_[NumElements]; }; -template -class OperandContainer { +template +class OperandContainer { public: int length() { return 0; } void PrintOperandsTo(StringStream* stream) { } }; -template +// R = number of result operands (0 or 1). +// I = number of input operands. +// T = number of temporary operands. +template class LTemplateInstruction: public LInstruction { public: // Allow 0 or 1 output operands. @@ -512,7 +515,7 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { }; -template +template class LControlInstruction: public LTemplateInstruction<0, I, T> { public: DECLARE_INSTRUCTION(ControlInstruction) @@ -570,7 +573,7 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> { }; -class LArgumentsLength: public LTemplateInstruction<1, 1> { +class LArgumentsLength: public LTemplateInstruction<1, 1, 0> { public: explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; @@ -627,7 +630,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> { }; -class LCmpID: public LTemplateInstruction<1, 2> { +class LCmpID: public LTemplateInstruction<1, 2, 0> { public: LCmpID(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -644,7 +647,7 @@ class LCmpID: public LTemplateInstruction<1, 2> { }; -class LCmpIDAndBranch: public LControlInstruction<2> { +class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -663,7 +666,7 @@ class LCmpIDAndBranch: public LControlInstruction<2> { }; -class LUnaryMathOperation: public LTemplateInstruction<1, 1> { +class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> { public: explicit LUnaryMathOperation(LOperand* value) { inputs_[0] = value; @@ -677,7 +680,7 @@ class LUnaryMathOperation: public 
LTemplateInstruction<1, 1> { }; -class LCmpJSObjectEq: public LTemplateInstruction<1, 2> { +class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> { public: LCmpJSObjectEq(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -688,7 +691,7 @@ class LCmpJSObjectEq: public LTemplateInstruction<1, 2> { }; -class LCmpJSObjectEqAndBranch: public LControlInstruction<2> { +class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -700,7 +703,7 @@ class LCmpJSObjectEqAndBranch: public LControlInstruction<2> { }; -class LIsNull: public LTemplateInstruction<1, 1> { +class LIsNull: public LTemplateInstruction<1, 1, 0> { public: explicit LIsNull(LOperand* value) { inputs_[0] = value; @@ -754,7 +757,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 2> { }; -class LIsSmi: public LTemplateInstruction<1, 1> { +class LIsSmi: public LTemplateInstruction<1, 1, 0> { public: explicit LIsSmi(LOperand* value) { inputs_[0] = value; @@ -765,7 +768,7 @@ class LIsSmi: public LTemplateInstruction<1, 1> { }; -class LIsSmiAndBranch: public LControlInstruction<1> { +class LIsSmiAndBranch: public LControlInstruction<1, 0> { public: explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; @@ -777,7 +780,7 @@ class LIsSmiAndBranch: public LControlInstruction<1> { }; -class LHasInstanceType: public LTemplateInstruction<1, 1> { +class LHasInstanceType: public LTemplateInstruction<1, 1, 0> { public: explicit LHasInstanceType(LOperand* value) { inputs_[0] = value; @@ -788,11 +791,10 @@ class LHasInstanceType: public LTemplateInstruction<1, 1> { }; -class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> { +class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> { public: - LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) { + explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } 
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, @@ -803,7 +805,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> { }; -class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> { +class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: explicit LHasCachedArrayIndex(LOperand* value) { inputs_[0] = value; @@ -814,7 +816,7 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> { }; -class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> { +class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { public: explicit LHasCachedArrayIndexAndBranch(LOperand* value) { inputs_[0] = value; @@ -840,12 +842,11 @@ class LClassOfTest: public LTemplateInstruction<1, 1, 1> { }; -class LClassOfTestAndBranch: public LControlInstruction<1, 2> { +class LClassOfTestAndBranch: public LControlInstruction<1, 1> { public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { + LClassOfTestAndBranch(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; - temps_[1] = temp2; } DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, @@ -856,7 +857,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> { }; -class LCmpT: public LTemplateInstruction<1, 2> { +class LCmpT: public LTemplateInstruction<1, 2, 0> { public: LCmpT(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -870,7 +871,7 @@ class LCmpT: public LTemplateInstruction<1, 2> { }; -class LCmpTAndBranch: public LControlInstruction<2> { +class LCmpTAndBranch: public LControlInstruction<2, 0> { public: LCmpTAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -884,7 +885,7 @@ class LCmpTAndBranch: public LControlInstruction<2> { }; -class LInstanceOf: public LTemplateInstruction<1, 2> { +class LInstanceOf: public LTemplateInstruction<1, 2, 0> { public: LInstanceOf(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -895,7 +896,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2> 
{ }; -class LInstanceOfAndBranch: public LControlInstruction<2> { +class LInstanceOfAndBranch: public LControlInstruction<2, 0> { public: LInstanceOfAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -935,7 +936,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> { }; -class LBitI: public LTemplateInstruction<1, 2> { +class LBitI: public LTemplateInstruction<1, 2, 0> { public: LBitI(Token::Value op, LOperand* left, LOperand* right) : op_(op) { @@ -952,7 +953,7 @@ class LBitI: public LTemplateInstruction<1, 2> { }; -class LShiftI: public LTemplateInstruction<1, 2> { +class LShiftI: public LTemplateInstruction<1, 2, 0> { public: LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) : op_(op), can_deopt_(can_deopt) { @@ -972,7 +973,7 @@ class LShiftI: public LTemplateInstruction<1, 2> { }; -class LSubI: public LTemplateInstruction<1, 2> { +class LSubI: public LTemplateInstruction<1, 2, 0> { public: LSubI(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -1028,7 +1029,7 @@ class LConstantT: public LConstant<0> { }; -class LBranch: public LControlInstruction<1> { +class LBranch: public LControlInstruction<1, 0> { public: explicit LBranch(LOperand* value) { inputs_[0] = value; @@ -1041,28 +1042,28 @@ class LBranch: public LControlInstruction<1> { }; -class LCmpMapAndBranch: public LTemplateInstruction<0, 1> { +class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> { public: explicit LCmpMapAndBranch(LOperand* value) { inputs_[0] = value; } DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch) + DECLARE_HYDROGEN_ACCESSOR(CompareMap) virtual bool IsControl() const { return true; } Handle map() const { return hydrogen()->map(); } int true_block_id() const { - return hydrogen()->true_destination()->block_id(); + return hydrogen()->FirstSuccessor()->block_id(); } int false_block_id() const { - return hydrogen()->false_destination()->block_id(); + return 
hydrogen()->SecondSuccessor()->block_id(); } }; -class LJSArrayLength: public LTemplateInstruction<1, 1> { +class LJSArrayLength: public LTemplateInstruction<1, 1, 0> { public: explicit LJSArrayLength(LOperand* value) { inputs_[0] = value; @@ -1073,7 +1074,7 @@ class LJSArrayLength: public LTemplateInstruction<1, 1> { }; -class LFixedArrayLength: public LTemplateInstruction<1, 1> { +class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> { public: explicit LFixedArrayLength(LOperand* value) { inputs_[0] = value; @@ -1096,7 +1097,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> { }; -class LThrow: public LTemplateInstruction<0, 1> { +class LThrow: public LTemplateInstruction<0, 1, 0> { public: explicit LThrow(LOperand* value) { inputs_[0] = value; @@ -1106,7 +1107,7 @@ class LThrow: public LTemplateInstruction<0, 1> { }; -class LBitNotI: public LTemplateInstruction<1, 1> { +class LBitNotI: public LTemplateInstruction<1, 1, 0> { public: explicit LBitNotI(LOperand* value) { inputs_[0] = value; @@ -1116,7 +1117,7 @@ class LBitNotI: public LTemplateInstruction<1, 1> { }; -class LAddI: public LTemplateInstruction<1, 2> { +class LAddI: public LTemplateInstruction<1, 2, 0> { public: LAddI(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -1128,7 +1129,7 @@ class LAddI: public LTemplateInstruction<1, 2> { }; -class LPower: public LTemplateInstruction<1, 2> { +class LPower: public LTemplateInstruction<1, 2, 0> { public: LPower(LOperand* left, LOperand* right) { inputs_[0] = left; @@ -1140,7 +1141,7 @@ class LPower: public LTemplateInstruction<1, 2> { }; -class LArithmeticD: public LTemplateInstruction<1, 2> { +class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) { @@ -1158,7 +1159,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2> { }; -class LArithmeticT: public LTemplateInstruction<1, 2> { +class LArithmeticT: public LTemplateInstruction<1, 2, 0> { public: 
LArithmeticT(Token::Value op, LOperand* left, LOperand* right) : op_(op) { @@ -1176,7 +1177,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2> { }; -class LReturn: public LTemplateInstruction<0, 1> { +class LReturn: public LTemplateInstruction<0, 1, 0> { public: explicit LReturn(LOperand* value) { inputs_[0] = value; @@ -1186,7 +1187,7 @@ class LReturn: public LTemplateInstruction<0, 1> { }; -class LLoadNamedField: public LTemplateInstruction<1, 1> { +class LLoadNamedField: public LTemplateInstruction<1, 1, 0> { public: explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; @@ -1197,7 +1198,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1> { }; -class LLoadNamedGeneric: public LTemplateInstruction<1, 1> { +class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> { public: explicit LLoadNamedGeneric(LOperand* object) { inputs_[0] = object; @@ -1225,7 +1226,7 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> { }; -class LLoadElements: public LTemplateInstruction<1, 1> { +class LLoadElements: public LTemplateInstruction<1, 1, 0> { public: explicit LLoadElements(LOperand* object) { inputs_[0] = object; @@ -1235,7 +1236,7 @@ class LLoadElements: public LTemplateInstruction<1, 1> { }; -class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> { +class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { public: LLoadKeyedFastElement(LOperand* elements, LOperand* key) { inputs_[0] = elements; @@ -1250,7 +1251,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> { }; -class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> { +class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> { public: LLoadKeyedGeneric(LOperand* obj, LOperand* key) { inputs_[0] = obj; @@ -1271,7 +1272,7 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> { }; -class LStoreGlobal: public LTemplateInstruction<0, 1> { +class LStoreGlobal: public LTemplateInstruction<0, 1, 0> { public: explicit 
LStoreGlobal(LOperand* value) { inputs_[0] = value; @@ -1294,7 +1295,7 @@ class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> { }; -class LPushArgument: public LTemplateInstruction<0, 1> { +class LPushArgument: public LTemplateInstruction<0, 1, 0> { public: explicit LPushArgument(LOperand* value) { inputs_[0] = value; @@ -1328,10 +1329,10 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> { }; -class LCallKeyed: public LTemplateInstruction<1, 0, 1> { +class LCallKeyed: public LTemplateInstruction<1, 1, 0> { public: - explicit LCallKeyed(LOperand* temp) { - temps_[0] = temp; + explicit LCallKeyed(LOperand* key) { + inputs_[0] = key; } DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") @@ -1388,7 +1389,7 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> { }; -class LCallNew: public LTemplateInstruction<1, 1> { +class LCallNew: public LTemplateInstruction<1, 1, 0> { public: explicit LCallNew(LOperand* constructor) { inputs_[0] = constructor; @@ -1413,7 +1414,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> { }; -class LInteger32ToDouble: public LTemplateInstruction<1, 1> { +class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> { public: explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; @@ -1423,7 +1424,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1> { }; -class LNumberTagI: public LTemplateInstruction<1, 1> { +class LNumberTagI: public LTemplateInstruction<1, 1, 0> { public: explicit LNumberTagI(LOperand* value) { inputs_[0] = value; @@ -1474,7 +1475,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> { }; -class LSmiTag: public LTemplateInstruction<1, 1> { +class LSmiTag: public LTemplateInstruction<1, 1, 0> { public: explicit LSmiTag(LOperand* value) { inputs_[0] = value; @@ -1484,7 +1485,7 @@ class LSmiTag: public LTemplateInstruction<1, 1> { }; -class LNumberUntagD: public LTemplateInstruction<1, 1> { +class LNumberUntagD: public LTemplateInstruction<1, 1, 
0> { public: explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; @@ -1494,7 +1495,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1> { }; -class LSmiUntag: public LTemplateInstruction<1, 1> { +class LSmiUntag: public LTemplateInstruction<1, 1, 0> { public: LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) { @@ -1593,7 +1594,7 @@ class LStoreKeyedGeneric: public LStoreKeyed { }; -class LCheckFunction: public LTemplateInstruction<0, 1> { +class LCheckFunction: public LTemplateInstruction<0, 1, 0> { public: explicit LCheckFunction(LOperand* value) { inputs_[0] = value; @@ -1616,7 +1617,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> { }; -class LCheckMap: public LTemplateInstruction<0, 1> { +class LCheckMap: public LTemplateInstruction<0, 1, 0> { public: explicit LCheckMap(LOperand* value) { inputs_[0] = value; @@ -1641,7 +1642,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> { }; -class LCheckSmi: public LTemplateInstruction<0, 1> { +class LCheckSmi: public LTemplateInstruction<0, 1, 0> { public: LCheckSmi(LOperand* value, Condition condition) : condition_(condition) { @@ -1690,7 +1691,7 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> { }; -class LTypeof: public LTemplateInstruction<1, 1> { +class LTypeof: public LTemplateInstruction<1, 1, 0> { public: explicit LTypeof(LOperand* value) { inputs_[0] = value; @@ -1700,7 +1701,7 @@ class LTypeof: public LTemplateInstruction<1, 1> { }; -class LTypeofIs: public LTemplateInstruction<1, 1> { +class LTypeofIs: public LTemplateInstruction<1, 1, 0> { public: explicit LTypeofIs(LOperand* value) { inputs_[0] = value; @@ -1715,7 +1716,7 @@ class LTypeofIs: public LTemplateInstruction<1, 1> { }; -class LTypeofIsAndBranch: public LControlInstruction<1> { +class LTypeofIsAndBranch: public LControlInstruction<1, 0> { public: explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; @@ -1730,7 +1731,7 @@ class LTypeofIsAndBranch: 
public LControlInstruction<1> { }; -class LDeleteProperty: public LTemplateInstruction<1, 2> { +class LDeleteProperty: public LTemplateInstruction<1, 2, 0> { public: LDeleteProperty(LOperand* obj, LOperand* key) { inputs_[0] = obj; @@ -1900,30 +1901,30 @@ class LChunkBuilder BASE_EMBEDDED { MUST_USE_RESULT LOperand* UseRegister(HValue* value); MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - // A value in a register that may be trashed. + // An input operand in a register that may be trashed. MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - // An operand value in a register or stack slot. + // An input operand in a register or stack slot. MUST_USE_RESULT LOperand* Use(HValue* value); MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - // An operand value in a register, stack slot or a constant operand. + // An input operand in a register, stack slot or a constant operand. MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - // An operand value in a register or a constant operand. + // An input operand in a register or a constant operand. MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); + // An input operand in register, stack slot or a constant operand. + // Will not be moved to a register even if one is freely available. + MUST_USE_RESULT LOperand* UseAny(HValue* value); + // Temporary operand that must be in a register. MUST_USE_RESULT LUnallocated* TempRegister(); MUST_USE_RESULT LOperand* FixedTemp(Register reg); MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg); - // An operand value in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - LOperand* UseAny(HValue* value); - // Methods for setting up define-use relationships. // Return the same instruction that they are passed. 
template diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index f95755db2a..a690bd5256 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -885,6 +885,13 @@ Condition MacroAssembler::CheckSmi(Register src) { } +Condition MacroAssembler::CheckSmi(const Operand& src) { + ASSERT_EQ(0, kSmiTag); + testb(src, Immediate(kSmiTagMask)); + return zero; +} + + Condition MacroAssembler::CheckNonNegativeSmi(Register src) { ASSERT_EQ(0, kSmiTag); // Make mask 0x8000000000000001 and test that both bits are zero. @@ -1386,6 +1393,40 @@ void MacroAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { } +void MacroAssembler::Pushad() { + push(rax); + push(rcx); + push(rdx); + push(rbx); + // Not pushing rsp or rbp. + push(rsi); + push(rdi); + push(r8); + push(r9); + // r10 is kScratchRegister. + push(r11); + push(r12); + // r13 is kRootRegister. + push(r14); + // r15 is kSmiConstantRegister +} + + +void MacroAssembler::Popad() { + pop(r14); + pop(r12); + pop(r11); + pop(r9); + pop(r8); + pop(rdi); + pop(rsi); + pop(rbx); + pop(rdx); + pop(rcx); + pop(rax); +} + + void MacroAssembler::PushTryHandler(CodeLocation try_location, HandlerType type) { // Adjust this code if not the case. diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 30b9ba5152..b2d085f905 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -272,6 +272,7 @@ class MacroAssembler: public Assembler { // Is the value a tagged smi. Condition CheckSmi(Register src); + Condition CheckSmi(const Operand& src); // Is the value a non-negative tagged smi. Condition CheckNonNegativeSmi(Register src); @@ -590,6 +591,13 @@ class MacroAssembler: public Assembler { void Call(ExternalReference ext); void Call(Handle code_object, RelocInfo::Mode rmode); + // Non-x64 instructions. + // Push/pop all general purpose registers. 
+ // Does not push rsp/rbp nor any of the assembler's special purpose registers + // (kScratchRegister, kSmiConstantRegister, kRootRegister). + void Pushad(); + void Popad(); + // Compare object type for heap object. // Always use unsigned comparisons: above and below, not less and greater. // Incoming register is heap_object and outgoing register is map. diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 8c1b5794d0..c86f43de2c 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -3144,6 +3144,306 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { } +MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub( + ExternalArrayType array_type, Code::Flags flags) { + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + Label slow; + + // Check that the object isn't a smi. + __ JumpIfSmi(rdx, &slow); + + // Check that the key is a smi. + __ JumpIfNotSmi(rax, &slow); + + // Check that the object is a JS object. + __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx); + __ j(not_equal, &slow); + // Check that the receiver does not require access checks. We need + // to check this explicitly since this generic stub does not perform + // map checks. The map is already in rdx. + __ testb(FieldOperand(rcx, Map::kBitFieldOffset), + Immediate(1 << Map::kIsAccessCheckNeeded)); + __ j(not_zero, &slow); + + // Check that the elements array is the appropriate type of + // ExternalArray. + // rax: index (as a smi) + // rdx: JSObject + __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), + Heap::RootIndexForExternalArrayType(array_type)); + __ j(not_equal, &slow); + + // Check that the index is in range. 
+ __ SmiToInteger32(rcx, rax); + __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset)); + // Unsigned comparison catches both negative and too-large values. + __ j(above_equal, &slow); + + // rax: index (as a smi) + // rdx: receiver (JSObject) + // rcx: untagged index + // rbx: elements array + __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); + // rbx: base pointer of external storage + switch (array_type) { + case kExternalByteArray: + __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0)); + break; + case kExternalUnsignedByteArray: + __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0)); + break; + case kExternalShortArray: + __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0)); + break; + case kExternalUnsignedShortArray: + __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0)); + break; + case kExternalIntArray: + __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0)); + break; + case kExternalUnsignedIntArray: + __ movl(rcx, Operand(rbx, rcx, times_4, 0)); + break; + case kExternalFloatArray: + __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0)); + break; + default: + UNREACHABLE(); + break; + } + + // rax: index + // rdx: receiver + // For integer array types: + // rcx: value + // For floating-point array type: + // xmm0: value as double. + + ASSERT(kSmiValueSize == 32); + if (array_type == kExternalUnsignedIntArray) { + // For the UnsignedInt array type, we need to see whether + // the value can be represented in a Smi. If not, we need to convert + // it to a HeapNumber. + NearLabel box_int; + + __ JumpIfUIntNotValidSmiValue(rcx, &box_int); + + __ Integer32ToSmi(rax, rcx); + __ ret(0); + + __ bind(&box_int); + + // Allocate a HeapNumber for the int and perform int-to-double + // conversion. + // The value is zero-extended since we loaded the value from memory + // with movl. + __ cvtqsi2sd(xmm0, rcx); + + __ AllocateHeapNumber(rcx, rbx, &slow); + // Set the value. 
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); + __ movq(rax, rcx); + __ ret(0); + } else if (array_type == kExternalFloatArray) { + // For the floating-point array type, we need to always allocate a + // HeapNumber. + __ AllocateHeapNumber(rcx, rbx, &slow); + // Set the value. + __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); + __ movq(rax, rcx); + __ ret(0); + } else { + __ Integer32ToSmi(rax, rcx); + __ ret(0); + } + + // Slow case: Jump to runtime. + __ bind(&slow); + __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1); + + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + + __ pop(rbx); + __ push(rdx); // receiver + __ push(rax); // name + __ push(rbx); // return address + + // Perform tail call to the entry. + __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); + + // Return the generated code. + return GetCode(flags); +} + + +MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( + ExternalArrayType array_type, Code::Flags flags) { + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + Label slow; + + // Check that the object isn't a smi. + __ JumpIfSmi(rdx, &slow); + // Get the map from the receiver. + __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); + // Check that the receiver does not require access checks. We need + // to do this because this generic stub does not perform map checks. + __ testb(FieldOperand(rbx, Map::kBitFieldOffset), + Immediate(1 << Map::kIsAccessCheckNeeded)); + __ j(not_zero, &slow); + // Check that the key is a smi. + __ JumpIfNotSmi(rcx, &slow); + + // Check that the object is a JS object. + __ CmpInstanceType(rbx, JS_OBJECT_TYPE); + __ j(not_equal, &slow); + + // Check that the elements array is the appropriate type of + // ExternalArray. 
+ // rax: value + // rcx: key (a smi) + // rdx: receiver (a JSObject) + __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), + Heap::RootIndexForExternalArrayType(array_type)); + __ j(not_equal, &slow); + + // Check that the index is in range. + __ SmiToInteger32(rdi, rcx); // Untag the index. + __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset)); + // Unsigned comparison catches both negative and too-large values. + __ j(above_equal, &slow); + + // Handle both smis and HeapNumbers in the fast path. Go to the + // runtime for all other kinds of values. + // rax: value + // rcx: key (a smi) + // rdx: receiver (a JSObject) + // rbx: elements array + // rdi: untagged key + NearLabel check_heap_number; + __ JumpIfNotSmi(rax, &check_heap_number); + // No more branches to slow case on this path. Key and receiver not needed. + __ SmiToInteger32(rdx, rax); + __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); + // rbx: base pointer of external storage + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ movb(Operand(rbx, rdi, times_1, 0), rdx); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ movw(Operand(rbx, rdi, times_2, 0), rdx); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ movl(Operand(rbx, rdi, times_4, 0), rdx); + break; + case kExternalFloatArray: + // Need to perform int-to-float conversion. + __ cvtlsi2ss(xmm0, rdx); + __ movss(Operand(rbx, rdi, times_4, 0), xmm0); + break; + default: + UNREACHABLE(); + break; + } + __ ret(0); + + __ bind(&check_heap_number); + // rax: value + // rcx: key (a smi) + // rdx: receiver (a JSObject) + // rbx: elements array + // rdi: untagged key + __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister); + __ j(not_equal, &slow); + // No more branches to slow case on this path. 
+ + // The WebGL specification leaves the behavior of storing NaN and + // +/-Infinity into integer arrays basically undefined. For more + // reproducible behavior, convert these to zero. + __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset)); + __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); + // rdi: untagged index + // rbx: base pointer of external storage + // top of FPU stack: value + if (array_type == kExternalFloatArray) { + __ cvtsd2ss(xmm0, xmm0); + __ movss(Operand(rbx, rdi, times_4, 0), xmm0); + __ ret(0); + } else { + // Perform float-to-int conversion with truncation (round-to-zero) + // behavior. + + // Convert to int32 and store the low byte/word. + // If the value is NaN or +/-infinity, the result is 0x80000000, + // which is automatically zero when taken mod 2^n, n < 32. + // rdx: value (converted to an untagged integer) + // rdi: untagged index + // rbx: base pointer of external storage + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ cvttsd2si(rdx, xmm0); + __ movb(Operand(rbx, rdi, times_1, 0), rdx); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ cvttsd2si(rdx, xmm0); + __ movw(Operand(rbx, rdi, times_2, 0), rdx); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: { + // Convert to int64, so that NaN and infinities become + // 0x8000000000000000, which is zero mod 2^32. + __ cvttsd2siq(rdx, xmm0); + __ movl(Operand(rbx, rdi, times_4, 0), rdx); + break; + } + default: + UNREACHABLE(); + break; + } + __ ret(0); + } + + // Slow case: call runtime. + __ bind(&slow); + + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + + __ pop(rbx); + __ push(rdx); // receiver + __ push(rcx); // key + __ push(rax); // value + __ push(rbx); // return address + + // Do tail-call to runtime routine. 
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + + return GetCode(flags); +} + #undef __ } } // namespace v8::internal diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index a7eca5b393..5d2d9cc410 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -82,7 +82,7 @@ test-serialize/ContextSerialization: SKIP test-serialize/ContextDeserialization: SKIP test-debug/BreakPointReturn: SKIP test-debug/DebugStepLinearMixedICs: SKIP - +test-debug/DebugConditional: SKIP ############################################################################## [ $arch == arm ] diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 6a2f3289f8..de00fbba46 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -874,6 +874,10 @@ THREADED_TEST(ExternalWrap) { TestExternalPointerWrapping(); #if defined(V8_HOST_ARCH_X64) + // Check a value with a leading 1 bit in x64 Smi encoding. + expected_ptr = reinterpret_cast(0x400000000); + TestExternalPointerWrapping(); + expected_ptr = reinterpret_cast(0xdeadbeefdeadbeef); TestExternalPointerWrapping(); @@ -2375,6 +2379,10 @@ TEST(APIThrowMessageOverwrittenToString) { CompileRun("ReferenceError.prototype.toString =" " function() { return 'Whoops' }"); CompileRun("asdf;"); + CompileRun("ReferenceError.prototype.constructor.name = void 0;"); + CompileRun("asdf;"); + CompileRun("ReferenceError.prototype.constructor = void 0;"); + CompileRun("asdf;"); v8::Handle string = CompileRun("try { asdf; } catch(e) { e + ''; }"); CHECK(string->Equals(v8_str("Whoops"))); v8::V8::RemoveMessageListeners(check_message); @@ -10583,6 +10591,33 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type, CHECK_EQ(0, result->Int32Value()); CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5)->ToObjectChecked())->value()); + + // Check truncation behavior of integral arrays. 
+ const char* unsigned_data = + "var source_data = [0.6, 10.6];" + "var expected_results = [0, 10];"; + const char* signed_data = + "var source_data = [0.6, 10.6, -0.6, -10.6];" + "var expected_results = [0, 10, 0, -10];"; + bool is_unsigned = + (array_type == v8::kExternalUnsignedByteArray || + array_type == v8::kExternalUnsignedShortArray || + array_type == v8::kExternalUnsignedIntArray); + + i::OS::SNPrintF(test_buf, + "%s" + "var all_passed = true;" + "for (var i = 0; i < source_data.length; i++) {" + " for (var j = 0; j < 8; j++) {" + " ext_array[j] = source_data[i];" + " }" + " all_passed = all_passed &&" + " (ext_array[5] == expected_results[i]);" + "}" + "all_passed;", + (is_unsigned ? unsigned_data : signed_data)); + result = CompileRun(test_buf.start()); + CHECK_EQ(true, result->BooleanValue()); } result = CompileRun("ext_array[3] = 33;" diff --git a/deps/v8/test/mjsunit/cyclic-error-to-string.js b/deps/v8/test/mjsunit/cyclic-error-to-string.js new file mode 100644 index 0000000000..2502b5340f --- /dev/null +++ b/deps/v8/test/mjsunit/cyclic-error-to-string.js @@ -0,0 +1,46 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test printing of cyclic errors which return the empty string for +// compatibility with Safari and Firefox. + +var e = new Error(); +assertEquals('Error', e + ''); + +e = new Error(); +e.name = e; +e.message = e; +e.stack = e; +e.arguments = e; +assertEquals(': ', e + ''); + +e = new Error(); +e.name = [ e ]; +e.message = [ e ]; +e.stack = [ e ]; +e.arguments = [ e ]; +assertEquals(': ', e + ''); diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals.js b/deps/v8/test/mjsunit/debug-evaluate-locals.js index 4b87829169..8430bd3576 100644 --- a/deps/v8/test/mjsunit/debug-evaluate-locals.js +++ b/deps/v8/test/mjsunit/debug-evaluate-locals.js @@ -34,18 +34,18 @@ exception = false; function checkFrame0(name, value) { - assertTrue(name == 'a' || name == 'b'); + assertTrue(name == 'a' || name == 'b', 'frame0 name'); if (name == 'a') { assertEquals(1, value); - } - if (name == 'b') { + } else if (name == 'b') { assertEquals(2, value); } } function checkFrame1(name, value) { - assertTrue(name == '.arguments' || name == 'a'); + assertTrue(name == '.arguments' || name == 'arguments' || name == 'a', + 'frame1 name'); if (name == 'a') { assertEquals(3, value); } @@ -53,12 +53,10 @@ 
function checkFrame1(name, value) { function checkFrame2(name, value) { - assertTrue(name == '.arguments' || name == 'a' || - name == 'arguments' || name == 'b'); + assertTrue(name == 'a' || name == 'b', 'frame2 name'); if (name == 'a') { assertEquals(5, value); - } - if (name == 'b') { + } else if (name == 'b') { assertEquals(0, value); } } @@ -73,18 +71,17 @@ function listener(event, exec_state, event_data, data) { checkFrame0(frame0.localName(0), frame0.localValue(0).value()); checkFrame0(frame0.localName(1), frame0.localValue(1).value()); - // Frame 1 has normal variable a (and the .arguments variable). + // Frame 1 has normal variables a and arguments (and the .arguments + // variable). var frame1 = exec_state.frame(1); checkFrame1(frame1.localName(0), frame1.localValue(0).value()); checkFrame1(frame1.localName(1), frame1.localValue(1).value()); + checkFrame1(frame1.localName(2), frame1.localValue(2).value()); - // Frame 2 has normal variables a and b (and both the .arguments and - // arguments variable). + // Frame 2 has normal variables a and b. var frame2 = exec_state.frame(2); checkFrame2(frame2.localName(0), frame2.localValue(0).value()); checkFrame2(frame2.localName(1), frame2.localValue(1).value()); - checkFrame2(frame2.localName(2), frame2.localValue(2).value()); - checkFrame2(frame2.localName(3), frame2.localValue(3).value()); // Evaluating a and b on frames 0, 1 and 2 produces 1, 2, 3, 4, 5 and 6. assertEquals(1, exec_state.frame(0).evaluate('a').value()); diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js new file mode 100644 index 0000000000..924b34f936 --- /dev/null +++ b/deps/v8/test/mjsunit/strict-mode.js @@ -0,0 +1,117 @@ +// Copyright 2011 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +function CheckStrictMode(code, exception) { + assertDoesNotThrow(code); + assertThrows("'use strict';\n" + code, exception); + assertThrows('"use strict";\n' + code, exception); + assertDoesNotThrow("\ + function outer() {\ + function inner() {\n" + + code + + "\n}\ + }"); + assertThrows("\ + function outer() {\ + 'use strict';\ + function inner() {\n" + + code + + "\n}\ + }", exception); +} + +// Incorrect 'use strict' directive. 
+function UseStrictEscape() {
+  "use\\x20strict";
+  with ({}) {};
+}
+
+// 'use strict' in non-directive position.
+function UseStrictNonDirective() {
+  void(0);
+  "use strict";
+  with ({}) {};
+}
+
+// Multiple directives, including "use strict".
+assertThrows('\
+"directive 1";\
+"another directive";\
+"use strict";\
+"directive after strict";\
+"and one more";\
+with({}) {}', SyntaxError);
+
+// 'with' disallowed in strict mode.
+CheckStrictMode("with({}) {}", SyntaxError);
+
+// Function named 'eval'.
+CheckStrictMode("function eval() {}", SyntaxError)
+
+// Function named 'arguments'.
+CheckStrictMode("function arguments() {}", SyntaxError)
+
+// Function parameter named 'eval'.
+//CheckStrictMode("function foo(a, b, eval, c, d) {}", SyntaxError)
+
+// Function parameter named 'arguments'.
+//CheckStrictMode("function foo(a, b, arguments, c, d) {}", SyntaxError)
+
+// Property accessor parameter named 'eval'.
+//CheckStrictMode("var o = { set foo(eval) {} }", SyntaxError)
+
+// Property accessor parameter named 'arguments'.
+//CheckStrictMode("var o = { set foo(arguments) {} }", SyntaxError)
+
+// Duplicate function parameter name.
+//CheckStrictMode("function foo(a, b, c, d, b) {}", SyntaxError)
+
+// catch(eval)
+CheckStrictMode("try{}catch(eval){};", SyntaxError)
+
+// catch(arguments)
+CheckStrictMode("try{}catch(arguments){};", SyntaxError)
+
+// var eval
+CheckStrictMode("var eval;", SyntaxError)
+
+// var arguments
+CheckStrictMode("var arguments;", SyntaxError)
+
+// Strict mode applies to the function in which the directive is used.
+//assertThrows('\
+//function foo(eval) {\
+// "use strict";\
+//}', SyntaxError);
+
+// Strict mode doesn't affect the outer scope of strict code.
+function NotStrict(eval) { + function Strict() { + "use strict"; + } + with ({}) {}; +} diff --git a/deps/v8/test/mjsunit/string-charcodeat.js b/deps/v8/test/mjsunit/string-charcodeat.js index 831f688fd4..fb7ab9af86 100644 --- a/deps/v8/test/mjsunit/string-charcodeat.js +++ b/deps/v8/test/mjsunit/string-charcodeat.js @@ -153,6 +153,17 @@ TestStringType(Slice16End, true); TestStringType(Flat16, true); TestStringType(NotAString16, true); + +function ConsNotSmiIndex() { + var str = Cons(); + assertTrue(isNaN(str.charCodeAt(0x7fffffff))); +} + +for (var i = 0; i < 100000; i++) { + ConsNotSmiIndex(); +} + + for (var i = 0; i != 10; i++) { assertEquals(101, Cons16().charCodeAt(1.1)); assertEquals('e', Cons16().charAt(1.1)); diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status index a119bf2a78..cfccc4603a 100644 --- a/deps/v8/test/mozilla/mozilla.status +++ b/deps/v8/test/mozilla/mozilla.status @@ -198,6 +198,9 @@ js1_5/Regress/regress-404755: PASS || FAIL js1_5/extensions/regress-363258: PASS || FAIL +# Test that assumes specific runtime for a regexp, flaky in debug mode. 
+ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug + ##################### INCOMPATIBLE TESTS ##################### diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp index 024ecd7a14..fe4526668f 100644 --- a/deps/v8/tools/gyp/v8.gyp +++ b/deps/v8/tools/gyp/v8.gyp @@ -32,6 +32,7 @@ 'gcc_version%': 'unknown', 'v8_target_arch%': '<(target_arch)', 'v8_use_snapshot%': 'true', + 'v8_use_liveobjectlist%': 'false', }, 'conditions': [ ['use_system_v8==0', { @@ -66,6 +67,14 @@ }], ], }], + ['v8_use_liveobjectlist=="true"', { + 'defines': [ + 'ENABLE_DEBUGGER_SUPPORT', + 'INSPECTOR', + 'OBJECT_PRINT', + 'LIVEOBJECTLIST', + ], + }], ], 'configurations': { 'Debug': { @@ -417,6 +426,8 @@ '../../src/ic-inl.h', '../../src/ic.cc', '../../src/ic.h', + '../../src/inspector.cc', + '../../src/inspector.h', '../../src/interpreter-irregexp.cc', '../../src/interpreter-irregexp.h', '../../src/jump-target-inl.h', @@ -432,6 +443,9 @@ '../../src/lithium-allocator.h', '../../src/liveedit.cc', '../../src/liveedit.h', + '../../src/liveobjectlist-inl.h', + '../../src/liveobjectlist.cc', + '../../src/liveobjectlist.h', '../../src/log-inl.h', '../../src/log-utils.cc', '../../src/log-utils.h', diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj index b994d36ace..d02d2b1d5d 100644 --- a/deps/v8/tools/v8.xcodeproj/project.pbxproj +++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj @@ -319,6 +319,13 @@ 89B91BFB12D4F1BB002FF4BC /* libv8-x64.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 895692AA12D4ED240072C313 /* libv8-x64.a */; }; 89B933AF0FAA0F9600201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; }; 89B933B00FAA0F9D00201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; }; + 89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD312E8DDCF001E2B82 /* 
lithium-gap-resolver-ia32.cc */; }; + 89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; }; + 89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; }; + 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; }; + 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; }; + 89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; }; + 89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; }; 89F23C3F0E78D5B2006B2466 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; }; 89F23C400E78D5B2006B2466 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; }; 89F23C410E78D5B2006B2466 /* api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FA0E719B8F00D62E90 /* api.cc */; }; @@ -886,6 +893,12 @@ 89B91B9A12D4EF95002FF4BC /* virtual-frame-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "virtual-frame-x64.h"; path = "x64/virtual-frame-x64.h"; sourceTree = ""; }; 89B91BBE12D4F02A002FF4BC /* v8_shell-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-x64"; sourceTree = BUILT_PRODUCTS_DIR; }; 89B91BCE12D4F02A002FF4BC /* d8-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-x64"; sourceTree = BUILT_PRODUCTS_DIR; }; + 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.cpp.cpp; name = "lithium-gap-resolver-ia32.cc"; path = "ia32/lithium-gap-resolver-ia32.cc"; sourceTree = ""; }; + 89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-ia32.h"; path = "ia32/lithium-gap-resolver-ia32.h"; sourceTree = ""; }; + 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "gdb-jit.cc"; sourceTree = ""; }; + 89D7DDD712E8DE09001E2B82 /* gdb-jit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gdb-jit.h"; sourceTree = ""; }; + 89D7DDD812E8DE09001E2B82 /* inspector.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = inspector.cc; sourceTree = ""; }; + 89D7DDD912E8DE09001E2B82 /* inspector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inspector.h; sourceTree = ""; }; 89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; }; 89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; }; 89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-x64.cc"; path = "x64/lithium-codegen-x64.cc"; sourceTree = ""; }; @@ -1101,7 +1114,6 @@ 897FF1270E719B8F00D62E90 /* dateparser.h */, 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */, 8956B6CE0F5D86570033B5A2 /* debug-agent.h */, - 898BD20C0EF6CC850068B00A /* debug-arm.cc */, 897FF1280E719B8F00D62E90 /* debug.cc */, 897FF1290E719B8F00D62E90 /* debug.h */, 893E248B12B14B3D0083370F /* deoptimizer.cc */, @@ -1141,6 +1153,8 
@@ 9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */, 893E24DA12B14B9F0083370F /* gc-extension.cc */, 893E24DB12B14B9F0083370F /* gc-extension.h */, + 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */, + 89D7DDD712E8DE09001E2B82 /* gdb-jit.h */, 897FF13E0E719B8F00D62E90 /* global-handles.cc */, 897FF13F0E719B8F00D62E90 /* global-handles.h */, 897FF1400E719B8F00D62E90 /* globals.h */, @@ -1162,6 +1176,8 @@ 897FF14B0E719B8F00D62E90 /* ic-inl.h */, 897FF14C0E719B8F00D62E90 /* ic.cc */, 897FF14D0E719B8F00D62E90 /* ic.h */, + 89D7DDD812E8DE09001E2B82 /* inspector.cc */, + 89D7DDD912E8DE09001E2B82 /* inspector.h */, 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */, 89A15C670EE4665300B48DEB /* interpreter-irregexp.h */, 897FF14E0E719B8F00D62E90 /* jsregexp.cc */, @@ -1463,6 +1479,8 @@ 89B91C0312D4F275002FF4BC /* ia32 */ = { isa = PBXGroup; children = ( + 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */, + 89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */, 897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */, 897FF1010E719B8F00D62E90 /* assembler-ia32.cc */, 897FF1020E719B8F00D62E90 /* assembler-ia32.h */, @@ -1515,6 +1533,7 @@ 896448BC0E9D530500E7C516 /* codegen-arm.h */, 895FA748107FFE73006F39D4 /* constants-arm.cc */, 897FF11B0E719B8F00D62E90 /* constants-arm.h */, + 898BD20C0EF6CC850068B00A /* debug-arm.cc */, 893E24C612B14B510083370F /* deoptimizer-arm.cc */, 9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */, 9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */, @@ -1958,6 +1977,8 @@ 8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */, 894A59E912D777E80000766D /* lithium.cc in Sources */, 89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */, + 89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */, + 89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2093,6 +2114,9 @@ 893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */, 8946827512C26EB700C914BC /* objects-printer.cc in Sources 
*/, 894A59EB12D777E80000766D /* lithium.cc in Sources */, + 89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */, + 89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */, + 89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2268,6 +2292,8 @@ 893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */, 8946827612C26EB700C914BC /* objects-printer.cc in Sources */, 894A59EA12D777E80000766D /* lithium.cc in Sources */, + 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */, + 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2395,6 +2421,7 @@ V8_ENABLE_CHECKS, OBJECT_PRINT, ENABLE_VMSTATE_TRACKING, + ENABLE_DEBUGGER_SUPPORT, ); GCC_SYMBOLS_PRIVATE_EXTERN = YES; GCC_TREAT_WARNINGS_AS_ERRORS = YES; @@ -2434,6 +2461,7 @@ GCC_PREPROCESSOR_DEFINITIONS = ( "$(GCC_PREPROCESSOR_DEFINITIONS)", NDEBUG, + ENABLE_DEBUGGER_SUPPORT, ); GCC_SYMBOLS_PRIVATE_EXTERN = YES; GCC_TREAT_WARNINGS_AS_ERRORS = NO;