diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 93fca4208e..95c113376e 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,19 @@ +2010-06-23: Version 2.2.19 + + Fix bug that causes the build to break when profillingsupport=off + (issue 738). + + Added expose-externalize-string flag for testing extensions. + + Resolve linker issues with using V8 as a DLL causing a number of + problems with unresolved symbols. + + Fix build failure for cctests when ENABLE_DEBUGGER_SUPPORT is not + defined. + + Performance improvements on all platforms. + + 2010-06-16: Version 2.2.18 Added API functions to retrieve information on indexed properties diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index d3880bce56..3e1952c7ad 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -196,7 +196,9 @@ class V8EXPORT HeapGraphEdge { enum Type { CONTEXT_VARIABLE = 0, // A variable from a function context. ELEMENT = 1, // An element of an array. - PROPERTY = 2 // A named object property. + PROPERTY = 2, // A named object property. + INTERNAL = 3 // A link that can't be accessed from JS, + // thus, its name isn't a real property name. }; /** Returns edge type (see HeapGraphEdge::Type). */ diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 118bb27740..b62561876c 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -3211,11 +3211,9 @@ class Internals { static const int kFullStringRepresentationMask = 0x07; static const int kExternalTwoByteRepresentationTag = 0x02; - // These constants are compiler dependent so their values must be - // defined within the implementation. - V8EXPORT static int kJSObjectType; - V8EXPORT static int kFirstNonstringType; - V8EXPORT static int kProxyType; + static const int kJSObjectType = 0x9f; + static const int kFirstNonstringType = 0x80; + static const int kProxyType = 0x85; static inline bool HasHeapObjectTag(internal::Object* value) { return ((reinterpret_cast(value) & kHeapObjectTagMask) == diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 5c88a876ec..464ca54d2d 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -106,9 +106,6 @@ static i::HandleScopeImplementer thread_local; static FatalErrorCallback exception_behavior = NULL; -int i::Internals::kJSObjectType = JS_OBJECT_TYPE; -int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE; -int i::Internals::kProxyType = PROXY_TYPE; static void DefaultFatalErrorHandler(const char* location, const char* message) { @@ -4460,6 +4457,7 @@ Handle HeapGraphEdge::GetName() const { reinterpret_cast(this); switch (edge->type()) { case i::HeapGraphEdge::CONTEXT_VARIABLE: + case i::HeapGraphEdge::INTERNAL: case i::HeapGraphEdge::PROPERTY: return Handle(ToApi(i::Factory::LookupAsciiSymbol( edge->name()))); diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 8ca91265ba..114ec234d2 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -45,11 +45,6 @@ namespace v8 { namespace internal { -Condition NegateCondition(Condition cc) { - ASSERT(cc != al); - return static_cast(cc ^ ne); -} - void RelocInfo::apply(intptr_t delta) { if (RelocInfo::IsInternalReference(rmode_)) { diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 16dc5cdfcf..f8d98db926 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -282,6 +282,11 @@ const Instr kBlxRegPattern = const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16; 
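The Instr mask/pattern constants defined here (kMovMvnMask/kMovMvnPattern and the new kMovLeaveCC* and kMovw* pairs below) are the assembler's way of recognizing an instruction it has already emitted: AND the raw instruction bits with the mask and compare against the pattern. A minimal sketch of the idiom, with an illustrative helper name that is not part of the patch:

    static bool MatchesPattern(Instr instr, Instr mask, Instr pattern) {
      return (instr & mask) == pattern;
    }
    // This is how fits_shifter (further down in assembler-arm.cc) decides it may
    // flip a mov into mvn, and the new kMovLeaveCC*/kMovw* pairs are matched the
    // same way when a mov is rewritten into an ARMv7 movw.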
const Instr kMovMvnPattern = 0xd * B21; const Instr kMovMvnFlip = B22; +const Instr kMovLeaveCCMask = 0xdff * B16; +const Instr kMovLeaveCCPattern = 0x1a0 * B16; +const Instr kMovwMask = 0xff * B20; +const Instr kMovwPattern = 0x30 * B20; +const Instr kMovwLeaveCCFlip = 0x5 * B21; const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; const Instr kCmpCmnPattern = 0x15 * B20; const Instr kCmpCmnFlip = B21; @@ -389,6 +394,12 @@ void Assembler::Align(int m) { } +void Assembler::CodeTargetAlign() { + // Preferred alignment of jump targets on some ARM chips. + Align(8); +} + + bool Assembler::IsNop(Instr instr, int type) { // Check for mov rx, rx. ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. @@ -640,6 +651,12 @@ void Assembler::next(Label* L) { } +static Instr EncodeMovwImmediate(uint32_t immediate) { + ASSERT(immediate < 0x10000); + return ((immediate & 0xf000) << 4) | (immediate & 0xfff); +} + + // Low-level code emission routines depending on the addressing mode. // If this returns true then you have to use the rotate_imm and immed_8 // that it returns, because it may have already changed the instruction @@ -664,6 +681,15 @@ static bool fits_shifter(uint32_t imm32, if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { *instr ^= kMovMvnFlip; return true; + } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { + if (CpuFeatures::IsSupported(ARMv7)) { + if (imm32 < 0x10000) { + *instr ^= kMovwLeaveCCFlip; + *instr |= EncodeMovwImmediate(imm32); + *rotate_imm = *immed_8 = 0; // Not used for movw. + return true; + } + } } } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { @@ -695,7 +721,7 @@ static bool fits_shifter(uint32_t imm32, // if they can be encoded in the ARM's 12 bits of immediate-offset instruction // space. There is no guarantee that the relocated location can be similarly // encoded. -static bool MustUseIp(RelocInfo::Mode rmode) { +static bool MustUseConstantPool(RelocInfo::Mode rmode) { if (rmode == RelocInfo::EXTERNAL_REFERENCE) { #ifdef DEBUG if (!Serializer::enabled()) { @@ -712,7 +738,7 @@ static bool MustUseIp(RelocInfo::Mode rmode) { bool Operand::is_single_instruction() const { if (rm_.is_valid()) return true; - if (MustUseIp(rmode_)) return false; + if (MustUseConstantPool(rmode_)) return false; uint32_t dummy1, dummy2; return fits_shifter(imm32_, &dummy1, &dummy2, NULL); } @@ -728,19 +754,34 @@ void Assembler::addrmod1(Instr instr, // Immediate. uint32_t rotate_imm; uint32_t immed_8; - if (MustUseIp(x.rmode_) || + if (MustUseConstantPool(x.rmode_) || !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { // The immediate operand cannot be encoded as a shifter operand, so load // it first to register ip and change the original instruction to use ip. // However, if the original instruction is a 'mov rd, x' (not setting the // condition code), then replace it with a 'ldr rd, [pc]'. - RecordRelocInfo(x.rmode_, x.imm32_); CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed Condition cond = static_cast(instr & CondMask); if ((instr & ~CondMask) == 13*B21) { // mov, S not set - ldr(rd, MemOperand(pc, 0), cond); + if (MustUseConstantPool(x.rmode_) || + !CpuFeatures::IsSupported(ARMv7)) { + RecordRelocInfo(x.rmode_, x.imm32_); + ldr(rd, MemOperand(pc, 0), cond); + } else { + // Will probably use movw, will certainly not use constant pool. 
+ mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond); + movt(rd, static_cast(x.imm32_) >> 16, cond); + } } else { - ldr(ip, MemOperand(pc, 0), cond); + // If this is not a mov or mvn instruction we may still be able to avoid + // a constant pool entry by using mvn or movw. + if (!MustUseConstantPool(x.rmode_) && + (instr & kMovMvnMask) != kMovMvnPattern) { + mov(ip, x, LeaveCC, cond); + } else { + RecordRelocInfo(x.rmode_, x.imm32_); + ldr(ip, MemOperand(pc, 0), cond); + } addrmod1(instr, rn, rd, Operand(ip)); } return; @@ -1051,6 +1092,17 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { } +void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { + ASSERT(immediate < 0x10000); + mov(reg, Operand(immediate), LeaveCC, cond); +} + + +void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { + emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); +} + + void Assembler::bic(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { addrmod1(cond | 14*B21 | s, src1, dst, src2); @@ -1231,7 +1283,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, // Immediate. uint32_t rotate_imm; uint32_t immed_8; - if (MustUseIp(src.rmode_) || + if (MustUseConstantPool(src.rmode_) || !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { // Immediate operand cannot be encoded, load it first to register ip. RecordRelocInfo(src.rmode_, src.imm32_); diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index f4b43006f2..869227a7a8 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -279,7 +279,10 @@ enum Condition { // Returns the equivalent of !cc. -INLINE(Condition NegateCondition(Condition cc)); +inline Condition NegateCondition(Condition cc) { + ASSERT(cc != al); + return static_cast(cc ^ ne); +} // Corresponds to transposing the operands of a comparison. @@ -545,6 +548,12 @@ extern const Instr kMovMvnMask; extern const Instr kMovMvnPattern; extern const Instr kMovMvnFlip; +extern const Instr kMovLeaveCCMask; +extern const Instr kMovLeaveCCPattern; +extern const Instr kMovwMask; +extern const Instr kMovwPattern; +extern const Instr kMovwLeaveCCFlip; + extern const Instr kCmpCmnMask; extern const Instr kCmpCmnPattern; extern const Instr kCmpCmnFlip; @@ -694,6 +703,8 @@ class Assembler : public Malloced { // possible to align the pc offset to a multiple // of m. m must be a power of 2 (>= 4). void Align(int m); + // Aligns code to something that's optimal for a jump target for the platform. + void CodeTargetAlign(); // Branch instructions void b(int branch_offset, Condition cond = al); @@ -772,6 +783,13 @@ class Assembler : public Malloced { mov(dst, Operand(src), s, cond); } + // ARMv7 instructions for loading a 32 bit immediate in two instructions. + // This may actually emit a different mov instruction, but on an ARMv7 it + // is guaranteed to only emit one instruction. + void movw(Register reg, uint32_t immediate, Condition cond = al); + // The constant for movt should be in the range 0-0xffff. 
+ void movt(Register reg, uint32_t immediate, Condition cond = al); + void bic(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, Condition cond = al); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 27eec4b424..8e87614c96 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -268,8 +268,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { // Load the offset into r3. int slot_offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; - __ mov(r3, Operand(slot_offset)); - __ RecordWrite(r2, r3, r1); + __ RecordWrite(r2, Operand(slot_offset), r3, r1); } } } @@ -3109,9 +3108,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { exit.Branch(eq); // scratch is loaded with context when calling SlotOperand above. int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; - __ mov(r3, Operand(offset)); // r1 could be identical with tos, but that doesn't matter. - __ RecordWrite(scratch, r3, r1); + __ RecordWrite(scratch, Operand(offset), r3, r1); } // If we definitely did not jump over the assignment, we do not need // to bind the exit label. Doing so can defeat peephole @@ -3464,8 +3462,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { __ str(r0, FieldMemOperand(r1, offset)); // Update the write barrier for the array address. - __ mov(r3, Operand(offset)); - __ RecordWrite(r1, r3, r2); + __ RecordWrite(r1, Operand(offset), r3, r2); } ASSERT_EQ(original_height + 1, frame_->height()); } @@ -4279,8 +4276,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList* args) { // Store the value. __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); // Update the write barrier. - __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag)); - __ RecordWrite(r1, r2, r3); + __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3); // Leave. leave.Bind(); frame_->EmitPush(r0); @@ -4710,7 +4706,8 @@ void CodeGenerator::GenerateRandomHeapNumber( Label slow_allocate_heapnumber; Label heapnumber_allocated; - __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber); + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber); __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); @@ -7207,7 +7204,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { void RecordWriteStub::Generate(MacroAssembler* masm) { - __ RecordWriteHelper(object_, offset_, scratch_); + __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_); __ Ret(); } @@ -7367,12 +7364,16 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); + Register heap_number_map = r6; if (ShouldGenerateSmiCode()) { + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + // Smi-smi case (overflow). // Since both are Smis there is no heap number to overwrite, so allocate. - // The new heap number is in r5. r6 and r7 are scratch. - __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow); + // The new heap number is in r5. r3 and r7 are scratch. + __ AllocateHeapNumber( + r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, // using registers d7 and d6 for the double values. 
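With movw and movt available, a full 32-bit constant can be materialized in two instructions instead of a constant-pool load, which is what the addrmod1 change above does for plain mov. A minimal sketch of the split, assuming an ARMv7 target (the helper name is illustrative, not from the patch):

    void LoadFullWordConstant(Assembler* assm, Register rd, uint32_t imm32) {
      assm->movw(rd, imm32 & 0xffff);  // low half-word; upper bits of rd are cleared
      assm->movt(rd, imm32 >> 16);     // high half-word; low half is preserved
      // e.g. imm32 == 0xDEADBEEF emits  movw rd, #0xBEEF  then  movt rd, #0xDEAD
    }

EncodeMovwImmediate above packs such a 16-bit half-word into the instruction's split imm4:imm12 field (bits 19..16 and 11..0).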
@@ -7385,14 +7386,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ vmov(s13, r7); __ vcvt_f64_s32(d6, s13); } else { - // Write Smi from rhs to r3 and r2 in double format. r6 is scratch. + // Write Smi from rhs to r3 and r2 in double format. r3 is scratch. __ mov(r7, Operand(rhs)); - ConvertToDoubleStub stub1(r3, r2, r7, r6); + ConvertToDoubleStub stub1(r3, r2, r7, r9); __ push(lr); __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); - // Write Smi from lhs to r1 and r0 in double format. r6 is scratch. + // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. __ mov(r7, Operand(lhs)); - ConvertToDoubleStub stub2(r1, r0, r7, r6); + ConvertToDoubleStub stub2(r1, r0, r7, r9); __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); __ pop(lr); } @@ -7401,6 +7402,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( // We branch here if at least one of r0 and r1 is not a Smi. __ bind(not_smi); + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); // After this point we have the left hand side in r1 and the right hand side // in r0. @@ -7423,18 +7425,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( default: break; } + // Restore heap number map register. + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); } if (mode_ == NO_OVERWRITE) { // In the case where there is no chance of an overwritable float we may as // well do the allocation immediately while r0 and r1 are untouched. - __ AllocateHeapNumber(r5, r6, r7, &slow); + __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); } // Move r0 to a double in r2-r3. __ tst(r0, Operand(kSmiTagMask)); __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); __ b(ne, &slow); if (mode_ == OVERWRITE_RIGHT) { __ mov(r5, Operand(r0)); // Overwrite this heap number. @@ -7452,7 +7458,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ bind(&r0_is_smi); if (mode_ == OVERWRITE_RIGHT) { // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r6, r7, &slow); + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); } if (use_fp_registers) { @@ -7464,7 +7470,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( } else { // Write Smi from r0 to r3 and r2 in double format. __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub3(r3, r2, r7, r6); + ConvertToDoubleStub stub3(r3, r2, r7, r4); __ push(lr); __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); __ pop(lr); @@ -7477,6 +7483,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ tst(r1, Operand(kSmiTagMask)); __ b(ne, &r1_is_not_smi); GenerateTypeTransition(masm); + // Restore heap number map register. + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); __ jmp(&r1_is_smi); } @@ -7486,7 +7494,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ tst(r1, Operand(kSmiTagMask)); __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. __ bind(&r1_is_not_smi); - __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); + __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); __ b(ne, &slow); if (mode_ == OVERWRITE_LEFT) { __ mov(r5, Operand(r1)); // Overwrite this heap number. 
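Several of the type checks above now compare an object's map word directly against the HeapNumber map cached in heap_number_map, instead of going through CompareObjectType. A rough sketch of the pattern, assuming the map root has already been loaded (the helper is illustrative, not a MacroAssembler method in this patch):

    static void JumpIfNotHeapNumber(MacroAssembler* masm,
                                    Register object,
                                    Register heap_number_map,
                                    Register scratch,
                                    Label* not_heap_number) {
      masm->ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
      masm->cmp(scratch, heap_number_map);  // one load and compare, no instance-type read
      masm->b(ne, not_heap_number);
    }

Keeping the map in a register avoids reloading the root on every check; the AssertRegisterIsRoot calls guard against the register having been clobbered in between.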
@@ -7504,7 +7514,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ bind(&r1_is_smi); if (mode_ == OVERWRITE_LEFT) { // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r6, r7, &slow); + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); } if (use_fp_registers) { @@ -7516,7 +7526,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( } else { // Write Smi from r1 to r1 and r0 in double format. __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub4(r1, r0, r7, r6); + ConvertToDoubleStub stub4(r1, r0, r7, r9); __ push(lr); __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); __ pop(lr); @@ -7577,13 +7587,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( } } - if (lhs.is(r0)) { __ b(&slow); __ bind(&slow_reverse); __ Swap(r0, r1, ip); } + heap_number_map = no_reg; // Don't use this any more from here on. + // We jump to here if something goes wrong (one param is not a number of any // sort or new-space allocation fails). __ bind(&slow); @@ -7749,9 +7760,13 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, Label rhs_is_smi, lhs_is_smi; Label done_checking_rhs, done_checking_lhs; + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ tst(lhs, Operand(kSmiTagMask)); __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. - __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); + __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); __ b(ne, &slow); GetInt32(masm, lhs, r3, r5, r4, &slow); __ jmp(&done_checking_lhs); @@ -7761,7 +7776,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, __ tst(rhs, Operand(kSmiTagMask)); __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. - __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); + __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); __ b(ne, &slow); GetInt32(masm, rhs, r2, r5, r4, &slow); __ jmp(&done_checking_rhs); @@ -7821,8 +7837,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, break; } case NO_OVERWRITE: { - // Get a new heap number in r5. r6 and r7 are scratch. - __ AllocateHeapNumber(r5, r6, r7, &slow); + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); } default: break; } @@ -7841,8 +7857,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, if (mode_ != NO_OVERWRITE) { __ bind(&have_to_allocate); - // Get a new heap number in r5. r6 and r7 are scratch. - __ AllocateHeapNumber(r5, r6, r7, &slow); + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); __ jmp(&got_a_heap_number); } @@ -7968,10 +7984,11 @@ const char* GenericBinaryOpStub::GetName() { } OS::SNPrintF(Vector(name_, len), - "GenericBinaryOpStub_%s_%s%s", + "GenericBinaryOpStub_%s_%s%s_%s", op_name, overwrite_name, - specialized_on_rhs_ ? "_ConstantRhs" : 0); + specialized_on_rhs_ ? 
"_ConstantRhs" : "", + BinaryOpIC::GetName(runtime_operands_type_)); return name_; } @@ -8164,6 +8181,28 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { } __ Ret(); __ bind(&smi_is_unsuitable); + } else if (op_ == Token::MOD && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS) { + // Do generate a bit of smi code for modulus even though the default for + // modulus is not to do it, but as the ARM processor has no coprocessor + // support for modulus checking for smis makes sense. + Label slow; + ASSERT(!ShouldGenerateSmiCode()); + ASSERT(kSmiTag == 0); // Adjust code below. + // Check for two positive smis. + __ orr(smi_test_reg, lhs, Operand(rhs)); + __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); + __ b(ne, &slow); + // Check that rhs is a power of two and not zero. + __ sub(scratch, rhs, Operand(1), SetCC); + __ b(mi, &slow); + __ tst(rhs, scratch); + __ b(ne, &slow); + // Calculate power of two modulus. + __ and_(result, lhs, Operand(scratch)); + __ Ret(); + __ bind(&slow); } HandleBinaryOpSlowCases( masm, @@ -8391,6 +8430,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) { void GenericUnaryOpStub::Generate(MacroAssembler* masm) { Label slow, done; + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + if (op_ == Token::SUB) { // Check whether the value is a smi. Label try_float; @@ -8411,7 +8453,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ b(&done); __ bind(&try_float); - __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); __ b(ne, &slow); // r0 is a heap number. Get a new heap number in r1. if (overwrite_) { @@ -8419,7 +8463,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); } else { - __ AllocateHeapNumber(r1, r2, r3, &slow); + __ AllocateHeapNumber(r1, r2, r3, r6, &slow); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); @@ -8429,7 +8473,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { } } else if (op_ == Token::BIT_NOT) { // Check if the operand is a heap number. - __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); __ b(ne, &slow); // Convert the heap number is r0 to an untagged integer in r1. @@ -8449,7 +8495,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { // Allocate a fresh heap number, but don't overwrite r0 until // we're sure we can do it without going through the slow case // that needs the value in r0. - __ AllocateHeapNumber(r2, r3, r4, &slow); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); __ mov(r0, Operand(r2)); } @@ -9431,17 +9477,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. __ mov(r3, last_match_info_elements); // Moved up to reduce latency. - __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto. 
__ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastSubjectOffset)); - __ RecordWrite(r3, r2, r7); + __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); __ mov(r3, last_match_info_elements); - __ mov(r2, Operand(RegExpImpl::kLastInputOffset)); - __ RecordWrite(r3, r2, r7); + __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = @@ -10543,13 +10587,14 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); } - Label non_ascii, allocated; + Label non_ascii, allocated, ascii_data; ASSERT_EQ(0, kTwoByteStringTag); __ tst(r4, Operand(kStringEncodingMask)); __ tst(r5, Operand(kStringEncodingMask), ne); __ b(eq, &non_ascii); // Allocate an ASCII cons string. + __ bind(&ascii_data); __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); __ bind(&allocated); // Fill the fields of the cons string. @@ -10561,6 +10606,19 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ Ret(); __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ascii characters. + // r4: first instance type. + // r5: second instance type. + __ tst(r4, Operand(kAsciiDataHintMask)); + __ tst(r5, Operand(kAsciiDataHintMask), ne); + __ b(ne, &ascii_data); + __ eor(r4, r4, Operand(r5)); + ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ b(eq, &ascii_data); + // Allocate a two byte cons string. 
__ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); __ jmp(&allocated); diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index eeb89e0712..be4d556196 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -669,7 +669,9 @@ class GenericBinaryOpStub : public CodeStub { } void Generate(MacroAssembler* masm); - void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs); + void HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs); void HandleBinaryOpSlowCases(MacroAssembler* masm, Label* not_smi, Register lhs, diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index e36f595c3d..fa9adbd704 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -284,6 +284,9 @@ class Instr { // with immediate inline int RotateField() const { return Bits(11, 8); } inline int Immed8Field() const { return Bits(7, 0); } + inline int Immed4Field() const { return Bits(19, 16); } + inline int ImmedMovwMovtField() const { + return Immed4Field() << 12 | Offset12Field(); } // Fields used in Load/Store instructions inline int PUField() const { return Bits(24, 23); } diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 1c05bc3a4a..400536993a 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -101,6 +101,7 @@ class Decoder { void PrintSRegister(int reg); void PrintDRegister(int reg); int FormatVFPRegister(Instr* instr, const char* format); + void PrintMovwMovt(Instr* instr); int FormatVFPinstruction(Instr* instr, const char* format); void PrintCondition(Instr* instr); void PrintShiftRm(Instr* instr); @@ -375,6 +376,16 @@ int Decoder::FormatVFPinstruction(Instr* instr, const char* format) { } +// Print the movw or movt instruction. +void Decoder::PrintMovwMovt(Instr* instr) { + int imm = instr->ImmedMovwMovtField(); + int rd = instr->RdField(); + PrintRegister(rd); + out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, + ", #%d", imm); +} + + // FormatOption takes a formatting string and interprets it based on // the current instructions. The format string points to the first // character of the option string (the option escape has already been @@ -430,7 +441,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) { return 1; } case 'm': { - if (format[1] == 'e') { // 'memop: load/store instructions + if (format[1] == 'w') { + // 'mw: movt/movw instructions. + PrintMovwMovt(instr); + return 2; + } + if (format[1] == 'e') { // 'memop: load/store instructions. 
ASSERT(STRING_STARTS_WITH(format, "memop")); if (instr->HasL()) { Print("ldr"); @@ -776,7 +792,7 @@ void Decoder::DecodeType01(Instr* instr) { if (instr->HasS()) { Format(instr, "tst'cond 'rn, 'shift_op"); } else { - Unknown(instr); // not used by V8 + Format(instr, "movw'cond 'mw"); } break; } @@ -794,7 +810,7 @@ void Decoder::DecodeType01(Instr* instr) { if (instr->HasS()) { Format(instr, "cmp'cond 'rn, 'shift_op"); } else { - Unknown(instr); // not used by V8 + Format(instr, "movt'cond 'mw"); } break; } diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc index 48eaf46aaf..36ac2aa3d3 100644 --- a/deps/v8/src/arm/fast-codegen-arm.cc +++ b/deps/v8/src/arm/fast-codegen-arm.cc @@ -102,8 +102,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle name) { } if (needs_write_barrier) { - __ mov(scratch1(), Operand(offset)); - __ RecordWrite(scratch0(), scratch1(), scratch2()); + __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2()); } if (destination().is(accumulator1())) { diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 58d737834b..673287388a 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -110,10 +110,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) { __ mov(r1, Operand(Context::SlotOffset(slot->index()))); __ str(r0, MemOperand(cp, r1)); // Update the write barrier. This clobbers all involved - // registers, so we have use a third register to avoid + // registers, so we have to use two more registers to avoid // clobbering cp. __ mov(r2, Operand(cp)); - __ RecordWrite(r2, r1, r0); + __ RecordWrite(r2, Operand(r1), r3, r0); } } } @@ -666,8 +666,10 @@ void FullCodeGenerator::Move(Slot* dst, __ str(src, location); // Emit the write barrier code if the location is in the heap. if (dst->type() == Slot::CONTEXT) { - __ mov(scratch2, Operand(Context::SlotOffset(dst->index()))); - __ RecordWrite(scratch1, scratch2, src); + __ RecordWrite(scratch1, + Operand(Context::SlotOffset(dst->index())), + scratch2, + src); } } @@ -715,10 +717,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, __ str(result_register(), CodeGenerator::ContextOperand(cp, slot->index())); int offset = Context::SlotOffset(slot->index()); - __ mov(r2, Operand(offset)); // We know that we have written a function, which is not a smi. __ mov(r1, Operand(cp)); - __ RecordWrite(r1, r2, result_register()); + __ RecordWrite(r1, Operand(offset), r2, result_register()); } break; @@ -1252,8 +1253,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { // Update the write barrier for the array store with r0 as the scratch // register. - __ mov(r2, Operand(offset)); - __ RecordWrite(r1, r2, result_register()); + __ RecordWrite(r1, Operand(offset), r2, result_register()); } if (result_saved) { @@ -1493,8 +1493,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // RecordWrite may destroy all its register arguments. 
__ mov(r3, result_register()); int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; - __ mov(r2, Operand(offset)); - __ RecordWrite(r1, r2, r3); + __ RecordWrite(r1, Operand(offset), r2, r3); break; } @@ -2157,7 +2156,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList* args) { Label slow_allocate_heapnumber; Label heapnumber_allocated; - __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber); + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber); __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); @@ -2276,8 +2276,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList* args) { __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. - __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag)); - __ RecordWrite(r1, r2, r3); + __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3); __ bind(&done); Apply(context_, r0); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 5eb98b12ba..c6de4d8ef4 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1339,7 +1339,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, __ bind(&box_int); // Allocate a HeapNumber for the result and perform int-to-double // conversion. Use r0 for result as key is not needed any more. - __ AllocateHeapNumber(r0, r3, r4, &slow); + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r0, r3, r4, r6, &slow); if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); @@ -1370,7 +1371,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // Allocate a HeapNumber for the result and perform int-to-double // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all // registers - also when jumping due to exhausted young space. - __ AllocateHeapNumber(r2, r3, r4, &slow); + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); __ vcvt_f64_u32(d0, s0); __ sub(r1, r2, Operand(kHeapObjectTag)); @@ -1407,7 +1409,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber // clobbers all registers - also when jumping due to exhausted young // space. - __ AllocateHeapNumber(r4, r5, r6, &slow); + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r4, r5, r7, r6, &slow); __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); @@ -1423,7 +1426,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // Allocate a HeapNumber for the result. Don't use r0 and r1 as // AllocateHeapNumber clobbers all registers - also when jumping due to // exhausted young space. - __ AllocateHeapNumber(r2, r3, r4, &slow); + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); __ vcvt_f64_f32(d0, s0); __ sub(r1, r2, Operand(kHeapObjectTag)); __ vstr(d0, r1, HeapNumber::kValueOffset); @@ -1434,7 +1438,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // Allocate a HeapNumber for the result. Don't use r0 and r1 as // AllocateHeapNumber clobbers all registers - also when jumping due to // exhausted young space. 
- __ AllocateHeapNumber(r3, r4, r5, &slow); + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r3, r4, r5, r6, &slow); // VFP is not available, do manual single to double conversion. // r2: floating point value (binary32) @@ -1692,7 +1697,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ Ret(eq); // Update write barrier for the elements array address. __ sub(r4, r5, Operand(elements)); - __ RecordWrite(elements, r4, r5); + __ RecordWrite(elements, Operand(r4), r5, r6); __ Ret(); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 9bbc31f773..29e168c51e 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -270,6 +270,17 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, } +void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) { + ASSERT(lsb < 32); + if (!CpuFeatures::IsSupported(ARMv7)) { + int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); + bic(dst, dst, Operand(mask)); + } else { + bfc(dst, lsb, width, cond); + } +} + + void MacroAssembler::SmiJumpTable(Register index, Vector targets) { // Empty the const pool. CheckConstPool(true, true); @@ -299,31 +310,32 @@ void MacroAssembler::StoreRoot(Register source, void MacroAssembler::RecordWriteHelper(Register object, - Register offset, - Register scratch) { + Operand offset, + Register scratch0, + Register scratch1) { if (FLAG_debug_code) { // Check that the object is not in new space. Label not_in_new_space; - InNewSpace(object, scratch, ne, ¬_in_new_space); + InNewSpace(object, scratch1, ne, ¬_in_new_space); Abort("new-space object passed to RecordWriteHelper"); bind(¬_in_new_space); } - mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once. - - // Calculate region number. - add(offset, object, Operand(offset)); // Add offset into the object. - and_(offset, offset, Operand(ip)); // Offset into page of the object. - mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2)); + // Add offset into the object. + add(scratch0, object, offset); // Calculate page address. - bic(object, object, Operand(ip)); + Bfc(object, 0, kPageSizeBits); + + // Calculate region number. + Ubfx(scratch0, scratch0, Page::kRegionSizeLog2, + kPageSizeBits - Page::kRegionSizeLog2); // Mark region dirty. - ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset)); + ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset)); mov(ip, Operand(1)); - orr(scratch, scratch, Operand(ip, LSL, offset)); - str(scratch, MemOperand(object, Page::kDirtyFlagOffset)); + orr(scratch1, scratch1, Operand(ip, LSL, scratch0)); + str(scratch1, MemOperand(object, Page::kDirtyFlagOffset)); } @@ -341,21 +353,23 @@ void MacroAssembler::InNewSpace(Register object, // Will clobber 4 registers: object, offset, scratch, ip. The // register 'object' contains a heap object pointer. The heap object // tag is shifted away. -void MacroAssembler::RecordWrite(Register object, Register offset, - Register scratch) { +void MacroAssembler::RecordWrite(Register object, + Operand offset, + Register scratch0, + Register scratch1) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are cp. - ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp)); + ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); Label done; // First, test that the object is not in the new space. 
We cannot set // region marks for new space pages. - InNewSpace(object, scratch, eq, &done); + InNewSpace(object, scratch0, eq, &done); // Record the actual write. - RecordWriteHelper(object, offset, scratch); + RecordWriteHelper(object, offset, scratch0, scratch1); bind(&done); @@ -363,8 +377,8 @@ void MacroAssembler::RecordWrite(Register object, Register offset, // turned on to provoke errors. if (FLAG_debug_code) { mov(object, Operand(BitCast(kZapValue))); - mov(offset, Operand(BitCast(kZapValue))); - mov(scratch, Operand(BitCast(kZapValue))); + mov(scratch0, Operand(BitCast(kZapValue))); + mov(scratch1, Operand(BitCast(kZapValue))); } } @@ -1514,6 +1528,16 @@ void MacroAssembler::Assert(Condition cc, const char* msg) { } +void MacroAssembler::AssertRegisterIsRoot(Register reg, + Heap::RootListIndex index) { + if (FLAG_debug_code) { + LoadRoot(ip, index); + cmp(reg, ip); + Check(eq, "Register did not match expected root"); + } +} + + void MacroAssembler::Check(Condition cc, const char* msg) { Label L; b(cc, &L); @@ -1632,6 +1656,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1, Register scratch2, + Register heap_number_map, Label* gc_required) { // Allocate an object in the heap for the heap number and tag it as a heap // object. @@ -1642,9 +1667,9 @@ void MacroAssembler::AllocateHeapNumber(Register result, gc_required, TAG_OBJECT); - // Get heap number map and store it in the allocated object. - LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); - str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); + // Store heap number map in the allocated object. + AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index f1eb0912af..e02a6c8a3e 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -100,6 +100,7 @@ class MacroAssembler: public Assembler { Condition cond = al); void Sbfx(Register dst, Register src, int lsb, int width, Condition cond = al); + void Bfc(Register dst, int lsb, int width, Condition cond = al); void Call(Label* target); void Move(Register dst, Handle value); @@ -127,13 +128,19 @@ class MacroAssembler: public Assembler { // For the page containing |object| mark the region covering [object+offset] // dirty. The object address must be in the first 8K of an allocated page. - void RecordWriteHelper(Register object, Register offset, Register scratch); + void RecordWriteHelper(Register object, + Operand offset, + Register scratch0, + Register scratch1); // For the page containing |object| mark the region covering [object+offset] // dirty. The object address must be in the first 8K of an allocated page. - // The 'scratch' register is used in the implementation and all 3 registers + // The 'scratch' registers are used in the implementation and all 3 registers // are clobbered by the operation, as well as the ip register. - void RecordWrite(Register object, Register offset, Register scratch); + void RecordWrite(Register object, + Operand offset, + Register scratch0, + Register scratch1); // Push two registers. Pushes leftmost register first (to highest address). 
void Push(Register src1, Register src2, Condition cond = al) { @@ -372,6 +379,7 @@ class MacroAssembler: public Assembler { void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, + Register heap_number_map, Label* gc_required); // --------------------------------------------------------------------------- @@ -551,6 +559,7 @@ class MacroAssembler: public Assembler { // Calls Abort(msg) if the condition cc is not satisfied. // Use --debug_code to enable. void Assert(Condition cc, const char* msg); + void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index); // Like Assert(), but always enabled. void Check(Condition cc, const char* msg); diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 3bdca38eba..77776c2b6d 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1859,7 +1859,9 @@ void Simulator::DecodeType01(Instr* instr) { SetNZFlags(alu_out); SetCFlag(shifter_carry_out); } else { - UNIMPLEMENTED(); + // Format(instr, "movw'cond 'rd, 'imm"). + alu_out = instr->ImmedMovwMovtField(); + set_register(rd, alu_out); } break; } @@ -1888,7 +1890,10 @@ void Simulator::DecodeType01(Instr* instr) { SetCFlag(!BorrowFrom(rn_val, shifter_operand)); SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false)); } else { - UNIMPLEMENTED(); + // Format(instr, "movt'cond 'rd, 'imm"). + alu_out = (get_register(rd) & 0xffff) | + (instr->ImmedMovwMovtField() << 16); + set_register(rd, alu_out); } break; } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index dc0f4a7c4c..3e5ba1126f 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -336,9 +336,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ b(eq, &exit); // Update the write barrier for the array address. - // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, Operand(offset)); - __ RecordWrite(receiver_reg, name_reg, scratch); + // Pass the now unused name_reg as a scratch register. + __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -352,8 +351,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ mov(name_reg, Operand(offset)); - __ RecordWrite(scratch, name_reg, receiver_reg); + __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg); } // Return the value (register r0). 
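The RecordWrite interface change above is mechanical but touches every caller: the field offset now travels as an Operand and two scratch registers are supplied, so call sites no longer spend an instruction loading the offset into a register themselves. A minimal sketch of the new call pattern, modelled on the JSValue::kValueOffset stores above (the wrapper function is illustrative, not part of the patch):

    static void StoreFieldWithWriteBarrier(MacroAssembler* masm,
                                           Register object, int field_offset,
                                           Register value,
                                           Register scratch0, Register scratch1) {
      masm->str(value, FieldMemOperand(object, field_offset));
      // The offset handed to RecordWrite is relative to the tagged pointer, so the
      // kHeapObjectTag that FieldMemOperand subtracts has to be subtracted here too.
      masm->RecordWrite(object, Operand(field_offset - kHeapObjectTag),
                        scratch0, scratch1);
    }

The object register, both scratch registers, and ip are still clobbered by the barrier, which is why the debug-code path now zaps scratch0 and scratch1 instead of the old offset and scratch registers.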
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 087413118f..bbd69ecaba 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -1462,6 +1462,7 @@ bool Genesis::InstallExtensions(Handle global_context, } if (FLAG_expose_gc) InstallExtension("v8/gc"); + if (FLAG_expose_externalize_string) InstallExtension("v8/externalize"); if (extensions == NULL) return true; // Install required extensions diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h index c2e40ba9a4..13374d86dd 100644 --- a/deps/v8/src/checks.h +++ b/deps/v8/src/checks.h @@ -155,9 +155,9 @@ static inline void CheckNonEqualsHelper(const char* file, static inline void CheckEqualsHelper(const char* file, int line, const char* expected_source, - void* expected, + const void* expected, const char* value_source, - void* value) { + const void* value) { if (expected != value) { V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p", @@ -170,9 +170,9 @@ static inline void CheckEqualsHelper(const char* file, static inline void CheckNonEqualsHelper(const char* file, int line, const char* expected_source, - void* expected, + const void* expected, const char* value_source, - void* value) { + const void* value) { if (expected == value) { V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p", expected_source, value_source, value); diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index 77fa1ddd65..d5e91cbdd2 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -295,7 +295,6 @@ ScriptBreakPoint.prototype.update_positions = function(line, column) { } - ScriptBreakPoint.prototype.hit_count = function() { return this.hit_count_; }; @@ -389,7 +388,10 @@ ScriptBreakPoint.prototype.set = function (script) { // Create a break point object and set the break point. break_point = MakeBreakPoint(pos, this.line(), this.column(), this); break_point.setIgnoreCount(this.ignoreCount()); - %SetScriptBreakPoint(script, pos, break_point); + pos = %SetScriptBreakPoint(script, pos, break_point); + if (!IS_UNDEFINED(pos)) { + this.actual_location = script.locationFromPosition(pos); + } return break_point; }; diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 98e366c7bc..d513b3121c 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -1028,8 +1028,8 @@ Handle Debug::GetDebugInfo(Handle shared) { void Debug::SetBreakPoint(Handle shared, - int source_position, - Handle break_point_object) { + Handle break_point_object, + int* source_position) { HandleScope scope; if (!EnsureDebugInfo(shared)) { @@ -1043,9 +1043,11 @@ void Debug::SetBreakPoint(Handle shared, // Find the break point and change it. BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS); - it.FindBreakLocationFromPosition(source_position); + it.FindBreakLocationFromPosition(*source_position); it.SetBreakPoint(break_point_object); + *source_position = it.position(); + // At least one active break point now. 
ASSERT(debug_info->GetBreakPointCount() > 0); } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 1c674711d9..6019294f22 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -230,8 +230,8 @@ class Debug { static Object* Break(Arguments args); static void SetBreakPoint(Handle shared, - int source_position, - Handle break_point_object); + Handle break_point_object, + int* source_position); static void ClearBreakPoint(Handle break_point_object); static void ClearAllBreakPoints(); static void FloodWithOneShot(Handle shared); diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index 006d358eaa..a6b15ccb45 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -679,7 +679,7 @@ Object* Execution::HandleStackGuardInterrupt() { // --- G C E x t e n s i o n --- -const char* GCExtension::kSource = "native function gc();"; +const char* const GCExtension::kSource = "native function gc();"; v8::Handle GCExtension::GetNativeFunction( @@ -695,7 +695,115 @@ v8::Handle GCExtension::GC(const v8::Arguments& args) { } -static GCExtension kGCExtension; -v8::DeclareExtension kGCExtensionDeclaration(&kGCExtension); +static GCExtension gc_extension; +static v8::DeclareExtension gc_extension_declaration(&gc_extension); + + +// --- E x t e r n a l i z e S t r i n g E x t e n s i o n --- + + +template +class SimpleStringResource : public Base { + public: + // Takes ownership of |data|. + SimpleStringResource(Char* data, size_t length) + : data_(data), + length_(length) {} + + virtual ~SimpleStringResource() { delete data_; } + + virtual const Char* data() const { return data_; } + + virtual size_t length() const { return length_; } + + private: + Char* const data_; + const size_t length_; +}; + + +typedef SimpleStringResource + SimpleAsciiStringResource; +typedef SimpleStringResource + SimpleTwoByteStringResource; + + +const char* const ExternalizeStringExtension::kSource = + "native function externalizeString();" + "native function isAsciiString();"; + + +v8::Handle ExternalizeStringExtension::GetNativeFunction( + v8::Handle str) { + if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) { + return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize); + } else { + ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0); + return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii); + } +} + + +v8::Handle ExternalizeStringExtension::Externalize( + const v8::Arguments& args) { + if (args.Length() < 1 || !args[0]->IsString()) { + return v8::ThrowException(v8::String::New( + "First parameter to externalizeString() must be a string.")); + } + bool force_two_byte = false; + if (args.Length() >= 2) { + if (args[1]->IsBoolean()) { + force_two_byte = args[1]->BooleanValue(); + } else { + return v8::ThrowException(v8::String::New( + "Second parameter to externalizeString() must be a boolean.")); + } + } + bool result = false; + Handle string = Utils::OpenHandle(*args[0].As()); + if (string->IsExternalString()) { + return v8::ThrowException(v8::String::New( + "externalizeString() can't externalize twice.")); + } + if (string->IsAsciiRepresentation() && !force_two_byte) { + char* data = new char[string->length()]; + String::WriteToFlat(*string, data, 0, string->length()); + SimpleAsciiStringResource* resource = new SimpleAsciiStringResource( + data, string->length()); + result = string->MakeExternal(resource); + if (result && !string->IsSymbol()) { + i::ExternalStringTable::AddString(*string); + } + } else { + uc16* data = new 
uc16[string->length()]; + String::WriteToFlat(*string, data, 0, string->length()); + SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource( + data, string->length()); + result = string->MakeExternal(resource); + if (result && !string->IsSymbol()) { + i::ExternalStringTable::AddString(*string); + } + } + if (!result) { + return v8::ThrowException(v8::String::New("externalizeString() failed.")); + } + return v8::Undefined(); +} + + +v8::Handle ExternalizeStringExtension::IsAscii( + const v8::Arguments& args) { + if (args.Length() != 1 || !args[0]->IsString()) { + return v8::ThrowException(v8::String::New( + "isAsciiString() requires a single string argument.")); + } + return Utils::OpenHandle(*args[0].As())->IsAsciiRepresentation() ? + v8::True() : v8::False(); +} + + +static ExternalizeStringExtension externalize_extension; +static v8::DeclareExtension externalize_extension_declaration( + &externalize_extension); } } // namespace v8::internal diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index e683e12259..282350332a 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -316,10 +316,21 @@ class GCExtension : public v8::Extension { v8::Handle name); static v8::Handle GC(const v8::Arguments& args); private: - static const char* kSource; + static const char* const kSource; }; +class ExternalizeStringExtension : public v8::Extension { + public: + ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {} + virtual v8::Handle GetNativeFunction( + v8::Handle name); + static v8::Handle Externalize(const v8::Arguments& args); + static v8::Handle IsAscii(const v8::Arguments& args); + private: + static const char* const kSource; +}; + } } // namespace v8::internal #endif // V8_EXECUTION_H_ diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 91477f9abd..02e8f16e48 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -123,6 +123,8 @@ DEFINE_bool(enable_armv7, true, DEFINE_string(expose_natives_as, NULL, "expose natives in global object") DEFINE_string(expose_debug_as, NULL, "expose debug in global object") DEFINE_bool(expose_gc, false, "expose gc extension") +DEFINE_bool(expose_externalize_string, false, + "expose externalize string extension") DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture") DEFINE_bool(disable_native_files, false, "disable builtin natives files") @@ -191,7 +193,7 @@ DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") -DEFINE_bool(flush_code, false, +DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") // v8.cc diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index ea17abc33d..73b9748f18 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -326,20 +326,27 @@ HeapProfiler::~HeapProfiler() { delete snapshots_; } +#endif // ENABLE_LOGGING_AND_PROFILING void HeapProfiler::Setup() { +#ifdef ENABLE_LOGGING_AND_PROFILING if (singleton_ == NULL) { singleton_ = new HeapProfiler(); } +#endif } void HeapProfiler::TearDown() { +#ifdef ENABLE_LOGGING_AND_PROFILING delete singleton_; singleton_ = NULL; +#endif } +#ifdef ENABLE_LOGGING_AND_PROFILING + HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) { ASSERT(singleton_ != NULL); return singleton_->TakeSnapshotImpl(name); @@ -353,6 +360,7 @@ HeapSnapshot* 
HeapProfiler::TakeSnapshot(String* name) { HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) { + Heap::CollectAllGarbage(false); HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++); HeapSnapshotGenerator generator(result); generator.GenerateSnapshot(); diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h index 28e240daf6..b593b992bb 100644 --- a/deps/v8/src/heap-profiler.h +++ b/deps/v8/src/heap-profiler.h @@ -38,12 +38,16 @@ namespace internal { class HeapSnapshot; class HeapSnapshotsCollection; +#endif + // The HeapProfiler writes data to the log files, which can be postprocessed // to generate .hp files for use by the GHC/Valgrind tool hp2ps. class HeapProfiler { public: static void Setup(); static void TearDown(); + +#ifdef ENABLE_LOGGING_AND_PROFILING static HeapSnapshot* TakeSnapshot(const char* name); static HeapSnapshot* TakeSnapshot(String* name); static int GetSnapshotsCount(); @@ -68,9 +72,12 @@ class HeapProfiler { unsigned next_snapshot_uid_; static HeapProfiler* singleton_; +#endif // ENABLE_LOGGING_AND_PROFILING }; +#ifdef ENABLE_LOGGING_AND_PROFILING + // JSObjectsCluster describes a group of JS objects that are // considered equivalent in terms of a particular profile. class JSObjectsCluster BASE_EMBEDDED { diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 3fc7d02bc8..f1ec56ce5a 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -1929,6 +1929,18 @@ Object* Heap::AllocateConsString(String* first, String* second) { return Failure::OutOfMemoryException(); } + bool is_ascii_data_in_two_byte_string = false; + if (!is_ascii) { + // At least one of the strings uses two-byte representation so we + // can't use the fast case code for short ascii strings below, but + // we can try to save memory if all chars actually fit in ascii. + is_ascii_data_in_two_byte_string = + first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars(); + if (is_ascii_data_in_two_byte_string) { + Counters::string_add_runtime_ext_to_ascii.Increment(); + } + } + // If the resulting string is small make a flat string. if (length < String::kMinNonFlatLength) { ASSERT(first->IsFlat()); @@ -1955,22 +1967,13 @@ Object* Heap::AllocateConsString(String* first, String* second) { for (int i = 0; i < second_length; i++) *dest++ = src[i]; return result; } else { - // For short external two-byte strings we check whether they can - // be represented using ascii. - if (!first_is_ascii) { - first_is_ascii = first->IsExternalTwoByteStringWithAsciiChars(); - } - if (first_is_ascii && !second_is_ascii) { - second_is_ascii = second->IsExternalTwoByteStringWithAsciiChars(); - } - if (first_is_ascii && second_is_ascii) { + if (is_ascii_data_in_two_byte_string) { Object* result = AllocateRawAsciiString(length); if (result->IsFailure()) return result; // Copy the characters into the new object. char* dest = SeqAsciiString::cast(result)->GetChars(); String::WriteToFlat(first, dest, 0, first_length); String::WriteToFlat(second, dest + first_length, 0, second_length); - Counters::string_add_runtime_ext_to_ascii.Increment(); return result; } @@ -1984,7 +1987,8 @@ Object* Heap::AllocateConsString(String* first, String* second) { } } - Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map(); + Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ? 
+ cons_ascii_string_map() : cons_string_map(); Object* result = Allocate(map, NEW_SPACE); if (result->IsFailure()) return result; @@ -2070,7 +2074,23 @@ Object* Heap::AllocateExternalStringFromTwoByte( return Failure::OutOfMemoryException(); } - Map* map = Heap::external_string_map(); + // For small strings we check whether the resource contains only + // ascii characters. If yes, we use a different string map. + bool is_ascii = true; + if (length >= static_cast(String::kMinNonFlatLength)) { + is_ascii = false; + } else { + const uc16* data = resource->data(); + for (size_t i = 0; i < length; i++) { + if (data[i] > String::kMaxAsciiCharCode) { + is_ascii = false; + break; + } + } + } + + Map* map = is_ascii ? + Heap::external_string_with_ascii_data_map() : Heap::external_string_map(); Object* result = Allocate(map, NEW_SPACE); if (result->IsFailure()) return result; @@ -2244,6 +2264,12 @@ static void FlushCodeForFunction(SharedFunctionInfo* function_info) { ThreadManager::IterateArchivedThreads(&threadvisitor); if (threadvisitor.FoundCode()) return; + // Check that there are heap allocated locals in the scopeinfo. If + // there are, we are potentially using eval and need the scopeinfo + // for variable resolution. + if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->code())) + return; + HandleScope scope; // Compute the lazy compilable version of the code. function_info->set_code(*ComputeLazyCompile(function_info->length())); @@ -2853,6 +2879,9 @@ Map* Heap::SymbolMapForString(String* string) { if (map == cons_ascii_string_map()) return cons_ascii_symbol_map(); if (map == external_string_map()) return external_symbol_map(); if (map == external_ascii_string_map()) return external_ascii_symbol_map(); + if (map == external_string_with_ascii_data_map()) { + return external_symbol_with_ascii_data_map(); + } // No match found. return NULL; diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 8386e73519..a8f8c34395 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -69,10 +69,12 @@ class ZoneScopeInfo; V(Map, cons_symbol_map, ConsSymbolMap) \ V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \ V(Map, external_symbol_map, ExternalSymbolMap) \ + V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \ V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \ V(Map, cons_string_map, ConsStringMap) \ V(Map, cons_ascii_string_map, ConsAsciiStringMap) \ V(Map, external_string_map, ExternalStringMap) \ + V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \ V(Map, undetectable_string_map, UndetectableStringMap) \ V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \ diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index a851b4274a..eb2a04dbc4 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -43,10 +43,6 @@ namespace v8 { namespace internal { -Condition NegateCondition(Condition cc) { - return static_cast(cc ^ 1); -} - // The modes possibly affected by apply must be in kApplyMask. void RelocInfo::apply(intptr_t delta) { diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index d4dff33044..ce2099da2d 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -378,6 +378,11 @@ void Assembler::Align(int m) { } +void Assembler::CodeTargetAlign() { + Align(16); // Preferred alignment of jump targets on ia32.
+} + + void Assembler::cpuid() { ASSERT(CpuFeatures::IsEnabled(CPUID)); EnsureSpace ensure_space(this); @@ -2154,17 +2159,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { } -void Assembler::comisd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); - EnsureSpace ensure_space(this); - last_pc_ = pc_; - EMIT(0x66); - EMIT(0x0F); - EMIT(0x2F); - emit_sse_operand(dst, src); -} - - void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 7dcbab5cdb..c76c55cf53 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -146,7 +146,10 @@ enum Condition { // Negation of the default no_condition (-1) results in a non-default // no_condition value (-2). As long as tests for no_condition check // for condition < 0, this will work as expected. -inline Condition NegateCondition(Condition cc); +inline Condition NegateCondition(Condition cc) { + return static_cast(cc ^ 1); +} + // Corresponds to transposing the operands of a comparison. inline Condition ReverseCondition(Condition cc) { @@ -172,12 +175,14 @@ inline Condition ReverseCondition(Condition cc) { }; } + enum Hint { no_hint = 0, not_taken = 0x2e, taken = 0x3e }; + // The result of negating a hint is as if the corresponding condition // were negated by NegateCondition. That is, no_hint is mapped to // itself and not_taken and taken are mapped to each other. @@ -502,6 +507,8 @@ class Assembler : public Malloced { // possible to align the pc offset to a multiple // of m. m must be a power of 2. void Align(int m); + // Aligns code to something that's optimal for a jump target for the platform. + void CodeTargetAlign(); // Stack void pushad(); @@ -779,7 +786,6 @@ class Assembler : public Malloced { void xorpd(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src); - void comisd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, XMMRegister src); void movmskpd(Register dst, XMMRegister src); diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 4471d8c59a..6b0747238a 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -604,6 +604,10 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) { RegisterFile empty_regs; SetFrame(clone, &empty_regs); __ bind(&allocation_failed); + if (!CpuFeatures::IsSupported(SSE2)) { + // Pop the value from the floating point stack. + __ fstp(0); + } unsafe_bailout_->Jump(); done.Bind(value); @@ -2991,7 +2995,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, ¬_numbers); LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side, ¬_numbers); - __ comisd(xmm0, xmm1); + __ ucomisd(xmm0, xmm1); } else { Label check_right, compare; @@ -7306,7 +7310,7 @@ void CodeGenerator::GenerateMathPow(ZoneList* args) { // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3. __ addsd(xmm2, xmm3); // xmm2 now has 0.5. - __ comisd(xmm2, xmm1); + __ ucomisd(xmm2, xmm1); call_runtime.Branch(not_equal); // Calculates square root. 
__ movsd(xmm1, xmm0); @@ -11592,7 +11596,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, CpuFeatures::Scope fscope(SSE2); __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); - __ comisd(xmm0, xmm1); + __ ucomisd(xmm0, xmm1); } else { __ fld_d(FieldOperand(object, HeapNumber::kValueOffset)); __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); @@ -11817,7 +11821,7 @@ void CompareStub::Generate(MacroAssembler* masm) { CpuFeatures::Scope use_cmov(CMOV); FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); - __ comisd(xmm0, xmm1); + __ ucomisd(xmm0, xmm1); // Don't base result on EFLAGS when a NaN is involved. __ j(parity_even, &unordered, not_taken); @@ -12848,7 +12852,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // If result is not supposed to be flat allocate a cons string object. If both // strings are ascii the result is an ascii cons string. - Label non_ascii, allocated; + Label non_ascii, allocated, ascii_data; __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); @@ -12857,6 +12861,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { ASSERT(kStringEncodingMask == kAsciiStringTag); __ test(ecx, Immediate(kAsciiStringTag)); __ j(zero, &non_ascii); + __ bind(&ascii_data); // Allocate an acsii cons string. __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime); __ bind(&allocated); @@ -12871,6 +12876,19 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ IncrementCounter(&Counters::string_add_native, 1); __ ret(2 * kPointerSize); __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ascii characters. + // ecx: first instance type AND second instance type. + // edi: second instance type. + __ test(ecx, Immediate(kAsciiDataHintMask)); + __ j(not_zero, &ascii_data); + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); + __ xor_(edi, Operand(ecx)); + ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); + __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); + __ j(equal, &ascii_data); // Allocate a two byte cons string. __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime); __ jmp(&allocated); diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 48d9e67454..bab0435f38 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -816,8 +816,13 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, __ push(other); __ push(receiver); // receiver __ push(reg); // holder - __ mov(other, Immediate(callback_handle)); - __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data + // Push data from AccessorInfo. + if (Heap::InNewSpace(callback_handle->data())) { + __ mov(other, Immediate(callback_handle)); + __ push(FieldOperand(other, AccessorInfo::kDataOffset)); + } else { + __ push(Immediate(Handle(callback_handle->data()))); + } __ push(name_reg); // name // Save a pointer to where we pushed the arguments pointer. // This will be passed as the const AccessorInfo& to the C++ callback. 
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index fa727ca216..475f1611c0 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -734,6 +734,28 @@ Object* LoadIC::Load(State state, Handle object, Handle name) { if (PatchInlinedLoad(address(), map, offset)) { set_target(megamorphic_stub()); return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex()); +#ifdef DEBUG + if (FLAG_trace_ic) { + PrintF("[LoadIC : inline patch %s]\n", *name->ToCString()); + } + } else { + if (FLAG_trace_ic) { + PrintF("[LoadIC : no inline patch %s (patching failed)]\n", + *name->ToCString()); + } + } + } else { + if (FLAG_trace_ic) { + PrintF("[LoadIC : no inline patch %s (not inobject)]\n", + *name->ToCString()); + } + } + } else { + if (FLAG_use_ic && state == PREMONOMORPHIC) { + if (FLAG_trace_ic) { + PrintF("[LoadIC : no inline patch %s (not inlinable)]\n", + *name->ToCString()); +#endif } } } diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index 9a1f1f114c..3e9c5eab9a 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -1747,9 +1747,11 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler, if ((mask & char_mask) == char_mask) need_mask = false; mask &= char_mask; } else { - // For 2-character preloads in ASCII mode we also use a 16 bit load with - // zero extend. + // For 2-character preloads in ASCII mode or 1-character preloads in + // TWO_BYTE mode we also use a 16 bit load with zero extend. if (details->characters() == 2 && compiler->ascii()) { + if ((mask & 0x7f7f) == 0x7f7f) need_mask = false; + } else if (details->characters() == 1 && !compiler->ascii()) { if ((mask & 0xffff) == 0xffff) need_mask = false; } else { if (mask == 0xffffffff) need_mask = false; diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index f9b20a4b49..b60e54d313 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -552,12 +552,14 @@ static const char* TypeToString(InstanceType type) { case CONS_SYMBOL_TYPE: return "CONS_SYMBOL"; case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL"; case EXTERNAL_ASCII_SYMBOL_TYPE: + case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE: case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL"; case ASCII_STRING_TYPE: return "ASCII_STRING"; case STRING_TYPE: return "TWO_BYTE_STRING"; case CONS_STRING_TYPE: case CONS_ASCII_STRING_TYPE: return "CONS_STRING"; case EXTERNAL_ASCII_STRING_TYPE: + case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING"; case FIXED_ARRAY_TYPE: return "FIXED_ARRAY"; case BYTE_ARRAY_TYPE: return "BYTE_ARRAY"; diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 4112f933d7..d6571bff09 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -237,31 +237,20 @@ bool StringShape::IsSymbol() { bool String::IsAsciiRepresentation() { uint32_t type = map()->instance_type(); - if ((type & kStringRepresentationMask) == kConsStringTag && - ConsString::cast(this)->second()->length() == 0) { - return ConsString::cast(this)->first()->IsAsciiRepresentation(); - } return (type & kStringEncodingMask) == kAsciiStringTag; } bool String::IsTwoByteRepresentation() { uint32_t type = map()->instance_type(); - if ((type & kStringRepresentationMask) == kConsStringTag && - ConsString::cast(this)->second()->length() == 0) { - return ConsString::cast(this)->first()->IsTwoByteRepresentation(); - } return (type & kStringEncodingMask) == kTwoByteStringTag; } -bool String::IsExternalTwoByteStringWithAsciiChars() { - if (!IsExternalTwoByteString()) 
return false; - const uc16* data = ExternalTwoByteString::cast(this)->resource()->data(); - for (int i = 0, len = length(); i < len; i++) { - if (data[i] > kMaxAsciiCharCode) return false; - } - return true; +bool String::HasOnlyAsciiChars() { + uint32_t type = map()->instance_type(); + return (type & kStringEncodingMask) == kAsciiStringTag || + (type & kAsciiDataHintMask) == kAsciiDataHintTag; } diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 67f4e4892e..63b77b7994 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -678,6 +678,9 @@ Object* String::SlowTryFlatten(PretenureFlag pretenure) { bool String::MakeExternal(v8::String::ExternalStringResource* resource) { + // Externalizing twice leaks the external resource, so it's + // prohibited by the API. + ASSERT(!this->IsExternalString()); #ifdef DEBUG if (FLAG_enable_slow_asserts) { // Assert that the resource and the string are equivalent. @@ -697,13 +700,16 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { return false; } ASSERT(size >= ExternalString::kSize); + bool is_ascii = this->IsAsciiRepresentation(); bool is_symbol = this->IsSymbol(); int length = this->length(); int hash_field = this->hash_field(); // Morph the object to an external string by adjusting the map and // reinitializing the fields. - this->set_map(Heap::external_string_map()); + this->set_map(is_ascii ? + Heap::external_string_with_ascii_data_map() : + Heap::external_string_map()); ExternalTwoByteString* self = ExternalTwoByteString::cast(this); self->set_length(length); self->set_hash_field(hash_field); @@ -713,7 +719,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { if (is_symbol) { self->Hash(); // Force regeneration of the hash value. // Now morph this external string into an external symbol. - this->set_map(Heap::external_symbol_map()); + this->set_map(is_ascii ? + Heap::external_symbol_with_ascii_data_map() : + Heap::external_symbol_map()); } // Fill the remainder of the string with dead wood. @@ -8147,7 +8155,7 @@ Object* Dictionary::DeleteProperty(int entry, template Object* Dictionary::AtPut(Key key, Object* value) { - int entry = FindEntry(key); + int entry = this->FindEntry(key); // If the entry is present set the value; if (entry != Dictionary::kNotFound) { @@ -8172,7 +8180,7 @@ Object* Dictionary::Add(Key key, Object* value, PropertyDetails details) { // Validate key is absent. - SLOW_ASSERT((FindEntry(key) == Dictionary::kNotFound)); + SLOW_ASSERT((this->FindEntry(key) == Dictionary::kNotFound)); // Check whether the dictionary should be extended.
Object* obj = EnsureCapacity(1, key); if (obj->IsFailure()) return obj; @@ -8231,7 +8239,7 @@ Object* NumberDictionary::AddNumberEntry(uint32_t key, Object* value, PropertyDetails details) { UpdateMaxNumberKey(key); - SLOW_ASSERT(FindEntry(key) == kNotFound); + SLOW_ASSERT(this->FindEntry(key) == kNotFound); return Add(key, value, details); } diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index bfaab475c7..0c146656aa 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -320,6 +320,10 @@ enum PropertyNormalizationMode { ExternalTwoByteString::kSize, \ external_symbol, \ ExternalSymbol) \ + V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \ + ExternalTwoByteString::kSize, \ + external_symbol_with_ascii_data, \ + ExternalSymbolWithAsciiData) \ V(EXTERNAL_ASCII_SYMBOL_TYPE, \ ExternalAsciiString::kSize, \ external_ascii_symbol, \ @@ -344,6 +348,10 @@ enum PropertyNormalizationMode { ExternalTwoByteString::kSize, \ external_string, \ ExternalString) \ + V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \ + ExternalTwoByteString::kSize, \ + external_string_with_ascii_data, \ + ExternalStringWithAsciiData) \ V(EXTERNAL_ASCII_STRING_TYPE, \ ExternalAsciiString::kSize, \ external_ascii_string, \ @@ -412,6 +420,11 @@ enum StringRepresentationTag { }; const uint32_t kIsConsStringMask = 0x1; +// If bit 7 is clear, then bit 3 indicates whether this two-byte +// string actually contains ascii data. +const uint32_t kAsciiDataHintMask = 0x08; +const uint32_t kAsciiDataHintTag = 0x08; + // A ConsString with an empty string as the right side is a candidate // for being shortcut by the garbage collector unless it is a @@ -427,18 +440,22 @@ const uint32_t kShortcutTypeTag = kConsStringTag; enum InstanceType { // String types. - SYMBOL_TYPE = kSymbolTag | kSeqStringTag, + SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag, ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag, - CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag, + CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag, CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag, - EXTERNAL_SYMBOL_TYPE = kSymbolTag | kExternalStringTag, + EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag, + EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE = + kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag, EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kExternalStringTag, - STRING_TYPE = kSeqStringTag, + STRING_TYPE = kTwoByteStringTag | kSeqStringTag, ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag, - CONS_STRING_TYPE = kConsStringTag, + CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag, CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag, - EXTERNAL_STRING_TYPE = kExternalStringTag, + EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag, + EXTERNAL_STRING_WITH_ASCII_DATA_TYPE = + kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag, EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag, PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE, @@ -474,10 +491,12 @@ enum InstanceType { TYPE_SWITCH_INFO_TYPE, SCRIPT_TYPE, CODE_CACHE_TYPE, -#ifdef ENABLE_DEBUGGER_SUPPORT + // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT + // is defined. However, as include/v8.h contains some of the instance type + // constants, always having them avoids them getting different numbers + // depending on whether ENABLE_DEBUGGER_SUPPORT is defined or not.
DEBUG_INFO_TYPE, BREAK_POINT_INFO_TYPE, -#endif FIXED_ARRAY_TYPE, SHARED_FUNCTION_INFO_TYPE, @@ -511,6 +530,11 @@ enum InstanceType { }; +STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType); +STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType); +STATIC_CHECK(PROXY_TYPE == Internals::kProxyType); + + enum CompareResult { LESS = -1, EQUAL = 0, @@ -4069,12 +4093,14 @@ class String: public HeapObject { inline bool IsAsciiRepresentation(); inline bool IsTwoByteRepresentation(); - // Check whether this string is an external two-byte string that in - // fact contains only ascii characters. + // Returns whether this string has ascii chars, i.e. all of them can + // be ascii encoded. This might be the case even if the string is + // two-byte. Such strings may appear when the embedder prefers + // two-byte external representations even for ascii data. // - // Such strings may appear when the embedder prefers two-byte - // representations even for ascii data. - inline bool IsExternalTwoByteStringWithAsciiChars(); + // NOTE: this should be considered only a hint. False negatives are + // possible. + inline bool HasOnlyAsciiChars(); // Get and set individual two byte chars in the string. inline void Set(int index, uint16_t value); diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index 26457e00d2..57ff6610e8 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -818,7 +818,7 @@ HeapGraphEdge::HeapGraphEdge(Type type, HeapEntry* from, HeapEntry* to) : type_(type), name_(name), from_(from), to_(to) { - ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY); + ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL); } @@ -845,26 +845,30 @@ HeapEntry::~HeapEntry() { } -void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) { - HeapGraphEdge* edge = - new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry); +void HeapEntry::AddEdge(HeapGraphEdge* edge) { children_.Add(edge); - entry->retainers_.Add(edge); + edge->to()->retainers_.Add(edge); +} + + +void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) { + AddEdge( + new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry)); } void HeapEntry::SetElementReference(int index, HeapEntry* entry) { - HeapGraphEdge* edge = new HeapGraphEdge(index, this, entry); - children_.Add(edge); - entry->retainers_.Add(edge); + AddEdge(new HeapGraphEdge(index, this, entry)); +} + + +void HeapEntry::SetInternalReference(const char* name, HeapEntry* entry) { + AddEdge(new HeapGraphEdge(HeapGraphEdge::INTERNAL, name, this, entry)); } void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) { - HeapGraphEdge* edge = - new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry); - children_.Add(edge); - entry->retainers_.Add(edge); + AddEdge(new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry)); } @@ -1074,7 +1078,7 @@ void HeapEntry::CutEdges() { void HeapEntry::Print(int max_depth, int indent) { - OS::Print("%6d %6d %6d", self_size_, TotalSize(), NonSharedTotalSize()); + OS::Print("%6d %6d %6d ", self_size_, TotalSize(), NonSharedTotalSize()); if (type_ != STRING) { OS::Print("%s %.40s\n", TypeAsString(), name_); } else { @@ -1100,6 +1104,9 @@ void HeapEntry::Print(int max_depth, int indent) { case HeapGraphEdge::ELEMENT: OS::Print(" %*c %d: ", indent, ' ', edge->index()); break; + case HeapGraphEdge::INTERNAL: + OS::Print(" %*c $%s: ", indent, ' ', edge->name()); + break; case 
HeapGraphEdge::PROPERTY: OS::Print(" %*c %s: ", indent, ' ', edge->name()); break; @@ -1145,6 +1152,9 @@ void HeapGraphPath::Print() { case HeapGraphEdge::ELEMENT: OS::Print("[%d] ", edge->index()); break; + case HeapGraphEdge::INTERNAL: + OS::Print("[$%s] ", edge->name()); + break; case HeapGraphEdge::PROPERTY: OS::Print("[%s] ", edge->name()); break; @@ -1318,6 +1328,16 @@ void HeapSnapshot::SetElementReference(HeapEntry* parent, } +void HeapSnapshot::SetInternalReference(HeapEntry* parent, + const char* reference_name, + Object* child) { + HeapEntry* child_entry = GetEntry(child); + if (child_entry != NULL) { + parent->SetInternalReference(reference_name, child_entry); + } +} + + void HeapSnapshot::SetPropertyReference(HeapEntry* parent, String* reference_name, Object* child) { @@ -1546,6 +1566,7 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj, snapshot_->SetClosureReference(entry, local_name, context->get(idx)); } } + snapshot_->SetInternalReference(entry, "code", func->shared()); } } diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index ecac8e28fb..4e423c8d92 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -431,7 +431,8 @@ class HeapGraphEdge { enum Type { CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE, ELEMENT = v8::HeapGraphEdge::ELEMENT, - PROPERTY = v8::HeapGraphEdge::PROPERTY + PROPERTY = v8::HeapGraphEdge::PROPERTY, + INTERNAL = v8::HeapGraphEdge::INTERNAL }; HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to); @@ -443,7 +444,7 @@ class HeapGraphEdge { return index_; } const char* name() const { - ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY); + ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL); return name_; } HeapEntry* from() const { return from_; } @@ -533,6 +534,7 @@ class HeapEntry { void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; } void SetClosureReference(const char* name, HeapEntry* entry); void SetElementReference(int index, HeapEntry* entry); + void SetInternalReference(const char* name, HeapEntry* entry); void SetPropertyReference(const char* name, HeapEntry* entry); void SetAutoIndexReference(HeapEntry* entry); @@ -542,6 +544,7 @@ class HeapEntry { void Print(int max_depth, int indent); private: + void AddEdge(HeapGraphEdge* edge); int CalculateTotalSize(); int CalculateNonSharedTotalSize(); void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path); @@ -641,6 +644,8 @@ class HeapSnapshot { void SetClosureReference( HeapEntry* parent, String* reference_name, Object* child); void SetElementReference(HeapEntry* parent, int index, Object* child); + void SetInternalReference( + HeapEntry* parent, const char* reference_name, Object* child); void SetPropertyReference( HeapEntry* parent, String* reference_name, Object* child); diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index b29a1abe15..71148e6bd3 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -4946,16 +4946,6 @@ static Object* ConvertCaseHelper(String* s, } -static inline SeqAsciiString* TryGetSeqAsciiString(String* s) { - if (!s->IsFlat() || !s->IsAsciiRepresentation()) return NULL; - if (s->IsConsString()) { - ASSERT(ConsString::cast(s)->second()->length() == 0); - return SeqAsciiString::cast(ConsString::cast(s)->first()); - } - return SeqAsciiString::cast(s); -} - - namespace { struct ToLowerTraits { @@ -5002,7 +4992,7 @@ static Object* ConvertCase( unibrow::Mapping* mapping) { 
NoHandleAllocation ha; CONVERT_CHECKED(String, s, args[0]); - s->TryFlatten(); + s = s->TryFlattenGetString(); const int length = s->length(); // Assume that the string is not empty; we need this assumption later @@ -5014,13 +5004,12 @@ static Object* ConvertCase( // character is also ascii. This is currently the case, but it // might break in the future if we implement more context and locale // dependent upper/lower conversions. - SeqAsciiString* seq_ascii = TryGetSeqAsciiString(s); - if (seq_ascii != NULL) { + if (s->IsSeqAsciiString()) { Object* o = Heap::AllocateRawAsciiString(length); if (o->IsFailure()) return o; SeqAsciiString* result = SeqAsciiString::cast(o); bool has_changed_character = ConvertTraits::ConvertAscii( - result->GetChars(), seq_ascii->GetChars(), length); + result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length); return has_changed_character ? result : s; } @@ -5564,7 +5553,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) { if (first->IsString()) return first; } - bool ascii = special->IsAsciiRepresentation(); + bool ascii = special->HasOnlyAsciiChars(); int position = 0; for (int i = 0; i < array_length; i++) { int increment = 0; @@ -5605,7 +5594,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) { String* element = String::cast(elt); int element_length = element->length(); increment = element_length; - if (ascii && !element->IsAsciiRepresentation()) { + if (ascii && !element->HasOnlyAsciiChars()) { ascii = false; } } else { @@ -9061,7 +9050,7 @@ static Object* Runtime_SetFunctionBreakPoint(Arguments args) { Handle break_point_object_arg = args.at(2); // Set break point. - Debug::SetBreakPoint(shared, source_position, break_point_object_arg); + Debug::SetBreakPoint(shared, break_point_object_arg, &source_position); return Heap::undefined_value(); } @@ -9081,8 +9070,6 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle