diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index 3a4d733152..205da7c68a 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -117,4 +117,12 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
 #undef WRITE_CALL_2_VOID

+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+  // TODO(ulan): This clobbers only subset of registers depending on compiler,
+  // Rewrite this in assembly to really clobber all registers.
+  // GCC for ia32 uses the FPU and does not touch XMM registers.
+  return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
+
 } }  // namespace v8::internal
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 92e57401f2..b7137c3175 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -289,12 +289,23 @@ class FunctionCallbackArguments
 };


+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
 #define DECLARE_RUNTIME_FUNCTION(Type, Name) \
 Type Name(int args_length, Object** args_object, Isolate* isolate)

 #define RUNTIME_FUNCTION(Type, Name) \
 static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
 Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+  CLOBBER_DOUBLE_REGISTERS(); \
   Arguments args(args_length, args_object); \
   return __RT_impl_##Name(args, isolate); \
 } \
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 60f5290030..c075826a00 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -859,7 +859,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

@@ -868,7 +869,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
   }

@@ -877,6 +878,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 9339c5fade..96d7cb33a8 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -127,6 +127,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->

 // This code tries to be close to ia32 code so that any changes can be
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 8b22e625ba..647ce723e2 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -98,6 +98,38 @@ void LCodeGen::Abort(BailoutReason reason) {
 }


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -158,16 +190,7 @@ bool LCodeGen::GeneratePrologue() {
   }

   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }

   // Possibly allocate a local context.
@@ -313,6 +336,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ b(&needs_frame);
@@ -330,6 +354,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         __ mov(pc, ip);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ mov(lr, Operand(pc), LeaveCC, al);
       __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
     }
@@ -783,7 +811,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }

   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2853,16 +2884,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
@@ -3434,7 +3456,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ jmp(&receiver_ok);

   __ bind(&global_object);
-  __ ldr(receiver, GlobalObjectOperand());
+  __ ldr(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ ldr(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
   __ ldr(receiver,
          FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index a9b85c89cc..ef03e60015 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 32471443bb..282a8052e4 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -1045,8 +1045,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 843f8c8960..b489314329 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -554,6 +554,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
       if (!type->prototype()->IsJSObject()) return false;
       // Go up the prototype chain, recording where we are currently.
       holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+      JSObject::TryMigrateInstance(holder_);
       type = Handle<Map>(holder()->map());
     }
   }
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 9b589d843d..43e9164596 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -111,6 +111,8 @@ enum BuiltinExtraArguments {
   V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
                            Code::kNoExtraICState) \
   V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
+                       Code::kNoExtraICState) \
+  V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, \
                        Code::kNoExtraICState) \
                                                \
   V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
@@ -400,6 +402,7 @@ class Builtins {
   static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyStubFailure(MacroAssembler* masm);
+  static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
   static void Generate_FunctionCall(MacroAssembler* masm);

diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 19b6088dd1..3232a74cc3 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -721,15 +721,23 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(

 HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
     JSArrayBuilder* array_builder, ElementsKind kind) {
+  // Insert a bounds check because the number of arguments might exceed
+  // the kInitialMaxFastElementArray limit. This cannot happen for code
+  // that was parsed, but calling via Array.apply(thisArg, [...]) might
+  // trigger it.
+  HValue* length = GetArgumentsLength();
+  HConstant* max_alloc_length =
+      Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+  HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
+
   // We need to fill with the hole if it's a smi array in the multi-argument
   // case because we might have to bail out while copying arguments into
   // the array because they aren't compatible with a smi array.
   // If it's a double array, no problem, and if it's fast then no
   // problem either because doubles are boxed.
-  HValue* length = GetArgumentsLength();
   bool fill_with_hole = IsFastSmiElementsKind(kind);
-  HValue* new_object = array_builder->AllocateArray(length,
-                                                    length,
+  HValue* new_object = array_builder->AllocateArray(checked_length,
+                                                    checked_length,
                                                     fill_with_hole);
   HValue* elements = array_builder->GetElementsLocation();
   ASSERT(elements != NULL);

@@ -739,10 +747,10 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
       context(), LoopBuilder::kPostIncrement);
   HValue* start = graph()->GetConstant0();
-  HValue* key = builder.BeginBody(start, length, Token::LT);
+  HValue* key = builder.BeginBody(start, checked_length, Token::LT);
   HInstruction* argument_elements = Add<HArgumentsElements>(false);
   HInstruction* argument = Add<HAccessArgumentsAt>(
-      argument_elements, length, key);
+      argument_elements, checked_length, key);

   Add<HStoreKeyed>(elements, key, argument, kind);
   builder.EndBody();
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 1b128c3a0a..7a2f4d9e30 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -132,7 +132,7 @@ function TimeClip(time) {
 // strings over and over again.
 var Date_cache = {
   // Cached time value.
-  time: NAN,
+  time: 0,
   // String input for which the cached time is valid.
   string: null
 };
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 84e80b9d9a..c7311b3cc9 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -1574,8 +1574,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  Code* notify_failure =
-      isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+  Code* notify_failure = NotifyStubFailureBuiltin();
   output_frame->SetContinuation(
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 }
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 4e9d281ea5..8576ffc20d 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -412,6 +412,10 @@ class Deoptimizer : public Malloced {
   // at the dynamic alignment state slot inside the frame.
   bool HasAlignmentPadding(JSFunction* function);

+  // Select the version of NotifyStubFailure builtin that either saves or
+  // doesn't save the double registers depending on CPU features.
+  Code* NotifyStubFailureBuiltin();
+
   Isolate* isolate_;
   JSFunction* function_;
   Code* compiled_code_;
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index e5e6ec50d1..5a3fa78e33 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -601,7 +601,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

@@ -610,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }

@@ -620,6 +621,21 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  if (Serializer::enabled()) {
+    PlatformFeatureScope sse2(SSE2);
+    Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+  } else {
+    Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+  }
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index e339b3ad11..789dbfbdce 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -231,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+      Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+  return isolate_->builtins()->builtin(name);
+}
+
+
 #define __ masm()->

 void Deoptimizer::EntryGenerator::Generate() {
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 0c7e8d6ef3..e08a6e1d66 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -130,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(esp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(esp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -244,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
     }

     if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-      Comment(";;; Save clobbered callee double registers");
-      CpuFeatureScope scope(masm(), SSE2);
-      int count = 0;
-      BitVector* doubles = chunk()->allocated_double_registers();
-      BitVector::Iterator save_iterator(doubles);
-      while (!save_iterator.Done()) {
-        __ movsd(MemOperand(esp, count * kDoubleSize),
-                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
-        save_iterator.Advance();
-        count++;
-      }
+      SaveCallerDoubles();
     }
   }

@@ -399,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
@@ -425,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
+      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+        RestoreCallerDoubles();
+      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
@@ -3056,17 +3084,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-    ASSERT(NeedsEagerFrame());
-    CpuFeatureScope scope(masm(), SSE2);
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(esp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 78bc69de91..94ab28e1e3 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -198,6 +198,9 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 30f8a8dfbb..1e41f734ee 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -773,8 +773,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index b75ddb382b..4494b75332 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -2643,6 +2643,7 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(

     if (IsMarked(code) && !code->marked_for_deoptimization()) {
       code->set_marked_for_deoptimization(true);
+      code->InvalidateEmbeddedObjects();
       have_code_to_deoptimize_ = true;
     }
     entries->clear_at(i);
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index de91051ed0..8c825d24ee 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -271,16 +271,14 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
-      Memory::Address_at(pc_ + Assembler::kInstrSize *
-                         (kNoCodeAgeSequenceLength - 1)));
+      Assembler::target_address_at(pc_ + Assembler::kInstrSize));
 }


 void RelocInfo::set_code_age_stub(Code* stub) {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  Memory::Address_at(pc_ + Assembler::kInstrSize *
-                     (kNoCodeAgeSequenceLength - 1)) =
-      stub->instruction_start();
+  Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+                                   stub->instruction_start());
 }


diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 0b495831b9..d0ae073737 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -821,12 +821,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
   // internal frame to make the code faster, since we shouldn't have to do stack
   // crawls in MakeCodeYoung. This seems a bit fragile.

-  __ mov(a0, ra);
-  // Adjust a0 to point to the head of the PlatformCodeAge sequence
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
   __ Subu(a0, a0,
       Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
-  // Restore the original return address of the function
-  __ mov(ra, at);

   // The following registers must be saved and restored when calling through to
   // the runtime:
@@ -863,12 +860,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
   // save/restore the registers without worrying about which of them contain
   // pointers.

-  __ mov(a0, ra);
-  // Adjust a0 to point to the head of the PlatformCodeAge sequence
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
   __ Subu(a0, a0,
       Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
-  // Restore the original return address of the function
-  __ mov(ra, at);

   // The following registers must be saved and restored when calling through to
   // the runtime:
@@ -900,7 +894,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

@@ -909,7 +904,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ MultiPush(kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ MultiPop(kJSCallerSaved | kCalleeSaved);
   }

@@ -918,6 +913,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index ec6649533f..9d4b52c9da 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -641,8 +641,8 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
     *age = kNoAgeCodeAge;
     *parity = NO_MARKING_PARITY;
   } else {
-    Address target_address = Memory::Address_at(
-        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+    Address target_address = Assembler::target_address_at(
+        sequence + Assembler::kInstrSize);
     Code* stub = GetCodeFromTargetAddress(target_address);
     GetCodeAgeAndParity(stub, age, parity);
   }
@@ -661,17 +661,18 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
-    // Mark this code sequence for FindPlatformCodeAgeSequence()
+    // Mark this code sequence for FindPlatformCodeAgeSequence().
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
-    // Save the function's original return address
-    // (it will be clobbered by Call(t9))
-    patcher.masm()->mov(at, ra);
-    // Load the stub address to t9 and call it
-    patcher.masm()->li(t9,
-        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
-    patcher.masm()->Call(t9);
-    // Record the stub address in the empty space for GetCodeAgeAndParity()
-    patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    // Load the stub address to t9 and call it,
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    patcher.masm()->li(
+        t9,
+        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+        CONSTANT_SIZE);
+    patcher.masm()->nop();  // Prevent jalr to jal optimization.
+    patcher.masm()->jalr(t9, a0);
+    patcher.masm()->nop();  // Branch delay slot nop.
+    patcher.masm()->nop();  // Pad the empty space.
   }
 }
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index d31990be5c..971ead6c23 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -125,6 +125,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->


diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 4484dc634e..26ffd66e1d 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -98,6 +98,38 @@ void LChunkBuilder::Abort(BailoutReason reason) {
 }


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -160,16 +192,7 @@ bool LCodeGen::GeneratePrologue() {
   }

   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }

   // Possibly allocate a local context.
@@ -298,6 +321,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
    }
    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
    if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
@@ -313,6 +337,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
        __ Call(t9);
      }
    } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
      __ Call(t9);
    }
  }
@@ -757,7 +785,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }

   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2706,16 +2737,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
@@ -3303,7 +3325,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ Branch(&receiver_ok);

   __ bind(&global_object);
-  __ lw(receiver, GlobalObjectOperand());
+  __ lw(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ lw(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
   __ lw(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index f643d02191..29a89d5885 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index e0cb1ba824..d6e8af1fc0 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -4601,15 +4601,15 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
     // Pre-age the code.
     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
     nop(Assembler::CODE_AGE_MARKER_NOP);
-    // Save the function's original return address
-    // (it will be clobbered by Call(t9))
-    mov(at, ra);
-    // Load the stub address to t9 and call it
+    // Load the stub address to t9 and call it,
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
     li(t9,
-       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
-    Call(t9);
-    // Record the stub address in the empty space for GetCodeAgeAndParity()
-    dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+       CONSTANT_SIZE);
+    nop();  // Prevent jalr to jal optimization.
+    jalr(t9, a0);
+    nop();  // Branch delay slot nop.
+    nop();  // Pad the empty space.
   } else {
     Push(ra, fp, cp, a1);
     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 0805bb9670..a4fd766e66 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1210,8 +1210,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 5a96efe9c1..acc65251e2 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1722,6 +1722,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
                                       int64_t& i64hilo,
                                       uint64_t& u64hilo,
                                       int32_t& next_pc,
+                                      int32_t& return_addr_reg,
                                       bool& do_interrupt) {
   // Every local variable declared here needs to be const.
   // This is to make sure that changed values are sent back to
@@ -1782,6 +1783,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
       case JR:
       case JALR:
         next_pc = get_register(instr->RsValue());
+        return_addr_reg = instr->RdValue();
        break;
      case SLL:
        alu_out = rt << sa;
@@ -1986,6 +1988,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
   int32_t current_pc = get_pc();
   // Next pc
   int32_t next_pc = 0;
+  int32_t return_addr_reg = 31;

   // Set up the variables if needed before executing the instruction.
   ConfigureTypeRegister(instr,
@@ -1993,6 +1996,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
                         i64hilo,
                         u64hilo,
                         next_pc,
+                        return_addr_reg,
                         do_interrupt);

   // ---------- Raise exceptions triggered.
@@ -2258,7 +2262,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
       Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
          current_pc+Instruction::kInstrSize);
       BranchDelayInstructionDecode(branch_delay_instr);
-      set_register(31, current_pc + 2 * Instruction::kInstrSize);
+      set_register(return_addr_reg,
+                   current_pc + 2 * Instruction::kInstrSize);
       set_pc(next_pc);
       pc_modified_ = true;
       break;
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 601cd6d99d..d9fd10f245 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -289,6 +289,7 @@ class Simulator {
                              int64_t& i64hilo,
                              uint64_t& u64hilo,
                              int32_t& next_pc,
+                             int32_t& return_addr_reg,
                              bool& do_interrupt);

   void DecodeTypeImmediate(Instruction* instr);
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 64fb17979d..cf51024c86 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -10332,6 +10332,18 @@ void Code::InvalidateRelocation() {
 }


+void Code::InvalidateEmbeddedObjects() {
+  Object* undefined = GetHeap()->undefined_value();
+  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+    RelocInfo::Mode mode = it.rinfo()->rmode();
+    if (mode == RelocInfo::EMBEDDED_OBJECT) {
+      it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+    }
+  }
+}
+
+
 void Code::Relocate(intptr_t delta) {
   for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
     it.rinfo()->apply(delta);
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 8331a76885..c4ff63544a 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -5008,6 +5008,7 @@ class Code: public HeapObject {
   // [relocation_info]: Code relocation information
   DECL_ACCESSORS(relocation_info, ByteArray)
   void InvalidateRelocation();
+  void InvalidateEmbeddedObjects();

   // [handler_table]: Fixed array containing offsets of exception handlers.
   DECL_ACCESSORS(handler_table, FixedArray)
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index 22a546742c..e89eb1bfed 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -224,7 +224,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
        containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      }
      int old_counter = containing_chunk->store_buffer_counter();
-      if (old_counter == threshold) {
+      if (old_counter >= threshold) {
        containing_chunk->set_scan_on_scavenge(true);
        created_new_scan_on_scavenge_pages = true;
      }
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 7bc0283415..dfda1a3ba6 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -35,7 +35,7 @@
 #define MAJOR_VERSION 3
 #define MINOR_VERSION 22
 #define BUILD_NUMBER 24
-#define PATCH_LEVEL 10
+#define PATCH_LEVEL 17
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index f65b25c652..7fc26937e9 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -663,7 +663,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

@@ -672,7 +673,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ Pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ Popad();
     // Tear down internal frame.
   }
@@ -682,6 +683,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index bf11e0860f..bc51c54099 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -126,6 +126,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->

 void Deoptimizer::EntryGenerator::Generate() {
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 31fbcd27ab..0aff6c94c1 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -111,6 +111,38 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(rsp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(rsp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -173,16 +205,7 @@ bool LCodeGen::GeneratePrologue() {
    }

    if (info()->saves_caller_doubles()) {
-      Comment(";;; Save clobbered callee double registers");
-      int count = 0;
-      BitVector* doubles = chunk()->allocated_double_registers();
-      BitVector::Iterator save_iterator(doubles);
-      while (!save_iterator.Done()) {
-        __ movsd(MemOperand(rsp, count * kDoubleSize),
-                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
-        save_iterator.Advance();
-        count++;
-      }
+      SaveCallerDoubles();
    }
  }

@@ -261,6 +284,7 @@ bool LCodeGen::GenerateJumpTable() {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
@@ -279,6 +303,10 @@ bool LCodeGen::GenerateJumpTable() {
        __ call(kScratchRegister);
      }
    } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
@@ -661,7 +689,10 @@ void LCodeGen::DeoptimizeIf(Condition cc,
   }

   ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == no_condition && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (cc == no_condition && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2551,16 +2582,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(rsp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index f3f202a277..c387d81ac4 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -153,6 +153,10 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 24374349a2..67ae7a2a69 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -1248,8 +1248,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.
diff --git a/deps/v8/test/mjsunit/regress/regress-280531.js b/deps/v8/test/mjsunit/regress/regress-280531.js
new file mode 100644
index 0000000000..6799574f69
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-280531.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var contextA = Realm.create();
+var date1 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
+new Date('Thu, 29 Aug 2013 00:00:01 UTC');
+var date2 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
+assertEquals(date1, date2);
diff --git a/deps/v8/test/mjsunit/regress/regress-3027.js b/deps/v8/test/mjsunit/regress/regress-3027.js
new file mode 100644
index 0000000000..c7ebd539b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3027.js
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test to exceed the Heap::MaxRegularSpaceAllocationSize with an array
+// constructor call taking many arguments.
+
+function boom() {
+  var args = [];
+  for (var i = 0; i < 125000; i++) {
+    args.push(i);
+  }
+  return Array.apply(Array, args);
+}
+
+var array = boom();
+
+assertEquals(125000, array.length);
+assertEquals(124999, array[124999]);
diff --git a/deps/v8/test/mjsunit/regress/regress-318420.js b/deps/v8/test/mjsunit/regress/regress-318420.js
new file mode 100644
index 0000000000..77bef10ec4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-318420.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function bar(a, b) { with(a) {return a + b;} }
+
+var obj = {
+  functions: [bar, bar, bar, bar],
+  receivers: [bar, bar, undefined, null],
+  foo: function () {
+    for (var a = this.functions, e = this.receivers, c = a.length,
+         d = 0; d < c ; d++) {
+      a[d].apply(e[d], arguments)
+    }
+  }
+}
+
+obj.foo(1, 2, 3, 4);
+obj.foo(1, 2, 3, 4);
+%OptimizeFunctionOnNextCall(obj.foo);
+obj.foo(1, 2, 3, 4);
diff --git a/deps/v8/test/mjsunit/regress/regress-331444.js b/deps/v8/test/mjsunit/regress/regress-331444.js
new file mode 100644
index 0000000000..c78d6fb71b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-331444.js
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+function boom() {
+  var args = [];
+  for (var i = 0; i < 125000; i++)
+    args.push(i);
+  return Array.apply(Array, args);
+}
+var array = boom();
+function fib(n) {
+  var f0 = 0, f1 = 1;
+  for (; n > 0; n = n - 1) {
+    f0 + f1;
+    f0 = array;
+  }
+}
+fib(12);
diff --git a/deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js b/deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js
new file mode 100644
index 0000000000..a306e5d9d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+  return 1;
+}
+function C1(f) {
+  this.f = f;
+}
+var o1 = new C1(f);
+var o2 = {__proto__: new C1(f) }
+function foo(o) {
+  return o.f();
+}
+foo(o1);
+foo(o1);
+foo(o2);
+foo(o1);
+var o3 = new C1(function() { return 2; });
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo(o2));
+o2.__proto__.f = function() { return 3; };
+assertEquals(3, foo(o2));