
v8: upgrade to 3.22.24.17

Tag: v0.11.11-release
Author: Trevor Norris, 11 years ago
commit f78e5df854
 1. deps/v8/src/arguments.cc | 8
 2. deps/v8/src/arguments.h | 11
 3. deps/v8/src/arm/builtins-arm.cc | 15
 4. deps/v8/src/arm/deoptimizer-arm.cc | 5
 5. deps/v8/src/arm/lithium-codegen-arm.cc | 67
 6. deps/v8/src/arm/lithium-codegen-arm.h | 3
 7. deps/v8/src/arm/macro-assembler-arm.h | 6
 8. deps/v8/src/ast.cc | 1
 9. deps/v8/src/builtins.h | 3
10. deps/v8/src/code-stubs-hydrogen.cc | 18
11. deps/v8/src/date.js | 2
12. deps/v8/src/deoptimizer.cc | 3
13. deps/v8/src/deoptimizer.h | 4
14. deps/v8/src/ia32/builtins-ia32.cc | 20
15. deps/v8/src/ia32/deoptimizer-ia32.cc | 7
16. deps/v8/src/ia32/lithium-codegen-ia32.cc | 62
17. deps/v8/src/ia32/lithium-codegen-ia32.h | 3
18. deps/v8/src/ia32/macro-assembler-ia32.h | 6
19. deps/v8/src/mark-compact.cc | 1
20. deps/v8/src/mips/assembler-mips-inl.h | 8
21. deps/v8/src/mips/builtins-mips.cc | 25
22. deps/v8/src/mips/codegen-mips.cc | 25
23. deps/v8/src/mips/deoptimizer-mips.cc | 5
24. deps/v8/src/mips/lithium-codegen-mips.cc | 67
25. deps/v8/src/mips/lithium-codegen-mips.h | 3
26. deps/v8/src/mips/macro-assembler-mips.cc | 16
27. deps/v8/src/mips/macro-assembler-mips.h | 6
28. deps/v8/src/mips/simulator-mips.cc | 7
29. deps/v8/src/mips/simulator-mips.h | 1
30. deps/v8/src/objects.cc | 12
31. deps/v8/src/objects.h | 1
32. deps/v8/src/store-buffer.cc | 2
33. deps/v8/src/version.cc | 2
34. deps/v8/src/x64/builtins-x64.cc | 15
35. deps/v8/src/x64/deoptimizer-x64.cc | 5
36. deps/v8/src/x64/lithium-codegen-x64.cc | 64
37. deps/v8/src/x64/lithium-codegen-x64.h | 4
38. deps/v8/src/x64/macro-assembler-x64.h | 6
39. deps/v8/test/mjsunit/regress/regress-280531.js | 32
40. deps/v8/test/mjsunit/regress/regress-3027.js | 42
41. deps/v8/test/mjsunit/regress/regress-318420.js | 46
42. deps/v8/test/mjsunit/regress/regress-331444.js | 44
43. deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js | 49

deps/v8/src/arguments.cc | 8

@@ -117,4 +117,12 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
 #undef WRITE_CALL_2_VOID
 
+
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+  // TODO(ulan): This clobbers only subset of registers depending on compiler,
+  // Rewrite this in assembly to really clobber all registers.
+  // GCC for ia32 uses the FPU and does not touch XMM registers.
+  return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
 } }  // namespace v8::internal

deps/v8/src/arguments.h | 11

@@ -289,12 +289,23 @@ class FunctionCallbackArguments
 };
 
+
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
 #define DECLARE_RUNTIME_FUNCTION(Type, Name)    \
 Type Name(int args_length, Object** args_object, Isolate* isolate)
 
 #define RUNTIME_FUNCTION(Type, Name)                                  \
 static Type __RT_impl_##Name(Arguments args, Isolate* isolate);      \
 Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+  CLOBBER_DOUBLE_REGISTERS();                                         \
   Arguments args(args_length, args_object);                          \
   return __RT_impl_##Name(args, isolate);                            \
 }                                                                     \
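
Note (editor's sketch, not part of the commit): the standalone C++ program below mimics the CLOBBER_DOUBLE_REGISTERS() pattern introduced above; RuntimeEntry and main are hypothetical stand-ins. In a debug build every runtime entry scrambles a few double registers, so a caller that kept a live value in an unsaved FP register fails deterministically instead of miscompiling rarely.

// Standalone sketch, not V8 code: RuntimeEntry and main are hypothetical.
#include <cstdio>

static double ClobberDoubleRegisters(double x1, double x2, double x3,
                                     double x4) {
  // The mixed multiply-adds force the compiler to touch several FP
  // registers on entry (only a subset, as the TODO in the diff notes).
  return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
}

#ifdef DEBUG
#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4)
#else
#define CLOBBER_DOUBLE_REGISTERS()
#endif

// Stand-in for a runtime function: in a debug build, a caller that left a
// live value in an unsaved double register now reads garbage, turning a
// rare register-clobbering bug into a deterministic failure.
double RuntimeEntry(double arg) {
  CLOBBER_DOUBLE_REGISTERS();
  return arg * 2.0;
}

int main() {
  std::printf("%f\n", RuntimeEntry(21.0));  // prints 42.000000
  return 0;
}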

deps/v8/src/arm/builtins-arm.cc | 15

@@ -859,7 +859,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -868,7 +869,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
    __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
   }
@@ -877,6 +878,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

deps/v8/src/arm/deoptimizer-arm.cc | 5

@@ -127,6 +127,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
 
 #define __ masm()->
 
 // This code tries to be close to ia32 code so that any changes can be

deps/v8/src/arm/lithium-codegen-arm.cc | 67

@@ -98,6 +98,38 @@ void LCodeGen::Abort(BailoutReason reason) {
 }
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
@@ -158,16 +190,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 
   // Possibly allocate a local context.
@@ -313,6 +336,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ b(&needs_frame);
@@ -330,6 +354,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         __ mov(pc, ip);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ mov(lr, Operand(pc), LeaveCC, al);
       __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
     }
@@ -783,7 +811,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2853,16 +2884,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
@@ -3434,7 +3456,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ jmp(&receiver_ok);
 
   __ bind(&global_object);
-  __ ldr(receiver, GlobalObjectOperand());
+  __ ldr(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ ldr(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
   __ ldr(receiver,
          FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);

deps/v8/src/arm/lithium-codegen-arm.h | 3

@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();

deps/v8/src/arm/macro-assembler-arm.h | 6

@@ -1045,8 +1045,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.

deps/v8/src/ast.cc | 1

@@ -554,6 +554,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
       if (!type->prototype()->IsJSObject()) return false;
       // Go up the prototype chain, recording where we are currently.
       holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+      JSObject::TryMigrateInstance(holder_);
       type = Handle<Map>(holder()->map());
     }
   }

deps/v8/src/builtins.h | 3

@@ -111,6 +111,8 @@ enum BuiltinExtraArguments {
   V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,              \
                                     Code::kNoExtraICState)               \
   V(NotifyStubFailure,              BUILTIN, UNINITIALIZED,              \
+                                    Code::kNoExtraICState)               \
+  V(NotifyStubFailureSaveDoubles,   BUILTIN, UNINITIALIZED,              \
                                     Code::kNoExtraICState)               \
                                                                          \
   V(LoadIC_Miss,                    BUILTIN, UNINITIALIZED,              \
@@ -400,6 +402,7 @@ class Builtins {
   static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyStubFailure(MacroAssembler* masm);
+  static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
 
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
   static void Generate_FunctionCall(MacroAssembler* masm);

deps/v8/src/code-stubs-hydrogen.cc | 18

@@ -721,15 +721,23 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
 
 HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
     JSArrayBuilder* array_builder, ElementsKind kind) {
+  // Insert a bounds check because the number of arguments might exceed
+  // the kInitialMaxFastElementArray limit. This cannot happen for code
+  // that was parsed, but calling via Array.apply(thisArg, [...]) might
+  // trigger it.
+  HValue* length = GetArgumentsLength();
+  HConstant* max_alloc_length =
+      Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+  HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
+
   // We need to fill with the hole if it's a smi array in the multi-argument
   // case because we might have to bail out while copying arguments into
   // the array because they aren't compatible with a smi array.
   // If it's a double array, no problem, and if it's fast then no
   // problem either because doubles are boxed.
-  HValue* length = GetArgumentsLength();
   bool fill_with_hole = IsFastSmiElementsKind(kind);
-  HValue* new_object = array_builder->AllocateArray(length,
-                                                    length,
+  HValue* new_object = array_builder->AllocateArray(checked_length,
+                                                    checked_length,
                                                     fill_with_hole);
   HValue* elements = array_builder->GetElementsLocation();
   ASSERT(elements != NULL);
@@ -739,10 +747,10 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
       context(),
       LoopBuilder::kPostIncrement);
   HValue* start = graph()->GetConstant0();
-  HValue* key = builder.BeginBody(start, length, Token::LT);
+  HValue* key = builder.BeginBody(start, checked_length, Token::LT);
   HInstruction* argument_elements = Add<HArgumentsElements>(false);
   HInstruction* argument = Add<HAccessArgumentsAt>(
-      argument_elements, length, key);
+      argument_elements, checked_length, key);
 
   Add<HStoreKeyed>(elements, key, argument, kind);
   builder.EndBody();

deps/v8/src/date.js | 2

@@ -132,7 +132,7 @@ function TimeClip(time) {
 // strings over and over again.
 var Date_cache = {
   // Cached time value.
-  time: NAN,
+  time: 0,
   // String input for which the cached time is valid.
   string: null
 };

deps/v8/src/deoptimizer.cc | 3

@@ -1574,8 +1574,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  Code* notify_failure =
-      isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+  Code* notify_failure = NotifyStubFailureBuiltin();
   output_frame->SetContinuation(
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 }

deps/v8/src/deoptimizer.h | 4

@@ -412,6 +412,10 @@ class Deoptimizer : public Malloced {
   // at the dynamic alignment state slot inside the frame.
   bool HasAlignmentPadding(JSFunction* function);
 
+  // Select the version of NotifyStubFailure builtin that either saves or
+  // doesn't save the double registers depending on CPU features.
+  Code* NotifyStubFailureBuiltin();
+
   Isolate* isolate_;
   JSFunction* function_;
   Code* compiled_code_;

deps/v8/src/ia32/builtins-ia32.cc | 20

@@ -601,7 +601,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -610,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
@@ -620,6 +621,21 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  if (Serializer::enabled()) {
+    PlatformFeatureScope sse2(SSE2);
+    Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+  } else {
+    Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+  }
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

deps/v8/src/ia32/deoptimizer-ia32.cc | 7

@@ -231,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+      Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+  return isolate_->builtins()->builtin(name);
+}
+
+
 #define __ masm()->
 
 void Deoptimizer::EntryGenerator::Generate() {

deps/v8/src/ia32/lithium-codegen-ia32.cc | 62

@@ -130,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(esp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(esp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
@@ -244,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-    Comment(";;; Save clobbered callee double registers");
-    CpuFeatureScope scope(masm(), SSE2);
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ movsd(MemOperand(esp, count * kDoubleSize),
-               XMMRegister::FromAllocationIndex(save_iterator.Current()));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 }
@@ -399,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -425,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
         __ ret(0);  // Call the continuation without clobbering registers.
       }
     } else {
+      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+        RestoreCallerDoubles();
+      }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
@@ -3056,17 +3084,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-    ASSERT(NeedsEagerFrame());
-    CpuFeatureScope scope(masm(), SSE2);
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(esp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.

deps/v8/src/ia32/lithium-codegen-ia32.h | 3

@@ -198,6 +198,9 @@ class LCodeGen: public LCodeGenBase {
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;

deps/v8/src/ia32/macro-assembler-ia32.h | 6

@@ -773,8 +773,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.

deps/v8/src/mark-compact.cc | 1

@@ -2643,6 +2643,7 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
 
     if (IsMarked(code) && !code->marked_for_deoptimization()) {
       code->set_marked_for_deoptimization(true);
+      code->InvalidateEmbeddedObjects();
      have_code_to_deoptimize_ = true;
     }
     entries->clear_at(i);

deps/v8/src/mips/assembler-mips-inl.h | 8

@@ -271,16 +271,14 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
-      Memory::Address_at(pc_ + Assembler::kInstrSize *
-                         (kNoCodeAgeSequenceLength - 1)));
+      Assembler::target_address_at(pc_ + Assembler::kInstrSize));
 }
 
 
 void RelocInfo::set_code_age_stub(Code* stub) {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  Memory::Address_at(pc_ + Assembler::kInstrSize *
-                     (kNoCodeAgeSequenceLength - 1)) =
-      stub->instruction_start();
+  Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+                                   stub->instruction_start());
 }

deps/v8/src/mips/builtins-mips.cc | 25

@@ -821,12 +821,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
   // internal frame to make the code faster, since we shouldn't have to do stack
   // crawls in MakeCodeYoung. This seems a bit fragile.
 
-  __ mov(a0, ra);
-  // Adjust a0 to point to the head of the PlatformCodeAge sequence
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
   __ Subu(a0, a0,
       Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
-  // Restore the original return address of the function
-  __ mov(ra, at);
 
   // The following registers must be saved and restored when calling through to
   // the runtime:
@@ -863,12 +860,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
   // save/restore the registers without worrying about which of them contain
   // pointers.
 
-  __ mov(a0, ra);
-  // Adjust a0 to point to the head of the PlatformCodeAge sequence
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
   __ Subu(a0, a0,
       Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
-  // Restore the original return address of the function
-  __ mov(ra, at);
 
   // The following registers must be saved and restored when calling through to
   // the runtime:
@@ -900,7 +894,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -909,7 +904,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ MultiPush(kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ MultiPop(kJSCallerSaved | kCalleeSaved);
   }
@@ -918,6 +913,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

deps/v8/src/mips/codegen-mips.cc | 25

@@ -641,8 +641,8 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
     *age = kNoAgeCodeAge;
     *parity = NO_MARKING_PARITY;
   } else {
-    Address target_address = Memory::Address_at(
-        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+    Address target_address = Assembler::target_address_at(
+        sequence + Assembler::kInstrSize);
     Code* stub = GetCodeFromTargetAddress(target_address);
     GetCodeAgeAndParity(stub, age, parity);
   }
@@ -661,17 +661,18 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
-    // Mark this code sequence for FindPlatformCodeAgeSequence()
+    // Mark this code sequence for FindPlatformCodeAgeSequence().
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
-    // Save the function's original return address
-    // (it will be clobbered by Call(t9))
-    patcher.masm()->mov(at, ra);
-    // Load the stub address to t9 and call it
-    patcher.masm()->li(t9,
-        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
-    patcher.masm()->Call(t9);
-    // Record the stub address in the empty space for GetCodeAgeAndParity()
-    patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    // Load the stub address to t9 and call it,
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    patcher.masm()->li(
+        t9,
+        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+        CONSTANT_SIZE);
+    patcher.masm()->nop();  // Prevent jalr to jal optimization.
+    patcher.masm()->jalr(t9, a0);
+    patcher.masm()->nop();  // Branch delay slot nop.
+    patcher.masm()->nop();  // Pad the empty space.
   }
 }

deps/v8/src/mips/deoptimizer-mips.cc | 5

@@ -125,6 +125,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
 
 #define __ masm()->

deps/v8/src/mips/lithium-codegen-mips.cc | 67

@@ -98,6 +98,38 @@ void LChunkBuilder::Abort(BailoutReason reason) {
 }
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
@@ -160,16 +192,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 
   // Possibly allocate a local context.
@@ -298,6 +321,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     }
     __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
@@ -313,6 +337,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         __ Call(t9);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ Call(t9);
     }
   }
@@ -757,7 +785,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2706,16 +2737,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
@@ -3303,7 +3325,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ Branch(&receiver_ok);
 
   __ bind(&global_object);
-  __ lw(receiver, GlobalObjectOperand());
+  __ lw(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ lw(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
   __ lw(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);

deps/v8/src/mips/lithium-codegen-mips.h | 3

@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();

deps/v8/src/mips/macro-assembler-mips.cc | 16

@@ -4601,15 +4601,15 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
     // Pre-age the code.
     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
     nop(Assembler::CODE_AGE_MARKER_NOP);
-    // Save the function's original return address
-    // (it will be clobbered by Call(t9))
-    mov(at, ra);
-    // Load the stub address to t9 and call it
-    li(t9,
-       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
-    Call(t9);
-    // Record the stub address in the empty space for GetCodeAgeAndParity()
-    dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    // Load the stub address to t9 and call it,
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    li(t9,
+       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+       CONSTANT_SIZE);
+    nop();  // Prevent jalr to jal optimization.
+    jalr(t9, a0);
+    nop();  // Branch delay slot nop.
+    nop();  // Pad the empty space.
   } else {
     Push(ra, fp, cp, a1);
     nop(Assembler::CODE_AGE_SEQUENCE_NOP);

deps/v8/src/mips/macro-assembler-mips.h | 6

@@ -1210,8 +1210,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.

deps/v8/src/mips/simulator-mips.cc | 7

@@ -1722,6 +1722,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
                                       int64_t& i64hilo,
                                       uint64_t& u64hilo,
                                       int32_t& next_pc,
+                                      int32_t& return_addr_reg,
                                       bool& do_interrupt) {
   // Every local variable declared here needs to be const.
   // This is to make sure that changed values are sent back to
@@ -1782,6 +1783,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
       case JR:
       case JALR:
         next_pc = get_register(instr->RsValue());
+        return_addr_reg = instr->RdValue();
        break;
      case SLL:
        alu_out = rt << sa;
@@ -1986,6 +1988,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
   int32_t current_pc = get_pc();
   // Next pc
   int32_t next_pc = 0;
+  int32_t return_addr_reg = 31;
 
   // Set up the variables if needed before executing the instruction.
   ConfigureTypeRegister(instr,
@@ -1993,6 +1996,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
                         i64hilo,
                         u64hilo,
                         next_pc,
+                        return_addr_reg,
                         do_interrupt);
 
   // ---------- Raise exceptions triggered.
@@ -2258,7 +2262,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
              current_pc+Instruction::kInstrSize);
          BranchDelayInstructionDecode(branch_delay_instr);
-          set_register(31, current_pc + 2 * Instruction::kInstrSize);
+          set_register(return_addr_reg,
+                       current_pc + 2 * Instruction::kInstrSize);
          set_pc(next_pc);
          pc_modified_ = true;
         break;
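
Note (editor's sketch, not the simulator's real interface): JALR writes the link address into the instruction's rd field, and rd is $ra (31) only by convention. The patched "jalr t9, a0" code-age sequences above therefore link through a0, which is what this simulator change models. A minimal model:

// Minimal sketch: models JALR writing the return address into the
// instruction's rd register rather than hard-coding register 31.
#include <cstdint>
#include <cstdio>

static int32_t regs[32];
static const int32_t kInstrSize = 4;

static void SimulateJalr(int rs, int rd, int32_t current_pc) {
  int32_t next_pc = regs[rs];              // jump target comes from rs
  regs[rd] = current_pc + 2 * kInstrSize;  // link past the branch delay slot
  std::printf("pc <- 0x%x, link in $%d = 0x%x\n", next_pc, rd, regs[rd]);
}

int main() {
  regs[25] = 0x1000;           // t9 ($25) holds the call target
  SimulateJalr(25, 4, 0x40);   // jalr t9, a0: links through a0 ($4), not $ra
  SimulateJalr(25, 31, 0x80);  // plain jalr t9: the conventional $ra case
  return 0;
}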

deps/v8/src/mips/simulator-mips.h | 1

@@ -289,6 +289,7 @@ class Simulator {
                              int64_t& i64hilo,
                              uint64_t& u64hilo,
                              int32_t& next_pc,
+                             int32_t& return_addr_reg,
                              bool& do_interrupt);
   void DecodeTypeImmediate(Instruction* instr);

deps/v8/src/objects.cc | 12

@@ -10332,6 +10332,18 @@ void Code::InvalidateRelocation() {
 }
 
 
+void Code::InvalidateEmbeddedObjects() {
+  Object* undefined = GetHeap()->undefined_value();
+  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+    RelocInfo::Mode mode = it.rinfo()->rmode();
+    if (mode == RelocInfo::EMBEDDED_OBJECT) {
+      it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+    }
+  }
+}
+
+
 void Code::Relocate(intptr_t delta) {
   for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
     it.rinfo()->apply(delta);

deps/v8/src/objects.h | 1

@@ -5008,6 +5008,7 @@ class Code: public HeapObject {
   // [relocation_info]: Code relocation information
   DECL_ACCESSORS(relocation_info, ByteArray)
   void InvalidateRelocation();
+  void InvalidateEmbeddedObjects();
 
   // [handler_table]: Fixed array containing offsets of exception handlers.
   DECL_ACCESSORS(handler_table, FixedArray)

deps/v8/src/store-buffer.cc | 2

@@ -224,7 +224,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
       containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
     }
     int old_counter = containing_chunk->store_buffer_counter();
-    if (old_counter == threshold) {
+    if (old_counter >= threshold) {
       containing_chunk->set_scan_on_scavenge(true);
       created_new_scan_on_scavenge_pages = true;
    }
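
Note (editor's sketch, assuming the counter can step past the threshold between the strided samples taken in ExemptPopularPages): an equality test then never fires, while ">=" still does.

// Sketch of the comparison bug fixed above: '>= threshold' still fires
// after the counter jumps past the threshold, where '== threshold' would
// be missed forever.
#include <cassert>

static bool ShouldExemptPage(int counter, int threshold) {
  return counter >= threshold;  // the fix; '==' misses overshooting counters
}

int main() {
  assert(!ShouldExemptPage(41, 42));
  assert(ShouldExemptPage(42, 42));  // exact hit: both comparisons work
  assert(ShouldExemptPage(57, 42));  // overshoot: only '>=' catches this
  return 0;
}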

deps/v8/src/version.cc | 2

@@ -35,7 +35,7 @@
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     22
 #define BUILD_NUMBER      24
-#define PATCH_LEVEL       10
+#define PATCH_LEVEL       17
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0

deps/v8/src/x64/builtins-x64.cc | 15

@@ -663,7 +663,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -672,7 +673,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ Pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ Popad();
     // Tear down internal frame.
   }
@@ -682,6 +683,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.

deps/v8/src/x64/deoptimizer-x64.cc | 5

@@ -126,6 +126,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
 
 #define __ masm()->
 
 void Deoptimizer::EntryGenerator::Generate() {

deps/v8/src/x64/lithium-codegen-x64.cc | 64

@@ -111,6 +111,38 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(rsp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(rsp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
@@ -173,16 +205,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ movsd(MemOperand(rsp, count * kDoubleSize),
-               XMMRegister::FromAllocationIndex(save_iterator.Current()));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 }
@@ -261,6 +284,7 @@ bool LCodeGen::GenerateJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
@@ -279,6 +303,10 @@ bool LCodeGen::GenerateJumpTable() {
         __ call(kScratchRegister);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
@@ -661,7 +689,10 @@ void LCodeGen::DeoptimizeIf(Condition cc,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == no_condition && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (cc == no_condition && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2551,16 +2582,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(rsp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {

deps/v8/src/x64/lithium-codegen-x64.h | 4

@@ -153,6 +153,10 @@ class LCodeGen: public LCodeGenBase {
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();

deps/v8/src/x64/macro-assembler-x64.h | 6

@@ -1248,8 +1248,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.

deps/v8/test/mjsunit/regress/regress-280531.js | 32 (new file)

@@ -0,0 +1,32 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var contextA = Realm.create();
var date1 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
new Date('Thu, 29 Aug 2013 00:00:01 UTC');
var date2 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
assertEquals(date1, date2);

deps/v8/test/mjsunit/regress/regress-3027.js | 42 (new file)

@@ -0,0 +1,42 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test to exceed the Heap::MaxRegularSpaceAllocationSize with an array
// constructor call taking many arguments.
function boom() {
var args = [];
for (var i = 0; i < 125000; i++) {
args.push(i);
}
return Array.apply(Array, args);
}
var array = boom();
assertEquals(125000, array.length);
assertEquals(124999, array[124999]);

deps/v8/test/mjsunit/regress/regress-318420.js | 46 (new file)

@@ -0,0 +1,46 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
function bar(a, b) { with(a) {return a + b;} }
var obj = {
functions: [bar, bar, bar, bar],
receivers: [bar, bar, undefined, null],
foo: function () {
for (var a = this.functions, e = this.receivers, c = a.length,
d = 0; d < c ; d++) {
a[d].apply(e[d], arguments)
}
}
}
obj.foo(1, 2, 3, 4);
obj.foo(1, 2, 3, 4);
%OptimizeFunctionOnNextCall(obj.foo);
obj.foo(1, 2, 3, 4);

deps/v8/test/mjsunit/regress/regress-331444.js | 44 (new file)

@@ -0,0 +1,44 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-gc
function boom() {
var args = [];
for (var i = 0; i < 125000; i++)
args.push(i);
return Array.apply(Array, args);
}
var array = boom();
function fib(n) {
var f0 = 0, f1 = 1;
for (; n > 0; n = n - 1) {
f0 + f1;
f0 = array;
}
}
fib(12);

deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js | 49 (new file)

@@ -0,0 +1,49 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
function f() {
return 1;
}
function C1(f) {
this.f = f;
}
var o1 = new C1(f);
var o2 = {__proto__: new C1(f) }
function foo(o) {
return o.f();
}
foo(o1);
foo(o1);
foo(o2);
foo(o1);
var o3 = new C1(function() { return 2; });
%OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo(o2));
o2.__proto__.f = function() { return 3; };
assertEquals(3, foo(o2));