
deps: update V8 to 4.4.63.26

Includes cherry-picks for:
 * JitCodeEvent patch: https://crrev.com/f7969b1d5a55e66237221a463daf422ac7611788
 * argparse patch: https://crrev.com/44bc918458481d60b08d5566f0f31a79e39b85d7

PR-URL: https://github.com/nodejs/io.js/pull/2220
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Rod Vagg <rod@vagg.org>
Tag: v4.0.0-rc
Author: Michaël Zasso
Committed by: Rod Vagg
Commit: 3d3c687012
40 changed files:

deps/v8/include/v8-version.h | 2
deps/v8/src/api.cc | 4
deps/v8/src/arm/assembler-arm-inl.h | 4
deps/v8/src/arm/assembler-arm.cc | 23
deps/v8/src/arm/assembler-arm.h | 20
deps/v8/src/arm64/code-stubs-arm64.cc | 32
deps/v8/src/bootstrapper.cc | 1
deps/v8/src/compiler/arm/code-generator-arm.cc | 2
deps/v8/src/flag-definitions.h | 2
deps/v8/src/hydrogen.cc | 5
deps/v8/src/ic/ic.cc | 48
deps/v8/src/isolate.h | 1
deps/v8/src/mips/assembler-mips-inl.h | 4
deps/v8/src/mips/assembler-mips.cc | 4
deps/v8/src/mips/assembler-mips.h | 2
deps/v8/src/mips/code-stubs-mips.cc | 4
deps/v8/src/mips64/assembler-mips64-inl.h | 4
deps/v8/src/mips64/assembler-mips64.cc | 4
deps/v8/src/mips64/assembler-mips64.h | 2
deps/v8/src/mips64/code-stubs-mips64.cc | 16
deps/v8/src/objects.cc | 91
deps/v8/src/objects.h | 1
deps/v8/src/snapshot/serialize.cc | 285
deps/v8/src/snapshot/serialize.h | 91
deps/v8/src/unicode-decoder.cc | 2
deps/v8/src/x64/lithium-x64.cc | 16
deps/v8/test/cctest/test-api.cc | 90
deps/v8/test/cctest/test-assembler-arm.cc | 59
deps/v8/test/cctest/test-assembler-mips.cc | 3
deps/v8/test/cctest/test-assembler-mips64.cc | 3
deps/v8/test/cctest/test-macro-assembler-mips.cc | 90
deps/v8/test/cctest/test-macro-assembler-mips64.cc | 89
deps/v8/test/cctest/test-serialize.cc | 95
deps/v8/test/mjsunit/es6/regress/regress-cr493566.js | 80
deps/v8/test/mjsunit/mjsunit.status | 4
deps/v8/test/mjsunit/regress/regress-487981.js | 22
deps/v8/test/mjsunit/regress/regress-crbug-478612.js | 52
deps/v8/test/mjsunit/regress/regress-crbug-500497.js | 33
deps/v8/test/mjsunit/regress/regress-crbug-502930.js | 27
deps/v8/test/mjsunit/regress/regress-crbug-514268.js | 23

deps/v8/include/v8-version.h

@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 4
 #define V8_MINOR_VERSION 4
 #define V8_BUILD_NUMBER 63
-#define V8_PATCH_LEVEL 12
+#define V8_PATCH_LEVEL 26
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)

deps/v8/src/api.cc

@@ -345,12 +345,14 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
   base::ElapsedTimer timer;
   timer.Start();
   Isolate::Scope isolate_scope(isolate);
+  internal_isolate->set_creating_default_snapshot(true);
   internal_isolate->Init(NULL);
   Persistent<Context> context;
   i::Snapshot::Metadata metadata;
   {
     HandleScope handle_scope(isolate);
     Handle<Context> new_context = Context::New(isolate);
+    internal_isolate->set_creating_default_snapshot(false);
     context.Reset(isolate, new_context);
     if (custom_source != NULL) {
       metadata.set_embeds_script(true);

@@ -379,7 +381,7 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
       i::SnapshotByteSink context_sink;
       i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
       context_ser.Serialize(&raw_context);
-      ser.SerializeWeakReferences();
+      ser.SerializeWeakReferencesAndDeferred();
       result = i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
     }

deps/v8/src/arm/assembler-arm-inl.h

@@ -432,9 +432,7 @@ void Assembler::CheckBuffer() {
   if (buffer_space() <= kGap) {
     GrowBuffer();
   }
-  if (pc_offset() >= next_buffer_check_) {
-    CheckConstPool(false, true);
-  }
+  MaybeCheckConstPool();
 }

deps/v8/src/arm/assembler-arm.cc

@@ -1298,7 +1298,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
 }
 
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+int Assembler::branch_offset(Label* L) {
   int target_pos;
   if (L->is_bound()) {
     target_pos = L->pos();

@@ -1315,7 +1315,8 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   // Block the emission of the constant pool, since the branch instruction must
   // be emitted at the pc offset recorded by the label.
-  BlockConstPoolFor(1);
+  if (!is_const_pool_blocked()) BlockConstPoolFor(1);
+
   return target_pos - (pc_offset() + kPcLoadDelta);
 }

@@ -1367,6 +1368,24 @@ void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
 }
 
 
+void Assembler::b(Label* L, Condition cond) {
+  CheckBuffer();
+  b(branch_offset(L), cond);
+}
+
+
+void Assembler::bl(Label* L, Condition cond) {
+  CheckBuffer();
+  bl(branch_offset(L), cond);
+}
+
+
+void Assembler::blx(Label* L) {
+  CheckBuffer();
+  blx(branch_offset(L));
+}
+
+
 // Data-processing instructions.
 
 void Assembler::and_(Register dst, Register src1, const Operand& src2,

deps/v8/src/arm/assembler-arm.h

@@ -746,7 +746,7 @@ class Assembler : public AssemblerBase {
   // Returns the branch offset to the given label from the current code position
   // Links the label to the current position if it is still unbound
   // Manages the jump elimination optimization if the second parameter is true.
-  int branch_offset(Label* L, bool jump_elimination_allowed);
+  int branch_offset(Label* L);
 
   // Returns true if the given pc address is the start of a constant pool load
   // instruction sequence.

@@ -852,13 +852,11 @@ class Assembler : public AssemblerBase {
   void bx(Register target, Condition cond = al);  // v5 and above, plus v4t
 
   // Convenience branch instructions using labels
-  void b(Label* L, Condition cond = al) {
-    b(branch_offset(L, cond == al), cond);
-  }
-  void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
-  void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
-  void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
-  void blx(Label* L) { blx(branch_offset(L, false)); }  // v5 and above
+  void b(Label* L, Condition cond = al);
+  void b(Condition cond, Label* L) { b(L, cond); }
+  void bl(Label* L, Condition cond = al);
+  void bl(Condition cond, Label* L) { bl(L, cond); }
+  void blx(Label* L);  // v5 and above
 
   // Data-processing instructions

@@ -1536,6 +1534,12 @@
   // Check if is time to emit a constant pool.
   void CheckConstPool(bool force_emit, bool require_jump);
 
+  void MaybeCheckConstPool() {
+    if (pc_offset() >= next_buffer_check_) {
+      CheckConstPool(false, true);
+    }
+  }
+
   // Allocate a constant pool of the correct size for the generated code.
   Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
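
Aside: the hunks above turn the inline "is it time to emit the constant pool?" test into a named helper, MaybeCheckConstPool(), so that CheckBuffer() and the TurboFan backend (see the code-generator-arm.cc hunk below) can both poll it cheaply once per emitted instruction. A minimal standalone sketch of that deadline-polling pattern; Emitter, kCheckInterval and FlushPool are illustrative names, not V8's:

#include <cstdio>
#include <vector>

class Emitter {
 public:
  void Emit(int instr) {
    buffer_.push_back(instr);
    MaybeFlushPool();  // Cheap comparison on the fast path.
  }

 private:
  // Mirrors MaybeCheckConstPool(): only do the expensive work once the
  // running offset passes a precomputed deadline.
  void MaybeFlushPool() {
    if (static_cast<int>(buffer_.size()) >= next_check_) {
      FlushPool();
      next_check_ = static_cast<int>(buffer_.size()) + kCheckInterval;
    }
  }

  void FlushPool() { std::printf("pool flushed at %zu\n", buffer_.size()); }

  static const int kCheckInterval = 64;
  int next_check_ = kCheckInterval;
  std::vector<int> buffer_;
};

int main() {
  Emitter e;
  for (int i = 0; i < 200; i++) e.Emit(i);
  return 0;
}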

deps/v8/src/arm64/code-stubs-arm64.cc

@@ -2286,27 +2286,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   Register last_match_info_elements = x21;
   Register code_object = x22;
 
-  // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
-  CPURegList used_callee_saved_registers(subject,
-                                         regexp_data,
-                                         last_match_info_elements,
-                                         code_object);
-  __ PushCPURegList(used_callee_saved_registers);
-
   // Stack frame.
-  // jssp[0] : x19
-  // jssp[8] : x20
-  // jssp[16]: x21
-  // jssp[24]: x22
-  // jssp[32]: last_match_info (JSArray)
-  // jssp[40]: previous index
-  // jssp[48]: subject string
-  // jssp[56]: JSRegExp object
-
-  const int kLastMatchInfoOffset = 4 * kPointerSize;
-  const int kPreviousIndexOffset = 5 * kPointerSize;
-  const int kSubjectOffset = 6 * kPointerSize;
-  const int kJSRegExpOffset = 7 * kPointerSize;
+  // jssp[00]: last_match_info (JSArray)
+  // jssp[08]: previous index
+  // jssp[16]: subject string
+  // jssp[24]: JSRegExp object
+
+  const int kLastMatchInfoOffset = 0 * kPointerSize;
+  const int kPreviousIndexOffset = 1 * kPointerSize;
+  const int kSubjectOffset = 2 * kPointerSize;
+  const int kJSRegExpOffset = 3 * kPointerSize;
 
   // Ensure that a RegExp stack is allocated.
   ExternalReference address_of_regexp_stack_memory_address =

@@ -2673,7 +2662,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Return last match info.
   __ Peek(x0, kLastMatchInfoOffset);
-  __ PopCPURegList(used_callee_saved_registers);
   // Drop the 4 arguments of the stub from the stack.
   __ Drop(4);
   __ Ret();

@@ -2696,13 +2684,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ Bind(&failure);
   __ Mov(x0, Operand(isolate()->factory()->null_value()));
-  __ PopCPURegList(used_callee_saved_registers);
   // Drop the 4 arguments of the stub from the stack.
   __ Drop(4);
   __ Ret();
 
   __ Bind(&runtime);
-  __ PopCPURegList(used_callee_saved_registers);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 
   // Deferred code for string handling.

deps/v8/src/bootstrapper.cc

@@ -2813,6 +2813,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
       if (value->IsPropertyCell()) {
         value = handle(PropertyCell::cast(*value)->value(), isolate());
       }
+      if (value->IsTheHole()) continue;
       PropertyDetails details = properties->DetailsAt(i);
       DCHECK_EQ(kData, details.kind());
       JSObject::AddProperty(to, key, value, details.attributes());

deps/v8/src/compiler/arm/code-generator-arm.cc

@@ -316,6 +316,8 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   ArmOperandConverter i(this, instr);
 
+  masm()->MaybeCheckConstPool();
+
   switch (ArchOpcodeField::decode(instr->opcode())) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();

deps/v8/src/flag-definitions.h

@@ -199,7 +199,7 @@ DEFINE_IMPLICATION(es_staging, harmony)
 #define HARMONY_STAGED(V) \
   V(harmony_rest_parameters, "harmony rest parameters") \
   V(harmony_spreadcalls, "harmony spread-calls") \
-  V(harmony_tostring, "harmony toString") \
+  V(harmony_tostring, "harmony toString")
 
 // Features that are shipping (turned on by default, but internal flag remains).
 #define HARMONY_SHIPPING(V) \

deps/v8/src/hydrogen.cc

@@ -5219,9 +5219,12 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
     HValue* function = AddLoadJSBuiltin(Builtins::FILTER_KEY);
     Add<HPushArguments>(enumerable, key);
     key = Add<HInvokeFunction>(function, 2);
+    Push(key);
+    Add<HSimulate>(stmt->FilterId());
+    key = Pop();
     Bind(each_var, key);
+    Add<HSimulate>(stmt->AssignmentId());
     Add<HCheckHeapObject>(key);
-    Add<HSimulate>(stmt->AssignmentId());
   }
 
   BreakAndContinueInfo break_info(stmt, scope(), 5);

deps/v8/src/ic/ic.cc

@@ -1112,7 +1112,39 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
       code = slow_stub();
     }
   } else {
-    code = ComputeHandler(lookup);
+    if (lookup->state() == LookupIterator::ACCESSOR) {
+      Handle<Object> accessors = lookup->GetAccessors();
+      Handle<Map> map = receiver_map();
+      if (accessors->IsExecutableAccessorInfo()) {
+        Handle<ExecutableAccessorInfo> info =
+            Handle<ExecutableAccessorInfo>::cast(accessors);
+        if ((v8::ToCData<Address>(info->getter()) != 0) &&
+            !ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+                                                             map)) {
+          TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+          code = slow_stub();
+        }
+      } else if (accessors->IsAccessorPair()) {
+        Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+                              isolate());
+        Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+        Handle<Object> receiver = lookup->GetReceiver();
+        if (getter->IsJSFunction() && holder->HasFastProperties()) {
+          Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+          if (receiver->IsJSObject() || function->IsBuiltin() ||
+              !is_sloppy(function->shared()->language_mode())) {
+            CallOptimization call_optimization(function);
+            if (call_optimization.is_simple_api_call() &&
+                !call_optimization.IsCompatibleReceiver(receiver, holder)) {
+              TRACE_GENERIC_IC(isolate(), "LoadIC",
+                               "incompatible receiver type");
+              code = slow_stub();
+            }
+          }
+        }
+      }
+    }
+    if (code.is_null()) code = ComputeHandler(lookup);
   }
 
   PatchCache(lookup->name(), code);

@@ -1242,6 +1274,8 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
       if (v8::ToCData<Address>(info->getter()) == 0) break;
       if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
                                                            map)) {
+        // This case should be already handled in LoadIC::UpdateCaches.
+        UNREACHABLE();
         break;
       }
       if (!holder->HasFastProperties()) break;

@@ -1262,10 +1296,14 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
       }
       CallOptimization call_optimization(function);
       NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-      if (call_optimization.is_simple_api_call() &&
-          call_optimization.IsCompatibleReceiver(receiver, holder)) {
-        return compiler.CompileLoadCallback(lookup->name(), call_optimization,
-                                            lookup->GetAccessorIndex());
+      if (call_optimization.is_simple_api_call()) {
+        if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
+          return compiler.CompileLoadCallback(
+              lookup->name(), call_optimization, lookup->GetAccessorIndex());
+        } else {
+          // This case should be already handled in LoadIC::UpdateCaches.
+          UNREACHABLE();
+        }
       }
       int expected_arguments =
           function->shared()->internal_formal_parameter_count();

deps/v8/src/isolate.h

@@ -389,6 +389,7 @@ typedef List<HeapObject*> DebugObjectCache;
   V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
   V(PromiseRejectCallback, promise_reject_callback, NULL) \
   V(const v8::StartupData*, snapshot_blob, NULL) \
+  V(bool, creating_default_snapshot, false) \
   ISOLATE_INIT_SIMULATOR_LIST(V)
 
 #define THREAD_LOCAL_TOP_ACCESSOR(type, name) \

deps/v8/src/mips/assembler-mips-inl.h

@@ -500,8 +500,8 @@ void Assembler::CheckBuffer() {
 }
 
-void Assembler::CheckTrampolinePoolQuick() {
-  if (pc_offset() >= next_buffer_check_) {
+void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
+  if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
     CheckTrampolinePool();
   }
 }

deps/v8/src/mips/assembler-mips.cc

@@ -795,7 +795,7 @@ void Assembler::bind_to(Label* L, int pos) {
       trampoline_pos = get_trampoline_entry(fixup_pos);
       CHECK(trampoline_pos != kInvalidSlotPos);
     }
-    DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+    CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
     target_at_put(fixup_pos, trampoline_pos, false);
     fixup_pos = trampoline_pos;
     dist = pos - fixup_pos;

@@ -1415,6 +1415,7 @@ void Assembler::jal(int32_t target) {
 
 void Assembler::jalr(Register rs, Register rd) {
+  DCHECK(rs.code() != rd.code());
   BlockTrampolinePoolScope block_trampoline_pool(this);
   positions_recorder()->WriteRecordedPositions();
   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);

@@ -2633,6 +2634,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
 
 void Assembler::BlockTrampolinePoolFor(int instructions) {
+  CheckTrampolinePoolQuick(instructions);
   BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
 }

deps/v8/src/mips/assembler-mips.h

@@ -1253,7 +1253,7 @@ class Assembler : public AssemblerBase {
   inline void CheckBuffer();
   void GrowBuffer();
   inline void emit(Instr x);
-  inline void CheckTrampolinePoolQuick();
+  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
 
   // Instruction generation.
   // We have 3 different kind of encoding layout on MIPS.
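
Aside: CheckTrampolinePoolQuick() gains an extra_instructions look-ahead here, and BlockTrampolinePoolFor() (see the assembler-mips.cc hunk above) now calls it before blocking, so that a pool which would come due during the blocked stretch is emitted early instead of overshooting its maximum branch distance. A standalone sketch of that look-ahead, with illustrative names (PoolManager, kMaxDistance), not the MIPS assembler's real interface:

#include <cstdio>

class PoolManager {
 public:
  // Counterpart of BlockTrampolinePoolFor(): before blocking, check whether
  // the next `instructions` would carry the pc past the pool deadline.
  void BlockPoolFor(int instructions) {
    CheckPoolQuick(instructions);
    no_pool_before_ = pc_offset_ + instructions * kInstrSize;
  }

  // Counterpart of CheckTrampolinePoolQuick(int extra_instructions).
  void CheckPoolQuick(int extra_instructions = 0) {
    if (pc_offset_ >= next_check_ - extra_instructions * kInstrSize) {
      EmitPool();
    }
  }

  void EmitInstr() { pc_offset_ += kInstrSize; }

 private:
  void EmitPool() {
    std::printf("pool emitted at pc=%d\n", pc_offset_);
    next_check_ = pc_offset_ + kMaxDistance;
  }

  static const int kInstrSize = 4;
  static const int kMaxDistance = 1024;
  int pc_offset_ = 0;
  int next_check_ = kMaxDistance;
  int no_pool_before_ = 0;  // Pool emission is suppressed below this offset.
};

int main() {
  PoolManager pm;
  for (int i = 0; i < 300; i++) {
    if (i % 50 == 0) pm.BlockPoolFor(8);  // May emit the pool early.
    pm.EmitInstr();
  }
  return 0;
}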

deps/v8/src/mips/code-stubs-mips.cc

@@ -4028,8 +4028,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   intptr_t loc =
       reinterpret_cast<intptr_t>(GetCode().location());
   __ Move(t9, target);
-  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
-  __ Call(ra);
+  __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+  __ Call(at);
 }

deps/v8/src/mips64/assembler-mips64-inl.h

@@ -504,8 +504,8 @@ void Assembler::CheckBuffer() {
 }
 
-void Assembler::CheckTrampolinePoolQuick() {
-  if (pc_offset() >= next_buffer_check_) {
+void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
+  if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
     CheckTrampolinePool();
   }
 }

deps/v8/src/mips64/assembler-mips64.cc

@@ -780,7 +780,7 @@ void Assembler::bind_to(Label* L, int pos) {
       trampoline_pos = get_trampoline_entry(fixup_pos);
       CHECK(trampoline_pos != kInvalidSlotPos);
     }
-    DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+    CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
     target_at_put(fixup_pos, trampoline_pos, false);
     fixup_pos = trampoline_pos;
     dist = pos - fixup_pos;

@@ -1396,6 +1396,7 @@ void Assembler::jal(int64_t target) {
 
 void Assembler::jalr(Register rs, Register rd) {
+  DCHECK(rs.code() != rd.code());
   BlockTrampolinePoolScope block_trampoline_pool(this);
   positions_recorder()->WriteRecordedPositions();
   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);

@@ -2809,6 +2810,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
 
 void Assembler::BlockTrampolinePoolFor(int instructions) {
+  CheckTrampolinePoolQuick(instructions);
   BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
 }

deps/v8/src/mips64/assembler-mips64.h

@@ -1288,7 +1288,7 @@ class Assembler : public AssemblerBase {
   void GrowBuffer();
   inline void emit(Instr x);
   inline void emit(uint64_t x);
-  inline void CheckTrampolinePoolQuick();
+  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
 
   // Instruction generation.
   // We have 3 different kind of encoding layout on MIPS.

deps/v8/src/mips64/code-stubs-mips64.cc

@@ -4071,8 +4071,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   intptr_t loc =
       reinterpret_cast<intptr_t>(GetCode().location());
   __ Move(t9, target);
-  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
-  __ Call(ra);
+  __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+  __ Call(at);
 }

@@ -5285,9 +5285,9 @@ static void CallApiFunctionAndReturn(
   __ li(s3, Operand(next_address));
   __ ld(s0, MemOperand(s3, kNextOffset));
   __ ld(s1, MemOperand(s3, kLimitOffset));
-  __ ld(s2, MemOperand(s3, kLevelOffset));
-  __ Daddu(s2, s2, Operand(1));
-  __ sd(s2, MemOperand(s3, kLevelOffset));
+  __ lw(s2, MemOperand(s3, kLevelOffset));
+  __ Addu(s2, s2, Operand(1));
+  __ sw(s2, MemOperand(s3, kLevelOffset));
 
   if (FLAG_log_timer_events) {
     FrameScope frame(masm, StackFrame::MANUAL);

@@ -5328,11 +5328,11 @@ static void CallApiFunctionAndReturn(
   // previous handle scope.
   __ sd(s0, MemOperand(s3, kNextOffset));
   if (__ emit_debug_code()) {
-    __ ld(a1, MemOperand(s3, kLevelOffset));
+    __ lw(a1, MemOperand(s3, kLevelOffset));
     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
   }
-  __ Dsubu(s2, s2, Operand(1));
-  __ sd(s2, MemOperand(s3, kLevelOffset));
+  __ Subu(s2, s2, Operand(1));
+  __ sw(s2, MemOperand(s3, kLevelOffset));
   __ ld(at, MemOperand(s3, kLimitOffset));
   __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
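
Aside: the ld/sd-to-lw/sw change above is a width fix: the handle-scope level is a 32-bit field, so 64-bit loads and stores (and the Daddu/Dsubu arithmetic) read and wrote past it into neighboring memory. A small illustration of why the access width matters; ScopeData here is a stand-in layout, not V8's actual struct:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Stand-in layout: a 32-bit counter next to other 32-bit data, like the
// handle-scope level field the stub updates.
struct ScopeData {
  int32_t level;
  int32_t other;
};

int main() {
  ScopeData data = {1, 7};

  // Correct: a 32-bit access touches only `level` (what lw/sw do).
  data.level += 1;

  // Hazard: a 64-bit load spanning both fields (what ld did) pulls `other`
  // into the upper half, so compares and stores corrupt neighboring state.
  uint64_t both;
  std::memcpy(&both, &data, sizeof(both));
  std::printf("level=%d other=%d raw64=0x%llx\n", data.level, data.other,
              static_cast<unsigned long long>(both));
  return 0;
}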

deps/v8/src/objects.cc

@@ -3275,54 +3275,58 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
   if (found) return result;
 
   LookupIterator own_lookup(it->GetReceiver(), it->name(), LookupIterator::OWN);
+  for (; own_lookup.IsFound(); own_lookup.Next()) {
     switch (own_lookup.state()) {
-      case LookupIterator::NOT_FOUND:
-        return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
-                                         store_mode);
-
-      case LookupIterator::INTEGER_INDEXED_EXOTIC:
-        return result;
+      case LookupIterator::ACCESS_CHECK:
+        if (!own_lookup.HasAccess()) {
+          return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value,
+                                                            SLOPPY);
+        }
+        break;
+
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
+        return RedefineNonconfigurableProperty(it->isolate(), it->name(),
+                                               value, language_mode);
 
       case LookupIterator::DATA: {
         PropertyDetails details = own_lookup.property_details();
         if (details.IsConfigurable() || !details.IsReadOnly()) {
           return JSObject::SetOwnPropertyIgnoreAttributes(
               Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
               details.attributes());
         }
         return WriteToReadOnlyProperty(&own_lookup, value, language_mode);
       }
 
       case LookupIterator::ACCESSOR: {
         PropertyDetails details = own_lookup.property_details();
         if (details.IsConfigurable()) {
           return JSObject::SetOwnPropertyIgnoreAttributes(
               Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
               details.attributes());
         }
         return RedefineNonconfigurableProperty(it->isolate(), it->name(), value,
                                                language_mode);
       }
 
-      case LookupIterator::TRANSITION:
-        UNREACHABLE();
-        break;
-
       case LookupIterator::INTERCEPTOR:
-      case LookupIterator::JSPROXY:
-      case LookupIterator::ACCESS_CHECK: {
+      case LookupIterator::JSPROXY: {
         bool found = false;
         MaybeHandle<Object> result = SetPropertyInternal(
             &own_lookup, value, language_mode, store_mode, &found);
         if (found) return result;
-        return SetDataProperty(&own_lookup, value);
+        break;
       }
+
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
     }
+  }
 
-  UNREACHABLE();
-  return MaybeHandle<Object>();
+  return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
+                                   store_mode);
 }

@@ -14697,9 +14701,10 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
     PretenureFlag pretenure) {
   DCHECK(0 <= at_least_space_for);
   DCHECK(!capacity_option || base::bits::IsPowerOfTwo32(at_least_space_for));
   int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
                      ? at_least_space_for
-                     : isolate->serializer_enabled()
+                     : isolate->creating_default_snapshot()
                            ? ComputeCapacityForSerialization(at_least_space_for)
                            : ComputeCapacity(at_least_space_for);
   if (capacity > HashTable::kMaxCapacity) {

@@ -15692,6 +15697,14 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
 }
 
 
+String* StringTable::LookupKeyIfExists(Isolate* isolate, HashTableKey* key) {
+  Handle<StringTable> table = isolate->factory()->string_table();
+  int entry = table->FindEntry(key);
+  if (entry != kNotFound) return String::cast(table->KeyAt(entry));
+  return NULL;
+}
+
+
 Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
                                              Handle<Context> context,
                                              LanguageMode language_mode) {

deps/v8/src/objects.h

@@ -3553,6 +3553,7 @@ class StringTable: public HashTable<StringTable,
   // added.  The return value is the string found.
   static Handle<String> LookupString(Isolate* isolate, Handle<String> key);
   static Handle<String> LookupKey(Isolate* isolate, HashTableKey* key);
+  static String* LookupKeyIfExists(Isolate* isolate, HashTableKey* key);
 
   // Tries to internalize given string and returns string handle on success
   // or an empty handle otherwise.
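
Aside: LookupKeyIfExists() probes the string table without inserting, which is what lets the deserializer defer all insertions to CommitNewInternalizedStrings() (see the serialize.cc hunks below): misses are queued, capacity is ensured once, and the queued strings are inserted in a single batch. A rough sketch of that probe-then-batch-commit shape, using std::unordered_set as a stand-in for the string table:

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

class Interner {
 public:
  // Probe without inserting, like StringTable::LookupKeyIfExists(): a miss
  // is queued instead of mutating the table mid-deserialization.
  const std::string* LookupIfExists(const std::string& s) {
    auto it = table_.find(s);
    if (it == table_.end()) {
      pending_.push_back(s);
      return nullptr;
    }
    return &*it;
  }

  // Batch commit, like CommitNewInternalizedStrings(): grow once, then
  // insert everything that missed during deserialization.
  void CommitPending() {
    table_.reserve(table_.size() + pending_.size());
    for (const std::string& s : pending_) table_.insert(s);
    pending_.clear();
  }

 private:
  std::unordered_set<std::string> table_;
  std::vector<std::string> pending_;
};

int main() {
  Interner interner;
  interner.LookupIfExists("foo");  // Miss: queued, not inserted.
  interner.CommitPending();
  std::printf("hit: %d\n", interner.LookupIfExists("foo") != nullptr);
  return 0;
}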

deps/v8/src/snapshot/serialize.cc

@@ -516,10 +516,18 @@ void Deserializer::DecodeReservation(
 
 void Deserializer::FlushICacheForNewCodeObjects() {
-  PageIterator it(isolate_->heap()->code_space());
-  while (it.has_next()) {
-    Page* p = it.next();
-    CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
+  if (!deserializing_user_code_) {
+    // The entire isolate is newly deserialized. Simply flush all code pages.
+    PageIterator it(isolate_->heap()->code_space());
+    while (it.has_next()) {
+      Page* p = it.next();
+      CpuFeatures::FlushICache(p->area_start(),
+                               p->area_end() - p->area_start());
+    }
+  }
+  for (Code* code : new_code_objects_) {
+    CpuFeatures::FlushICache(code->instruction_start(),
+                             code->instruction_size());
   }
 }
@@ -556,10 +564,15 @@ void Deserializer::Deserialize(Isolate* isolate) {
   DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
   // No active handles.
   DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
-  isolate_->heap()->IterateSmiRoots(this);
-  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-  isolate_->heap()->RepairFreeListsAfterDeserialization();
-  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
+
+  {
+    DisallowHeapAllocation no_gc;
+    isolate_->heap()->IterateSmiRoots(this);
+    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+    isolate_->heap()->RepairFreeListsAfterDeserialization();
+    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
+    DeserializeDeferredObjects();
+  }
 
   isolate_->heap()->set_native_contexts_list(
       isolate_->heap()->undefined_value());
@@ -608,11 +621,12 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
   Object* root;
   Object* outdated_contexts;
   VisitPointer(&root);
+  DeserializeDeferredObjects();
   VisitPointer(&outdated_contexts);
 
-  // There's no code deserialized here. If this assert fires
-  // then that's changed and logging should be added to notify
-  // the profiler et al of the new code.
+  // There's no code deserialized here. If this assert fires then that's
+  // changed and logging should be added to notify the profiler et al of the
+  // new code, which also has to be flushed from instruction cache.
   CHECK_EQ(start_address, code_space->top());
   CHECK(outdated_contexts->IsFixedArray());
   *outdated_contexts_out =
@@ -628,10 +642,17 @@ MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
     return Handle<SharedFunctionInfo>();
   } else {
     deserializing_user_code_ = true;
-    DisallowHeapAllocation no_gc;
-    Object* root;
-    VisitPointer(&root);
-    return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+    HandleScope scope(isolate);
+    Handle<SharedFunctionInfo> result;
+    {
+      DisallowHeapAllocation no_gc;
+      Object* root;
+      VisitPointer(&root);
+      DeserializeDeferredObjects();
+      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+    }
+    CommitNewInternalizedStrings(isolate);
+    return scope.CloseAndEscape(result);
   }
 }
@@ -652,13 +673,21 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
 }
 
 
-void Deserializer::RelinkAllocationSite(AllocationSite* site) {
-  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
-    site->set_weak_next(isolate_->heap()->undefined_value());
-  } else {
-    site->set_weak_next(isolate_->heap()->allocation_sites_list());
-  }
-  isolate_->heap()->set_allocation_sites_list(site);
+void Deserializer::DeserializeDeferredObjects() {
+  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
+    int space = code & kSpaceMask;
+    DCHECK(space <= kNumberOfSpaces);
+    DCHECK(code - space == kNewObject);
+    HeapObject* object = GetBackReferencedObject(space);
+    int size = source_.GetInt() << kPointerSizeLog2;
+    Address obj_address = object->address();
+    Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
+    Object** end = reinterpret_cast<Object**>(obj_address + size);
+    bool filled = ReadData(start, end, space, obj_address);
+    CHECK(filled);
+    DCHECK(CanBeDeferred(object));
+    PostProcessNewObject(object, space);
+  }
 }
@@ -688,31 +717,76 @@ class StringTableInsertionKey : public HashTableKey {
     return handle(string_, isolate);
   }
 
+ private:
   String* string_;
   uint32_t hash_;
+  DisallowHeapAllocation no_gc;
 };
 
 
-HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
-  if (obj->IsString()) {
-    String* string = String::cast(obj);
-    // Uninitialize hash field as the hash seed may have changed.
-    string->set_hash_field(String::kEmptyHashField);
-    if (string->IsInternalizedString()) {
-      DisallowHeapAllocation no_gc;
-      HandleScope scope(isolate_);
-      StringTableInsertionKey key(string);
-      String* canonical = *StringTable::LookupKey(isolate_, &key);
-      string->SetForwardedInternalizedString(canonical);
-      return canonical;
-    }
-  } else if (obj->IsScript()) {
-    Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
-  }
-  return obj;
-}
+HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
+  if (deserializing_user_code()) {
+    if (obj->IsString()) {
+      String* string = String::cast(obj);
+      // Uninitialize hash field as the hash seed may have changed.
+      string->set_hash_field(String::kEmptyHashField);
+      if (string->IsInternalizedString()) {
+        // Canonicalize the internalized string. If it already exists in the
+        // string table, set it to forward to the existing one.
+        StringTableInsertionKey key(string);
+        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
+        if (canonical == NULL) {
+          new_internalized_strings_.Add(handle(string));
+          return string;
+        } else {
+          string->SetForwardedInternalizedString(canonical);
+          return canonical;
+        }
+      }
+    } else if (obj->IsScript()) {
+      // Assign a new script id to avoid collision.
+      Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
+    } else {
+      DCHECK(CanBeDeferred(obj));
+    }
+  }
+  if (obj->IsAllocationSite()) {
+    DCHECK(obj->IsAllocationSite());
+    // Allocation sites are present in the snapshot, and must be linked into
+    // a list at deserialization time.
+    AllocationSite* site = AllocationSite::cast(obj);
+    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+    // as a (weak) root. If this root is relocated correctly, this becomes
+    // unnecessary.
+    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+      site->set_weak_next(isolate_->heap()->undefined_value());
+    } else {
+      site->set_weak_next(isolate_->heap()->allocation_sites_list());
+    }
+    isolate_->heap()->set_allocation_sites_list(site);
+  } else if (obj->IsCode()) {
+    // We flush all code pages after deserializing the startup snapshot. In that
+    // case, we only need to remember code objects in the large object space.
+    // When deserializing user code, remember each individual code object.
+    if (deserializing_user_code() || space == LO_SPACE) {
+      new_code_objects_.Add(Code::cast(obj));
+    }
+  }
+  return obj;
+}
+
+
+void Deserializer::CommitNewInternalizedStrings(Isolate* isolate) {
+  StringTable::EnsureCapacityForDeserialization(
+      isolate, new_internalized_strings_.length());
+  for (Handle<String> string : new_internalized_strings_) {
+    StringTableInsertionKey key(*string);
+    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
+    StringTable::LookupKey(isolate, &key);
+  }
+}
+
+
 HeapObject* Deserializer::GetBackReferencedObject(int space) {
   HeapObject* obj;
   BackReference back_reference(source_.GetInt());
@@ -746,21 +820,10 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
   HeapObject* obj;
   int next_int = source_.GetInt();
 
-  bool double_align = false;
-#ifndef V8_HOST_ARCH_64_BIT
-  double_align = next_int == kDoubleAlignmentSentinel;
-  if (double_align) next_int = source_.GetInt();
-#endif
-
   DCHECK_NE(kDoubleAlignmentSentinel, next_int);
   int size = next_int << kObjectAlignmentBits;
-  int reserved_size = size + (double_align ? kPointerSize : 0);
-  address = Allocate(space_number, reserved_size);
+  address = Allocate(space_number, size);
   obj = HeapObject::FromAddress(address);
-  if (double_align) {
-    obj = isolate_->heap()->DoubleAlignForDeserialization(obj, reserved_size);
-    address = obj->address();
-  }
 
   isolate_->heap()->OnAllocationEvent(obj, size);
   Object** current = reinterpret_cast<Object**>(address);
@@ -768,24 +831,17 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
   if (FLAG_log_snapshot_positions) {
     LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
   }
-  ReadData(current, limit, space_number, address);
 
-  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
-  // as a (weak) root. If this root is relocated correctly,
-  // RelinkAllocationSite() isn't necessary.
-  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));
-
-  // Fix up strings from serialized user code.
-  if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
+  if (ReadData(current, limit, space_number, address)) {
+    // Only post process if object content has not been deferred.
+    obj = PostProcessNewObject(obj, space_number);
+  }
 
   Object* write_back_obj = obj;
   UnalignedCopy(write_back, &write_back_obj);
 #ifdef DEBUG
   if (obj->IsCode()) {
     DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
-#ifdef VERIFY_HEAP
-    obj->ObjectVerify();
-#endif  // VERIFY_HEAP
   } else {
     DCHECK(space_number != CODE_SPACE);
   }
@@ -829,7 +885,7 @@ Address Deserializer::Allocate(int space_index, int size) {
 }
 
-void Deserializer::ReadData(Object** current, Object** limit, int source_space,
+bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                             Address current_object_address) {
   Isolate* const isolate = isolate_;
   // Write barrier support costs around 1% in startup time.  In fact there
@@ -1086,6 +1142,18 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
         break;
       }
 
+      case kDeferred: {
+        // Deferred can only occur right after the heap object header.
+        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
+                                                     kPointerSize));
+        HeapObject* obj = HeapObject::FromAddress(current_object_address);
+        // If the deferred object is a map, its instance type may be used
+        // during deserialization. Initialize it with a temporary value.
+        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
+        current = limit;
+        return false;
+      }
+
       case kSynchronize:
         // If we get here then that indicates that you have a mismatch between
         // the number of GC roots when serializing and deserializing.
@@ -1192,6 +1260,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
     }
   }
   CHECK_EQ(limit, current);
+  return true;
 }
@@ -1200,6 +1269,7 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
       sink_(sink),
       external_reference_encoder_(isolate),
       root_index_map_(isolate),
+      recursion_depth_(0),
      code_address_map_(NULL),
       large_objects_total_size_(0),
       seen_large_objects_index_(0) {
@@ -1275,6 +1345,16 @@ void Serializer::OutputStatistics(const char* name) {
 }
 
 
+void Serializer::SerializeDeferredObjects() {
+  while (deferred_objects_.length() > 0) {
+    HeapObject* obj = deferred_objects_.RemoveLast();
+    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
+    obj_serializer.SerializeDeferred();
+  }
+  sink_->Put(kSynchronize, "Finished with deferred objects");
+}
+
+
 void StartupSerializer::SerializeStrongReferences() {
   Isolate* isolate = this->isolate();
   // No active threads.
@@ -1318,6 +1398,7 @@ void PartialSerializer::Serialize(Object** o) {
     back_reference_map()->AddGlobalProxy(context->global_proxy());
   }
   VisitPointer(o);
+  SerializeDeferredObjects();
   SerializeOutdatedContextsAsFixedArray();
   Pad();
 }
@@ -1342,10 +1423,10 @@ void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
       sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
     }
     for (int i = 0; i < length; i++) {
-      BackReference back_ref = outdated_contexts_[i];
-      DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
-      sink_->Put(kBackref + back_ref.space(), "BackRef");
-      sink_->PutInt(back_ref.reference(), "BackRefValue");
+      Context* context = outdated_contexts_[i];
+      BackReference back_reference = back_reference_map_.Lookup(context);
+      sink_->Put(kBackref + back_reference.space(), "BackRef");
+      PutBackReference(context, back_reference);
     }
   }
 }
@@ -1508,10 +1589,7 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                  "BackRefWithSkip");
       sink_->PutInt(skip, "BackRefSkipDistance");
     }
-    DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
-    sink_->PutInt(back_reference.reference(), "BackRefValue");
-
-    hot_objects_.Add(obj);
+    PutBackReference(obj, back_reference);
   }
   return true;
 }
@@ -1547,7 +1625,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
 }
 
-void StartupSerializer::SerializeWeakReferences() {
+void StartupSerializer::SerializeWeakReferencesAndDeferred() {
   // This phase comes right after the serialization (of the snapshot).
   // After we have done the partial serialization the partial snapshot cache
   // will contain some references needed to decode the partial snapshot. We
@@ -1556,6 +1634,7 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
   Object* undefined = isolate()->heap()->undefined_value();
   VisitPointer(&undefined);
   isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
+  SerializeDeferredObjects();
   Pad();
 }
@@ -1588,6 +1667,13 @@ void Serializer::PutRoot(int root_index,
 }
 
 
+void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
+  DCHECK(BackReferenceIsAlreadyAllocated(reference));
+  sink_->PutInt(reference.reference(), "BackRefValue");
+  hot_objects_.Add(object);
+}
+
+
 void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                         WhereToPoint where_to_point, int skip) {
   if (obj->IsMap()) {
@@ -1641,9 +1727,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
       Context::cast(obj)->global_object() == global_object_) {
     // Context refers to the current global object. This reference will
     // become outdated after deserialization.
-    BackReference back_reference = back_reference_map_.Lookup(obj);
-    DCHECK(back_reference.is_valid());
-    outdated_contexts_.Add(back_reference);
+    outdated_contexts_.Add(Context::cast(obj));
   }
 }
@@ -1671,17 +1755,8 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
     }
     back_reference = serializer_->AllocateLargeObject(size);
   } else {
-    bool needs_double_align = false;
-    if (object_->NeedsToEnsureDoubleAlignment()) {
-      // Add wriggle room for double alignment padding.
-      back_reference = serializer_->Allocate(space, size + kPointerSize);
-      needs_double_align = true;
-    } else {
-      back_reference = serializer_->Allocate(space, size);
-    }
+    back_reference = serializer_->Allocate(space, size);
     sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
-    if (needs_double_align)
-      sink_->PutInt(kDoubleAlignmentSentinel, "DoubleAlignSentinel");
     int encoded_size = size >> kObjectAlignmentBits;
     DCHECK_NE(kDoubleAlignmentSentinel, encoded_size);
     sink_->PutInt(encoded_size, "ObjectSizeInWords");
@@ -1773,6 +1848,9 @@ void Serializer::ObjectSerializer::Serialize() {
   // We cannot serialize typed array objects correctly.
   DCHECK(!object_->IsJSTypedArray());
 
+  // We don't expect fillers.
+  DCHECK(!object_->IsFiller());
+
   if (object_->IsPrototypeInfo()) {
     Object* prototype_users = PrototypeInfo::cast(object_)->prototype_users();
     if (prototype_users->IsWeakFixedArray()) {
@@ -1810,6 +1888,39 @@ void Serializer::ObjectSerializer::Serialize() {
   CHECK_EQ(0, bytes_processed_so_far_);
   bytes_processed_so_far_ = kPointerSize;
 
+  RecursionScope recursion(serializer_);
+  // Objects that are immediately post processed during deserialization
+  // cannot be deferred, since post processing requires the object content.
+  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
+    serializer_->QueueDeferredObject(object_);
+    sink_->Put(kDeferred, "Deferring object content");
+    return;
+  }
+
+  object_->IterateBody(map->instance_type(), size, this);
+  OutputRawData(object_->address() + size);
+}
+
+
+void Serializer::ObjectSerializer::SerializeDeferred() {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding deferred heap object: ");
+    object_->ShortPrint();
+    PrintF("\n");
+  }
+
+  int size = object_->Size();
+  Map* map = object_->map();
+  BackReference reference = serializer_->back_reference_map()->Lookup(object_);
+
+  // Serialize the rest of the object.
+  CHECK_EQ(0, bytes_processed_so_far_);
+  bytes_processed_so_far_ = kPointerSize;
+
+  sink_->Put(kNewObject + reference.space(), "deferred object");
+  serializer_->PutBackReference(object_, reference);
+  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
+
   object_->IterateBody(map->instance_type(), size, this);
   OutputRawData(object_->address() + size);
 }
@@ -2134,6 +2245,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
   DisallowHeapAllocation no_gc;
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
+  cs.SerializeDeferredObjects();
   cs.Pad();
 
   SerializedCodeData data(sink.data(), cs);
@@ -2212,8 +2324,6 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
 void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
                                       HowToCode how_to_code,
                                       WhereToPoint where_to_point) {
-  if (heap_object->IsInternalizedString()) num_internalized_strings_++;
-
   // Object has not yet been serialized.  Serialize it here.
   ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
                               where_to_point);
@@ -2325,10 +2435,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
     return MaybeHandle<SharedFunctionInfo>();
   }
 
-  // Eagerly expand string table to avoid allocations during deserialization.
-  StringTable::EnsureCapacityForDeserialization(isolate,
-                                                scd->NumInternalizedStrings());
-
   // Prepare and register list of attached objects.
   Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
   Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
@@ -2492,7 +2598,6 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
   SetHeaderValue(kCpuFeaturesOffset,
                  static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
   SetHeaderValue(kFlagHashOffset, FlagList::Hash());
-  SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
   SetHeaderValue(kNumReservationsOffset, reservations.length());
   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
   SetHeaderValue(kPayloadLengthOffset, payload.length());
@@ -2570,10 +2675,6 @@ Vector<const byte> SerializedCodeData::Payload() const {
 }
 
 
-int SerializedCodeData::NumInternalizedStrings() const {
-  return GetHeaderValue(kNumInternalizedStringsOffset);
-}
-
 Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
   const byte* start = data_ + kHeaderSize + reservations_size;

deps/v8/src/snapshot/serialize.h

@@ -306,6 +306,10 @@ class SerializerDeserializer: public ObjectVisitor {
   static const int kNumberOfSpaces = LAST_SPACE + 1;
 
  protected:
+  static bool CanBeDeferred(HeapObject* o) {
+    return !o->IsString() && !o->IsScript();
+  }
+
   // ---------- byte code range 0x00..0x7f ----------
   // Byte codes in this range represent Where, HowToCode and WhereToPoint.
   // Where the pointed-to object can be found:
@@ -373,6 +377,8 @@ class SerializerDeserializer: public ObjectVisitor {
   static const int kNop = 0x3d;
   // Move to next reserved chunk.
   static const int kNextChunk = 0x3e;
+  // Deferring object content.
+  static const int kDeferred = 0x3f;
   // A tag emitted at strategic points in the snapshot to delineate sections.
   // If the deserializer does not find these at the expected moments then it
   // is an indication that the snapshot and the VM do not fit together.
@@ -553,22 +559,22 @@ class Deserializer: public SerializerDeserializer {
     memcpy(dest, src, sizeof(*src));
   }
 
-  // Allocation sites are present in the snapshot, and must be linked into
-  // a list at deserialization time.
-  void RelinkAllocationSite(AllocationSite* site);
+  void DeserializeDeferredObjects();
+
+  void CommitNewInternalizedStrings(Isolate* isolate);
 
   // Fills in some heap data in an area from start to end (non-inclusive).  The
   // space id is used for the write barrier.  The object_address is the address
   // of the object we are writing into, or NULL if we are not writing into an
   // object, i.e. if we are writing a series of tagged values that are not on
-  // the heap.
-  void ReadData(Object** start, Object** end, int space,
+  // the heap. Return false if the object content has been deferred.
+  bool ReadData(Object** start, Object** end, int space,
                 Address object_address);
   void ReadObject(int space_number, Object** write_back);
   Address Allocate(int space_index, int size);
 
   // Special handling for serialized code like hooking up internalized strings.
-  HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
+  HeapObject* PostProcessNewObject(HeapObject* obj, int space);
 
   // This returns the address of an object that has been described in the
   // snapshot by chunk index and offset.
@ -594,6 +600,8 @@ class Deserializer: public SerializerDeserializer {
ExternalReferenceTable* external_reference_table_; ExternalReferenceTable* external_reference_table_;
List<HeapObject*> deserialized_large_objects_; List<HeapObject*> deserialized_large_objects_;
List<Code*> new_code_objects_;
List<Handle<String> > new_internalized_strings_;
bool deserializing_user_code_;
@@ -612,6 +620,8 @@ class Serializer : public SerializerDeserializer {
void EncodeReservations(List<SerializedData::Reservation>* out) const;
void SerializeDeferredObjects();
Isolate* isolate() const { return isolate_; }
BackReferenceMap* back_reference_map() { return &back_reference_map_; }
@@ -634,6 +644,7 @@ class Serializer : public SerializerDeserializer {
is_code_object_(o->IsCode()),
code_has_been_output_(false) {}
void Serialize();
void SerializeDeferred();
void VisitPointers(Object** start, Object** end);
void VisitEmbeddedPointer(RelocInfo* target);
void VisitExternalReference(Address* p);
@@ -675,12 +686,29 @@ class Serializer : public SerializerDeserializer {
bool code_has_been_output_;
};
class RecursionScope {
public:
explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
serializer_->recursion_depth_++;
}
~RecursionScope() { serializer_->recursion_depth_--; }
bool ExceedsMaximum() {
return serializer_->recursion_depth_ >= kMaxRecursionDepth;
}
private:
static const int kMaxRecursionDepth = 32;
Serializer* serializer_;
};
virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) = 0;
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
void PutBackReference(HeapObject* object, BackReference reference);
// Returns true if the object was successfully serialized.
bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
@@ -722,6 +750,11 @@ class Serializer : public SerializerDeserializer {
SnapshotByteSink* sink() const { return sink_; }
void QueueDeferredObject(HeapObject* obj) {
DCHECK(back_reference_map_.Lookup(obj).is_valid());
deferred_objects_.Add(obj);
}
void OutputStatistics(const char* name);
Isolate* isolate_;
@@ -732,8 +765,11 @@ class Serializer : public SerializerDeserializer {
BackReferenceMap back_reference_map_;
RootIndexMap root_index_map_;
int recursion_depth_;
friend class Deserializer;
friend class ObjectSerializer;
friend class RecursionScope;
friend class SnapshotData;
private:
@@ -752,6 +788,9 @@ class Serializer : public SerializerDeserializer {
List<byte> code_buffer_;
// To handle stack overflow.
List<HeapObject*> deferred_objects_;
#ifdef OBJECT_PRINT
static const int kInstanceTypes = 256;
int* instance_type_count_;
@@ -797,7 +836,7 @@ class PartialSerializer : public Serializer {
void SerializeOutdatedContextsAsFixedArray();
Serializer* startup_serializer_;
-List<BackReference> outdated_contexts_;
+List<Context*> outdated_contexts_;
Object* global_object_;
PartialCacheIndexMap partial_cache_index_map_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
@@ -829,11 +868,10 @@ class StartupSerializer : public Serializer {
virtual void SerializeStrongReferences();
virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
-void SerializeWeakReferences();
+void SerializeWeakReferencesAndDeferred();
void Serialize() {
SerializeStrongReferences();
-SerializeWeakReferences();
-Pad();
+SerializeWeakReferencesAndDeferred();
}
private:
@@ -862,15 +900,11 @@ class CodeSerializer : public Serializer {
}
const List<uint32_t>* stub_keys() const { return &stub_keys_; }
-int num_internalized_strings() const { return num_internalized_strings_; }
private:
CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
Code* main_code)
-: Serializer(isolate, sink),
-source_(source),
-main_code_(main_code),
-num_internalized_strings_(0) {
+: Serializer(isolate, sink), source_(source), main_code_(main_code) {
back_reference_map_.AddSourceString(source);
}
@@ -892,7 +926,6 @@ class CodeSerializer : public Serializer {
DisallowHeapAllocation no_gc_;
String* source_;
Code* main_code_;
-int num_internalized_strings_;
List<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
@@ -951,7 +984,6 @@ class SerializedCodeData : public SerializedData {
Vector<const Reservation> Reservations() const;
Vector<const byte> Payload() const;
-int NumInternalizedStrings() const;
Vector<const uint32_t> CodeStubKeys() const;
private:
@@ -972,17 +1004,16 @@ class SerializedCodeData : public SerializedData {
uint32_t SourceHash(String* source) const { return source->length(); }
// The data header consists of uint32_t-sized entries:
-// [ 0] magic number and external reference count
-// [ 1] version hash
-// [ 2] source hash
-// [ 3] cpu features
-// [ 4] flag hash
-// [ 5] number of internalized strings
-// [ 6] number of code stub keys
-// [ 7] number of reservation size entries
-// [ 8] payload length
-// [ 9] payload checksum part 1
-// [10] payload checksum part 2
+// [0] magic number and external reference count
+// [1] version hash
+// [2] source hash
+// [3] cpu features
+// [4] flag hash
+// [5] number of code stub keys
+// [6] number of reservation size entries
+// [7] payload length
+// [8] payload checksum part 1
+// [9] payload checksum part 2
// ... reservations
// ... code stub keys
// ... serialized payload
@@ -990,9 +1021,7 @@ class SerializedCodeData : public SerializedData {
static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
-static const int kNumInternalizedStringsOffset = kFlagHashOffset + kInt32Size;
-static const int kNumReservationsOffset =
-    kNumInternalizedStringsOffset + kInt32Size;
+static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
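
Note: the RecursionScope and deferred-object plumbing added to serialize.h above caps serializer recursion at 32 frames and serializes anything deeper from a flat worklist afterwards. A minimal standalone sketch of that pattern, with toy types (none of these names are V8's):

#include <cstdio>
#include <vector>

// Toy object graph node; assumes an acyclic graph.
struct Node {
  int id;
  std::vector<Node*> children;
};

class ToySerializer {
 public:
  void Serialize(Node* n) {
    SerializeInner(n);
    // Flat pass: deferred objects are emitted from a worklist, so an
    // arbitrarily deep graph cannot overflow the C++ stack.
    while (!deferred_.empty()) {
      Node* d = deferred_.back();
      deferred_.pop_back();
      SerializeInner(d);
    }
  }

 private:
  static constexpr int kMaxRecursionDepth = 32;

  struct RecursionScope {
    explicit RecursionScope(ToySerializer* s) : s_(s) { s_->depth_++; }
    ~RecursionScope() { s_->depth_--; }
    bool ExceedsMaximum() const { return s_->depth_ >= kMaxRecursionDepth; }
    ToySerializer* s_;
  };

  void SerializeInner(Node* n) {
    RecursionScope scope(this);
    if (scope.ExceedsMaximum()) {
      deferred_.push_back(n);  // reference emitted now, content deferred
      return;
    }
    std::printf("object %d\n", n->id);
    for (Node* c : n->children) SerializeInner(c);
  }

  int depth_ = 0;
  std::vector<Node*> deferred_;
};

int main() {
  // A chain deep enough to trip the recursion cap several times over.
  const int kDepth = 100;
  std::vector<Node> arena(kDepth);
  for (int i = 0; i < kDepth; ++i) {
    arena[i].id = i;
    if (i + 1 < kDepth) arena[i].children.push_back(&arena[i + 1]);
  }
  ToySerializer s;
  s.Serialize(&arena[0]);
  return 0;
}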

2
deps/v8/src/unicode-decoder.cc

@@ -67,6 +67,7 @@ void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
// There's a total lack of bounds checking for stream
// as it was already done in Reset.
stream += cursor;
DCHECK(stream_length >= cursor);
stream_length -= cursor;
if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
*data++ = Utf16::LeadSurrogate(character);
@@ -78,7 +79,6 @@ void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
data_length -= 1;
}
}
-DCHECK(stream_length >= 0);
}
} // namespace unibrow
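
For reference, the surrogate-pair split that WriteUtf16Slow performs for code points above kMaxNonSurrogateCharCode (0xFFFF) is plain bit arithmetic; a self-contained sketch, not V8's unibrow code:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Split a code point above U+FFFF into a UTF-16 surrogate pair.
void EncodeSurrogates(uint32_t code_point, uint16_t* lead, uint16_t* trail) {
  assert(code_point > 0xFFFF && code_point <= 0x10FFFF);
  uint32_t v = code_point - 0x10000;  // 20 payload bits
  *lead = 0xD800 + (v >> 10);         // high 10 bits
  *trail = 0xDC00 + (v & 0x3FF);      // low 10 bits
}

int main() {
  // U+1D01E, the code point spelled 0xf0 0x9d 0x80 0x9e in UTF-8
  // (the byte pattern used by the Utf16MissingTrailing test later
  // in this diff).
  uint16_t lead, trail;
  EncodeSurrogates(0x1D01E, &lead, &trail);
  std::printf("U+1D01E -> %04X %04X\n", lead, trail);  // D834 DC1E
  return 0;
}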

16
deps/v8/src/x64/lithium-x64.cc

@@ -1313,7 +1313,13 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+LOperand* right;
if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
// We don't support tagged immediates, so we request it in a register.
right = UseRegisterAtStart(instr->BetterRightOperand());
} else {
right = UseOrConstantAtStart(instr->BetterRightOperand());
}
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
return DoArithmeticT(instr->op(), instr);
@@ -1555,7 +1561,13 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
-LOperand* right = UseOrConstantAtStart(instr->right());
+LOperand* right;
if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
// We don't support tagged immediates, so we request it in a register.
right = UseRegisterAtStart(instr->right());
} else {
right = UseOrConstantAtStart(instr->right());
}
LSubI* sub = new(zone()) LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
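
Background for the two hunks above: with 32-bit Smis on x64, the Smi payload lives in the upper half of the 64-bit word, so even a small tagged constant cannot be encoded in the 32-bit immediate field of most x64 instructions. A quick standalone illustration of the tagging scheme:

#include <cstdint>
#include <cstdio>

// With SmiValuesAre32Bits(), the 32-bit payload is stored in the upper
// half of the 64-bit word; the low 32 bits are the (zero) tag.
int64_t TagSmi(int32_t value) { return static_cast<int64_t>(value) << 32; }

int main() {
  int64_t tagged = TagSmi(42);
  // 42 tagged this way is 0x0000002A00000000 -- far outside imm32 range,
  // so the operand has to be materialized in a register instead.
  std::printf("tagged = %llx\n", static_cast<unsigned long long>(tagged));
  bool fits_imm32 = tagged >= INT32_MIN && tagged <= INT32_MAX;
  std::printf("fits imm32: %s\n", fits_imm32 ? "yes" : "no");
  return 0;
}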

90
deps/v8/test/cctest/test-api.cc

@@ -7318,6 +7318,57 @@ THREADED_TEST(Utf16Symbol) {
}
THREADED_TEST(Utf16MissingTrailing) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
// Make sure it will go past the buffer, so it will call `WriteUtf16Slow`
int size = 1024 * 64;
uint8_t* buffer = new uint8_t[size];
for (int i = 0; i < size; i += 4) {
buffer[i] = 0xf0;
buffer[i + 1] = 0x9d;
buffer[i + 2] = 0x80;
buffer[i + 3] = 0x9e;
}
// Now invoke the decoder without last 3 bytes
v8::Local<v8::String> str =
v8::String::NewFromUtf8(
context->GetIsolate(), reinterpret_cast<char*>(buffer),
v8::NewStringType::kNormal, size - 3).ToLocalChecked();
USE(str);
delete[] buffer;
}
THREADED_TEST(Utf16Trailing3Byte) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
// Make sure it will go past the buffer, so it will call `WriteUtf16Slow`
int size = 1024 * 63;
uint8_t* buffer = new uint8_t[size];
for (int i = 0; i < size; i += 3) {
buffer[i] = 0xe2;
buffer[i + 1] = 0x80;
buffer[i + 2] = 0xa6;
}
// Now invoke the decoder on the whole buffer
v8::Local<v8::String> str =
v8::String::NewFromUtf8(
context->GetIsolate(), reinterpret_cast<char*>(buffer),
v8::NewStringType::kNormal, size).ToLocalChecked();
v8::String::Value value(str);
CHECK_EQ(value.length(), size / 3);
CHECK_EQ((*value)[value.length() - 1], 0x2026);
delete[] buffer;
}
THREADED_TEST(ToArrayIndex) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -20927,3 +20978,42 @@ TEST(SealHandleScopeNested) {
USE(obj);
}
}
TEST(CompatibleReceiverCheckOnCachedICHandler) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::FunctionTemplate> parent = FunctionTemplate::New(isolate);
v8::Local<v8::Signature> signature = v8::Signature::New(isolate, parent);
auto returns_42 =
v8::FunctionTemplate::New(isolate, Returns42, Local<Value>(), signature);
parent->PrototypeTemplate()->SetAccessorProperty(v8_str("age"), returns_42);
v8::Local<v8::FunctionTemplate> child = v8::FunctionTemplate::New(isolate);
child->Inherit(parent);
LocalContext env;
env->Global()->Set(v8_str("Child"), child->GetFunction());
// Make sure there's a compiled stub for "Child.prototype.age" in the cache.
CompileRun(
"var real = new Child();\n"
"for (var i = 0; i < 3; ++i) {\n"
" real.age;\n"
"}\n");
// Check that the cached stub is never used.
ExpectInt32(
"var fake = Object.create(Child.prototype);\n"
"var result = 0;\n"
"function test(d) {\n"
" if (d == 3) return;\n"
" try {\n"
" fake.age;\n"
" result = 1;\n"
" } catch (e) {\n"
" }\n"
" test(d+1);\n"
"}\n"
"test(0);\n"
"result;\n",
0);
}
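
The Utf16Trailing3Byte test above expects each e2 80 a6 triple to decode to U+2026 (horizontal ellipsis). The 3-byte UTF-8 arithmetic it relies on, as a self-contained sketch:

#include <cstdint>
#include <cstdio>

// Decode one well-formed 3-byte UTF-8 sequence into a code point.
uint32_t DecodeUtf8ThreeBytes(const uint8_t* b) {
  return ((b[0] & 0x0Fu) << 12) | ((b[1] & 0x3Fu) << 6) | (b[2] & 0x3Fu);
}

int main() {
  const uint8_t ellipsis[] = {0xE2, 0x80, 0xA6};
  std::printf("U+%04X\n", DecodeUtf8ThreeBytes(ellipsis));  // U+2026
  return 0;
}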

59
deps/v8/test/cctest/test-assembler-arm.cc

@@ -1981,4 +1981,63 @@ TEST(ARMv8_vrintX) {
#undef CHECK_VRINT
}
}
TEST(regress4292_b) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
__ b(hi, &end);
}
__ bind(&end);
}
TEST(regress4292_bl) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
__ bl(hi, &end);
}
__ bind(&end);
}
TEST(regress4292_blx) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
__ blx(&end);
}
__ bind(&end);
}
TEST(regress4292_CheckConstPool) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
__ mov(r0, Operand(isolate->factory()->infinity_value()));
__ BlockConstPoolFor(1019);
for (int i = 0; i < 1019; ++i) __ nop();
__ vldr(d0, MemOperand(r0, 0));
}
#undef __
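
The magic numbers near 1020 in these tests come from the instructions that load from the constant pool: ARM vldr takes an 8-bit immediate scaled by 4 bytes, so a pool entry is only reachable within roughly ±1020 bytes of the load. A sketch of that bound, assuming the imm8-times-4 encoding:

#include <cstdio>

int main() {
  // ARM vldr literal load: 8-bit immediate, scaled by 4 bytes.
  const int kImmBits = 8;
  const int kScale = 4;
  int max_offset = ((1 << kImmBits) - 1) * kScale;  // 255 * 4 = 1020
  std::printf("max literal-pool offset: +/-%d bytes\n", max_offset);
  return 0;
}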

3
deps/v8/test/cctest/test-assembler-mips.cc

@@ -1672,6 +1672,7 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases + 7) * Assembler::kInstrSize);
Label here;
@@ -1748,6 +1749,7 @@ TEST(jump_tables2) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases + 7) * Assembler::kInstrSize);
Label here;
@@ -1823,6 +1825,7 @@ TEST(jump_tables3) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases + 7) * Assembler::kInstrSize);
Label here;

3
deps/v8/test/cctest/test-assembler-mips64.cc

@@ -1889,6 +1889,7 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
Label here;
@@ -1968,6 +1969,7 @@ TEST(jump_tables2) {
}
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
Label here;
@@ -2049,6 +2051,7 @@ TEST(jump_tables3) {
}
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
Label here;

90
deps/v8/test/cctest/test-macro-assembler-mips.cc

@@ -26,18 +26,20 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include <iostream> // NOLINT(readability/streams)
#include "src/v8.h" #include "src/base/utils/random-number-generator.h"
#include "test/cctest/cctest.h"
#include "src/macro-assembler.h" #include "src/macro-assembler.h"
#include "src/mips/macro-assembler-mips.h" #include "src/mips/macro-assembler-mips.h"
#include "src/mips/simulator-mips.h" #include "src/mips/simulator-mips.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
using namespace v8::internal; using namespace v8::internal;
typedef void* (*F)(int x, int y, int p2, int p3, int p4); typedef void* (*F)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
#define __ masm->
@@ -174,4 +176,86 @@ TEST(NaN1) {
}
TEST(jump_tables4) {
// Similar to test-assembler-mips jump_tables1, with extra test for branch
// trampoline required before emission of the dd table (where trampolines are
// blocked), and proper transition to long-branch mode.
// Regression test for v8:4294.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assembler(isolate, NULL, 0);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
Label near_start, end;
__ addiu(sp, sp, -4);
__ sw(ra, MemOperand(sp));
__ mov(v0, zero_reg);
__ Branch(&end);
__ bind(&near_start);
// Generate slightly less than 32K instructions, which will soon require
// trampoline for branch distance fixup.
for (int i = 0; i < 32768 - 256; ++i) {
__ addiu(v0, v0, 1);
}
Label done;
{
__ BlockTrampolinePoolFor(kNumCases + 6);
PredictableCodeSizeScope predictable(
masm, (kNumCases + 6) * Assembler::kInstrSize);
Label here;
__ bal(&here);
__ sll(at, a0, 2); // In delay slot.
__ bind(&here);
__ addu(at, at, ra);
__ lw(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop(); // Branch delay slot nop.
for (int i = 0; i < kNumCases; ++i) {
__ dd(&labels[i]);
}
}
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
__ lui(v0, (values[i] >> 16) & 0xffff);
__ ori(v0, v0, values[i] & 0xffff);
__ Branch(&done);
}
__ bind(&done);
__ lw(ra, MemOperand(sp));
__ addiu(sp, sp, 4);
__ jr(ra);
__ nop();
__ bind(&end);
__ Branch(&near_start);
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
}
#undef __
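
The code generated in jump_tables4 is a classic PC-relative jump table: scale the index, add a base, load a code address, jump through it. The same dispatch shape in portable C++, an illustrative sketch with function pointers standing in for the dd-emitted labels:

#include <cstdio>

// One handler per case, mirroring the bound labels in the jump table.
static int Case0() { return 100; }
static int Case1() { return 200; }
static int Case2() { return 300; }

int Dispatch(int index) {
  // The table of code addresses corresponds to the __ dd(&labels[i])
  // entries the macro assembler emits after the indirect jump.
  static int (*const kTable[])() = {Case0, Case1, Case2};
  return kTable[index]();  // analogue of "__ jr(at)": jump via the table
}

int main() {
  for (int i = 0; i < 3; ++i) std::printf("f(%d) = %d\n", i, Dispatch(i));
  return 0;
}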

89
deps/v8/test/cctest/test-macro-assembler-mips64.cc

@@ -26,10 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include <iostream> // NOLINT(readability/streams)
#include "src/v8.h" #include "src/v8.h"
#include "test/cctest/cctest.h" #include "test/cctest/cctest.h"
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h" #include "src/macro-assembler.h"
#include "src/mips64/macro-assembler-mips64.h" #include "src/mips64/macro-assembler-mips64.h"
#include "src/mips64/simulator-mips64.h" #include "src/mips64/simulator-mips64.h"
@ -38,6 +40,7 @@
using namespace v8::internal; using namespace v8::internal;
typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4); typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
#define __ masm->
@@ -214,4 +217,90 @@ TEST(LoadAddress) {
// Check results.
}
TEST(jump_tables4) {
// Similar to test-assembler-mips jump_tables1, with extra test for branch
// trampoline required before emission of the dd table (where trampolines are
// blocked), and proper transition to long-branch mode.
// Regression test for v8:4294.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assembler(isolate, NULL, 0);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
Label near_start, end;
__ daddiu(sp, sp, -8);
__ sd(ra, MemOperand(sp));
if ((masm->pc_offset() & 7) == 0) {
__ nop();
}
__ mov(v0, zero_reg);
__ Branch(&end);
__ bind(&near_start);
// Generate slightly less than 32K instructions, which will soon require
// trampoline for branch distance fixup.
for (int i = 0; i < 32768 - 256; ++i) {
__ addiu(v0, v0, 1);
}
Label done;
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
PredictableCodeSizeScope predictable(
masm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
Label here;
__ bal(&here);
__ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
__ daddu(at, at, ra);
__ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop(); // Branch delay slot nop.
for (int i = 0; i < kNumCases; ++i) {
__ dd(&labels[i]);
}
}
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
__ lui(v0, (values[i] >> 16) & 0xffff);
__ ori(v0, v0, values[i] & 0xffff);
__ Branch(&done);
}
__ bind(&done);
__ ld(ra, MemOperand(sp));
__ daddiu(sp, sp, 8);
__ jr(ra);
__ nop();
__ bind(&end);
__ Branch(&near_start);
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int64_t res =
reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
}
#undef __

95
deps/v8/test/cctest/test-serialize.cc

@@ -329,7 +329,7 @@ UNINITIALIZED_TEST(PartialSerialization) {
&partial_sink);
partial_serializer.Serialize(&raw_foo);
-startup_serializer.SerializeWeakReferences();
+startup_serializer.SerializeWeakReferencesAndDeferred();
SnapshotData startup_snapshot(startup_serializer);
SnapshotData partial_snapshot(partial_serializer);
@@ -447,7 +447,7 @@ UNINITIALIZED_TEST(ContextSerialization) {
PartialSerializer partial_serializer(isolate, &startup_serializer,
&partial_sink);
partial_serializer.Serialize(&raw_context);
-startup_serializer.SerializeWeakReferences();
+startup_serializer.SerializeWeakReferencesAndDeferred();
SnapshotData startup_snapshot(startup_serializer);
SnapshotData partial_snapshot(partial_serializer);
@@ -582,7 +582,7 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
PartialSerializer partial_serializer(isolate, &startup_serializer,
&partial_sink);
partial_serializer.Serialize(&raw_context);
-startup_serializer.SerializeWeakReferences();
+startup_serializer.SerializeWeakReferencesAndDeferred();
SnapshotData startup_snapshot(startup_serializer);
SnapshotData partial_snapshot(partial_serializer);
@@ -702,6 +702,57 @@ TEST(PerIsolateSnapshotBlobs) {
}
static void SerializationFunctionTemplate(
const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(args[0]);
}
TEST(PerIsolateSnapshotBlobsOutdatedContextWithOverflow) {
DisableTurbofan();
const char* source1 =
"var o = {};"
"(function() {"
" function f1(x) { return f2(x) instanceof Array; }"
" function f2(x) { return foo.bar(x); }"
" o.a = f2.bind(null);"
" o.b = 1;"
" o.c = 2;"
" o.d = 3;"
" o.e = 4;"
"})();\n";
const char* source2 = "o.a(42)";
v8::StartupData data = v8::V8::CreateSnapshotDataBlob(source1);
v8::Isolate::CreateParams params;
params.snapshot_blob = &data;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
v8::Local<v8::ObjectTemplate> property = v8::ObjectTemplate::New(isolate);
v8::Local<v8::FunctionTemplate> function =
v8::FunctionTemplate::New(isolate, SerializationFunctionTemplate);
property->Set(isolate, "bar", function);
global->Set(isolate, "foo", property);
v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
delete[] data.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
v8::Local<v8::Value> result = CompileRun(source2);
CHECK(v8_str("42")->Equals(result));
}
isolate->Dispose();
}
TEST(PerIsolateSnapshotBlobsWithLocker) {
DisableTurbofan();
v8::Isolate::CreateParams create_params;
@@ -738,6 +789,44 @@ TEST(PerIsolateSnapshotBlobsWithLocker) {
}
TEST(SnapshotBlobsStackOverflow) {
DisableTurbofan();
const char* source =
"var a = [0];"
"var b = a;"
"for (var i = 0; i < 10000; i++) {"
" var c = [i];"
" b.push(c);"
" b.push(c);"
" b = c;"
"}";
v8::StartupData data = v8::V8::CreateSnapshotDataBlob(source);
v8::Isolate::CreateParams params;
params.snapshot_blob = &data;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
delete[] data.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
const char* test =
"var sum = 0;"
"while (a) {"
" sum += a[0];"
" a = a[1];"
"}"
"sum";
CHECK_EQ(9999 * 5000, CompileRun(test)->ToInt32(isolate)->Int32Value());
}
isolate->Dispose();
}
TEST(TestThatAlwaysSucceeds) {
}
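
SnapshotBlobsStackOverflow above builds a list nested 10000 deep precisely so that naive recursive serialization would overflow the stack; the deferral queue added in serialize.h keeps the traversal flat. The value it checks is just the sum 0 + 1 + ... + 9999; a standalone sketch of the same chain and sum:

#include <cstdio>
#include <vector>

// Mirror of the JS chain: node i holds value i and points at node i+1.
struct Link {
  int value;
  Link* next;
};

int main() {
  const int kDepth = 10000;
  std::vector<Link> nodes(kDepth);
  for (int i = 0; i < kDepth; ++i) {
    nodes[i].value = i;
    nodes[i].next = (i + 1 < kDepth) ? &nodes[i + 1] : nullptr;
  }
  long sum = 0;
  for (Link* p = &nodes[0]; p != nullptr; p = p->next) sum += p->value;
  // 0 + 1 + ... + 9999 == 9999 * 5000, the value the test asserts.
  std::printf("sum = %ld (expected %ld)\n", sum, 9999L * 5000L);
  return 0;
}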

80
deps/v8/test/mjsunit/es6/regress/regress-cr493566.js

@@ -0,0 +1,80 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --harmony-proxies
"use strict";
var global = this;
(function TestGlobalReceiver() {
class A {
s() {
super.bla = 10;
}
}
new A().s.call(global);
assertEquals(10, global.bla);
})();
(function TestProxyProto() {
var calls = 0;
var handler = {
getPropertyDescriptor: function(name) {
calls++;
return undefined;
}
};
var proto = {};
var proxy = Proxy.create(handler, proto);
var object = {
__proto__: proxy,
setX(v) {
super.x = v;
},
setSymbol(sym, v) {
super[sym] = v;
}
};
object.setX(1);
assertEquals(1, Object.getOwnPropertyDescriptor(object, 'x').value);
assertEquals(1, calls);
var sym = Symbol();
object.setSymbol.call(global, sym, 2);
assertEquals(2, Object.getOwnPropertyDescriptor(global, sym).value);
// We currently do not invoke proxy traps for symbols
assertEquals(1, calls);
})();
(function TestProxyReceiver() {
var object = {
setY(v) {
super.y = v;
}
};
var calls = 0;
var handler = {
getPropertyDescriptor(name) {
assertUnreachable();
},
set(receiver, name, value) {
calls++;
assertEquals(proxy, receiver);
assertEquals('y', name);
assertEquals(3, value);
}
};
var proxy = Proxy.create(handler);
object.setY.call(proxy, 3);
assertEquals(1, calls);
})();

4
deps/v8/test/mjsunit/mjsunit.status

@@ -561,10 +561,6 @@
'math-floor-of-div-minus-zero': [SKIP],
}], # 'arch == mips64el'
-['arch == mips64el and simulator_run == False', {
-# Random failures on HW, need investigation.
-'debug-*': [SKIP],
-}],
##############################################################################
['system == windows', {
# TODO(mstarzinger): Too slow with turbo fan.

22
deps/v8/test/mjsunit/regress/regress-487981.js

@@ -0,0 +1,22 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --stress-compaction
// To reliably reproduce the crash use --verify-heap --random-seed=-133185440
function __f_2(o) {
return o.field.b.x;
}
try {
%OptimizeFunctionOnNextCall(__f_2);
__v_1 = __f_2();
} catch(e) { }
function __f_3() { __f_3(/./.test()); };
try {
__f_3();
} catch(e) { }

52
deps/v8/test/mjsunit/regress/regress-crbug-478612.js

@@ -0,0 +1,52 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
// This is used to force binary operations below to have tagged representation.
var z = {valueOf: function() { return 3; }};
function f() {
var y = -2;
return (1 & z) - y++;
}
assertEquals(3, f());
assertEquals(3, f());
%OptimizeFunctionOnNextCall(f);
assertEquals(3, f());
function g() {
var y = 2;
return (1 & z) | y++;
}
assertEquals(3, g());
assertEquals(3, g());
%OptimizeFunctionOnNextCall(g);
assertEquals(3, g());
function h() {
var y = 3;
return (3 & z) & y++;
}
assertEquals(3, h());
assertEquals(3, h());
%OptimizeFunctionOnNextCall(h);
assertEquals(3, h());
function i() {
var y = 2;
return (1 & z) ^ y++;
}
assertEquals(3, i());
assertEquals(3, i());
%OptimizeFunctionOnNextCall(i);
assertEquals(3, i());

33
deps/v8/test/mjsunit/regress/regress-crbug-500497.js

@@ -0,0 +1,33 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// New space must be at max capacity to trigger pretenuring decision.
// Flags: --allow-natives-syntax --verify-heap --max-semi-space-size=1
var global = []; // Used to keep some objects alive.
function Ctor() {
var result = {a: {}, b: {}, c: {}, d: {}, e: {}, f: {}, g: {}};
return result;
}
for (var i = 0; i < 120; i++) {
// Make the "a" property long-lived, while everything else is short-lived.
global.push(Ctor().a);
(function FillNewSpace() { new Array(10000); })();
}
// The bad situation is only triggered if Ctor wasn't optimized too early.
assertUnoptimized(Ctor);
// Optimized code for Ctor will pretenure the "a" property, so it will have
// three allocations:
// #1 Allocate the "result" object in new-space.
// #2 Allocate the object stored in the "a" property in old-space.
// #3 Allocate the objects for the "b" through "g" properties in new-space.
%OptimizeFunctionOnNextCall(Ctor);
for (var i = 0; i < 10000; i++) {
// At least one of these calls will run out of new space. The bug is
// triggered when it is allocation #3 that triggers GC.
Ctor();
}

27
deps/v8/test/mjsunit/regress/regress-crbug-502930.js

@@ -0,0 +1,27 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
var accessor_to_data_case = (function() {
var v = {};
Object.defineProperty(v, "foo", { get: function() { return 42; }, configurable: true});
var obj = {};
obj["boom"] = v;
Object.defineProperty(v, "foo", { value: 0, writable: true, configurable: true });
return obj;
})();
var data_to_accessor_case = (function() {
var v = {};
Object.defineProperty(v, "bar", { value: 0, writable: true, configurable: true });
var obj = {};
obj["bam"] = v;
Object.defineProperty(v, "bar", { get: function() { return 42; }, configurable: true});
return obj;
})();

23
deps/v8/test/mjsunit/regress/regress-crbug-514268.js

@@ -0,0 +1,23 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
function bar(a) {
a.pop();
}
function foo(a) {
assertEquals(2, a.length);
var d;
for (d in a) {
bar(a);
}
// If this fails, bar was not called exactly once.
assertEquals(1, a.length);
}
foo([1,2]);
foo([2,3]);
%OptimizeFunctionOnNextCall(foo);
foo([1,2]);