
Upgrade V8 to 2.3.2

v0.7.4-release
Ryan Dahl, 15 years ago
Parent commit: e4eeaa7fbc
61 changed files (lines changed in parentheses):

  1. deps/v8/ChangeLog (18)
  2. deps/v8/SConstruct (1)
  3. deps/v8/include/v8-profiler.h (21)
  4. deps/v8/include/v8.h (5)
  5. deps/v8/src/api.cc (36)
  6. deps/v8/src/arm/assembler-arm.cc (24)
  7. deps/v8/src/arm/assembler-arm.h (21)
  8. deps/v8/src/arm/codegen-arm.cc (18)
  9. deps/v8/src/arm/codegen-arm.h (1)
  10. deps/v8/src/arm/disasm-arm.cc (43)
  11. deps/v8/src/arm/full-codegen-arm.cc (19)
  12. deps/v8/src/arm/ic-arm.cc (40)
  13. deps/v8/src/arm/macro-assembler-arm.cc (31)
  14. deps/v8/src/arm/macro-assembler-arm.h (2)
  15. deps/v8/src/arm/simulator-arm.cc (32)
  16. deps/v8/src/arm/simulator-arm.h (3)
  17. deps/v8/src/codegen.h (2)
  18. deps/v8/src/compiler.cc (6)
  19. deps/v8/src/full-codegen.cc (2)
  20. deps/v8/src/full-codegen.h (1)
  21. deps/v8/src/global-handles.cc (12)
  22. deps/v8/src/handles.cc (4)
  23. deps/v8/src/heap-profiler.cc (7)
  24. deps/v8/src/heap-profiler.h (16)
  25. deps/v8/src/heap.cc (3)
  26. deps/v8/src/heap.h (1)
  27. deps/v8/src/ia32/codegen-ia32.cc (470)
  28. deps/v8/src/ia32/codegen-ia32.h (12)
  29. deps/v8/src/ia32/full-codegen-ia32.cc (20)
  30. deps/v8/src/ia32/ic-ia32.cc (83)
  31. deps/v8/src/ia32/macro-assembler-ia32.cc (6)
  32. deps/v8/src/ic.cc (93)
  33. deps/v8/src/ic.h (12)
  34. deps/v8/src/macros.py (3)
  35. deps/v8/src/mark-compact.cc (4)
  36. deps/v8/src/mips/codegen-mips.cc (5)
  37. deps/v8/src/mips/codegen-mips.h (1)
  38. deps/v8/src/objects.cc (40)
  39. deps/v8/src/objects.h (12)
  40. deps/v8/src/profile-generator.cc (299)
  41. deps/v8/src/profile-generator.h (128)
  42. deps/v8/src/runtime.js (20)
  43. deps/v8/src/serialize.cc (2)
  44. deps/v8/src/spaces.cc (4)
  45. deps/v8/src/v8natives.js (46)
  46. deps/v8/src/version.cc (2)
  47. deps/v8/src/x64/codegen-x64.cc (557)
  48. deps/v8/src/x64/codegen-x64.h (20)
  49. deps/v8/src/x64/full-codegen-x64.cc (19)
  50. deps/v8/src/x64/ic-x64.cc (36)
  51. deps/v8/src/x64/virtual-frame-x64.cc (20)
  52. deps/v8/src/x64/virtual-frame-x64.h (4)
  53. deps/v8/test/cctest/test-api.cc (5)
  54. deps/v8/test/cctest/test-assembler-arm.cc (34)
  55. deps/v8/test/cctest/test-disasm-arm.cc (9)
  56. deps/v8/test/cctest/test-heap-profiler.cc (173)
  57. deps/v8/test/cctest/test-heap.cc (13)
  58. deps/v8/test/cctest/test-mark-compact.cc (1)
  59. deps/v8/test/mjsunit/for-in-special-cases.js (57)
  60. deps/v8/tools/gc-nvp-trace-processor.py (49)
  61. deps/v8/tools/gyp/v8.gyp (10)

18
deps/v8/ChangeLog

@ -1,3 +1,21 @@
2010-07-21: Version 2.3.2

Fixed compiler warnings when building with LLVM.
Fixed a bug with for-in applied to strings (issue 785).
Performance improvements on all platforms.

2010-07-19: Version 2.3.1

Fixed compilation and linking with V8_INTERPRETED_REGEXP flag.
Fixed bug related to code flushing while compiling a lazy
compilable function (issue http://crbug.com/49099).
Performance improvements on all platforms.

2010-07-15: Version 2.3.0

Added ES5 Object.seal and Object.isSealed.

1
deps/v8/SConstruct

@ -300,6 +300,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']

21
deps/v8/include/v8-profiler.h

@ -258,6 +258,12 @@ class V8EXPORT HeapGraphNode {
*/
Handle<String> GetName() const;
/**
* Returns node id. For the same heap object, the id remains the same
* across all snapshots.
*/
uint64_t GetId() const;
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
@ -290,6 +296,16 @@ class V8EXPORT HeapGraphNode {
};
class V8EXPORT HeapSnapshotsDiff {
public:
/** Returns the root node for added nodes. */
const HeapGraphNode* GetAdditionsRoot() const;
/** Returns the root node for deleted nodes. */
const HeapGraphNode* GetDeletionsRoot() const;
};
/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
@ -302,7 +318,10 @@ class V8EXPORT HeapSnapshot {
Handle<String> GetTitle() const;
/** Returns the root node of the heap graph. */
const HeapGraphNode* GetHead() const;
const HeapGraphNode* GetRoot() const;
/** Returns a diff between this snapshot and another one. */
const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;
};
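The new diff API above pairs CompareWith with two synthetic root nodes. A minimal embedder-side sketch (not from the commit), assuming the TakeSnapshot entry point and GetChildrenCount accessor of this generation of v8-profiler.h, in a build with ENABLE_LOGGING_AND_PROFILING:

#include <v8.h>
#include <v8-profiler.h>
#include <stdio.h>

// Take two snapshots around some JS activity and inspect the diff roots.
void DiffTwoSnapshots() {
  v8::HandleScope scope;
  const v8::HeapSnapshot* before =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("before"));
  // ... run script that allocates or releases objects ...
  const v8::HeapSnapshot* after =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("after"));

  const v8::HeapSnapshotsDiff* diff = before->CompareWith(after);
  // Children of each root are the nodes added to / deleted from the heap.
  printf("added: %d, deleted: %d\n",
         diff->GetAdditionsRoot()->GetChildrenCount(),
         diff->GetDeletionsRoot()->GetChildrenCount());
}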

5
deps/v8/include/v8.h

@ -137,6 +137,9 @@ class Top;
/**
* A weak reference callback function.
*
* This callback should either explicitly invoke Dispose on |object| if the
* V8 wrapper is no longer needed, or 'revive' it by calling MakeWeak again.
*
* \param object the weak global object to be reclaimed by the garbage collector
* \param parameter the value passed in when making the weak global object
*/
@ -148,7 +151,7 @@ typedef void (*WeakReferenceCallback)(Persistent<Value> object,
#define TYPE_CHECK(T, S) \
while (false) { \
*(static_cast<T**>(0)) = static_cast<S*>(0); \
*(static_cast<T* volatile*>(0)) = static_cast<S*>(0); \
}
/**
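A sketch of the contract this comment describes, using the Persistent API of this V8 series; OnWrapperWeak and AttachWeakCallback are illustrative names, not part of the commit:

#include <v8.h>

// Weak callback: the GC found |object| otherwise unreachable. Per the
// comment above, we must either Dispose the handle or re-arm it with
// MakeWeak; doing neither now trips the ASSERT added in global-handles.cc.
static void OnWrapperWeak(v8::Persistent<v8::Value> object, void* parameter) {
  object.Dispose();  // Release the wrapper so the object can be collected.
  object.Clear();
}

void AttachWeakCallback(v8::Persistent<v8::Object> wrapper, void* state) {
  wrapper.MakeWeak(state, OnWrapperWeak);  // |state| comes back as parameter.
}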

36
deps/v8/src/api.cc

@ -4561,6 +4561,12 @@ Handle<String> HeapGraphNode::GetName() const {
}
uint64_t HeapGraphNode::GetId() const {
IsDeadCheck("v8::HeapGraphNode::GetId");
return reinterpret_cast<const i::HeapEntry*>(this)->id();
}
int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
@ -4624,6 +4630,22 @@ const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
}
const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
const i::HeapSnapshotsDiff* diff =
reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
}
const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
const i::HeapSnapshotsDiff* diff =
reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
}
unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid");
return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
@ -4639,7 +4661,7 @@ Handle<String> HeapSnapshot::GetTitle() const {
}
const HeapGraphNode* HeapSnapshot::GetHead() const {
const HeapGraphNode* HeapSnapshot::GetRoot() const {
IsDeadCheck("v8::HeapSnapshot::GetHead");
const i::HeapSnapshot* snapshot =
reinterpret_cast<const i::HeapSnapshot*>(this);
@ -4647,6 +4669,18 @@ const HeapGraphNode* HeapSnapshot::GetHead() const {
}
const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
const HeapSnapshot* snapshot) const {
IsDeadCheck("v8::HeapSnapshot::CompareWith");
i::HeapSnapshot* snapshot1 = const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(this));
i::HeapSnapshot* snapshot2 = const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
return reinterpret_cast<const HeapSnapshotsDiff*>(
snapshot1->CompareWith(snapshot2));
}
int HeapProfiler::GetSnapshotsCount() {
IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();

24
deps/v8/src/arm/assembler-arm.cc

@ -1192,6 +1192,30 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
// Saturating instructions.
// Unsigned saturate.
void Assembler::usat(Register dst,
int satpos,
const Operand& src,
Condition cond) {
// v6 and above.
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.rm_.is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
ASSERT(src.rs_.is(no_reg));
int sh = 0;
if (src.shift_op_ == ASR) {
sh = 1;
}
emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
// Bitfield manipulation instructions.
// Unsigned bit field extract.

21
deps/v8/src/arm/assembler-arm.h

@ -445,6 +445,8 @@ class Operand BASE_EMBEDDED {
}
Register rm() const { return rm_; }
Register rs() const { return rs_; }
ShiftOp shift_op() const { return shift_op_; }
private:
Register rm_;
@ -834,6 +836,25 @@ class Assembler : public Malloced {
void clz(Register dst, Register src, Condition cond = al); // v5 and above
// Saturating instructions. v6 and above.
// Unsigned saturate.
//
// Saturate an optionally shifted signed value to an unsigned range.
//
// usat dst, #satpos, src
// usat dst, #satpos, src, lsl #sh
// usat dst, #satpos, src, asr #sh
//
// Register dst will contain:
//
// 0, if s < 0
// (1 << satpos) - 1, if s > ((1 << satpos) - 1)
// s, otherwise
//
// where s is the contents of src after shifting (if used.)
void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
// Bitfield manipulation instructions. v7 and above.
void ubfx(Register dst, Register src, int lsb, int width,
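A reference model of the usat semantics documented above, mirroring the simulator implementation added further down. This sketch is illustrative and assumes satpos < 31 so the shift stays in range:

#include <stdint.h>

int32_t UsatReference(int32_t src, int satpos, int shift, bool shift_is_asr) {
  int32_t s = shift_is_asr ? (src >> shift) : (src << shift);
  int32_t satval = (1 << satpos) - 1;
  if (s < 0) return 0;            // Negative values saturate to zero.
  if (s > satval) return satval;  // Oversized values saturate to the max.
  return s;                       // In-range values pass through unchanged.
}

For example, UsatReference(300, 8, 0, false) is 255 and UsatReference(-5, 8, 0, false) is 0, which is exactly the [0..255] pixel clamp that KeyedStoreIC::GenerateGeneric now gets from a single Usat(r5, 8, Operand(r5)).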

18
deps/v8/src/arm/codegen-arm.cc

@ -4760,6 +4760,24 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
// typeof(arg) === 'function').
// It includes undetectable objects (as opposed to IsObject).
ASSERT(args->length() == 1);
Load(args->at(0));
Register value = frame_->PopToRegister();
__ tst(value, Operand(kSmiTagMask));
false_target()->Branch(eq);
// Check that this is an object.
__ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
__ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
__ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
cc_reg_ = ge;
}
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')

1
deps/v8/src/arm/codegen-arm.h

@ -475,6 +475,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);

43
deps/v8/src/arm/disasm-arm.cc

@ -106,6 +106,7 @@ class Decoder {
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
void PrintShiftImm(Instr* instr);
void PrintShiftSat(Instr* instr);
void PrintPU(Instr* instr);
void PrintSoftwareInterrupt(SoftwareInterruptCodes swi);
@ -248,6 +249,18 @@ void Decoder::PrintShiftImm(Instr* instr) {
}
// Print the optional shift and immediate used by saturating instructions.
void Decoder::PrintShiftSat(Instr* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s #%d",
shift_names[instr->Bit(6) * 2],
instr->Bits(11, 7));
}
}
// Print PU formatting to reduce complexity of FormatOption.
void Decoder::PrintPU(Instr* instr) {
switch (instr->PUField()) {
@ -440,6 +453,20 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
return 1;
}
case 'i': { // 'i: immediate value from adjacent bits.
// Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
int width = (format[3] - '0') * 10 + (format[4] - '0');
int lsb = (format[6] - '0') * 10 + (format[7] - '0');
ASSERT((width >= 1) && (width <= 32));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width + lsb) <= 32);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d",
instr->Bits(width + lsb - 1, lsb));
return 8;
}
case 'l': { // 'l: branch and link
if (instr->HasLink()) {
Print("l");
@ -507,7 +534,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return FormatRegister(instr, format);
}
case 's': {
if (format[1] == 'h') { // 'shift_op or 'shift_rm
if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
ASSERT(STRING_STARTS_WITH(format, "shift_op"));
if (instr->TypeField() == 0) {
@ -517,6 +544,10 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
PrintShiftImm(instr);
}
return 8;
} else if (format[6] == 's') { // 'shift_sat.
ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
PrintShiftSat(instr);
return 9;
} else { // 'shift_rm
ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
PrintShiftRm(instr);
@ -897,8 +928,16 @@ void Decoder::DecodeType3(Instr* instr) {
break;
}
case 1: {
ASSERT(!instr->HasW());
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
Format(instr, "usat 'rd, 'imm05@16, 'rm'shift_sat");
} else {
UNREACHABLE(); // SSAT.
}
} else {
Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
}
break;
}
case 2: {
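The new 'i' format token above packs a field width and bit position into the format string itself. A sketch (not from the commit) of the extraction it performs; ExtractImmediate is a hypothetical stand-in for instr->Bits(width + lsb - 1, lsb) and assumes width < 32:

#include <stdint.h>
#include <stdio.h>

uint32_t ExtractImmediate(uint32_t instr, int width, int lsb) {
  return (instr >> lsb) & ((1u << width) - 1);  // Inclusive bits [lsb+width-1:lsb].
}

int main() {
  // "imm05@16" means a 5-bit field at bit 16; a USAT encoding with
  // satpos = 8 in bits [20:16] therefore disassembles with "#8".
  printf("#%u\n", ExtractImmediate(8u << 16, 5, 16));
  return 0;
}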

19
deps/v8/src/arm/full-codegen-arm.cc

@ -1908,6 +1908,25 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
__ b(ge, if_true);
__ b(if_false);
Apply(context_, if_true, if_false);
}
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);

40
deps/v8/src/arm/ic-arm.cc

@ -958,14 +958,6 @@ static inline bool IsInlinedICSite(Address address,
}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined in-object property load (if present) to
// guarantee failure by holding an invalid map (the null value). The offset
// can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// Find the end of the inlined code for handling the load if this is an
// inlined IC call site.
@ -996,10 +988,9 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined keyed load (if present) to
// guarantee failure by holding an invalid map (the null value).
PatchInlinedLoad(address, Heap::null_value());
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// TODO(787): Implement inline stores on arm.
return false;
}
@ -1018,21 +1009,6 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
PatchInlinedStore(address, Heap::null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
PatchInlinedStore(address, Heap::fixed_array_map());
}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
// Find the end of the inlined code for handling the store if this is an
// inlined IC call site.
@ -1698,14 +1674,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r4, Operand(ip));
__ b(hs, &slow);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
{ // Clamp the value to [0..255].
Label done;
__ tst(r5, Operand(0xFFFFFF00));
__ b(eq, &done);
__ mov(r5, Operand(0), LeaveCC, mi); // 0 if negative.
__ mov(r5, Operand(255), LeaveCC, pl); // 255 if positive.
__ bind(&done);
}
__ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255].
// Get the pointer to the external array. This clobbers elements.
__ ldr(elements,
FieldMemOperand(elements, PixelArray::kExternalPointerOffset));

31
deps/v8/src/arm/macro-assembler-arm.cc

@ -281,6 +281,37 @@ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
}
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
if (!CpuFeatures::IsSupported(ARMv7)) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
// These asserts are required to ensure compatibility with the ARMv7
// implementation.
ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
ASSERT(src.rs().is(no_reg));
Label done;
int satval = (1 << satpos) - 1;
if (cond != al) {
b(NegateCondition(cond), &done); // Skip saturate if !condition.
}
if (!(src.is_reg() && dst.is(src.rm()))) {
mov(dst, src);
}
tst(dst, Operand(~satval));
b(eq, &done);
mov(dst, Operand(0), LeaveCC, mi); // 0 if negative.
mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
bind(&done);
} else {
usat(dst, satpos, src, cond);
}
}
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);

2
deps/v8/src/arm/macro-assembler-arm.h

@ -112,6 +112,8 @@ class MacroAssembler: public Assembler {
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void Bfc(Register dst, int lsb, int width, Condition cond = al);
void Usat(Register dst, int satpos, const Operand& src,
Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);

32
deps/v8/src/arm/simulator-arm.cc

@ -2047,11 +2047,41 @@ void Simulator::DecodeType3(Instr* instr) {
case 0: {
ASSERT(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
break;
}
case 1: {
ASSERT(!instr->HasW());
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) { // USAT.
int32_t sat_pos = instr->Bits(20, 16);
int32_t sat_val = (1 << sat_pos) - 1;
int32_t shift = instr->Bits(11, 7);
int32_t shift_type = instr->Bit(6);
int32_t rm_val = get_register(instr->RmField());
if (shift_type == 0) { // LSL
rm_val <<= shift;
} else { // ASR
rm_val >>= shift;
}
// If saturation occurs, the Q flag should be set in the CPSR.
// There is no Q flag yet, and no instruction (MRS) to read the
// CPSR directly.
if (rm_val > sat_val) {
rm_val = sat_val;
} else if (rm_val < 0) {
rm_val = 0;
}
set_register(rd, rm_val);
} else { // SSAT.
UNIMPLEMENTED();
}
return;
} else {
Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
UNIMPLEMENTED();
}
break;
}
case 2: {

3
deps/v8/src/arm/simulator-arm.h

@ -294,6 +294,9 @@ class Simulator {
void TrashCallerSaveRegisters();
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
// flag, so this is left unimplemented.
int32_t registers_[16];
bool n_flag_;
bool z_flag_;

2
deps/v8/src/codegen.h

@ -120,6 +120,7 @@ namespace internal {
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
F(IsSpecObject, 1, 1) \
F(StringAdd, 2, 1) \
F(SubString, 3, 1) \
F(StringCompare, 2, 1) \
@ -180,7 +181,6 @@ class CodeGeneratorScope BASE_EMBEDDED {
CodeGenerator* previous_;
};
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.

6
deps/v8/src/compiler.cc

@ -449,8 +449,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
code);
// Update the shared function info with the compiled code and the scope info.
shared->set_code(*code);
// Please note that the order of the shared function initialization is
// important since set_scope_info might trigger a GC, causing the ASSERT
// below to be invalid if the code was flushed. By setting the code
// object last we avoid this.
shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
shared->set_code(*code);
// Set the expected number of properties for instances.
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());

2
deps/v8/src/full-codegen.cc

@ -857,6 +857,8 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
EmitIsNonNegativeSmi(expr->arguments());
} else if (strcmp("_IsObject", *name->ToCString()) == 0) {
EmitIsObject(expr->arguments());
} else if (strcmp("_IsSpecObject", *name->ToCString()) == 0) {
EmitIsSpecObject(expr->arguments());
} else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
EmitIsUndetectableObject(expr->arguments());
} else if (strcmp("_IsFunction", *name->ToCString()) == 0) {

1
deps/v8/src/full-codegen.h

@ -402,6 +402,7 @@ class FullCodeGenerator: public AstVisitor {
void EmitIsSmi(ZoneList<Expression*>* arguments);
void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
void EmitIsObject(ZoneList<Expression*>* arguments);
void EmitIsSpecObject(ZoneList<Expression*>* arguments);
void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
void EmitIsFunction(ZoneList<Expression*>* arguments);
void EmitIsArray(ZoneList<Expression*>* arguments);

12
deps/v8/src/global-handles.cc

@ -151,13 +151,14 @@ class GlobalHandles::Node : public Malloced {
bool PostGarbageCollectionProcessing() {
if (state_ != Node::PENDING) return false;
LOG(HandleEvent("GlobalHandle::Processing", handle().location()));
WeakReferenceCallback func = callback();
if (func == NULL) {
Destroy();
return false;
}
void* par = parameter();
state_ = NEAR_DEATH;
set_parameter(NULL);
// The callback function is resolved as late as possible to preserve old
// behavior.
WeakReferenceCallback func = callback();
if (func == NULL) return false;
v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
{
@ -178,6 +179,9 @@ class GlobalHandles::Node : public Malloced {
VMState state(EXTERNAL);
func(object, par);
}
// Absence of explicit cleanup or revival of the weak handle
// would in most cases lead to a memory leak.
ASSERT(state_ != NEAR_DEATH);
return true;
}

4
deps/v8/src/handles.cc

@ -664,8 +664,12 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
// therefore it does not make sense to cache the property names
// for arguments objects. Arguments objects will always have
// elements.
// Wrapped strings have elements, but don't have an elements
// array or dictionary. So the fast inline test for whether to
// use the cache says yes, so we should not create a cache.
bool cache_enum_keys =
((current->map()->constructor() != *arguments_function) &&
!current->IsJSValue() &&
!current->IsAccessCheckNeeded() &&
!current->HasNamedInterceptor() &&
!current->HasIndexedInterceptor());

7
deps/v8/src/heap-profiler.cc

@ -364,6 +364,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
snapshots_->SnapshotGenerationFinished();
return result;
}
@ -391,6 +392,12 @@ HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
}
void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
ASSERT(singleton_ != NULL);
singleton_->snapshots_->ObjectMoveEvent(from, to);
}
const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;

16
deps/v8/src/heap-profiler.h

@ -38,7 +38,15 @@ namespace internal {
class HeapSnapshot;
class HeapSnapshotsCollection;
#endif
#define HEAP_PROFILE(Call) \
do { \
if (v8::internal::HeapProfiler::is_profiling()) { \
v8::internal::HeapProfiler::Call; \
} \
} while (false)
#else
#define HEAP_PROFILE(Call) ((void) 0)
#endif // ENABLE_LOGGING_AND_PROFILING
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
@ -54,6 +62,12 @@ class HeapProfiler {
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
static void ObjectMoveEvent(Address from, Address to);
static INLINE(bool is_profiling()) {
return singleton_ != NULL && singleton_->snapshots_->is_tracking_objects();
}
// Obsolete interface.
// Write a single heap sample to the log file.
static void WriteSample();
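A sketch of how internal callers are meant to use the new guard (the heap.cc hunk below does this for object moves); ReportMove is an illustrative wrapper, not part of the commit:

#include "heap-profiler.h"

namespace v8 {
namespace internal {

void ReportMove(Address from, Address to) {
  // With ENABLE_LOGGING_AND_PROFILING this expands to a call guarded by
  // HeapProfiler::is_profiling(); otherwise it expands to ((void) 0), so
  // untracked runs pay at most a null check.
  HEAP_PROFILE(ObjectMoveEvent(from, to));
}

} }  // namespace v8::internal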

3
deps/v8/src/heap.cc

@ -638,6 +638,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
if (collector == MARK_COMPACTOR) {
if (FLAG_flush_code) {
// Flush all potentially unused code.
GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_FLUSH_CODE);
FlushCode();
}
@ -1107,6 +1108,7 @@ inline static HeapObject* MigrateObject(HeapObject* source,
// Update NewSpace stats if necessary.
RecordCopiedObject(target);
#endif
HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
return target;
}
@ -4840,6 +4842,7 @@ GCTracer::~GCTracer() {
PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
PrintF("flushcode=%d ", static_cast<int>(scopes_[Scope::MC_FLUSH_CODE]));
PrintF("total_size_before=%d ", start_size_);
PrintF("total_size_after=%d ", Heap::SizeOfObjects());

1
deps/v8/src/heap.h

@ -1722,6 +1722,7 @@ class GCTracer BASE_EMBEDDED {
MC_MARK,
MC_SWEEP,
MC_COMPACT,
MC_FLUSH_CODE,
kNumberOfScopes
};

470
deps/v8/src/ia32/codegen-ia32.cc

@ -34,12 +34,9 @@
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
@ -143,7 +140,7 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
// CodeGenerator implementation.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
@ -374,12 +371,11 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
// Adjust for function-level loop nesting.
ASSERT_EQ(info->loop_nesting(), loop_nesting_);
ASSERT_EQ(loop_nesting_, info->loop_nesting());
loop_nesting_ = 0;
// Code generation state must be reset.
ASSERT(state_ == NULL);
ASSERT(loop_nesting() == 0);
ASSERT(!function_return_is_shadowed_);
function_return_.Unuse();
DeleteFrame();
@ -646,7 +642,6 @@ void CodeGenerator::Load(Expression* expr) {
} else {
JumpTarget true_target;
JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
LoadCondition(expr, &dest, false);
@ -784,9 +779,9 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
JumpTarget done;
bool skip_arguments = false;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has already
// been written to. This can happen if a function has a local
// variable named 'arguments'.
// We have to skip storing into the arguments slot if it has
// already been written to. This can happen if a function
// has a local variable named 'arguments'.
LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
@ -1434,8 +1429,8 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
} else {
unsigned_left >>= shift_amount;
}
ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
break;
}
default:
@ -1919,12 +1914,12 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
void DeferredInlineSmiOperationReversed::Generate() {
GenericBinaryOpStub igostub(
GenericBinaryOpStub stub(
op_,
overwrite_mode_,
NO_SMI_CODE_IN_STUB,
TypeInfo::Combine(TypeInfo::Smi(), type_info_));
igostub.GenerateCall(masm_, value_, src_);
stub.GenerateCall(masm_, value_, src_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@ -2424,6 +2419,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
// The next case must be the default.
default: {
Result constant_operand(value);
@ -2487,8 +2483,7 @@ void CodeGenerator::Comparison(AstNode* node,
}
ASSERT(cc == less || cc == equal || cc == greater_equal);
// If either side is a constant of some sort, we can probably optimize the
// comparison.
// If either side is a constant smi, optimize the comparison.
bool left_side_constant_smi = false;
bool left_side_constant_null = false;
bool left_side_constant_1_char_string = false;
@ -2513,114 +2508,11 @@ void CodeGenerator::Comparison(AstNode* node,
}
if (left_side_constant_smi || right_side_constant_smi) {
if (left_side_constant_smi && right_side_constant_smi) {
// Trivial case, comparing two constants.
int left_value = Smi::cast(*left_side.handle())->value();
int right_value = Smi::cast(*right_side.handle())->value();
switch (cc) {
case less:
dest->Goto(left_value < right_value);
break;
case equal:
dest->Goto(left_value == right_value);
break;
case greater_equal:
dest->Goto(left_value >= right_value);
break;
default:
UNREACHABLE();
}
} else {
// Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
Result temp = left_side;
left_side = right_side;
right_side = temp;
cc = ReverseCondition(cc);
// This may re-introduce greater or less_equal as the value of cc.
// CompareStub and the inline code both support all values of cc.
}
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
Register left_reg = left_side.reg();
Handle<Object> right_val = right_side.handle();
// Here we split control flow to the stub call and inlined cases
// before finally splitting it to the control destination. We use
// a jump target and branching to duplicate the virtual frame at
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
if (left_side.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_side.reg());
}
} else {
JumpTarget is_smi;
__ test(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
bool is_loop_condition = (node->AsExpression() != NULL) &&
node->AsExpression()->is_loop_condition();
if (!is_loop_condition &&
CpuFeatures::IsSupported(SSE2) &&
right_val->IsSmi()) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
CpuFeatures::Scope use_sse2(SSE2);
JumpTarget not_number;
__ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
not_number.Branch(not_equal, &left_side);
__ movdbl(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
int value = Smi::cast(*right_val)->value();
if (value == 0) {
__ xorpd(xmm0, xmm0);
} else {
Result temp = allocator()->Allocate();
__ mov(temp.reg(), Immediate(value));
__ cvtsi2sd(xmm0, Operand(temp.reg()));
temp.Unuse();
}
__ ucomisd(xmm1, xmm0);
// Jump to builtin for NaN.
not_number.Branch(parity_even, &left_side);
left_side.Unuse();
dest->true_target()->Branch(DoubleCondition(cc));
dest->false_target()->Jump();
not_number.Bind(&left_side);
}
// Setup and call the compare stub.
CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
__ cmp(result.reg(), 0);
result.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
is_smi.Bind();
}
left_side = Result(left_reg);
right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
if (IsUnsafeSmi(right_side.handle())) {
right_side.ToRegister();
__ cmp(left_side.reg(), Operand(right_side.reg()));
} else {
__ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
}
left_side.Unuse();
right_side.Unuse();
dest->Split(cc);
}
ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
left_side_constant_smi, right_side_constant_smi,
is_loop_condition);
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@ -2780,13 +2672,14 @@ void CodeGenerator::Comparison(AstNode* node,
}
} else {
// Neither side is a constant Smi, constant 1-char string or constant null.
// If either side is a non-smi constant, or known to be a heap number skip
// the smi check.
// If either side is a non-smi constant, or known to be a heap number,
// skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi()) ||
left_side.type_info().IsDouble() ||
right_side.type_info().IsDouble();
NaNInformation nan_info =
(CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
kBothCouldBeNaN :
@ -2811,14 +2704,15 @@ void CodeGenerator::Comparison(AstNode* node,
right_side.ToRegister();
if (known_non_smi) {
// Inline the equality check if both operands can't be a NaN. If both
// objects are the same they are equal.
// Inlined equality check:
// If at least one of the objects is not NaN, then if the objects
// are identical, they are equal.
if (nan_info == kCantBothBeNaN && cc == equal) {
__ cmp(left_side.reg(), Operand(right_side.reg()));
dest->true_target()->Branch(equal);
}
// Inline number comparison.
// Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
@ -2856,7 +2750,7 @@ void CodeGenerator::Comparison(AstNode* node,
dest->true_target()->Branch(equal);
}
// Inline number comparison.
// Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
@ -2882,6 +2776,139 @@ void CodeGenerator::Comparison(AstNode* node,
}
void CodeGenerator::ConstantSmiComparison(Condition cc,
bool strict,
ControlDestination* dest,
Result* left_side,
Result* right_side,
bool left_side_constant_smi,
bool right_side_constant_smi,
bool is_loop_condition) {
if (left_side_constant_smi && right_side_constant_smi) {
// Trivial case, comparing two constants.
int left_value = Smi::cast(*left_side->handle())->value();
int right_value = Smi::cast(*right_side->handle())->value();
switch (cc) {
case less:
dest->Goto(left_value < right_value);
break;
case equal:
dest->Goto(left_value == right_value);
break;
case greater_equal:
dest->Goto(left_value >= right_value);
break;
default:
UNREACHABLE();
}
} else {
// Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
Result* temp = left_side;
left_side = right_side;
right_side = temp;
cc = ReverseCondition(cc);
// This may re-introduce greater or less_equal as the value of cc.
// CompareStub and the inline code both support all values of cc.
}
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side->ToRegister();
Register left_reg = left_side->reg();
Handle<Object> right_val = right_side->handle();
if (left_side->is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_reg);
}
// Test smi equality and comparison by signed int comparison.
if (IsUnsafeSmi(right_side->handle())) {
right_side->ToRegister();
__ cmp(left_reg, Operand(right_side->reg()));
} else {
__ cmp(Operand(left_reg), Immediate(right_side->handle()));
}
left_side->Unuse();
right_side->Unuse();
dest->Split(cc);
} else {
// Only the case where the left side could possibly be a non-smi is left.
JumpTarget is_smi;
if (cc == equal) {
// We can do the equality comparison before the smi check.
__ cmp(Operand(left_reg), Immediate(right_side->handle()));
dest->true_target()->Branch(equal);
__ test(left_reg, Immediate(kSmiTagMask));
dest->false_target()->Branch(zero);
} else {
// Do the smi check, then the comparison.
JumpTarget is_not_smi;
__ test(left_reg, Immediate(kSmiTagMask));
is_smi.Branch(zero, left_side, right_side);
}
// Jump or fall through to here if we are comparing a non-smi to a
// constant smi. If the non-smi is a heap number and this is not
// a loop condition, inline the floating point code.
if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
CpuFeatures::Scope use_sse2(SSE2);
JumpTarget not_number;
__ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
not_number.Branch(not_equal, left_side);
__ movdbl(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
int value = Smi::cast(*right_val)->value();
if (value == 0) {
__ xorpd(xmm0, xmm0);
} else {
Result temp = allocator()->Allocate();
__ mov(temp.reg(), Immediate(value));
__ cvtsi2sd(xmm0, Operand(temp.reg()));
temp.Unuse();
}
__ ucomisd(xmm1, xmm0);
// Jump to builtin for NaN.
not_number.Branch(parity_even, left_side);
left_side->Unuse();
dest->true_target()->Branch(DoubleCondition(cc));
dest->false_target()->Jump();
not_number.Bind(left_side);
}
// Setup and call the compare stub.
CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, left_side, right_side);
result.ToRegister();
__ test(result.reg(), Operand(result.reg()));
result.Unuse();
if (cc == equal) {
dest->Split(cc);
} else {
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
// It is important for performance for this case to be at the end.
is_smi.Bind(left_side, right_side);
if (IsUnsafeSmi(right_side->handle())) {
right_side->ToRegister();
__ cmp(left_reg, Operand(right_side->reg()));
} else {
__ cmp(Operand(left_reg), Immediate(right_side->handle()));
}
left_side->Unuse();
right_side->Unuse();
dest->Split(cc);
}
}
}
}
// Check that the comparison operand is a number. Jump to not_numbers jump
// target passing the left and right result if the operand is not a number.
static void CheckComparisonOperand(MacroAssembler* masm_,
@ -2941,19 +2968,19 @@ static void LoadComparisonOperand(MacroAssembler* masm_,
// target passing the left and right result if the operand is not a number.
static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
Result* operand,
XMMRegister reg,
XMMRegister xmm_reg,
Result* left_side,
Result* right_side,
JumpTarget* not_numbers) {
Label done;
if (operand->type_info().IsDouble()) {
// Operand is known to be a heap number, just load it.
__ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
__ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
} else if (operand->type_info().IsSmi()) {
// Operand is known to be a smi. Convert it to double and keep the original
// smi.
__ SmiUntag(operand->reg());
__ cvtsi2sd(reg, Operand(operand->reg()));
__ cvtsi2sd(xmm_reg, Operand(operand->reg()));
__ SmiTag(operand->reg());
} else {
// Operand type not known, check for smi or heap number.
@ -2965,13 +2992,13 @@ static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
Immediate(Factory::heap_number_map()));
not_numbers->Branch(not_equal, left_side, right_side, taken);
}
__ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
__ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&smi);
// Convert smi to float and keep the original smi.
__ SmiUntag(operand->reg());
__ cvtsi2sd(reg, Operand(operand->reg()));
__ cvtsi2sd(xmm_reg, Operand(operand->reg()));
__ SmiTag(operand->reg());
__ jmp(&done);
}
@ -3568,8 +3595,10 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
return_value->ToRegister(eax);
// Add a label for checking the size of the code used for returning.
#ifdef DEBUG
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
// Leave the frame and return popping the arguments and the
// receiver.
@ -3690,7 +3719,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
}
}
// The last instruction emitted was a jump, either to the default
// clause or the break target, or else to a case body from the loop
// that compiles the tests.
@ -3778,8 +3806,8 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
// Compile the test.
switch (info) {
case ALWAYS_TRUE:
// If control flow can fall off the end of the body, jump back to
// the top and bind the break target at the exit.
// If control flow can fall off the end of the body, jump back
// to the top and bind the break target at the exit.
if (has_valid_frame()) {
node->continue_target()->Jump();
}
@ -3815,6 +3843,8 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
}
DecrementLoopNesting();
node->continue_target()->Unuse();
node->break_target()->Unuse();
}
@ -3899,8 +3929,8 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
break;
case DONT_KNOW:
if (test_at_bottom) {
// If we have chosen to recompile the test at the bottom, then
// it is the continue target.
// If we have chosen to recompile the test at the bottom,
// then it is the continue target.
if (node->continue_target()->is_linked()) {
node->continue_target()->Bind();
}
@ -4016,6 +4046,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
loop.Bind();
}
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
@ -4125,8 +4156,8 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
break;
}
// The break target may be already bound (by the condition), or
// there may not be a valid frame. Bind it only if needed.
// The break target may be already bound (by the condition), or there
// may not be a valid frame. Bind it only if needed.
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
@ -5309,6 +5340,11 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
frame_->Dup();
Load(property->value());
Result dummy = frame_->CallStoreIC(Handle<String>::cast(key), false);
// A test eax instruction following the store IC call would
// indicate the presence of an inlined version of the
// store. Add a nop to indicate that there is no such
// inlined version.
__ nop();
dummy.Unuse();
break;
}
@ -6406,6 +6442,27 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
// typeof(arg) === 'function').
// It includes undetectable objects (as opposed to IsObject).
ASSERT(args->length() == 1);
Load(args->at(0));
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
__ test(value.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(equal);
// Check that this is an object.
frame_->Spill(value.reg());
__ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
value.Unuse();
destination()->Split(above_equal);
}
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@ -8809,7 +8866,97 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
int expected_height = frame()->height() - (is_contextual ? 1 : 2);
#endif
Result result = frame()->CallStoreIC(name, is_contextual);
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
result = frame()->CallStoreIC(name, is_contextual);
// A test eax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
__ nop();
} else {
// Inline the in-object property case.
JumpTarget slow, done;
Label patch_site;
// Get the value and receiver from the stack.
Result value = frame()->Pop();
value.ToRegister();
Result receiver = frame()->Pop();
receiver.ToRegister();
// Allocate result register.
result = allocator()->Allocate();
ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
// Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask));
slow.Branch(zero, &value, &receiver);
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
__ bind(&patch_site);
masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value()));
// This branch is always a forwards branch so it's always a fixed size
// which allows the assert below to succeed and patching to work.
slow.Branch(not_equal, &value, &receiver);
// The delta from the patch label to the store offset must be
// statically known.
ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
StoreIC::kOffsetToStoreInstruction);
// The initial (invalid) offset has to be large enough to force a 32-bit
// instruction encoding to allow patching with an arbitrary offset. Use
// kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt;
__ mov(FieldOperand(receiver.reg(), offset), value.reg());
__ mov(result.reg(), Operand(value.reg()));
// Allocate scratch register for write barrier.
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid() &&
result.is_valid() &&
receiver.is_valid() &&
value.is_valid());
// The write barrier clobbers all input registers, so spill the
// receiver and the value.
frame_->Spill(receiver.reg());
frame_->Spill(value.reg());
// Update the write barrier. To save instructions in the inlined
// version we do not filter smis.
Label skip_write_barrier;
__ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
__ lea(scratch.reg(), Operand(receiver.reg(), offset));
__ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
if (FLAG_debug_code) {
__ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
__ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
__ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
}
__ bind(&skip_write_barrier);
value.Unuse();
scratch.Unuse();
receiver.Unuse();
done.Jump(&result);
slow.Bind(&value, &receiver);
frame()->Push(&receiver);
frame()->Push(&value);
result = frame()->CallStoreIC(name, is_contextual);
// Encode the offset to the map check instruction and the offset
// to the write barrier store address computation in a test eax
// instruction.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
__ test(eax,
Immediate((delta_to_record_write << 16) | delta_to_patch_site));
done.Bind(&result);
}
ASSERT_EQ(expected_height, frame()->height());
return result;
@ -11787,12 +11934,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
}
// Push arguments below the return address.
__ pop(ecx);
__ push(eax);
__ push(edx);
__ push(ecx);
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
@ -11812,33 +11953,32 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ cmov(above, eax, Operand(ecx));
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, Operand(ecx));
__ ret(2 * kPointerSize);
__ ret(0);
} else {
FloatingPointHelper::CheckFloatOperands(
masm, &non_number_comparison, ebx);
FloatingPointHelper::LoadFloatOperands(masm, ecx);
FloatingPointHelper::LoadFloatOperand(masm, eax);
FloatingPointHelper::LoadFloatOperand(masm, edx);
__ FCmp();
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, not_taken);
Label below_label, above_label;
// Return a result of -1, 0, or 1, based on EFLAGS. In all cases remove
// two arguments from the stack as they have been pushed in preparation
// of a possible runtime call.
// Return a result of -1, 0, or 1, based on EFLAGS.
__ j(below, &below_label, not_taken);
__ j(above, &above_label, not_taken);
__ xor_(eax, Operand(eax));
__ ret(2 * kPointerSize);
__ ret(0);
__ bind(&below_label);
__ mov(eax, Immediate(Smi::FromInt(-1)));
__ ret(2 * kPointerSize);
__ ret(0);
__ bind(&above_label);
__ mov(eax, Immediate(Smi::FromInt(1)));
__ ret(2 * kPointerSize);
__ ret(0);
}
// If one of the numbers was NaN, then the result is always false.
@ -11850,7 +11990,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else {
__ mov(eax, Immediate(Smi::FromInt(-1)));
}
__ ret(2 * kPointerSize); // eax, edx were pushed
__ ret(0);
// The number comparison code did not provide a valid result.
__ bind(&non_number_comparison);
@ -11865,7 +12005,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax already holds a
// non-zero value, which indicates not equal, so just return.
__ ret(2 * kPointerSize);
__ ret(0);
}
__ bind(&check_for_strings);
@ -11918,14 +12058,12 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
__ ret(2 * kPointerSize); // rax, rdx were pushed
__ ret(0); // rax, rdx were pushed
__ bind(&not_both_objects);
}
// must swap argument order
// Push arguments below the return address.
__ pop(ecx);
__ pop(edx);
__ pop(eax);
__ push(edx);
__ push(eax);
@ -13502,19 +13640,19 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(2 * kPointerSize);
__ ret(0);
__ bind(&result_not_equal);
__ j(greater, &result_greater);
// Result is LESS.
__ Set(eax, Immediate(Smi::FromInt(LESS)));
__ ret(2 * kPointerSize);
__ ret(0);
// Result is GREATER.
__ bind(&result_greater);
__ Set(eax, Immediate(Smi::FromInt(GREATER)));
__ ret(2 * kPointerSize);
__ ret(0);
}
@ -13544,6 +13682,10 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
// Compare flat ascii strings.
// Drop arguments from the stack.
__ pop(ecx);
__ add(Operand(esp), Immediate(2 * kPointerSize));
__ push(ecx);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)

12
deps/v8/src/ia32/codegen-ia32.h

@ -560,6 +560,17 @@ class CodeGenerator: public AstVisitor {
Condition cc,
bool strict,
ControlDestination* destination);
// If at least one of the sides is a constant smi, generate optimized code.
void ConstantSmiComparison(Condition cc,
bool strict,
ControlDestination* destination,
Result* left_side,
Result* right_side,
bool left_side_constant_smi,
bool right_side_constant_smi,
bool is_loop_condition);
void GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
@ -621,6 +632,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);

20
deps/v8/src/ia32/full-codegen-ia32.cc

@ -1985,6 +1985,26 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
__ j(above_equal, if_true);
__ jmp(if_false);
Apply(context_, if_true, if_false);
}
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);

83
deps/v8/src/ia32/ic-ia32.cc

@ -1644,37 +1644,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Insert null as the map to check for to make sure the map check fails
// sending control flow to the IC instead of the inlined version.
PatchInlinedLoad(address, Heap::null_value());
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
PatchInlinedStore(address, Heap::null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
PatchInlinedStore(address, Heap::fixed_array_map());
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
@ -1703,6 +1672,52 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
// Extract the encoded deltas from the test eax instruction.
Address encoded_offsets_address = test_instruction_address + 1;
int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
int delta_to_map_check = -(encoded_offsets & 0xFFFF);
int delta_to_record_write = encoded_offsets >> 16;
// Patch the map to check. The map address is the last 4 bytes of
// the 7-byte operand-immediate compare instruction.
Address map_check_address = test_instruction_address + delta_to_map_check;
Address map_address = map_check_address + 3;
*(reinterpret_cast<Object**>(map_address)) = map;
// Patch the offset in the store instruction. The offset is in the
// last 4 bytes of a six byte register-to-memory move instruction.
Address offset_address =
map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
// The offset should have initial value (kMaxInt - 1), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == Heap::null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
// Patch the offset in the write-barrier code. The offset is the
// last 4 bytes of a six byte lea instruction.
offset_address = map_check_address + delta_to_record_write + 2;
// The offset should have initial value (kMaxInt), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == Heap::null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
}
static bool PatchInlinedMapCheck(Address address, Object* map) {
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@ -1814,6 +1829,12 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
// The offset from the inlined patch site to the start of the inlined
// store instruction. It is 7 bytes (test reg, imm) plus 6 bytes (jne
// slow_label).
const int StoreIC::kOffsetToStoreInstruction = 13;
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
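The patching scheme above relies on the delta encoding that EmitNamedStore (in the codegen-ia32.cc hunk) packs into the 32-bit immediate of a marker test eax instruction. A standalone sketch of the round trip; the sample deltas are made up:

#include <assert.h>
#include <stdint.h>

uint32_t EncodeDeltas(int delta_to_record_write, int delta_to_patch_site) {
  // Both are small forward code offsets, so they fit in 16 bits each.
  return (static_cast<uint32_t>(delta_to_record_write) << 16) |
         static_cast<uint32_t>(delta_to_patch_site);
}

void DecodeDeltas(uint32_t encoded, int* delta_to_map_check,
                  int* delta_to_record_write) {
  // Mirrors StoreIC::PatchInlinedStore: the low half is negated because the
  // map check sits before the test instruction in the generated code.
  *delta_to_map_check = -static_cast<int>(encoded & 0xFFFF);
  *delta_to_record_write = static_cast<int>(encoded) >> 16;
}

int main() {
  int to_map_check = 0, to_record_write = 0;
  DecodeDeltas(EncodeDeltas(40, 25), &to_map_check, &to_record_write);
  assert(to_map_check == -25 && to_record_write == 40);
  return 0;
}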

6
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -98,8 +98,10 @@ void MacroAssembler::InNewSpace(Register object,
}
void MacroAssembler::RecordWrite(Register object, int offset,
Register value, Register scratch) {
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
Register scratch) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are esi.

93
deps/v8/src/ic.cc

@ -277,6 +277,13 @@ void CallICBase::Clear(Address address, Code* target) {
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Insert null as the map to check for to make sure the map check fails
// sending control flow to the IC instead of the inlined version.
PatchInlinedLoad(address, Heap::null_value());
}
void KeyedLoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// Make sure to also clear the map used in inline fast cases. If we
@ -287,6 +294,14 @@ void KeyedLoadIC::Clear(Address address, Code* target) {
}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
void LoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
@ -294,12 +309,36 @@ void LoadIC::Clear(Address address, Code* target) {
}
void StoreIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property store (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedStore(address, Heap::null_value(), 0);
}
void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
PatchInlinedStore(address, Heap::null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
PatchInlinedStore(address, Heap::fixed_array_map());
}
void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
@ -777,11 +816,13 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedLoad(address(), map, offset)) {
set_target(megamorphic_stub());
return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
}
#endif
return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
#ifdef DEBUG
} else {
if (FLAG_trace_ic) {
PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
@ -1205,7 +1246,57 @@ Object* StoreIC::Store(State state,
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
if (LookupForWrite(*receiver, *name, &lookup)) {
bool can_be_inlined =
state == UNINITIALIZED &&
lookup.IsProperty() &&
lookup.holder() == *receiver &&
lookup.type() == FIELD &&
!receiver->IsAccessCheckNeeded();
if (can_be_inlined) {
Map* map = lookup.holder()->map();
// Property's index in the properties array. If negative we have
// an inobject property.
int index = lookup.GetFieldIndex() - map->inobject_properties();
if (index < 0) {
// Index is an offset from the end of the object.
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedStore(address(), map, offset)) {
set_target(megamorphic_stub());
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
}
#endif
return receiver->SetProperty(*name, *value, NONE);
#ifdef DEBUG
} else {
if (FLAG_trace_ic) {
PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
*name->ToCString());
}
}
} else {
if (FLAG_trace_ic) {
PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
*name->ToCString());
}
}
} else {
if (state == PREMONOMORPHIC) {
if (FLAG_trace_ic) {
PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
*name->ToCString());
}
}
#endif
}
}
// If no inlined store ic was patched, generate a stub for this
// store.
UpdateCaches(&lookup, state, receiver, name, value);
}
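The guard conditions in this hunk also explain the ClearInlinedVersion protocol earlier in the file: the whole inlined fast path hinges on a single map word at the patch site, so disabling it only requires storing a value no live receiver map can equal. A toy model of that invariant (hypothetical types, not V8's):
// The inlined store hits only when the patched map word matches the
// receiver's map; the null value can never match a real map, so a
// cleared site reliably falls through to the IC.
struct PatchSite {
  const void* expected_map;  // patched by PatchInlinedStore
  int offset;                // inobject field offset
};

static bool InlinedStoreHits(const PatchSite& site,
                             const void* receiver_map) {
  return site.expected_map == receiver_map;
}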

12
deps/v8/src/ic.h

@ -391,6 +391,13 @@ class StoreIC: public IC {
static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
// Clear the use of an inlined version.
static void ClearInlinedVersion(Address address);
// The offset from the inlined patch site to the start of the
// inlined store instruction.
static const int kOffsetToStoreInstruction;
private:
// Update the inline cache and the global stub cache based on the
// lookup result.
@ -408,6 +415,11 @@ class StoreIC: public IC {
}
static void Clear(Address address, Code* target);
// Support for patching the index and the map that is checked in an
// inlined version of the named store.
static bool PatchInlinedStore(Address address, Object* map, int index);
friend class IC;
};

3
deps/v8/src/macros.py

@ -115,7 +115,8 @@ macro FLOOR(arg) = $floor(arg);
# Macro for ECMAScript 5 queries of the type:
# "Type(O) is object."
# This is the same as being either a function or an object in V8 terminology.
macro IS_SPEC_OBJECT_OR_NULL(arg) = (%_IsObject(arg) || %_IsFunction(arg));
# In addition, undetectable objects are also included by this macro.
macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));

4
deps/v8/src/mark-compact.cc

@ -28,6 +28,7 @@
#include "v8.h"
#include "execution.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
@ -2218,6 +2219,7 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@ -2264,6 +2266,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Notify the logger that compiled code has moved.
PROFILE(CodeMoveEvent(old_addr, new_addr));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@ -2308,6 +2311,7 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}

5
deps/v8/src/mips/codegen-mips.cc

@ -907,6 +907,11 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}

1
deps/v8/src/mips/codegen-mips.h

@ -355,6 +355,7 @@ class CodeGenerator: public AstVisitor {
void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
void GenerateStringAdd(ZoneList<Expression*>* args);

40
deps/v8/src/objects.cc

@ -7338,6 +7338,46 @@ int HashTable<Shape, Key>::FindEntry(Key key) {
}
// Find entry for key otherwise return kNotFound.
int StringDictionary::FindEntry(String* key) {
if (!key->IsSymbol()) {
return HashTable<StringDictionaryShape, String*>::FindEntry(key);
}
// Optimized for symbol key. Knowledge of the key type allows:
// 1. Moving the check whether the key is a symbol out of the loop.
// 2. Avoiding hash code comparison in symbol-to-symbol comparisons.
// 3. Detecting the case when a dictionary key is not a symbol but the key is.
// In case of a positive result the dictionary key may be replaced by
// the symbol with minimal performance penalty. It gives a chance to
// perform further lookups in code stubs (and a significant performance
// boost for a certain style of code).
// EnsureCapacity will guarantee the hash table is never full.
uint32_t capacity = Capacity();
uint32_t entry = FirstProbe(key->Hash(), capacity);
uint32_t count = 1;
while (true) {
int index = EntryToIndex(entry);
Object* element = get(index);
if (element->IsUndefined()) break; // Empty entry.
if (key == element) return entry;
if (!element->IsSymbol() &&
!element->IsNull() &&
String::cast(element)->Equals(key)) {
// Replace a non-symbol key by the equivalent symbol for faster further
// lookups.
set(index, key);
return entry;
}
ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
}
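FirstProbe and NextProbe are defined elsewhere in the tree; for orientation, they amount to quadratic probing over a power-of-two capacity, roughly as sketched below (an assumption about their shape, not a quotation of the source):
// Sketch of the probe sequence assumed by FindEntry above: stepping by
// an increasing count (triangular numbers) visits every slot of a
// power-of-two table exactly once.
static inline uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
  return hash & (capacity - 1);
}
static inline uint32_t NextProbe(uint32_t last, uint32_t count,
                                 uint32_t capacity) {
  return (last + count) & (capacity - 1);
}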
template<typename Shape, typename Key>
Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();

12
deps/v8/src/objects.h

@ -39,7 +39,7 @@
#endif
//
// All object types in the V8 JavaScript are described in this file.
// Most object types in the V8 JavaScript are described in this file.
//
// Inheritance hierarchy:
// - Object
@ -74,8 +74,8 @@
// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - SerializedScopeInfo
// - String
// - SeqString
// - SeqAsciiString
@ -2012,7 +2012,7 @@ class HashTable: public FixedArray {
static const int kMaxCapacity =
(FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
// Find entry for key otherwise return -1.
// Find entry for key otherwise return kNotFound.
int FindEntry(Key key);
protected:
@ -2294,6 +2294,10 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
// For transforming properties of a JSObject.
Object* TransformPropertiesToFastFor(JSObject* obj,
int unused_property_fields);
// Find entry for key; otherwise return kNotFound. Optimized version of
// HashTable::FindEntry.
int FindEntry(String* key);
};
@ -2721,7 +2725,7 @@ class Code: public HeapObject {
};
enum {
NUMBER_OF_KINDS = KEYED_STORE_IC + 1
NUMBER_OF_KINDS = LAST_IC_KIND + 1
};
#ifdef ENABLE_DISASSEMBLER

299
deps/v8/src/profile-generator.cc

@ -74,6 +74,7 @@ void TokenEnumerator::TokenRemovedCallback(v8::Persistent<v8::Value> handle,
void* parameter) {
reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
Utils::OpenHandle(*handle).location());
handle.Dispose();
}
@ -181,8 +182,6 @@ void ProfileNode::Print(int indent) {
}
namespace {
class DeleteNodesCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
@ -194,8 +193,6 @@ class DeleteNodesCallback {
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
} // namespace
ProfileTree::ProfileTree()
: root_entry_(Logger::FUNCTION_TAG,
@ -240,8 +237,6 @@ void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
}
namespace {
struct NodesPair {
NodesPair(ProfileNode* src, ProfileNode* dst)
: src(src), dst(dst) { }
@ -294,8 +289,6 @@ class FilteredCloneCallback {
int security_token_id_;
};
} // namespace
void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
FilteredCloneCallback cb(root_, security_token_id);
@ -309,8 +302,6 @@ void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
}
namespace {
class Position {
public:
explicit Position(ProfileNode* node)
@ -328,8 +319,6 @@ class Position {
int child_idx_;
};
} // namespace
// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
@ -355,8 +344,6 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
namespace {
class CalculateTotalTicksCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
@ -370,8 +357,6 @@ class CalculateTotalTicksCallback {
}
};
} // namespace
void ProfileTree::CalculateTotalTicks() {
CalculateTotalTicksCallback cb;
@ -877,6 +862,11 @@ void HeapEntry::SetAutoIndexReference(HeapEntry* entry) {
}
void HeapEntry::SetUnidirAutoIndexReference(HeapEntry* entry) {
children_.Add(new HeapGraphEdge(next_auto_index_++, this, entry));
}
int HeapEntry::TotalSize() {
return total_size_ != kUnknownSize ? total_size_ : CalculateTotalSize();
}
@ -888,12 +878,12 @@ int HeapEntry::NonSharedTotalSize() {
}
int HeapEntry::CalculateTotalSize() {
snapshot_->ClearPaint();
template<class Visitor>
void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
List<HeapEntry*> list(10);
list.Add(this);
total_size_ = self_size_;
this->PaintReachable();
visitor->Apply(this);
while (!list.is_empty()) {
HeapEntry* entry = list.RemoveLast();
const int children_count = entry->children_.length();
@ -902,15 +892,48 @@ int HeapEntry::CalculateTotalSize() {
if (!child->painted_reachable()) {
list.Add(child);
child->PaintReachable();
total_size_ += child->self_size_;
visitor->Apply(child);
}
}
}
return total_size_;
}
namespace {
class NullClass {
public:
void Apply(HeapEntry* entry) { }
};
void HeapEntry::PaintAllReachable() {
NullClass null;
ApplyAndPaintAllReachable(&null);
}
class TotalSizeCalculator {
public:
TotalSizeCalculator()
: total_size_(0) {
}
int total_size() const { return total_size_; }
void Apply(HeapEntry* entry) {
total_size_ += entry->self_size();
}
private:
int total_size_;
};
int HeapEntry::CalculateTotalSize() {
snapshot_->ClearPaint();
TotalSizeCalculator calc;
ApplyAndPaintAllReachable(&calc);
total_size_ = calc.total_size();
return total_size_;
}
class NonSharedSizeCalculator {
public:
@ -930,41 +953,26 @@ class NonSharedSizeCalculator {
int non_shared_total_size_;
};
} // namespace
int HeapEntry::CalculateNonSharedTotalSize() {
// To calculate non-shared total size, first we paint all reachable
// nodes in one color, then we paint all nodes reachable from other
// nodes with a different color. Then we consider only nodes painted
// with the first color for caclulating the total size.
// with the first color for calculating the total size.
snapshot_->ClearPaint();
List<HeapEntry*> list(10);
list.Add(this);
this->PaintReachable();
while (!list.is_empty()) {
HeapEntry* entry = list.RemoveLast();
const int children_count = entry->children_.length();
for (int i = 0; i < children_count; ++i) {
HeapEntry* child = entry->children_[i]->to();
if (!child->painted_reachable()) {
list.Add(child);
child->PaintReachable();
}
}
}
PaintAllReachable();
List<HeapEntry*> list2(10);
List<HeapEntry*> list(10);
if (this != snapshot_->root()) {
list2.Add(snapshot_->root());
list.Add(snapshot_->root());
snapshot_->root()->PaintReachableFromOthers();
}
while (!list2.is_empty()) {
HeapEntry* entry = list2.RemoveLast();
while (!list.is_empty()) {
HeapEntry* entry = list.RemoveLast();
const int children_count = entry->children_.length();
for (int i = 0; i < children_count; ++i) {
HeapEntry* child = entry->children_[i]->to();
if (child != this && child->not_painted_reachable_from_others()) {
list2.Add(child);
list.Add(child);
child->PaintReachableFromOthers();
}
}
@ -972,7 +980,8 @@ int HeapEntry::CalculateNonSharedTotalSize() {
NonSharedSizeCalculator calculator;
snapshot_->IterateEntries(&calculator);
return calculator.non_shared_total_size();
non_shared_total_size_ = calculator.non_shared_total_size();
return non_shared_total_size_;
}
@ -1078,7 +1087,8 @@ void HeapEntry::CutEdges() {
void HeapEntry::Print(int max_depth, int indent) {
OS::Print("%6d %6d %6d ", self_size_, TotalSize(), NonSharedTotalSize());
OS::Print("%6d %6d %6d [%ld] ",
self_size_, TotalSize(), NonSharedTotalSize(), id_);
if (type_ != STRING) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@ -1244,7 +1254,13 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
: collection_(collection),
title_(title),
uid_(uid),
root_(this) {
root_(this),
sorted_entries_(NULL) {
}
HeapSnapshot::~HeapSnapshot() {
delete sorted_entries_;
}
@ -1355,6 +1371,7 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
HeapEntry* entry = new HeapEntry(this,
type,
name,
collection_->GetObjectId(object->address()),
GetObjectSize(object),
GetObjectSecurityToken(object));
entries_.Pair(object, entry);
@ -1381,8 +1398,6 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
}
namespace {
class EdgesCutter {
public:
explicit EdgesCutter(int global_security_token)
@ -1400,8 +1415,6 @@ class EdgesCutter {
const int global_security_token_;
};
} // namespace
void HeapSnapshot::CutObjectsFromForeignSecurityContexts() {
EdgesCutter cutter(GetGlobalSecurityToken());
entries_.Apply(&cutter);
@ -1454,13 +1467,129 @@ int HeapSnapshot::CalculateNetworkSize(JSObject* obj) {
}
class EntriesCollector {
public:
explicit EntriesCollector(List<HeapEntry*>* list) : list_(list) { }
void Apply(HeapEntry* entry) {
list_->Add(entry);
}
private:
List<HeapEntry*>* list_;
};
template<class T>
static int SortByIds(const T* entry1_ptr,
const T* entry2_ptr) {
if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
}
List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
if (sorted_entries_ != NULL) return sorted_entries_;
sorted_entries_ = new List<HeapEntry*>(entries_.capacity());
EntriesCollector collector(sorted_entries_);
entries_.Apply(&collector);
sorted_entries_->Sort(SortByIds);
return sorted_entries_;
}
HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
return collection_->CompareSnapshots(this, snapshot);
}
void HeapSnapshot::Print(int max_depth) {
root_.Print(max_depth, 0);
}
HeapObjectsMap::HeapObjectsMap()
: initial_fill_mode_(true),
next_id_(1),
entries_map_(AddressesMatch),
entries_(new List<EntryInfo>()) { }
HeapObjectsMap::~HeapObjectsMap() {
delete entries_;
}
void HeapObjectsMap::SnapshotGenerationFinished() {
initial_fill_mode_ = false;
RemoveDeadEntries();
}
uint64_t HeapObjectsMap::FindObject(Address addr) {
if (!initial_fill_mode_) {
uint64_t existing = FindEntry(addr);
if (existing != 0) return existing;
}
uint64_t id = next_id_++;
AddEntry(addr, id);
return id;
}
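Taken together with MoveObject below, this gives snapshot entries ids that survive garbage collection. An illustrative sequence (not a test from the tree), assuming the first snapshot has already finished so the map is out of initial-fill mode:
HeapObjectsMap ids;
uint64_t id = ids.FindObject(addr);      // first sight: a fresh id
ids.SnapshotGenerationFinished();        // leave initial-fill mode
ids.MoveObject(addr, new_addr);          // reported by the GC
ASSERT(ids.FindObject(new_addr) == id);  // the id follows the object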
void HeapObjectsMap::MoveObject(Address from, Address to) {
if (from == to) return;
HashMap::Entry* entry = entries_map_.Lookup(from, AddressHash(from), false);
if (entry != NULL) {
void* value = entry->value;
entries_map_.Remove(from, AddressHash(from));
entry = entries_map_.Lookup(to, AddressHash(to), true);
// We can have an entry at the new location; that is OK, as GC can
// overwrite dead objects with live objects being moved.
entry->value = value;
}
}
void HeapObjectsMap::AddEntry(Address addr, uint64_t id) {
HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
ASSERT(entry->value == NULL);
entry->value = reinterpret_cast<void*>(entries_->length());
entries_->Add(EntryInfo(id));
}
uint64_t HeapObjectsMap::FindEntry(Address addr) {
HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
if (entry != NULL) {
int entry_index =
static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_->at(entry_index);
entry_info.accessed = true;
return entry_info.id;
} else {
return 0;
}
}
void HeapObjectsMap::RemoveDeadEntries() {
List<EntryInfo>* new_entries = new List<EntryInfo>();
for (HashMap::Entry* entry = entries_map_.Start();
entry != NULL;
entry = entries_map_.Next(entry)) {
int entry_index =
static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_->at(entry_index);
if (entry_info.accessed) {
entry->value = reinterpret_cast<void*>(new_entries->length());
new_entries->Add(EntryInfo(entry_info.id, false));
}
}
delete entries_;
entries_ = new_entries;
}
HeapSnapshotsCollection::HeapSnapshotsCollection()
: snapshots_uids_(HeapSnapshotsMatch),
: is_tracking_objects_(false),
snapshots_uids_(HeapSnapshotsMatch),
token_enumerator_(new TokenEnumerator()) {
}
@ -1478,6 +1607,7 @@ HeapSnapshotsCollection::~HeapSnapshotsCollection() {
HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
unsigned uid) {
is_tracking_objects_ = true; // Start watching for heap objects moves.
HeapSnapshot* snapshot = new HeapSnapshot(this, name, uid);
snapshots_.Add(snapshot);
HashMap::Entry* entry =
@ -1498,6 +1628,13 @@ HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
}
HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
HeapSnapshot* snapshot1,
HeapSnapshot* snapshot2) {
return comparator_.Compare(snapshot1, snapshot2);
}
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
: snapshot_(snapshot) {
}
@ -1630,6 +1767,64 @@ void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
}
}
static void DeleteHeapSnapshotsDiff(HeapSnapshotsDiff** diff_ptr) {
delete *diff_ptr;
}
HeapSnapshotsComparator::~HeapSnapshotsComparator() {
diffs_.Iterate(DeleteHeapSnapshotsDiff);
}
HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
HeapSnapshot* snapshot2) {
HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
diffs_.Add(diff);
List<HeapEntry*>* entries1 = snapshot1->GetSortedEntriesList();
List<HeapEntry*>* entries2 = snapshot2->GetSortedEntriesList();
int i = 0, j = 0;
List<HeapEntry*> added_entries, deleted_entries;
while (i < entries1->length() && j < entries2->length()) {
uint64_t id1 = entries1->at(i)->id();
uint64_t id2 = entries2->at(j)->id();
if (id1 == id2) {
i++;
j++;
} else if (id1 < id2) {
HeapEntry* entry = entries1->at(i++);
deleted_entries.Add(entry);
} else {
HeapEntry* entry = entries2->at(j++);
added_entries.Add(entry);
}
}
while (i < entries1->length()) {
HeapEntry* entry = entries1->at(i++);
deleted_entries.Add(entry);
}
while (j < entries2->length()) {
HeapEntry* entry = entries2->at(j++);
added_entries.Add(entry);
}
snapshot1->ClearPaint();
snapshot1->root()->PaintAllReachable();
for (int i = 0; i < deleted_entries.length(); ++i) {
HeapEntry* entry = deleted_entries[i];
if (entry->painted_reachable())
diff->AddDeletedEntry(entry);
}
snapshot2->ClearPaint();
snapshot2->root()->PaintAllReachable();
for (int i = 0; i < added_entries.length(); ++i) {
HeapEntry* entry = added_entries[i];
if (entry->painted_reachable())
diff->AddAddedEntry(entry);
}
return diff;
}
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING

128
deps/v8/src/profile-generator.h

@ -74,7 +74,7 @@ class StringsStorage {
reinterpret_cast<char*>(key2)) == 0;
}
// String::Hash -> const char*
// Mapping of strings by String::Hash to const char* strings.
HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
@ -156,7 +156,7 @@ class ProfileNode {
CodeEntry* entry_;
unsigned total_ticks_;
unsigned self_ticks_;
// CodeEntry* -> ProfileNode*
// Mapping from CodeEntry* to ProfileNode*
HashMap children_;
List<ProfileNode*> children_list_;
@ -312,11 +312,12 @@ class CpuProfilesCollection {
}
StringsStorage function_and_resource_names_;
// args_count -> char*
// Mapping from args_count (int) to char* strings.
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
List<List<CpuProfile*>* > profiles_by_token_;
// uid -> index
// Mapping from profiles' uids to indexes in the second nested list
// of profiles_by_token_.
HashMap profiles_uids_;
// Accessed by VM thread and profile generator thread.
@ -482,6 +483,7 @@ class HeapEntry {
visited_(false),
type_(INTERNAL),
name_(""),
id_(0),
next_auto_index_(0),
self_size_(0),
security_token_id_(TokenEnumerator::kNoSecurityToken),
@ -494,12 +496,14 @@ class HeapEntry {
HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
uint64_t id,
int self_size,
int security_token_id)
: snapshot_(snapshot),
visited_(false),
type_(type),
name_(name),
id_(id),
next_auto_index_(1),
self_size_(self_size),
security_token_id_(security_token_id),
@ -514,6 +518,7 @@ class HeapEntry {
bool visited() const { return visited_; }
Type type() const { return type_; }
const char* name() const { return name_; }
uint64_t id() const { return id_; }
int self_size() const { return self_size_; }
int security_token_id() const { return security_token_id_; }
bool painted_reachable() { return painted_ == kPaintReachable; }
@ -524,9 +529,13 @@ class HeapEntry {
const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
const List<HeapGraphPath*>* GetRetainingPaths();
template<class Visitor>
void ApplyAndPaintAllReachable(Visitor* visitor);
void ClearPaint() { painted_ = kUnpainted; }
void CutEdges();
void MarkAsVisited() { visited_ = true; }
void PaintAllReachable();
void PaintReachable() {
ASSERT(painted_ == kUnpainted);
painted_ = kPaintReachable;
@ -537,6 +546,7 @@ class HeapEntry {
void SetInternalReference(const char* name, HeapEntry* entry);
void SetPropertyReference(const char* name, HeapEntry* entry);
void SetAutoIndexReference(HeapEntry* entry);
void SetUnidirAutoIndexReference(HeapEntry* entry);
int TotalSize();
int NonSharedTotalSize();
@ -557,6 +567,7 @@ class HeapEntry {
bool visited_;
Type type_;
const char* name_;
uint64_t id_;
int next_auto_index_;
int self_size_;
int security_token_id_;
@ -607,6 +618,8 @@ class HeapEntriesMap {
HeapEntry* Map(HeapObject* object);
void Pair(HeapObject* object, HeapEntry* entry);
uint32_t capacity() { return entries_.capacity(); }
private:
INLINE(uint32_t Hash(HeapObject* object)) {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
@ -627,6 +640,7 @@ class HeapEntriesMap {
class HeapSnapshotsCollection;
class HeapSnapshotsDiff;
// HeapSnapshot represents a single heap snapshot. It is stored in
// HeapSnapshotsCollection, which is also a factory for
@ -638,6 +652,7 @@ class HeapSnapshot {
HeapSnapshot(HeapSnapshotsCollection* collection,
const char* title,
unsigned uid);
~HeapSnapshot();
void ClearPaint();
void CutObjectsFromForeignSecurityContexts();
HeapEntry* GetEntry(Object* object);
@ -655,6 +670,8 @@ class HeapSnapshot {
HeapEntry* root() { return &root_; }
template<class Visitor>
void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
List<HeapEntry*>* GetSortedEntriesList();
HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
void Print(int max_depth);
@ -679,19 +696,108 @@ class HeapSnapshot {
const char* title_;
unsigned uid_;
HeapEntry root_;
// HeapObject* -> HeapEntry*
// Mapping from HeapObject* pointers to HeapEntry* pointers.
HeapEntriesMap entries_;
// Entries sorted by id.
List<HeapEntry*>* sorted_entries_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
};
class HeapObjectsMap {
public:
HeapObjectsMap();
~HeapObjectsMap();
void SnapshotGenerationFinished();
uint64_t FindObject(Address addr);
void MoveObject(Address from, Address to);
private:
struct EntryInfo {
explicit EntryInfo(uint64_t id) : id(id), accessed(true) { }
EntryInfo(uint64_t id, bool accessed) : id(id), accessed(accessed) { }
uint64_t id;
bool accessed;
};
void AddEntry(Address addr, uint64_t id);
uint64_t FindEntry(Address addr);
void RemoveDeadEntries();
static bool AddressesMatch(void* key1, void* key2) {
return key1 == key2;
}
static uint32_t AddressHash(Address addr) {
return static_cast<int32_t>(reinterpret_cast<intptr_t>(addr));
}
bool initial_fill_mode_;
uint64_t next_id_;
HashMap entries_map_;
List<EntryInfo>* entries_;
DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
};
class HeapSnapshotsDiff {
public:
HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
: snapshot1_(snapshot1),
snapshot2_(snapshot2),
additions_root_(new HeapEntry(snapshot2)),
deletions_root_(new HeapEntry(snapshot1)) { }
~HeapSnapshotsDiff() {
delete deletions_root_;
delete additions_root_;
}
void AddAddedEntry(HeapEntry* entry) {
additions_root_->SetUnidirAutoIndexReference(entry);
}
void AddDeletedEntry(HeapEntry* entry) {
deletions_root_->SetUnidirAutoIndexReference(entry);
}
const HeapEntry* additions_root() const { return additions_root_; }
const HeapEntry* deletions_root() const { return deletions_root_; }
private:
HeapSnapshot* snapshot1_;
HeapSnapshot* snapshot2_;
HeapEntry* additions_root_;
HeapEntry* deletions_root_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
};
class HeapSnapshotsComparator {
public:
HeapSnapshotsComparator() { }
~HeapSnapshotsComparator();
HeapSnapshotsDiff* Compare(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2);
private:
List<HeapSnapshotsDiff*> diffs_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsComparator);
};
class HeapSnapshotsCollection {
public:
HeapSnapshotsCollection();
~HeapSnapshotsCollection();
bool is_tracking_objects() { return is_tracking_objects_; }
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
@ -699,16 +805,26 @@ class HeapSnapshotsCollection {
TokenEnumerator* token_enumerator() { return token_enumerator_; }
uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
HeapSnapshotsDiff* CompareSnapshots(HeapSnapshot* snapshot1,
HeapSnapshot* snapshot2);
private:
INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
return key1 == key2;
}
bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
// uid -> HeapSnapshot*
// Mapping from snapshots' uids to HeapSnapshot* pointers.
HashMap snapshots_uids_;
StringsStorage names_;
TokenEnumerator* token_enumerator_;
// Mapping from HeapObject addresses to objects' uids.
HeapObjectsMap ids_;
HeapSnapshotsComparator comparator_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};

20
deps/v8/src/runtime.js

@ -80,7 +80,7 @@ function EQUALS(y) {
} else {
// x is not a number, boolean, null or undefined.
if (y == null) return 1; // not equal
if (IS_SPEC_OBJECT_OR_NULL(y)) {
if (IS_SPEC_OBJECT(y)) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
@ -345,7 +345,7 @@ function DELETE(key) {
// ECMA-262, section 11.8.7, page 54.
function IN(x) {
if (x == null || !IS_SPEC_OBJECT_OR_NULL(x)) {
if (!IS_SPEC_OBJECT(x)) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
@ -363,13 +363,13 @@ function INSTANCE_OF(F) {
}
// If V is not an object, return false.
if (IS_NULL(V) || !IS_SPEC_OBJECT_OR_NULL(V)) {
if (!IS_SPEC_OBJECT(V)) {
return 1;
}
// Get the prototype of F; if it is not an object, throw an error.
var O = F.prototype;
if (IS_NULL(O) || !IS_SPEC_OBJECT_OR_NULL(O)) {
if (!IS_SPEC_OBJECT(O)) {
throw %MakeTypeError('instanceof_nonobject_proto', [O]);
}
@ -483,8 +483,7 @@ function ToPrimitive(x, hint) {
// Fast case check.
if (IS_STRING(x)) return x;
// Normal behavior.
if (!IS_SPEC_OBJECT_OR_NULL(x)) return x;
if (x == null) return x; // check for null, undefined
if (!IS_SPEC_OBJECT(x)) return x;
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
}
@ -583,13 +582,10 @@ function SameValue(x, y) {
// Returns true if the given x is a primitive value - not an object or a
// function.
function IsPrimitive(x) {
if (!IS_SPEC_OBJECT_OR_NULL(x)) {
return true;
} else {
// Even though the type of null is "object", null is still
// considered a primitive value.
return IS_NULL(x);
}
// considered a primitive value. IS_SPEC_OBJECT handles this correctly
// (i.e., it will return false if x is null).
return !IS_SPEC_OBJECT(x);
}

2
deps/v8/src/serialize.cc

@ -360,6 +360,7 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
5,
"StackGuard::address_of_real_jslimit()");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::address_of_regexp_stack_limit().address(),
UNCLASSIFIED,
6,
@ -376,6 +377,7 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
9,
"OffsetsVector::static_offsets_vector");
#endif // V8_INTERPRETED_REGEXP
Add(ExternalReference::new_space_start().address(),
UNCLASSIFIED,
10,

4
deps/v8/src/spaces.cc

@ -342,7 +342,9 @@ void MemoryAllocator::TearDown() {
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
return NULL;
}
void* mem;
if (executable == EXECUTABLE && CodeRange::exists()) {
mem = CodeRange::AllocateRawMemory(requested, allocated);
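The extra casts matter on 32-bit builds: truncating a large size_t request to int can yield a negative value, letting the old guard pass a request it should reject. A self-contained illustration with made-up sizes (hypothetical values, not V8 code):
#include <cassert>
#include <cstddef>

int main() {
  size_t requested = 0x80000000u;       // a 2 GB request
  int size = 0;
  int capacity = 0x10000000;            // 256 MB capacity
  // Old-style check: the cast commonly yields INT_MIN, so the sum is
  // negative and the guard fails to reject the oversized request.
  bool old_rejects = (size + static_cast<int>(requested)) > capacity;
  // New-style check: compare everything as size_t, which rejects it.
  bool new_rejects =
      (static_cast<size_t>(size) + requested) > static_cast<size_t>(capacity);
  assert(!old_rejects && new_rejects);
  return 0;
}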

46
deps/v8/src/v8natives.js

@ -225,16 +225,14 @@ function ObjectHasOwnProperty(V) {
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
if (!IS_SPEC_OBJECT_OR_NULL(V) && !IS_UNDETECTABLE(V)) return false;
if (!IS_SPEC_OBJECT(V)) return false;
return %IsInPrototypeChain(this, V);
}
// ECMA-262 - 15.2.4.7
function ObjectPropertyIsEnumerable(V) {
if (this == null) return false;
if (!IS_SPEC_OBJECT_OR_NULL(this)) return false;
return %IsPropertyEnumerable(this, ToString(V));
return %IsPropertyEnumerable(ToObject(this), ToString(V));
}
@ -279,8 +277,7 @@ function ObjectLookupSetter(name) {
function ObjectKeys(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
return %LocalKeys(obj);
}
@ -329,7 +326,7 @@ function FromPropertyDescriptor(desc) {
// ES5 8.10.5.
function ToPropertyDescriptor(obj) {
if (!IS_SPEC_OBJECT_OR_NULL(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("property_desc_object", [obj]);
}
var desc = new PropertyDescriptor();
@ -626,8 +623,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
return obj.__proto__;
}
@ -635,8 +631,7 @@ function ObjectGetPrototypeOf(obj) {
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
var desc = GetOwnProperty(obj, p);
return FromPropertyDescriptor(desc);
@ -645,8 +640,7 @@ function ObjectGetOwnPropertyDescriptor(obj, p) {
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
// Find all the indexed properties.
@ -698,7 +692,7 @@ function ObjectGetOwnPropertyNames(obj) {
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
if (!IS_SPEC_OBJECT_OR_NULL(proto)) {
if (!IS_SPEC_OBJECT(proto) && proto !== null) {
throw MakeTypeError("proto_object_or_null", [proto]);
}
var obj = new $Object();
@ -710,8 +704,7 @@ function ObjectCreate(proto, properties) {
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
}
var name = ToString(p);
@ -723,8 +716,7 @@ function ObjectDefineProperty(obj, p, attributes) {
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
var props = ToObject(properties);
var key_values = [];
@ -747,8 +739,7 @@ function ObjectDefineProperties(obj, properties) {
// ES5 section 15.2.3.8.
function ObjectSeal(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
}
var names = ObjectGetOwnPropertyNames(obj);
@ -764,8 +755,7 @@ function ObjectSeal(obj) {
// ES5 section 15.2.3.9.
function ObjectFreeze(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
}
var names = ObjectGetOwnPropertyNames(obj);
@ -782,8 +772,7 @@ function ObjectFreeze(obj) {
// ES5 section 15.2.3.10
function ObjectPreventExtension(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
%PreventExtensions(obj);
@ -793,8 +782,7 @@ function ObjectPreventExtension(obj) {
// ES5 section 15.2.3.11
function ObjectIsSealed(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
}
var names = ObjectGetOwnPropertyNames(obj);
@ -812,8 +800,7 @@ function ObjectIsSealed(obj) {
// ES5 section 15.2.3.12
function ObjectIsFrozen(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
}
var names = ObjectGetOwnPropertyNames(obj);
@ -832,8 +819,7 @@ function ObjectIsFrozen(obj) {
// ES5 section 15.2.3.13
function ObjectIsExtensible(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
return %IsExtensible(obj);

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 3
#define BUILD_NUMBER 0
#define BUILD_NUMBER 2
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

557
deps/v8/src/x64/codegen-x64.cc

@ -139,7 +139,7 @@ CodeGenState::~CodeGenState() {
}
// -----------------------------------------------------------------------------
// -------------------------------------------------------------------------
// CodeGenerator implementation.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
@ -155,6 +155,12 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
}
// Calling conventions:
// rbp: caller's frame pointer
// rsp: stack pointer
// rdi: called JS function
// rsi: callee's context
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(info->function());
@ -171,7 +177,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Adjust for function-level loop nesting.
ASSERT_EQ(0, loop_nesting_);
loop_nesting_ += info->loop_nesting();
loop_nesting_ = info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@ -469,14 +475,14 @@ Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
// frame. If the expression is boolean-valued it may be compiled (or
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
void CodeGenerator::LoadCondition(Expression* expr,
ControlDestination* dest,
bool force_control) {
ASSERT(!in_spilled_code());
int original_height = frame_->height();
{ CodeGenState new_state(this, dest);
Visit(x);
Visit(expr);
// If we hit a stack overflow, we may not have actually visited
// the expression. In that case, we ensure that we have a
@ -496,7 +502,6 @@ void CodeGenerator::LoadCondition(Expression* x,
if (force_control && !dest->is_used()) {
// Convert the TOS value into flow to the control destination.
// TODO(X64): Make control flow to control destinations work.
ToBoolean(dest);
}
@ -506,7 +511,6 @@ void CodeGenerator::LoadCondition(Expression* x,
void CodeGenerator::LoadAndSpill(Expression* expression) {
// TODO(x64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Load(expression);
@ -652,7 +656,6 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->Push(&result);
}
Variable* arguments = scope()->arguments()->var();
Variable* shadow = scope()->arguments_shadow()->var();
ASSERT(arguments != NULL && arguments->slot() != NULL);
@ -663,11 +666,11 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
// We have to skip storing into the arguments slot if it has
// already been written to. This can happen if a function
// has a local variable named 'arguments'.
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has been
// assigned a proper value.
// We have to skip updating the arguments object if it has
// been assigned a proper value.
skip_arguments = !probe.handle()->IsTheHole();
} else {
__ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
@ -686,9 +689,6 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
Reference::Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get)
@ -845,8 +845,8 @@ class FloatingPointHelper : public AllStatic {
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(len);
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@ -857,7 +857,7 @@ const char* GenericBinaryOpStub::GetName() {
default: overwrite_name = "UnknownOverwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, len),
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
op_name,
overwrite_name,
@ -1138,7 +1138,7 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
// If the product is zero and the non-zero factor is negative,
// the spec requires us to return floating point negative zero.
if (answer != 0 || (left + right) >= 0) {
if (answer != 0 || (left >= 0 && right >= 0)) {
answer_object = Smi::FromInt(static_cast<int>(answer));
}
}
@ -1645,7 +1645,6 @@ class DeferredInlineSmiSub: public DeferredCode {
};
void DeferredInlineSmiSub::Generate() {
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
igostub.GenerateCall(masm_, dst_, value_);
@ -1710,6 +1709,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
answer = *operand;
DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
smi_value,
overwrite_mode);
@ -1721,7 +1721,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
smi_value,
deferred->entry_label());
deferred->BindExit();
answer = *operand;
operand->Unuse();
}
break;
}
@ -1932,6 +1932,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
return answer;
}
static bool CouldBeNaN(const Result& result) {
if (result.type_info().IsSmi()) return false;
if (result.type_info().IsInteger32()) return false;
@ -2002,106 +2003,11 @@ void CodeGenerator::Comparison(AstNode* node,
}
if (left_side_constant_smi || right_side_constant_smi) {
if (left_side_constant_smi && right_side_constant_smi) {
// Trivial case, comparing two constants.
int left_value = Smi::cast(*left_side.handle())->value();
int right_value = Smi::cast(*right_side.handle())->value();
switch (cc) {
case less:
dest->Goto(left_value < right_value);
break;
case equal:
dest->Goto(left_value == right_value);
break;
case greater_equal:
dest->Goto(left_value >= right_value);
break;
default:
UNREACHABLE();
}
} else {
// Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
Result temp = left_side;
left_side = right_side;
right_side = temp;
cc = ReverseCondition(cc);
// This may re-introduce greater or less_equal as the value of cc.
// CompareStub and the inline code both support all values of cc.
}
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
Register left_reg = left_side.reg();
Handle<Object> right_val = right_side.handle();
// Here we split control flow to the stub call and inlined cases
// before finally splitting it to the control destination. We use
// a jump target and branching to duplicate the virtual frame at
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
if (left_side.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_side.reg());
}
} else {
Condition left_is_smi = masm_->CheckSmi(left_side.reg());
is_smi.Branch(left_is_smi);
bool is_loop_condition = (node->AsExpression() != NULL) &&
node->AsExpression()->is_loop_condition();
if (!is_loop_condition && right_val->IsSmi()) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
JumpTarget not_number;
__ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
Factory::heap_number_map());
not_number.Branch(not_equal, &left_side);
__ movsd(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
int value = Smi::cast(*right_val)->value();
if (value == 0) {
__ xorpd(xmm0, xmm0);
} else {
Result temp = allocator()->Allocate();
__ movl(temp.reg(), Immediate(value));
__ cvtlsi2sd(xmm0, temp.reg());
temp.Unuse();
}
__ ucomisd(xmm1, xmm0);
// Jump to builtin for NaN.
not_number.Branch(parity_even, &left_side);
left_side.Unuse();
dest->true_target()->Branch(DoubleCondition(cc));
dest->false_target()->Jump();
not_number.Bind(&left_side);
}
// Setup and call the compare stub.
CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
__ testq(result.reg(), result.reg());
result.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
is_smi.Bind();
}
left_side = Result(left_reg);
right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
// Both sides are smis, so we can use an Immediate.
__ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
left_side.Unuse();
right_side.Unuse();
dest->Split(cc);
}
ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
left_side_constant_smi, right_side_constant_smi,
is_loop_condition);
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@ -2274,7 +2180,8 @@ void CodeGenerator::Comparison(AstNode* node,
}
} else {
// Neither side is a constant Smi, constant 1-char string, or constant null.
// If either side is a non-smi constant, skip the smi check.
// If either side is a non-smi constant, or known to be a heap number,
// skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi()) ||
@ -2300,6 +2207,7 @@ void CodeGenerator::Comparison(AstNode* node,
bool inline_number_compare =
loop_nesting() > 0 && cc != equal && !is_loop_condition;
// Left and right needed in registers for the following code.
left_side.ToRegister();
right_side.ToRegister();
@ -2317,6 +2225,8 @@ void CodeGenerator::Comparison(AstNode* node,
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
// End of in-line compare, call out to the compare stub. Don't include
// number comparison in the stub if it was inlined.
CompareStub stub(cc, strict, nan_info, !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
@ -2347,6 +2257,8 @@ void CodeGenerator::Comparison(AstNode* node,
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
// End of in-line compare, call out to the compare stub. Don't include
// number comparison in the stub if it was inlined.
CompareStub stub(cc, strict, nan_info, !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
@ -2366,6 +2278,128 @@ void CodeGenerator::Comparison(AstNode* node,
}
void CodeGenerator::ConstantSmiComparison(Condition cc,
bool strict,
ControlDestination* dest,
Result* left_side,
Result* right_side,
bool left_side_constant_smi,
bool right_side_constant_smi,
bool is_loop_condition) {
if (left_side_constant_smi && right_side_constant_smi) {
// Trivial case, comparing two constants.
int left_value = Smi::cast(*left_side->handle())->value();
int right_value = Smi::cast(*right_side->handle())->value();
switch (cc) {
case less:
dest->Goto(left_value < right_value);
break;
case equal:
dest->Goto(left_value == right_value);
break;
case greater_equal:
dest->Goto(left_value >= right_value);
break;
default:
UNREACHABLE();
}
} else {
// Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
Result* temp = left_side;
left_side = right_side;
right_side = temp;
cc = ReverseCondition(cc);
// This may re-introduce greater or less_equal as the value of cc.
// CompareStub and the inline code both support all values of cc.
}
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side->ToRegister();
Register left_reg = left_side->reg();
Smi* constant_smi = Smi::cast(*right_side->handle());
if (left_side->is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_reg);
}
// Test smi equality and comparison by signed int comparison.
// Both sides are smis, so we can use an Immediate.
__ SmiCompare(left_reg, constant_smi);
left_side->Unuse();
right_side->Unuse();
dest->Split(cc);
} else {
// Only the case where the left side might be a non-smi remains.
JumpTarget is_smi;
if (cc == equal) {
// We can do the equality comparison before the smi check.
__ SmiCompare(left_reg, constant_smi);
dest->true_target()->Branch(equal);
Condition left_is_smi = masm_->CheckSmi(left_reg);
dest->false_target()->Branch(left_is_smi);
} else {
// Do the smi check, then the comparison.
Condition left_is_smi = masm_->CheckSmi(left_reg);
is_smi.Branch(left_is_smi, left_side, right_side);
}
// Jump or fall through to here if we are comparing a non-smi to a
// constant smi. If the non-smi is a heap number and this is not
// a loop condition, inline the floating point code.
if (!is_loop_condition) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
JumpTarget not_number;
__ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
Factory::heap_number_map());
not_number.Branch(not_equal, left_side);
__ movsd(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
int value = constant_smi->value();
if (value == 0) {
__ xorpd(xmm0, xmm0);
} else {
Result temp = allocator()->Allocate();
__ movl(temp.reg(), Immediate(value));
__ cvtlsi2sd(xmm0, temp.reg());
temp.Unuse();
}
__ ucomisd(xmm1, xmm0);
// Jump to builtin for NaN.
not_number.Branch(parity_even, left_side);
left_side->Unuse();
dest->true_target()->Branch(DoubleCondition(cc));
dest->false_target()->Jump();
not_number.Bind(left_side);
}
// Setup and call the compare stub.
CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, left_side, right_side);
result.ToRegister();
__ testq(result.reg(), result.reg());
result.Unuse();
if (cc == equal) {
dest->Split(cc);
} else {
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
// It is important for performance for this case to be at the end.
is_smi.Bind(left_side, right_side);
__ SmiCompare(left_reg, constant_smi);
left_side->Unuse();
right_side->Unuse();
dest->Split(cc);
}
}
}
}
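The equal-before-smi-check shortcut above leans on V8's pointer tagging (stated here as an assumption): smis carry a zero low tag bit and heap object pointers a one, so a raw word compare of a heap object against a smi immediate can never report equality. In sketch form:
#include <stdint.h>

// Assumed tagging: kSmiTag == 0, kHeapObjectTag == 1 in the low bit.
static bool IsSmiWord(uintptr_t word) { return (word & 1) == 0; }

// A heap object's word has the low bit set and therefore differs from
// every smi immediate in at least that bit, so a raw-word equality hit
// implies the left side was a smi all along.
static bool RawCompareIsSafe(uintptr_t left_word, uintptr_t smi_immediate) {
  return left_word != smi_immediate || IsSmiWord(left_word);
}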
// Load a comparison operand into an XMM register. Jump to the not_numbers
// jump target, passing the left and right result, if the operand is not a
// number.
@ -2677,7 +2711,6 @@ void CodeGenerator::CheckStack() {
void CodeGenerator::VisitAndSpill(Statement* statement) {
// TODO(X64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Visit(statement);
@ -2689,6 +2722,9 @@ void CodeGenerator::VisitAndSpill(Statement* statement) {
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
ASSERT(in_spilled_code());
set_in_spilled_code(false);
VisitStatements(statements);
@ -2696,14 +2732,20 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
frame_->SpillAll();
}
set_in_spilled_code(true);
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
Visit(statements->at(i));
}
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@ -2939,6 +2981,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
CodeForStatementPosition(node);
Load(node->expression());
Result return_value = frame_->Pop();
masm()->WriteRecordedPositions();
if (function_return_is_shadowed_) {
function_return_.Jump(&return_value);
} else {
@ -2976,6 +3019,8 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// receiver.
frame_->Exit();
masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
DeleteFrame();
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint.
// frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
@ -2989,7 +3034,6 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
DeleteFrame();
}
@ -3028,8 +3072,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// TODO(X64): This code is completely generic and should be moved somewhere
// where it can be shared between architectures.
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
@ -3321,8 +3363,8 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
LoadCondition(node->cond(), &dest, true);
}
} else {
// If we have chosen not to recompile the test at the
// bottom, jump back to the one at the top.
// If we have chosen not to recompile the test at the bottom,
// jump back to the one at the top.
if (has_valid_frame()) {
node->continue_target()->Jump();
}
@ -3884,6 +3926,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
node->break_target()->Unuse();
}
void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
@ -4922,7 +4965,192 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
}
void CodeGenerator::EmitSlotAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame()->height();
#endif
Comment cmnt(masm(), "[ Variable Assignment");
Variable* var = node->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
Slot* slot = var->slot();
ASSERT(slot != NULL);
// Evaluate the right-hand side.
if (node->is_compound()) {
// For a compound assignment the right-hand side is a binary operation
// between the current property value and the actual right-hand side.
LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
Load(node->value());
// Perform the binary operation.
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
// Construct the implicit binary operation.
BinaryOperation expr(node, node->binary_op(), node->target(),
node->value());
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
// For non-compound assignment just load the right-hand side.
Load(node->value());
}
// Perform the assignment.
if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
CodeForSourcePosition(node->position());
StoreToSlot(slot,
node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
}
ASSERT(frame()->height() == original_height + 1);
}
void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame()->height();
#endif
Comment cmnt(masm(), "[ Named Property Assignment");
Variable* var = node->target()->AsVariableProxy()->AsVariable();
Property* prop = node->target()->AsProperty();
ASSERT(var == NULL || (prop == NULL && var->is_global()));
// Initialize name and evaluate the receiver sub-expression if necessary. If
// the receiver is trivial it is not placed on the stack at this point, but
// loaded whenever actually needed.
Handle<String> name;
bool is_trivial_receiver = false;
if (var != NULL) {
name = var->name();
} else {
Literal* lit = prop->key()->AsLiteral();
ASSERT_NOT_NULL(lit);
name = Handle<String>::cast(lit->handle());
// Do not materialize the receiver on the frame if it is trivial.
is_trivial_receiver = prop->obj()->IsTrivial();
if (!is_trivial_receiver) Load(prop->obj());
}
// Change to slow case in the beginning of an initialization block to
// avoid the quadratic behavior of repeatedly adding fast properties.
if (node->starts_initialization_block()) {
// An initialization block consists of assignments of the form expr.x = ...,
// so this will never be an assignment to a variable and there must be a
// receiver object.
ASSERT_EQ(NULL, var);
if (is_trivial_receiver) {
frame()->Push(prop->obj());
} else {
frame()->Dup();
}
Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
}
// Change to fast case at the end of an initialization block. To prepare for
// that add an extra copy of the receiver to the frame, so that it can be
// converted back to fast case after the assignment.
if (node->ends_initialization_block() && !is_trivial_receiver) {
frame()->Dup();
}
// Stack layout:
// [tos] : receiver (only materialized if non-trivial)
// [tos+1] : receiver if at the end of an initialization block
// Evaluate the right-hand side.
if (node->is_compound()) {
// For a compound assignment the right-hand side is a binary operation
// between the current property value and the actual right-hand side.
if (is_trivial_receiver) {
frame()->Push(prop->obj());
} else if (var != NULL) {
// The LoadIC stub expects the object in rax.
// Freeing rax causes the code generator to load the global into it.
frame_->Spill(rax);
LoadGlobal();
} else {
frame()->Dup();
}
Result value = EmitNamedLoad(name, var != NULL);
frame()->Push(&value);
Load(node->value());
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
// Construct the implicit binary operation.
BinaryOperation expr(node, node->binary_op(), node->target(),
node->value());
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
// For non-compound assignment just load the right-hand side.
Load(node->value());
}
// Stack layout:
// [tos] : value
// [tos+1] : receiver (only materialized if non-trivial)
// [tos+2] : receiver if at the end of an initialization block
// Perform the assignment. It is safe to ignore constants here.
ASSERT(var == NULL || var->mode() != Variable::CONST);
ASSERT_NE(Token::INIT_CONST, node->op());
if (is_trivial_receiver) {
Result value = frame()->Pop();
frame()->Push(prop->obj());
frame()->Push(&value);
}
CodeForSourcePosition(node->position());
bool is_contextual = (var != NULL);
Result answer = EmitNamedStore(name, is_contextual);
frame()->Push(&answer);
// Stack layout:
// [tos] : result
// [tos+1] : receiver if at the end of an initialization block
if (node->ends_initialization_block()) {
ASSERT_EQ(NULL, var);
// The argument to the runtime call is the receiver.
if (is_trivial_receiver) {
frame()->Push(prop->obj());
} else {
// A copy of the receiver is below the value of the assignment. Swap
// the receiver and the value of the assignment expression.
Result result = frame()->Pop();
Result receiver = frame()->Pop();
frame()->Push(&result);
frame()->Push(&receiver);
}
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
// Stack layout:
// [tos] : result
ASSERT_EQ(frame()->height(), original_height + 1);
}
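The kToSlowProperties/kToFastProperties runtime calls bracket an initialization block: repeatedly adding fast properties is quadratic, so the object is flipped to dictionary mode for the run of stores and converted back afterwards. A toy model of that protocol (the Obj struct and helper names are hypothetical, not V8's):

    #include <cstdio>
    #include <map>
    #include <string>

    // Hypothetical object with a fast/slow property mode.
    struct Obj {
      bool fast;
      std::map<std::string, int> props;
    };

    void ToSlowProperties(Obj* o) { o->fast = false; }  // start of block
    void ToFastProperties(Obj* o) { o->fast = true; }   // end of block

    int main() {
      Obj o;
      o.fast = true;
      ToSlowProperties(&o);  // avoid quadratic fast-property growth
      o.props["a"] = 1;      // bulk stores are cheap in dictionary mode
      o.props["b"] = 2;
      o.props["c"] = 3;
      ToFastProperties(&o);  // back to fast lookups after the block
      std::printf("fast=%d props=%zu\n", o.fast, o.props.size());
      return 0;
    }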
void CodeGenerator::VisitAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame()->height();
#endif
Variable* var = node->target()->AsVariableProxy()->AsVariable();
Property* prop = node->target()->AsProperty();
if (var != NULL && !var->is_global()) {
EmitSlotAssignment(node);
} else if ((prop != NULL && prop->key()->IsPropertyName()) ||
(var != NULL && var->is_global())) {
// Properties whose keys are property names and global variables are
// treated as named property references. We do not need to consider
// global 'this' because it is not a valid left-hand side.
EmitNamedPropertyAssignment(node);
} else {
Comment cmnt(masm_, "[ Assignment");
{ Reference target(this, node->target(), node->is_compound());
@ -4932,7 +5160,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->Push(Smi::FromInt(0));
return;
}
Variable* var = node->target()->AsVariableProxy()->AsVariable();
if (node->starts_initialization_block()) {
ASSERT(target.type() == Reference::NAMED ||
@ -4975,8 +5202,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
// There are two cases where the target is not read in the right hand
// side, that are easy to test for: the right hand side is a literal,
// or the right hand side is a different variable. TakeValue invalidates
// the target, with an implicit promise that it will be written to again
// or the right hand side is a different variable. TakeValue
// invalidates the target, with an implicit promise that it will be
// written to again
// before it is read.
if (literal != NULL || (right_var != NULL && right_var != var)) {
target.TakeValue();
@ -4986,10 +5214,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
Load(node->value());
BinaryOperation expr(node, node->binary_op(), node->target(),
node->value());
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
GenericBinaryOperation(
&expr, overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
}
if (var != NULL &&
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
@ -5020,6 +5247,11 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
}
}
}
// Stack layout:
// [tos] : result
ASSERT(frame()->height() == original_height + 1);
}
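VisitAssignment above dispatches on the target: a non-global variable goes to EmitSlotAssignment, a property with a string key or a global variable goes to EmitNamedPropertyAssignment, and everything else falls through to the generic Reference path (keyed stores). The same decision table as a self-contained sketch, with boolean flags standing in for the AST queries:

    #include <cstdio>

    enum TargetKind { SLOT, NAMED, KEYED };

    TargetKind Classify(bool is_variable, bool is_global, bool key_is_name) {
      if (is_variable && !is_global) return SLOT;      // EmitSlotAssignment
      if ((!is_variable && key_is_name) ||
          (is_variable && is_global)) return NAMED;    // EmitNamedPropertyAssignment
      return KEYED;                                    // generic Reference path
    }

    int main() {
      std::printf("%d %d %d\n",
                  Classify(true, false, false),    // local var: SLOT
                  Classify(true, true, false),     // global var: NAMED
                  Classify(false, false, false));  // obj[expr]: KEYED
      return 0;
    }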
void CodeGenerator::VisitThrow(Throw* node) {
@ -5680,6 +5912,25 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) === 'RegExp' ||
// typeof(arg) === 'function').
// It includes undetectable objects (as opposed to IsObject).
ASSERT(args->length() == 1);
Load(args->at(0));
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
Condition is_smi = masm_->CheckSmi(value.reg());
destination()->false_target()->Branch(is_smi);
// Check that this is an object.
__ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
value.Unuse();
destination()->Split(above_equal);
}
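GenerateIsSpecObject rejects smis first (a smi can never be a spec object) and then compares the instance type against FIRST_JS_OBJECT_TYPE. A host-side sketch of the two checks (the zero low bit for smis matches V8's tagging convention; the type constant here is illustrative):

    #include <cstdint>
    #include <cstdio>

    static const int kFirstJSObjectType = 0x80;  // illustrative threshold

    bool IsSmi(uintptr_t value) { return (value & 1) == 0; }

    bool IsSpecObject(uintptr_t value, int instance_type) {
      if (IsSmi(value)) return false;              // the JumpIfSmi branch
      return instance_type >= kFirstJSObjectType;  // the CmpObjectType branch
    }

    int main() {
      std::printf("%d %d\n",
                  IsSpecObject(42 << 1, 0),     // smi: false
                  IsSpecObject(0x1001, 0x90));  // JS object: true
      return 0;
    }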
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@ -7730,6 +7981,22 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
}
Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
int expected_height = frame()->height() - (is_contextual ? 1 : 2);
#endif
Result result = frame()->CallStoreIC(name, is_contextual);
// A test rax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test rax
// instruction here.
__ nop();
ASSERT_EQ(expected_height, frame()->height());
return result;
}
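The lone __ nop() in EmitNamedStore is load-bearing: the patching machinery treats a test-rax instruction right after the call as the marker for an inlined in-object store, so emitting a nop guarantees no stray marker appears. A simplified illustration of that byte-level protocol (0xA9 really is x86 "test eax, imm32", but the scanning logic here is a toy, not V8's):

    #include <cstdio>

    // Inspect the byte right after a call site to decide whether the
    // store was inlined (marker present) or not (nop emitted instead).
    bool WasInlined(const unsigned char* after_call) {
      return after_call[0] == 0xA9;  // test eax, imm32
    }

    int main() {
      unsigned char inlined_site[] = {0xA9, 0x00, 0x00, 0x00, 0x00};
      unsigned char plain_site[] = {0x90};  // the nop emitted above
      std::printf("%d %d\n", WasInlined(inlined_site), WasInlined(plain_site));
      return 0;
    }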
Result CodeGenerator::EmitKeyedLoad() {
#ifdef DEBUG
int original_height = frame()->height();
@ -10226,12 +10493,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
}
// Push arguments below the return address to prepare jump to builtin.
__ pop(rcx);
__ push(rax);
__ push(rdx);
__ push(rcx);
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
@ -10247,7 +10508,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ setcc(above, rax);
__ setcc(below, rcx);
__ subq(rax, rcx);
__ ret(2 * kPointerSize); // rax, rdx were pushed
__ ret(0);
// If one of the numbers was NaN, then the result is always false.
// The cc is never not-equal.
@ -10258,7 +10519,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else {
__ Set(rax, -1);
}
__ ret(2 * kPointerSize); // rax, rdx were pushed
__ ret(0);
// The number comparison code did not provide a valid result.
__ bind(&non_number_comparison);
@ -10273,7 +10534,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax (not rax) already holds a
// non-zero value, which indicates not equal, so just return.
__ ret(2 * kPointerSize);
__ ret(0);
}
__ bind(&check_for_strings);
@ -10324,14 +10585,12 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
__ ret(2 * kPointerSize); // rax, rdx were pushed
__ ret(0);
__ bind(&not_both_objects);
}
// must swap argument order
// Push arguments below the return address to prepare jump to builtin.
__ pop(rcx);
__ pop(rdx);
__ pop(rax);
__ push(rdx);
__ push(rax);
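The CompareStub changes above stop pushing rax/rdx below the return address, so every early exit switches from ret(2 * kPointerSize) to ret(0): x64 "ret n" pops the return address and then discards n bytes of arguments, and there is nothing left to discard. A plain C++ simulation of that semantics:

    #include <cstdio>
    #include <vector>

    static const int kPointerSize = 8;

    // Simulate "ret n": pop the return address, then drop n bytes of args.
    long Ret(std::vector<long>* stack, int arg_bytes) {
      long return_address = stack->back();
      stack->pop_back();
      for (int i = 0; i < arg_bytes / kPointerSize; i++) stack->pop_back();
      return return_address;
    }

    int main() {
      std::vector<long> stack;
      stack.push_back(111);     // rdx argument
      stack.push_back(222);     // rax argument
      stack.push_back(0x4000);  // return address on top
      long target = Ret(&stack, 2 * kPointerSize);  // old-style ret
      std::printf("return to %lx, %zu words left\n", target, stack.size());
      return 0;
    }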
@ -11483,7 +11742,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Make count the number of bytes to copy.
if (!ascii) {
ASSERT_EQ(2, sizeof(uc16)); // NOLINT
ASSERT_EQ(2, static_cast<int>(sizeof(uc16))); // NOLINT
__ addl(count, count);
}
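The static_cast in the hunk above is what keeps this file compiling under a warnings-as-errors build: sizeof() yields an unsigned size_t, and comparing it against a signed int literal inside the assert macro can trigger a sign-compare warning or an overload ambiguity. A minimal reproduction of the fix (the uc16 typedef is assumed to be a 2-byte code unit, as in the diff):

    #include <cstdio>

    typedef unsigned short uc16;  // assumed 2-byte code unit

    int main() {
      int expected = 2;
      // Cast to int before comparing, as ASSERT_EQ(2, static_cast<int>(...)).
      std::printf("%s\n",
                  expected == static_cast<int>(sizeof(uc16)) ? "ok" : "bad");
      return 0;
    }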
@ -11908,7 +12167,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
__ ret(2 * kPointerSize);
__ ret(0);
Label result_greater;
__ bind(&result_not_equal);
@ -11917,12 +12176,12 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
__ ret(2 * kPointerSize);
__ ret(0);
// Result is GREATER.
__ bind(&result_greater);
__ Move(rax, Smi::FromInt(GREATER));
__ ret(2 * kPointerSize);
__ ret(0);
}
@ -11952,6 +12211,10 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Inline comparison of ascii strings.
__ IncrementCounter(&Counters::string_compare_native, 1);
// Drop arguments from the stack
__ pop(rcx);
__ addq(rsp, Immediate(2 * kPointerSize));
__ push(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)

20
deps/v8/src/x64/codegen-x64.h

@ -454,9 +454,17 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Support for compiling assignment expressions.
void EmitSlotAssignment(Assignment* node);
void EmitNamedPropertyAssignment(Assignment* node);
// Receiver is passed on the frame and not consumed.
Result EmitNamedLoad(Handle<String> name, bool is_contextual);
// If the store is contextual, value is passed on the frame and consumed.
// Otherwise, receiver and value are passed on the frame and consumed.
Result EmitNamedStore(Handle<String> name, bool is_contextual);
// Load a property of an object, returning it in a Result.
// The object and the property name are passed on the stack, and
// not changed.
@ -521,6 +529,17 @@ class CodeGenerator: public AstVisitor {
Condition cc,
bool strict,
ControlDestination* destination);
// If at least one of the sides is a constant smi, generate optimized code.
void ConstantSmiComparison(Condition cc,
bool strict,
ControlDestination* destination,
Result* left_side,
Result* right_side,
bool left_side_constant_smi,
bool right_side_constant_smi,
bool is_loop_condition);
void GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
@ -578,6 +597,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);

19
deps/v8/src/x64/full-codegen-x64.cc

@ -1991,6 +1991,25 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
__ j(above_equal, if_true);
__ jmp(if_false);
Apply(context_, if_true, if_false);
}
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);

36
deps/v8/src/x64/ic-x64.cc

@ -418,28 +418,6 @@ bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Insert null as the map to check for to make sure the map check fails
// sending control flow to the IC instead of the inlined version.
PatchInlinedLoad(address, Heap::null_value());
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
PatchInlinedStore(address, Heap::null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
PatchInlinedStore(address, Heap::fixed_array_map());
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
@ -1630,14 +1608,6 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
const int LoadIC::kOffsetToLoadInstruction = 20;
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@ -1767,6 +1737,12 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// TODO(787): Implement inline stores on x64.
return false;
}
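For context on the deleted ClearInlinedVersion/RestoreInlinedVersion bodies: the inlined fast path embeds an expected map, and clearing patches in a value that can never match, so every check misses into the IC; restoring the real map re-enables the fast path. A toy model of that patching idea (structs and names are illustrative, not V8's):

    #include <cstdio>

    struct Map { int id; };
    struct InlineSite { const Map* expected_map; };

    static const Map kFixedArrayMap = {1};

    bool TryInlinedFastPath(const InlineSite& site, const Map* actual) {
      return site.expected_map == actual;  // the patched map check
    }

    int main() {
      InlineSite site = {&kFixedArrayMap};
      std::printf("fast path taken: %d\n",
                  TryInlinedFastPath(site, &kFixedArrayMap));
      site.expected_map = 0;  // "patch in null": guaranteed miss into the IC
      std::printf("after clearing: %d\n",
                  TryInlinedFastPath(site, &kFixedArrayMap));
      return 0;
    }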
void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value

20
deps/v8/src/x64/virtual-frame-x64.cc

@ -1168,6 +1168,26 @@ Result VirtualFrame::CallCommonStoreIC(Handle<Code> ic,
}
Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in rcx, value in rax, and receiver in rdx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result value = Pop();
if (is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(rax);
__ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
value.Unuse();
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&value, &receiver, rax, rdx);
}
__ Move(rcx, name);
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
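CallStoreIC implements the register contract spelled out in its comment: name in rcx, value in rax, and the receiver (or the global object, for contextual stores) in rdx. A plain-struct sketch of that contract (fields stand in for registers; not V8 code):

    #include <cstdio>
    #include <string>

    struct Registers { std::string rcx; int rax; int rdx; };

    void CallStoreIC(Registers* regs, const std::string& name,
                     int value, int receiver_or_global) {
      regs->rax = value;                // value in rax
      regs->rdx = receiver_or_global;   // receiver (or global) in rdx
      regs->rcx = name;                 // property name in rcx
      // ... jump to the StoreIC stub here ...
    }

    int main() {
      Registers regs = {"", 0, 0};
      CallStoreIC(&regs, "x", 42, 7);
      std::printf("rcx=%s rax=%d rdx=%d\n",
                  regs.rcx.c_str(), regs.rax, regs.rdx);
      return 0;
    }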
Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
int arg_count,
int loop_nesting) {

4
deps/v8/src/x64/virtual-frame-x64.h

@ -354,6 +354,10 @@ class VirtualFrame : public ZoneObject {
return CallCommonStoreIC(ic, &value, &name, &receiver);
}
// Call store IC. If the store is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are dropped.
Result CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All are dropped.
Result CallKeyedStoreIC() {

5
deps/v8/test/cctest/test-api.cc

@ -8015,9 +8015,10 @@ TEST(DontLeakGlobalObjects) {
v8::Persistent<v8::Object> some_object;
v8::Persistent<v8::Object> bad_handle;
void NewPersistentHandleCallback(v8::Persistent<v8::Value>, void*) {
void NewPersistentHandleCallback(v8::Persistent<v8::Value> handle, void*) {
v8::HandleScope scope;
bad_handle = v8::Persistent<v8::Object>::New(some_object);
handle.Dispose();
}
@ -8046,6 +8047,7 @@ v8::Persistent<v8::Object> to_be_disposed;
void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
to_be_disposed.Dispose();
i::Heap::CollectAllGarbage(false);
handle.Dispose();
}
@ -8070,6 +8072,7 @@ void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
void HandleCreatingCallback(v8::Persistent<v8::Value> handle, void*) {
v8::HandleScope scope;
v8::Persistent<v8::Object>::New(v8::Object::New());
handle.Dispose();
}

34
deps/v8/test/cctest/test-assembler-arm.cc

@ -310,4 +310,38 @@ TEST(5) {
}
}
TEST(6) {
// Test saturating instructions.
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
__ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
__ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
__ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0.
__ add(r0, r1, Operand(r2));
__ add(r0, r0, Operand(r3));
__ mov(pc, Operand(lr));
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
#endif
F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(
CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(382, res);
}
}
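TEST(6) expects 382: usat clamps the (optionally shifted) operand to the unsigned range [0, 2^bits - 1], so the three results are 0xFF, 0x7F, and 0. A host-side reference model that reproduces the expected value:

    #include <cstdint>
    #include <cstdio>

    // Reference model of ARM usat: clamp a signed value into [0, 2^bits - 1].
    uint32_t Usat(int32_t value, int bits) {
      int32_t max = (1 << bits) - 1;
      if (value < 0) return 0;
      if (value > max) return (uint32_t)max;
      return (uint32_t)value;
    }

    int main() {
      int32_t r0 = 0xFFFF;
      uint32_t r1 = Usat(r0, 8);                            // 0xFF
      uint32_t r2 = Usat(r0 >> 9, 12);                      // 0x7F
      uint32_t r3 = Usat((int32_t)((uint32_t)r0 << 16), 1); // negative -> 0
      std::printf("%u\n", r1 + r2 + r3);                    // expect 382
      return (r1 + r2 + r3 == 382) ? 0 : 1;
    }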
#undef __

9
deps/v8/test/cctest/test-disasm-arm.cc

@ -396,6 +396,15 @@ TEST(Type3) {
"e7df0f91 bfi r0, r1, #31, #1");
COMPARE(bfi(r1, r0, 31, 1),
"e7df1f90 bfi r1, r0, #31, #1");
COMPARE(usat(r0, 1, Operand(r1)),
"e6e10011 usat r0, #1, r1");
COMPARE(usat(r2, 7, Operand(lr)),
"e6e7201e usat r2, #7, lr");
COMPARE(usat(r3, 31, Operand(r4, LSL, 31)),
"e6ff3f94 usat r3, #31, r4, lsl #31");
COMPARE(usat(r8, 0, Operand(r5, ASR, 17)),
"e6e088d5 usat r8, #0, r5, asr #17");
}
VERIFY_RUN();

173
deps/v8/test/cctest/test-heap-profiler.cc

@ -56,8 +56,7 @@ class ConstructorHeapProfileTestHelper : public i::ConstructorHeapProfile {
TEST(ConstructorProfile) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
LocalContext env;
CompileAndRunScript(
"function F() {} // A constructor\n"
@ -144,8 +143,7 @@ static inline void CheckNonEqualsHelper(const char* file, int line,
TEST(ClustersCoarserSimple) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@ -183,8 +181,7 @@ TEST(ClustersCoarserSimple) {
TEST(ClustersCoarserMultipleConstructors) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@ -214,8 +211,7 @@ TEST(ClustersCoarserMultipleConstructors) {
TEST(ClustersCoarserPathsTraversal) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@ -267,8 +263,7 @@ TEST(ClustersCoarserPathsTraversal) {
TEST(ClustersCoarserSelf) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@ -362,8 +357,7 @@ class RetainerProfilePrinter : public RetainerHeapProfile::Printer {
TEST(RetainerProfile) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
LocalContext env;
CompileAndRunScript(
"function A() {}\n"
@ -431,8 +425,8 @@ class NamedEntriesDetector {
static const v8::HeapGraphNode* GetGlobalObject(
const v8::HeapSnapshot* snapshot) {
CHECK_EQ(1, snapshot->GetHead()->GetChildrenCount());
return snapshot->GetHead()->GetChild(0)->GetToNode();
CHECK_EQ(1, snapshot->GetRoot()->GetChildrenCount());
return snapshot->GetRoot()->GetChild(0)->GetToNode();
}
@ -449,6 +443,19 @@ static const v8::HeapGraphNode* GetProperty(const v8::HeapGraphNode* node,
}
static bool IsNodeRetainedAs(const v8::HeapGraphNode* node,
v8::HeapGraphEdge::Type type,
const char* name) {
for (int i = 0, count = node->GetRetainersCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = node->GetRetainer(i);
v8::String::AsciiValue prop_name(prop->GetName());
if (prop->GetType() == type && strcmp(name, *prop_name) == 0)
return true;
}
return false;
}
static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = node->GetChild(i);
@ -464,11 +471,9 @@ static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
TEST(HeapSnapshot) {
v8::HandleScope scope;
v8::Handle<v8::String> token1 = v8::String::New("token1");
v8::Handle<v8::Context> env1 = v8::Context::New();
LocalContext env1;
env1->SetSecurityToken(token1);
env1->Enter();
CompileAndRunScript(
"function A1() {}\n"
@ -479,9 +484,8 @@ TEST(HeapSnapshot) {
"var c1 = new C1(a1);");
v8::Handle<v8::String> token2 = v8::String::New("token2");
v8::Handle<v8::Context> env2 = v8::Context::New();
LocalContext env2;
env2->SetSecurityToken(token2);
env2->Enter();
CompileAndRunScript(
"function A2() {}\n"
@ -569,8 +573,7 @@ TEST(HeapSnapshot) {
TEST(HeapSnapshotCodeObjects) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
LocalContext env;
CompileAndRunScript(
"function lazy(x) { return x - 1; }\n"
@ -625,4 +628,132 @@ TEST(HeapSnapshotCodeObjects) {
CHECK(!lazy_references_x);
}
// Trying to introduce a check helper for uint64_t causes many
// overloading ambiguities, so it seems easier just to cast
// them to a signed type.
#define CHECK_EQ_UINT64_T(a, b) \
CHECK_EQ(static_cast<int64_t>(a), static_cast<int64_t>(b))
#define CHECK_NE_UINT64_T(a, b) do \
{ \
bool ne = a != b; \
CHECK(ne); \
} while (false)
TEST(HeapEntryIdsAndGC) {
v8::HandleScope scope;
LocalContext env;
CompileAndRunScript(
"function A() {}\n"
"function B(x) { this.x = x; }\n"
"var a = new A();\n"
"var b = new B(a);");
const v8::HeapSnapshot* snapshot1 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("s1"));
i::Heap::CollectAllGarbage(true); // Enforce compaction.
const v8::HeapSnapshot* snapshot2 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("s2"));
const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
CHECK_NE_UINT64_T(0, global1->GetId());
CHECK_EQ_UINT64_T(global1->GetId(), global2->GetId());
const v8::HeapGraphNode* A1 =
GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "A");
const v8::HeapGraphNode* A2 =
GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "A");
CHECK_NE_UINT64_T(0, A1->GetId());
CHECK_EQ_UINT64_T(A1->GetId(), A2->GetId());
const v8::HeapGraphNode* B1 =
GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "B");
const v8::HeapGraphNode* B2 =
GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "B");
CHECK_NE_UINT64_T(0, B1->GetId());
CHECK_EQ_UINT64_T(B1->GetId(), B2->GetId());
const v8::HeapGraphNode* a1 =
GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "a");
const v8::HeapGraphNode* a2 =
GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "a");
CHECK_NE_UINT64_T(0, a1->GetId());
CHECK_EQ_UINT64_T(a1->GetId(), a2->GetId());
const v8::HeapGraphNode* b1 =
GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "b");
const v8::HeapGraphNode* b2 =
GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "b");
CHECK_NE_UINT64_T(0, b1->GetId());
CHECK_EQ_UINT64_T(b1->GetId(), b2->GetId());
}
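HeapEntryIdsAndGC checks that snapshot IDs survive a compacting collection: the same object must report the same ID in s1 and s2 even after the GC moves it. A toy ID registry showing how such stability can be maintained across moves (the class is hypothetical, not the profiler's implementation):

    #include <cstdio>
    #include <map>

    typedef unsigned long long SnapshotId;

    // Hand out an ID on first sight; re-key it when the GC moves the
    // object so later snapshots keep seeing the same ID.
    class IdMap {
     public:
      IdMap() : next_id_(1) {}
      SnapshotId GetId(const void* obj) {
        std::map<const void*, SnapshotId>::iterator it = ids_.find(obj);
        if (it != ids_.end()) return it->second;
        return ids_[obj] = next_id_++;
      }
      void Moved(const void* from, const void* to) {  // compaction hook
        ids_[to] = ids_[from];
        ids_.erase(from);
      }
     private:
      std::map<const void*, SnapshotId> ids_;
      SnapshotId next_id_;
    };

    int main() {
      int a, b;  // two addresses standing in for heap locations
      IdMap ids;
      SnapshotId id1 = ids.GetId(&a);  // snapshot s1
      ids.Moved(&a, &b);               // compacting GC relocates the object
      SnapshotId id2 = ids.GetId(&b);  // snapshot s2
      std::printf("%llu == %llu\n", id1, id2);
      return id1 == id2 ? 0 : 1;
    }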
TEST(HeapSnapshotsDiff) {
v8::HandleScope scope;
LocalContext env;
CompileAndRunScript(
"function A() {}\n"
"function B(x) { this.x = x; }\n"
"var a = new A();\n"
"var b = new B(a);");
const v8::HeapSnapshot* snapshot1 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("s1"));
CompileAndRunScript(
"delete a;\n"
"b.x = null;\n"
"var a = new A();\n"
"var b2 = new B(a);");
const v8::HeapSnapshot* snapshot2 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("s2"));
const v8::HeapSnapshotsDiff* diff = snapshot1->CompareWith(snapshot2);
// Verify additions: ensure that addition of A and B was detected.
const v8::HeapGraphNode* additions_root = diff->GetAdditionsRoot();
bool found_A = false, found_B = false;
uint64_t s1_A_id = 0;
for (int i = 0, count = additions_root->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = additions_root->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
if (node->GetType() == v8::HeapGraphNode::OBJECT) {
v8::String::AsciiValue node_name(node->GetName());
if (strcmp(*node_name, "A") == 0) {
CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
CHECK(!found_A);
found_A = true;
s1_A_id = node->GetId();
} else if (strcmp(*node_name, "B") == 0) {
CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "b2"));
CHECK(!found_B);
found_B = true;
}
}
}
CHECK(found_A);
CHECK(found_B);
// Verify deletions: ensure that deletion of A was detected.
const v8::HeapGraphNode* deletions_root = diff->GetDeletionsRoot();
bool found_A_del = false;
uint64_t s2_A_id = 0;
for (int i = 0, count = deletions_root->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = deletions_root->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
if (node->GetType() == v8::HeapGraphNode::OBJECT) {
v8::String::AsciiValue node_name(node->GetName());
if (strcmp(*node_name, "A") == 0) {
CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
CHECK(!found_A_del);
found_A_del = true;
s2_A_id = node->GetId();
}
}
}
CHECK(found_A_del);
CHECK_NE_UINT64_T(0, s1_A_id);
CHECK(s1_A_id != s2_A_id);
}
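HeapSnapshotsDiff relies on those stable IDs: conceptually, the additions root holds entries whose IDs appear only in the newer snapshot, and the deletions root those that appear only in the older one. A set-difference sketch of that idea (sample IDs are illustrative):

    #include <algorithm>
    #include <cstdio>
    #include <iterator>
    #include <set>
    #include <vector>

    int main() {
      const int s1_ids[] = {1, 2, 3};      // IDs seen in snapshot 1
      const int s2_ids[] = {2, 3, 4, 5};   // IDs seen in snapshot 2
      std::set<int> s1(s1_ids, s1_ids + 3);
      std::set<int> s2(s2_ids, s2_ids + 4);
      std::vector<int> added, deleted;
      // Additions: in s2 but not s1. Deletions: in s1 but not s2.
      std::set_difference(s2.begin(), s2.end(), s1.begin(), s1.end(),
                          std::back_inserter(added));
      std::set_difference(s1.begin(), s1.end(), s2.begin(), s2.end(),
                          std::back_inserter(deleted));
      std::printf("added=%zu deleted=%zu\n", added.size(), deleted.size());
      return 0;
    }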
#endif // ENABLE_LOGGING_AND_PROFILING

13
deps/v8/test/cctest/test-heap.cc

@ -322,8 +322,8 @@ static bool WeakPointerCleared = false;
static void TestWeakGlobalHandleCallback(v8::Persistent<v8::Value> handle,
void* id) {
USE(handle);
if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
handle.Dispose();
}
@ -398,17 +398,8 @@ TEST(WeakGlobalHandlesMark) {
CHECK(WeakPointerCleared);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(GlobalHandles::IsNearDeath(h2.location()));
GlobalHandles::Destroy(h1.location());
GlobalHandles::Destroy(h2.location());
}
static void TestDeleteWeakGlobalHandleCallback(
v8::Persistent<v8::Value> handle,
void* id) {
if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
handle.Dispose();
}
TEST(DeleteWeakGlobalHandle) {
@ -427,7 +418,7 @@ TEST(DeleteWeakGlobalHandle) {
GlobalHandles::MakeWeak(h.location(),
reinterpret_cast<void*>(1234),
&TestDeleteWeakGlobalHandleCallback);
&TestWeakGlobalHandleCallback);
// Scavenge does not recognize weak references.
Heap::PerformScavenge();

1
deps/v8/test/cctest/test-mark-compact.cc

@ -273,6 +273,7 @@ TEST(GCCallback) {
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(v8::Persistent<v8::Value> handle, void* id) {
NumberOfWeakCalls++;
handle.Dispose();
}
TEST(ObjectGroups) {

57
deps/v8/test/mjsunit/for-in-special-cases.js

@ -62,3 +62,60 @@ for (var j = 0; j < 10; ++j) {
assertEquals(10, i);
assertEquals(10, j);
function Accumulate(x) {
var accumulator = "";
for (var i in x) {
accumulator += i;
}
return accumulator;
}
for (var i = 0; i < 3; ++i) {
var elements = Accumulate("abcd");
// We do not assume that for-in enumerates elements in order.
assertTrue(-1 != elements.indexOf("0"));
assertTrue(-1 != elements.indexOf("1"));
assertTrue(-1 != elements.indexOf("2"));
assertTrue(-1 != elements.indexOf("3"));
assertEquals(4, elements.length);
}
function for_in_string_prototype() {
var x = new String("abc");
x.foo = 19;
function B() {
this.bar = 5;
this[7] = 4;
}
B.prototype = x;
var y = new B();
y.gub = 13;
var elements = Accumulate(y);
var elements1 = Accumulate(y);
// If for-in returns elements in a different order on multiple calls, this
// assert will fail. If that happens, consider whether that behavior is OK.
assertEquals(elements, elements1, "For-in elements not the same both times.");
// We do not assume that for-in enumerates elements in order.
assertTrue(-1 != elements.indexOf("0"));
assertTrue(-1 != elements.indexOf("1"));
assertTrue(-1 != elements.indexOf("2"));
assertTrue(-1 != elements.indexOf("7"));
assertTrue(-1 != elements.indexOf("foo"));
assertTrue(-1 != elements.indexOf("bar"));
assertTrue(-1 != elements.indexOf("gub"));
assertEquals(13, elements.length);
elements = Accumulate(x);
assertTrue(-1 != elements.indexOf("0"));
assertTrue(-1 != elements.indexOf("1"));
assertTrue(-1 != elements.indexOf("2"));
assertTrue(-1 != elements.indexOf("foo"));
assertEquals(6, elements.length);
}
for_in_string_prototype();
for_in_string_prototype();

49
deps/v8/tools/gc-nvp-trace-processor.py

@ -47,8 +47,12 @@ def flatten(l):
def split_nvp(s):
t = {}
for m in re.finditer(r"(\w+)=(-?\d+)", s):
t[m.group(1)] = int(m.group(2))
for (name, value) in re.findall(r"(\w+)=([-\w]+)", s):
try:
t[name] = int(value)
except ValueError:
t[name] = value
return t
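split_nvp now accepts non-numeric values such as gc=s: it tries int() first and falls back to keeping the raw string. A rough C++ rendition of the same parse-then-fallback behavior (the sample line and layout are illustrative):

    #include <cstdio>
    #include <cstdlib>
    #include <map>
    #include <sstream>
    #include <string>

    int main() {
      std::string line = "pause=10 gc=s total_size_after=12345";
      std::map<std::string, std::string> raw;
      std::istringstream in(line);
      std::string token;
      while (in >> token) {  // split "name=value" pairs on whitespace
        size_t eq = token.find('=');
        if (eq != std::string::npos)
          raw[token.substr(0, eq)] = token.substr(eq + 1);
      }
      for (std::map<std::string, std::string>::iterator it = raw.begin();
           it != raw.end(); ++it) {
        char* end = 0;
        long n = std::strtol(it->second.c_str(), &end, 10);
        if (*end == '\0')  // whole value parsed as an integer
          std::printf("%s -> int %ld\n", it->first.c_str(), n);
        else               // keep it as a string, e.g. gc=s
          std::printf("%s -> str %s\n", it->first.c_str(), it->second.c_str());
      }
      return 0;
    }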
def parse_gc_trace(input):
@ -211,6 +215,9 @@ def plot_all(plots, trace, prefix):
def reclaimed_bytes(row):
return row['total_size_before'] - row['total_size_after']
def other_scope(r):
return r['pause'] - r['mark'] - r['sweep'] - r['compact'] - r['flushcode']
plots = [
[
Set('style fill solid 0.5 noborder'),
@ -219,9 +226,8 @@ plots = [
Plot(Item('Marking', 'mark', lc = 'purple'),
Item('Sweep', 'sweep', lc = 'blue'),
Item('Compaction', 'compact', lc = 'red'),
Item('Other',
lambda r: r['pause'] - r['mark'] - r['sweep'] - r['compact'],
lc = 'grey'))
Item('Flush Code', 'flushcode', lc = 'yellow'),
Item('Other', other_scope, lc = 'grey'))
],
[
Set('style histogram rowstacked'),
@ -256,19 +262,48 @@ plots = [
],
]
def calc_total(trace, field):
return reduce(lambda t,r: t + r[field], trace, 0)
def calc_max(trace, field):
return reduce(lambda t,r: max(t, r[field]), trace, 0)
def process_trace(filename):
trace = parse_gc_trace(filename)
total_gc = reduce(lambda t,r: t + r['pause'], trace, 0)
max_gc = reduce(lambda t,r: max(t, r['pause']), trace, 0)
total_gc = calc_total(trace, 'pause')
max_gc = calc_max(trace, 'pause')
avg_gc = total_gc / len(trace)
total_sweep = calc_total(trace, 'sweep')
max_sweep = calc_max(trace, 'sweep')
total_mark = calc_total(trace, 'mark')
max_mark = calc_max(trace, 'mark')
scavenges = filter(lambda r: r['gc'] == 's', trace)
total_scavenge = calc_total(scavenges, 'pause')
max_scavenge = calc_max(scavenges, 'pause')
avg_scavenge = total_scavenge / len(scavenges)
charts = plot_all(plots, trace, filename)
with open(filename + '.html', 'w') as out:
out.write('<html><body>')
out.write('<table><tr><td>')
out.write('Total in GC: <b>%d</b><br/>' % total_gc)
out.write('Max in GC: <b>%d</b><br/>' % max_gc)
out.write('Avg in GC: <b>%d</b><br/>' % avg_gc)
out.write('</td><td>')
out.write('Total in Scavenge: <b>%d</b><br/>' % total_scavenge)
out.write('Max in Scavenge: <b>%d</b><br/>' % max_scavenge)
out.write('Avg in Scavenge: <b>%d</b><br/>' % avg_scavenge)
out.write('</td><td>')
out.write('Total in Sweep: <b>%d</b><br/>' % total_sweep)
out.write('Max in Sweep: <b>%d</b><br/>' % max_sweep)
out.write('</td><td>')
out.write('Total in Mark: <b>%d</b><br/>' % total_mark)
out.write('Max in Mark: <b>%d</b><br/>' % max_mark)
out.write('</td></tr></table>')
for chart in charts:
out.write('<img src="%s">' % chart)
out.write('</body></html>')

10
deps/v8/tools/gyp/v8.gyp

@ -181,6 +181,11 @@
'defines': [
'BUILDING_V8_SHARED'
],
'direct_dependent_settings': {
'defines': [
'USING_V8_SHARED',
],
},
},
{
'type': 'none',
@ -738,11 +743,6 @@
# This could be gotten by not setting chromium_code, if that's OK.
'defines': ['_CRT_SECURE_NO_WARNINGS'],
}],
['OS=="win" and component=="shared_library"', {
'defines': [
'USING_V8_SHARED',
],
}],
],
},
],
