
Upgrade V8 to 2.2.19

v0.7.4-release
Ryan Dahl, 15 years ago
commit 2c0d91be6c
72 changed files:

 16  deps/v8/ChangeLog
  4  deps/v8/include/v8-profiler.h
  8  deps/v8/include/v8.h
  4  deps/v8/src/api.cc
  5  deps/v8/src/arm/assembler-arm-inl.h
 62  deps/v8/src/arm/assembler-arm.cc
 20  deps/v8/src/arm/assembler-arm.h
140  deps/v8/src/arm/codegen-arm.cc
  4  deps/v8/src/arm/codegen-arm.h
  3  deps/v8/src/arm/constants-arm.h
 22  deps/v8/src/arm/disasm-arm.cc
  3  deps/v8/src/arm/fast-codegen-arm.cc
 25  deps/v8/src/arm/full-codegen-arm.cc
 17  deps/v8/src/arm/ic-arm.cc
 71  deps/v8/src/arm/macro-assembler-arm.cc
 15  deps/v8/src/arm/macro-assembler-arm.h
  9  deps/v8/src/arm/simulator-arm.cc
  8  deps/v8/src/arm/stub-cache-arm.cc
  1  deps/v8/src/bootstrapper.cc
  8  deps/v8/src/checks.h
  6  deps/v8/src/debug-debugger.js
  8  deps/v8/src/debug.cc
  4  deps/v8/src/debug.h
114  deps/v8/src/execution.cc
 13  deps/v8/src/execution.h
  4  deps/v8/src/flag-definitions.h
  8  deps/v8/src/heap-profiler.cc
  7  deps/v8/src/heap-profiler.h
 53  deps/v8/src/heap.cc
  2  deps/v8/src/heap.h
  4  deps/v8/src/ia32/assembler-ia32-inl.h
 16  deps/v8/src/ia32/assembler-ia32.cc
 10  deps/v8/src/ia32/assembler-ia32.h
 28  deps/v8/src/ia32/codegen-ia32.cc
  7  deps/v8/src/ia32/stub-cache-ia32.cc
 22  deps/v8/src/ic.cc
  6  deps/v8/src/jsregexp.cc
  2  deps/v8/src/objects-debug.cc
 19  deps/v8/src/objects-inl.h
 18  deps/v8/src/objects.cc
 52  deps/v8/src/objects.h
 47  deps/v8/src/profile-generator.cc
  9  deps/v8/src/profile-generator.h
 47  deps/v8/src/runtime.cc
  3  deps/v8/src/scanner.cc
109  deps/v8/src/scanner.h
 12  deps/v8/src/scopeinfo.cc
  3  deps/v8/src/scopeinfo.h
  1  deps/v8/src/stub-cache.cc
  2  deps/v8/src/version.cc
  5  deps/v8/src/x64/assembler-x64-inl.h
 25  deps/v8/src/x64/assembler-x64.cc
 10  deps/v8/src/x64/assembler-x64.h
533  deps/v8/src/x64/codegen-x64.cc
 59  deps/v8/src/x64/ic-x64.cc
167  deps/v8/src/x64/macro-assembler-x64.cc
 11  deps/v8/src/x64/macro-assembler-x64.h
 39  deps/v8/src/x64/stub-cache-x64.cc
 18  deps/v8/src/x64/virtual-frame-x64.cc
  8  deps/v8/test/cctest/test-api.cc
 53  deps/v8/test/cctest/test-debug.cc
 27  deps/v8/test/cctest/test-disasm-arm.cc
  4  deps/v8/test/cctest/test-disasm-ia32.cc
  2  deps/v8/test/cctest/test-func-name-inference.cc
147  deps/v8/test/cctest/test-heap-profiler.cc
  4  deps/v8/test/cctest/test-liveedit.cc
 10  deps/v8/test/cctest/test-serialize.cc
 15  deps/v8/test/mjsunit/apply.js
  8  deps/v8/test/mjsunit/debug-setbreakpoint.js
  3  deps/v8/test/mjsunit/mjsunit.status
 56  deps/v8/test/mjsunit/regress/regress-747.js
 95  deps/v8/test/mjsunit/string-externalize.js

16  deps/v8/ChangeLog

@@ -1,3 +1,19 @@
+2010-06-23: Version 2.2.19
+
+        Fix bug that causes the build to break when profillingsupport=off
+        (issue 738).
+
+        Added expose-externalize-string flag for testing extensions.
+
+        Resolve linker issues with using V8 as a DLL causing a number of
+        problems with unresolved symbols.
+
+        Fix build failure for cctests when ENABLE_DEBUGGER_SUPPORT is not
+        defined.
+
+        Performance improvements on all platforms.
+
+
 2010-06-16: Version 2.2.18

         Added API functions to retrieve information on indexed properties

4  deps/v8/include/v8-profiler.h

@@ -196,7 +196,9 @@ class V8EXPORT HeapGraphEdge {
   enum Type {
     CONTEXT_VARIABLE = 0,  // A variable from a function context.
     ELEMENT = 1,           // An element of an array.
-    PROPERTY = 2           // A named object property.
+    PROPERTY = 2,          // A named object property.
+    INTERNAL = 3           // A link that can't be accessed from JS,
+                           // thus, its name isn't a real property name.
   };

   /** Returns edge type (see HeapGraphEdge::Type). */

8  deps/v8/include/v8.h

@@ -3211,11 +3211,9 @@ class Internals {
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;

-  // These constants are compiler dependent so their values must be
-  // defined within the implementation.
-  V8EXPORT static int kJSObjectType;
-  V8EXPORT static int kFirstNonstringType;
-  V8EXPORT static int kProxyType;
+  static const int kJSObjectType = 0x9f;
+  static const int kFirstNonstringType = 0x80;
+  static const int kProxyType = 0x85;

   static inline bool HasHeapObjectTag(internal::Object* value) {
     return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==

4  deps/v8/src/api.cc

@@ -106,9 +106,6 @@ static i::HandleScopeImplementer thread_local;
 static FatalErrorCallback exception_behavior = NULL;

-int i::Internals::kJSObjectType = JS_OBJECT_TYPE;
-int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE;
-int i::Internals::kProxyType = PROXY_TYPE;

 static void DefaultFatalErrorHandler(const char* location,
                                      const char* message) {

@@ -4460,6 +4457,7 @@ Handle<Value> HeapGraphEdge::GetName() const {
       reinterpret_cast<const i::HeapGraphEdge*>(this);
   switch (edge->type()) {
     case i::HeapGraphEdge::CONTEXT_VARIABLE:
+    case i::HeapGraphEdge::INTERNAL:
     case i::HeapGraphEdge::PROPERTY:
       return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
           edge->name())));

5  deps/v8/src/arm/assembler-arm-inl.h

@@ -45,11 +45,6 @@
 namespace v8 {
 namespace internal {

-Condition NegateCondition(Condition cc) {
-  ASSERT(cc != al);
-  return static_cast<Condition>(cc ^ ne);
-}
-

 void RelocInfo::apply(intptr_t delta) {
   if (RelocInfo::IsInternalReference(rmode_)) {

62  deps/v8/src/arm/assembler-arm.cc

@@ -282,6 +282,11 @@ const Instr kBlxRegPattern =
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
+const Instr kMovLeaveCCMask = 0xdff * B16;
+const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwMask = 0xff * B20;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovwLeaveCCFlip = 0x5 * B21;
 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
 const Instr kCmpCmnPattern = 0x15 * B20;
 const Instr kCmpCmnFlip = B21;

@@ -389,6 +394,12 @@ void Assembler::Align(int m) {
 }

+void Assembler::CodeTargetAlign() {
+  // Preferred alignment of jump targets on some ARM chips.
+  Align(8);
+}
+

 bool Assembler::IsNop(Instr instr, int type) {
   // Check for mov rx, rx.
   ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.

@@ -640,6 +651,12 @@ void Assembler::next(Label* L) {
 }

+static Instr EncodeMovwImmediate(uint32_t immediate) {
+  ASSERT(immediate < 0x10000);
+  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+

 // Low-level code emission routines depending on the addressing mode.
 // If this returns true then you have to use the rotate_imm and immed_8
 // that it returns, because it may have already changed the instruction

@@ -664,6 +681,15 @@ static bool fits_shifter(uint32_t imm32,
     if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
       *instr ^= kMovMvnFlip;
       return true;
+    } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
+      if (CpuFeatures::IsSupported(ARMv7)) {
+        if (imm32 < 0x10000) {
+          *instr ^= kMovwLeaveCCFlip;
+          *instr |= EncodeMovwImmediate(imm32);
+          *rotate_imm = *immed_8 = 0;  // Not used for movw.
+          return true;
+        }
+      }
     }
   } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
     if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {

@@ -695,7 +721,7 @@ static bool fits_shifter(uint32_t imm32,
 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
 // space.  There is no guarantee that the relocated location can be similarly
 // encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
+static bool MustUseConstantPool(RelocInfo::Mode rmode) {
   if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
     if (!Serializer::enabled()) {

@@ -712,7 +738,7 @@ static bool MustUseConstantPool(RelocInfo::Mode rmode) {
 bool Operand::is_single_instruction() const {
   if (rm_.is_valid()) return true;
-  if (MustUseIp(rmode_)) return false;
+  if (MustUseConstantPool(rmode_)) return false;
   uint32_t dummy1, dummy2;
   return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
 }

@@ -728,19 +754,34 @@ void Assembler::addrmod1(Instr instr,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (MustUseIp(x.rmode_) ||
+    if (MustUseConstantPool(x.rmode_) ||
         !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
       // However, if the original instruction is a 'mov rd, x' (not setting the
       // condition code), then replace it with a 'ldr rd, [pc]'.
-      RecordRelocInfo(x.rmode_, x.imm32_);
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = static_cast<Condition>(instr & CondMask);
       if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
-        ldr(rd, MemOperand(pc, 0), cond);
+        if (MustUseConstantPool(x.rmode_) ||
+            !CpuFeatures::IsSupported(ARMv7)) {
+          RecordRelocInfo(x.rmode_, x.imm32_);
+          ldr(rd, MemOperand(pc, 0), cond);
+        } else {
+          // Will probably use movw, will certainly not use constant pool.
+          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
+          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+        }
       } else {
-        ldr(ip, MemOperand(pc, 0), cond);
+        // If this is not a mov or mvn instruction we may still be able to
+        // avoid a constant pool entry by using mvn or movw.
+        if (!MustUseConstantPool(x.rmode_) &&
+            (instr & kMovMvnMask) != kMovMvnPattern) {
+          mov(ip, x, LeaveCC, cond);
+        } else {
+          RecordRelocInfo(x.rmode_, x.imm32_);
+          ldr(ip, MemOperand(pc, 0), cond);
+        }
         addrmod1(instr, rn, rd, Operand(ip));
       }
       return;

@@ -1051,6 +1092,17 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
 }

+void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
+  ASSERT(immediate < 0x10000);
+  mov(reg, Operand(immediate), LeaveCC, cond);
+}
+
+
+void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+}
+

 void Assembler::bic(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
   addrmod1(cond | 14*B21 | s, src1, dst, src2);

@@ -1231,7 +1283,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (MustUseIp(src.rmode_) ||
+    if (MustUseConstantPool(src.rmode_) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
       // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
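
The interesting part of this file is the new ARMv7 fast path: a 32-bit constant that used to require a constant-pool load (ldr rd, [pc, #offset]) can now be materialized with a movw/movt pair, and a 16-bit constant with a single movw. A minimal, self-contained C++ sketch of that split and of the EncodeMovwImmediate packing shown above; the sample value is arbitrary and not taken from the commit:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirrors Assembler::EncodeMovwImmediate: a 16-bit immediate is stored as
// imm4 (instruction bits 19-16) and imm12 (bits 11-0).
static uint32_t EncodeMovwImmediate(uint32_t immediate) {
  assert(immediate < 0x10000);
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}

int main() {
  uint32_t value = 0xdeadbeef;     // arbitrary 32-bit constant
  uint32_t low  = value & 0xffff;  // movw rd, #0xbeef (sets the low half)
  uint32_t high = value >> 16;     // movt rd, #0xdead (sets the high half)
  printf("movw imm field: 0x%05x\n", EncodeMovwImmediate(low));
  printf("movt imm field: 0x%05x\n", EncodeMovwImmediate(high));
  return 0;
}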

20  deps/v8/src/arm/assembler-arm.h

@@ -279,7 +279,10 @@ enum Condition {

 // Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
+inline Condition NegateCondition(Condition cc) {
+  ASSERT(cc != al);
+  return static_cast<Condition>(cc ^ ne);
+}

 // Corresponds to transposing the operands of a comparison.

@@ -545,6 +548,12 @@ extern const Instr kMovMvnMask;
 extern const Instr kMovMvnPattern;
 extern const Instr kMovMvnFlip;
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
 extern const Instr kCmpCmnMask;
 extern const Instr kCmpCmnPattern;
 extern const Instr kCmpCmnFlip;

@@ -694,6 +703,8 @@ class Assembler : public Malloced {
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2 (>= 4).
   void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();

   // Branch instructions
   void b(int branch_offset, Condition cond = al);

@@ -772,6 +783,13 @@ class Assembler : public Malloced {
     mov(dst, Operand(src), s, cond);
   }

+  // ARMv7 instructions for loading a 32 bit immediate in two instructions.
+  // This may actually emit a different mov instruction, but on an ARMv7 it
+  // is guaranteed to only emit one instruction.
+  void movw(Register reg, uint32_t immediate, Condition cond = al);
+  // The constant for movt should be in the range 0-0xffff.
+  void movt(Register reg, uint32_t immediate, Condition cond = al);
+
   void bic(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
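
Moving NegateCondition into the header also exposes why it is a single XOR: ARM condition codes occupy bits 31-28 of every instruction and come in complementary pairs (eq/ne, cs/cc, mi/pl, and so on) that differ only in the low bit of that 4-bit field, so XOR-ing with ne (encoded as 1 << 28) negates any condition except al. A small C++ sketch with a subset of the codes:

#include <cstdint>
#include <cstdio>

// A few of the ARM condition codes, as encoded in instruction bits 31-28.
enum Condition : uint32_t {
  eq = 0u << 28, ne = 1u << 28,   // equal / not equal
  cs = 2u << 28, cc = 3u << 28,   // carry set / carry clear
  mi = 4u << 28, pl = 5u << 28,   // negative / positive or zero
  al = 14u << 28                  // always (has no complement used here)
};

static Condition NegateCondition(Condition cond) {
  return static_cast<Condition>(cond ^ ne);  // flip the field's low bit
}

int main() {
  printf("negate(eq) == ne: %d\n", NegateCondition(eq) == ne);
  printf("negate(cs) == cc: %d\n", NegateCondition(cs) == cc);
  return 0;
}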

140  deps/v8/src/arm/codegen-arm.cc

@@ -268,8 +268,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
           // Load the offset into r3.
           int slot_offset =
               FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          __ mov(r3, Operand(slot_offset));
-          __ RecordWrite(r2, r3, r1);
+          __ RecordWrite(r2, Operand(slot_offset), r3, r1);
         }
       }
     }

@@ -3109,9 +3108,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
     exit.Branch(eq);
     // scratch is loaded with context when calling SlotOperand above.
     int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-    __ mov(r3, Operand(offset));
     // r1 could be identical with tos, but that doesn't matter.
-    __ RecordWrite(scratch, r3, r1);
+    __ RecordWrite(scratch, Operand(offset), r3, r1);
   }
   // If we definitely did not jump over the assignment, we do not need
   // to bind the exit label.  Doing so can defeat peephole

@@ -3464,8 +3462,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
     __ str(r0, FieldMemOperand(r1, offset));

     // Update the write barrier for the array address.
-    __ mov(r3, Operand(offset));
-    __ RecordWrite(r1, r3, r2);
+    __ RecordWrite(r1, Operand(offset), r3, r2);
   }
   ASSERT_EQ(original_height + 1, frame_->height());
 }

@@ -4279,8 +4276,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
   // Store the value.
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
   // Update the write barrier.
-  __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
-  __ RecordWrite(r1, r2, r3);
+  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
   // Leave.
   leave.Bind();
   frame_->EmitPush(r0);

@@ -4710,7 +4706,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;

-  __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
   __ jmp(&heapnumber_allocated);

   __ bind(&slow_allocate_heapnumber);

@@ -7207,7 +7204,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {

 void RecordWriteStub::Generate(MacroAssembler* masm) {
-  __ RecordWriteHelper(object_, offset_, scratch_);
+  __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
   __ Ret();
 }

@@ -7367,12 +7364,16 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;

   ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));

+  Register heap_number_map = r6;
+
   if (ShouldGenerateSmiCode()) {
+    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
     // Smi-smi case (overflow).
     // Since both are Smis there is no heap number to overwrite, so allocate.
-    // The new heap number is in r5.  r6 and r7 are scratch.
-    __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
+    // The new heap number is in r5.  r3 and r7 are scratch.
+    __ AllocateHeapNumber(
+        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);

     // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
     // using registers d7 and d6 for the double values.

@@ -7385,14 +7386,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
       __ vmov(s13, r7);
       __ vcvt_f64_s32(d6, s13);
     } else {
-      // Write Smi from rhs to r3 and r2 in double format.  r6 is scratch.
+      // Write Smi from rhs to r3 and r2 in double format.  r3 is scratch.
       __ mov(r7, Operand(rhs));
-      ConvertToDoubleStub stub1(r3, r2, r7, r6);
+      ConvertToDoubleStub stub1(r3, r2, r7, r9);
       __ push(lr);
       __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-      // Write Smi from lhs to r1 and r0 in double format.  r6 is scratch.
+      // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch.
       __ mov(r7, Operand(lhs));
-      ConvertToDoubleStub stub2(r1, r0, r7, r6);
+      ConvertToDoubleStub stub2(r1, r0, r7, r9);
       __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
       __ pop(lr);
     }

@@ -7401,6 +7402,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   // We branch here if at least one of r0 and r1 is not a Smi.
   __ bind(not_smi);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

   // After this point we have the left hand side in r1 and the right hand side
   // in r0.

@@ -7423,18 +7425,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
       default:
         break;
     }
+    // Restore heap number map register.
+    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   }

   if (mode_ == NO_OVERWRITE) {
     // In the case where there is no chance of an overwritable float we may as
     // well do the allocation immediately while r0 and r1 are untouched.
-    __ AllocateHeapNumber(r5, r6, r7, &slow);
+    __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
   }

   // Move r0 to a double in r2-r3.
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+  __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
   if (mode_ == OVERWRITE_RIGHT) {
     __ mov(r5, Operand(r0));  // Overwrite this heap number.

@@ -7452,7 +7458,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   __ bind(&r0_is_smi);
   if (mode_ == OVERWRITE_RIGHT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
-    __ AllocateHeapNumber(r5, r6, r7, &slow);
+    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
   }

   if (use_fp_registers) {

@@ -7464,7 +7470,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   } else {
     // Write Smi from r0 to r3 and r2 in double format.
     __ mov(r7, Operand(r0));
-    ConvertToDoubleStub stub3(r3, r2, r7, r6);
+    ConvertToDoubleStub stub3(r3, r2, r7, r4);
     __ push(lr);
     __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
     __ pop(lr);

@@ -7477,6 +7483,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
     __ tst(r1, Operand(kSmiTagMask));
     __ b(ne, &r1_is_not_smi);
     GenerateTypeTransition(masm);
+    // Restore heap number map register.
+    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
     __ jmp(&r1_is_smi);
   }

@@ -7486,7 +7494,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   __ tst(r1, Operand(kSmiTagMask));
   __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
   __ bind(&r1_is_not_smi);
-  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+  __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
   if (mode_ == OVERWRITE_LEFT) {
     __ mov(r5, Operand(r1));  // Overwrite this heap number.

@@ -7504,7 +7514,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   __ bind(&r1_is_smi);
   if (mode_ == OVERWRITE_LEFT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
-    __ AllocateHeapNumber(r5, r6, r7, &slow);
+    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
   }

   if (use_fp_registers) {

@@ -7516,7 +7526,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   } else {
     // Write Smi from r1 to r1 and r0 in double format.
     __ mov(r7, Operand(r1));
-    ConvertToDoubleStub stub4(r1, r0, r7, r6);
+    ConvertToDoubleStub stub4(r1, r0, r7, r9);
     __ push(lr);
     __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
     __ pop(lr);

@@ -7577,13 +7587,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
     }
   }

   if (lhs.is(r0)) {
     __ b(&slow);
     __ bind(&slow_reverse);
     __ Swap(r0, r1, ip);
   }

+  heap_number_map = no_reg;  // Don't use this any more from here on.
+
   // We jump to here if something goes wrong (one param is not a number of any
   // sort or new-space allocation fails).
   __ bind(&slow);

@@ -7749,9 +7760,13 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
   Label rhs_is_smi, lhs_is_smi;
   Label done_checking_rhs, done_checking_lhs;

+  Register heap_number_map = r6;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
   __ tst(lhs, Operand(kSmiTagMask));
   __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+  __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
   GetInt32(masm, lhs, r3, r5, r4, &slow);
   __ jmp(&done_checking_lhs);

@@ -7761,7 +7776,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
   __ tst(rhs, Operand(kSmiTagMask));
   __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+  __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
   GetInt32(masm, rhs, r2, r5, r4, &slow);
   __ jmp(&done_checking_rhs);

@@ -7821,8 +7837,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
           break;
         }
         case NO_OVERWRITE: {
-          // Get a new heap number in r5.  r6 and r7 are scratch.
-          __ AllocateHeapNumber(r5, r6, r7, &slow);
+          // Get a new heap number in r5.  r4 and r7 are scratch.
+          __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
         }
         default: break;
       }

@@ -7841,8 +7857,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
   if (mode_ != NO_OVERWRITE) {
     __ bind(&have_to_allocate);
-    // Get a new heap number in r5.  r6 and r7 are scratch.
-    __ AllocateHeapNumber(r5, r6, r7, &slow);
+    // Get a new heap number in r5.  r4 and r7 are scratch.
+    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
     __ jmp(&got_a_heap_number);
   }

@@ -7968,10 +7984,11 @@ const char* GenericBinaryOpStub::GetName() {
   }
   OS::SNPrintF(Vector<char>(name_, len),
-               "GenericBinaryOpStub_%s_%s%s",
+               "GenericBinaryOpStub_%s_%s%s_%s",
                op_name,
                overwrite_name,
-               specialized_on_rhs_ ? "_ConstantRhs" : 0);
+               specialized_on_rhs_ ? "_ConstantRhs" : "",
+               BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
 }

@@ -8164,6 +8181,28 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     }
     __ Ret();
     __ bind(&smi_is_unsuitable);
+  } else if (op_ == Token::MOD &&
+             runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+             runtime_operands_type_ != BinaryOpIC::STRINGS) {
+    // Do generate a bit of smi code for modulus even though the default for
+    // modulus is not to do it, but as the ARM processor has no coprocessor
+    // support for modulus checking for smis makes sense.
+    Label slow;
+    ASSERT(!ShouldGenerateSmiCode());
+    ASSERT(kSmiTag == 0);  // Adjust code below.
+    // Check for two positive smis.
+    __ orr(smi_test_reg, lhs, Operand(rhs));
+    __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
+    __ b(ne, &slow);
+    // Check that rhs is a power of two and not zero.
+    __ sub(scratch, rhs, Operand(1), SetCC);
+    __ b(mi, &slow);
+    __ tst(rhs, scratch);
+    __ b(ne, &slow);
+    // Calculate power of two modulus.
+    __ and_(result, lhs, Operand(scratch));
+    __ Ret();
+    __ bind(&slow);
   }
   HandleBinaryOpSlowCases(
       masm,

@@ -8391,6 +8430,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done;

+  Register heap_number_map = r6;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
   if (op_ == Token::SUB) {
     // Check whether the value is a smi.
     Label try_float;

@@ -8411,7 +8453,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
     __ b(&done);

     __ bind(&try_float);
-    __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r1, heap_number_map);
     __ b(ne, &slow);
     // r0 is a heap number.  Get a new heap number in r1.
     if (overwrite_) {

@@ -8419,7 +8463,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
       __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
       __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     } else {
-      __ AllocateHeapNumber(r1, r2, r3, &slow);
+      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
       __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
       __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
       __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));

@@ -8429,7 +8473,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
     }
   } else if (op_ == Token::BIT_NOT) {
     // Check if the operand is a heap number.
-    __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r1, heap_number_map);
     __ b(ne, &slow);

     // Convert the heap number is r0 to an untagged integer in r1.

@@ -8449,7 +8495,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
       // Allocate a fresh heap number, but don't overwrite r0 until
       // we're sure we can do it without going through the slow case
      // that needs the value in r0.
-      __ AllocateHeapNumber(r2, r3, r4, &slow);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
       __ mov(r0, Operand(r2));
     }

@@ -9431,17 +9477,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
                                        RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
   __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
-  __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset));  // Ditto.
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(r3, r2, r7);
+  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
   __ mov(r3, last_match_info_elements);
-  __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
-  __ RecordWrite(r3, r2, r7);
+  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);

   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =

@@ -10543,13 +10587,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
-  Label non_ascii, allocated;
+  Label non_ascii, allocated, ascii_data;
   ASSERT_EQ(0, kTwoByteStringTag);
   __ tst(r4, Operand(kStringEncodingMask));
   __ tst(r5, Operand(kStringEncodingMask), ne);
   __ b(eq, &non_ascii);

   // Allocate an ASCII cons string.
+  __ bind(&ascii_data);
   __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.

@@ -10561,6 +10606,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ Ret();

   __ bind(&non_ascii);
+  // At least one of the strings is two-byte.  Check whether it happens
+  // to contain only ascii characters.
+  // r4: first instance type.
+  // r5: second instance type.
+  __ tst(r4, Operand(kAsciiDataHintMask));
+  __ tst(r5, Operand(kAsciiDataHintMask), ne);
+  __ b(ne, &ascii_data);
+  __ eor(r4, r4, Operand(r5));
+  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+  __ b(eq, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
   __ jmp(&allocated);
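
The new Token::MOD fast path in the @@ -8164 hunk relies on a classic identity: for non-negative x and a power-of-two m, x % m == x & (m - 1), and m is a power of two exactly when m != 0 and (m & (m - 1)) == 0. A short C++ sketch of the same check-and-mask sequence, with plain integers standing in for smis:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Modulus by masking; valid only when rhs is a nonzero power of two and
// lhs is non-negative, which is exactly what the stub's guards check.
static uint32_t PowerOfTwoMod(uint32_t lhs, uint32_t rhs) {
  uint32_t scratch = rhs - 1;                // sub scratch, rhs, #1
  assert(rhs != 0 && (rhs & scratch) == 0);  // tst rhs, scratch
  return lhs & scratch;                      // and_ result, lhs, scratch
}

int main() {
  printf("23 mod 8  = %u\n", PowerOfTwoMod(23, 8));   // 7
  printf("64 mod 16 = %u\n", PowerOfTwoMod(64, 16));  // 0
  return 0;
}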

4  deps/v8/src/arm/codegen-arm.h

@@ -669,7 +669,9 @@ class GenericBinaryOpStub : public CodeStub {
   }

   void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
+  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+                             Register lhs,
+                             Register rhs);
   void HandleBinaryOpSlowCases(MacroAssembler* masm,
                                Label* not_smi,
                                Register lhs,

3  deps/v8/src/arm/constants-arm.h

@@ -284,6 +284,9 @@ class Instr {
   // with immediate
   inline int RotateField() const { return Bits(11, 8); }
   inline int Immed8Field() const { return Bits(7, 0); }
+  inline int Immed4Field() const { return Bits(19, 16); }
+  inline int ImmedMovwMovtField() const {
+      return Immed4Field() << 12 | Offset12Field(); }

   // Fields used in Load/Store instructions
   inline int PUField() const { return Bits(24, 23); }
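
ImmedMovwMovtField reassembles the 16-bit immediate that movw/movt scatter across the instruction word: imm4 in bits 19-16 and imm12 in bits 11-0. A self-contained C++ sketch of the same extraction, with Bits reimplemented locally:

#include <cstdint>
#include <cstdio>

// Extract instruction bits hi..lo, inclusive, like Instr::Bits.
static int Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}

static int ImmedMovwMovtField(uint32_t instr) {
  return Bits(instr, 19, 16) << 12 | Bits(instr, 11, 0);
}

int main() {
  // An instruction word whose imm4 is 0xb and imm12 is 0xeef
  // (all other fields left zero for the example).
  uint32_t instr = (0xbu << 16) | 0xeefu;
  printf("decoded immediate: 0x%x\n", ImmedMovwMovtField(instr));  // 0xbeef
  return 0;
}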

22  deps/v8/src/arm/disasm-arm.cc

@@ -101,6 +101,7 @@ class Decoder {
   void PrintSRegister(int reg);
   void PrintDRegister(int reg);
   int FormatVFPRegister(Instr* instr, const char* format);
+  void PrintMovwMovt(Instr* instr);
   int FormatVFPinstruction(Instr* instr, const char* format);
   void PrintCondition(Instr* instr);
   void PrintShiftRm(Instr* instr);

@@ -375,6 +376,16 @@ int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
 }

+// Print the movw or movt instruction.
+void Decoder::PrintMovwMovt(Instr* instr) {
+  int imm = instr->ImmedMovwMovtField();
+  int rd = instr->RdField();
+  PrintRegister(rd);
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       ", #%d", imm);
+}
+

 // FormatOption takes a formatting string and interprets it based on
 // the current instructions. The format string points to the first
 // character of the option string (the option escape has already been

@@ -430,7 +441,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
       return 1;
     }
     case 'm': {
-      if (format[1] == 'e') {  // 'memop: load/store instructions
+      if (format[1] == 'w') {
+        // 'mw: movt/movw instructions.
+        PrintMovwMovt(instr);
+        return 2;
+      }
+      if (format[1] == 'e') {  // 'memop: load/store instructions.
         ASSERT(STRING_STARTS_WITH(format, "memop"));
         if (instr->HasL()) {
           Print("ldr");

@@ -776,7 +792,7 @@ void Decoder::DecodeType01(Instr* instr) {
       if (instr->HasS()) {
         Format(instr, "tst'cond 'rn, 'shift_op");
       } else {
-        Unknown(instr);  // not used by V8
+        Format(instr, "movw'cond 'mw");
       }
       break;
     }

@@ -794,7 +810,7 @@ void Decoder::DecodeType01(Instr* instr) {
       if (instr->HasS()) {
         Format(instr, "cmp'cond 'rn, 'shift_op");
       } else {
-        Unknown(instr);  // not used by V8
+        Format(instr, "movt'cond 'mw");
       }
       break;
     }

3  deps/v8/src/arm/fast-codegen-arm.cc

@@ -102,8 +102,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   }

   if (needs_write_barrier) {
-    __ mov(scratch1(), Operand(offset));
-    __ RecordWrite(scratch0(), scratch1(), scratch2());
+    __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
   }

   if (destination().is(accumulator1())) {

25  deps/v8/src/arm/full-codegen-arm.cc

@@ -110,10 +110,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
           __ mov(r1, Operand(Context::SlotOffset(slot->index())));
           __ str(r0, MemOperand(cp, r1));
           // Update the write barrier. This clobbers all involved
-          // registers, so we have use a third register to avoid
+          // registers, so we have to use two more registers to avoid
           // clobbering cp.
           __ mov(r2, Operand(cp));
-          __ RecordWrite(r2, r1, r0);
+          __ RecordWrite(r2, Operand(r1), r3, r0);
         }
       }
     }

@@ -666,8 +666,10 @@ void FullCodeGenerator::Move(Slot* dst,
   __ str(src, location);
   // Emit the write barrier code if the location is in the heap.
   if (dst->type() == Slot::CONTEXT) {
-    __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
-    __ RecordWrite(scratch1, scratch2, src);
+    __ RecordWrite(scratch1,
+                   Operand(Context::SlotOffset(dst->index())),
+                   scratch2,
+                   src);
   }
 }

@@ -715,10 +717,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
         __ str(result_register(),
                CodeGenerator::ContextOperand(cp, slot->index()));
         int offset = Context::SlotOffset(slot->index());
-        __ mov(r2, Operand(offset));
         // We know that we have written a function, which is not a smi.
         __ mov(r1, Operand(cp));
-        __ RecordWrite(r1, r2, result_register());
+        __ RecordWrite(r1, Operand(offset), r2, result_register());
       }
       break;

@@ -1252,8 +1253,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     // Update the write barrier for the array store with r0 as the scratch
     // register.
-    __ mov(r2, Operand(offset));
-    __ RecordWrite(r1, r2, result_register());
+    __ RecordWrite(r1, Operand(offset), r2, result_register());
   }

   if (result_saved) {

@@ -1493,8 +1493,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
       // RecordWrite may destroy all its register arguments.
       __ mov(r3, result_register());
       int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      __ mov(r2, Operand(offset));
-      __ RecordWrite(r1, r2, r3);
+      __ RecordWrite(r1, Operand(offset), r2, r3);
       break;
     }

@@ -2157,7 +2156,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;

-  __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
   __ jmp(&heapnumber_allocated);

   __ bind(&slow_allocate_heapnumber);

@@ -2276,8 +2276,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
   // Update the write barrier. Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
-  __ RecordWrite(r1, r2, r3);
+  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);

   __ bind(&done);
   Apply(context_, r0);

17  deps/v8/src/arm/ic-arm.cc

@@ -1339,7 +1339,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
       __ bind(&box_int);
       // Allocate a HeapNumber for the result and perform int-to-double
       // conversion.  Use r0 for result as key is not needed any more.
-      __ AllocateHeapNumber(r0, r3, r4, &slow);
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r0, r3, r4, r6, &slow);

       if (CpuFeatures::IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);

@@ -1370,7 +1371,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
         // Allocate a HeapNumber for the result and perform int-to-double
         // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
         // registers - also when jumping due to exhausted young space.
-        __ AllocateHeapNumber(r2, r3, r4, &slow);
+        __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+        __ AllocateHeapNumber(r2, r3, r4, r6, &slow);

         __ vcvt_f64_u32(d0, s0);
         __ sub(r1, r2, Operand(kHeapObjectTag));

@@ -1407,7 +1409,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
         // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
         // clobbers all registers - also when jumping due to exhausted young
         // space.
-        __ AllocateHeapNumber(r4, r5, r6, &slow);
+        __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+        __ AllocateHeapNumber(r4, r5, r7, r6, &slow);

         __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
         __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));

@@ -1423,7 +1426,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
       // Allocate a HeapNumber for the result. Don't use r0 and r1 as
       // AllocateHeapNumber clobbers all registers - also when jumping due to
       // exhausted young space.
-      __ AllocateHeapNumber(r2, r3, r4, &slow);
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
       __ vcvt_f64_f32(d0, s0);
       __ sub(r1, r2, Operand(kHeapObjectTag));
       __ vstr(d0, r1, HeapNumber::kValueOffset);

@@ -1434,7 +1438,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
       // AllocateHeapNumber clobbers all registers - also when jumping due to
       // exhausted young space.
-      __ AllocateHeapNumber(r3, r4, r5, &slow);
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r3, r4, r5, r6, &slow);

       // VFP is not available, do manual single to double conversion.
       // r2: floating point value (binary32)

@@ -1692,7 +1697,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ Ret(eq);
   // Update write barrier for the elements array address.
   __ sub(r4, r5, Operand(elements));
-  __ RecordWrite(elements, r4, r5);
+  __ RecordWrite(elements, Operand(r4), r5, r6);
   __ Ret();
 }

71  deps/v8/src/arm/macro-assembler-arm.cc

@@ -270,6 +270,17 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
 }

+void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+  ASSERT(lsb < 32);
+  if (!CpuFeatures::IsSupported(ARMv7)) {
+    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+    bic(dst, dst, Operand(mask));
+  } else {
+    bfc(dst, lsb, width, cond);
+  }
+}
+

 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
   // Empty the const pool.
   CheckConstPool(true, true);

@@ -299,31 +310,32 @@ void MacroAssembler::StoreRoot(Register source,

 void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register offset,
-                                       Register scratch) {
+                                       Operand offset,
+                                       Register scratch0,
+                                       Register scratch1) {
   if (FLAG_debug_code) {
     // Check that the object is not in new space.
     Label not_in_new_space;
-    InNewSpace(object, scratch, ne, &not_in_new_space);
+    InNewSpace(object, scratch1, ne, &not_in_new_space);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
   }

-  mov(ip, Operand(Page::kPageAlignmentMask));  // Load mask only once.
-
-  // Calculate region number.
-  add(offset, object, Operand(offset));  // Add offset into the object.
-  and_(offset, offset, Operand(ip));  // Offset into page of the object.
-  mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
+  // Add offset into the object.
+  add(scratch0, object, offset);

   // Calculate page address.
-  bic(object, object, Operand(ip));
+  Bfc(object, 0, kPageSizeBits);
+
+  // Calculate region number.
+  Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+       kPageSizeBits - Page::kRegionSizeLog2);

   // Mark region dirty.
-  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+  ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
   mov(ip, Operand(1));
-  orr(scratch, scratch, Operand(ip, LSL, offset));
-  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+  orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
+  str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
 }

@@ -341,21 +353,23 @@ void MacroAssembler::InNewSpace(Register object,
 // Will clobber 4 registers: object, offset, scratch, ip.  The
 // register 'object' contains a heap object pointer.  The heap object
 // tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register offset,
-                                 Register scratch) {
+void MacroAssembler::RecordWrite(Register object,
+                                 Operand offset,
+                                 Register scratch0,
+                                 Register scratch1) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

   Label done;

   // First, test that the object is not in the new space.  We cannot set
   // region marks for new space pages.
-  InNewSpace(object, scratch, eq, &done);
+  InNewSpace(object, scratch0, eq, &done);

   // Record the actual write.
-  RecordWriteHelper(object, offset, scratch);
+  RecordWriteHelper(object, offset, scratch0, scratch1);

   bind(&done);

@@ -363,8 +377,8 @@ void MacroAssembler::RecordWrite(Register object,
   // turned on to provoke errors.
   if (FLAG_debug_code) {
     mov(object, Operand(BitCast<int32_t>(kZapValue)));
-    mov(offset, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
   }
 }

@@ -1514,6 +1528,16 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
 }

+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+                                          Heap::RootListIndex index) {
+  if (FLAG_debug_code) {
+    LoadRoot(ip, index);
+    cmp(reg, ip);
+    Check(eq, "Register did not match expected root");
+  }
+}
+

 void MacroAssembler::Check(Condition cc, const char* msg) {
   Label L;
   b(cc, &L);

@@ -1632,6 +1656,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
 void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch1,
                                         Register scratch2,
+                                        Register heap_number_map,
                                         Label* gc_required) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.

@@ -1642,9 +1667,9 @@ void MacroAssembler::AllocateHeapNumber(Register result,
                         gc_required,
                         TAG_OBJECT);

-  // Get heap number map and store it in the allocated object.
-  LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
-  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  // Store heap number map in the allocated object.
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
 }
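
The rewritten RecordWriteHelper is pure bit arithmetic: clear the low kPageSizeBits of the object address to find its page (the Bfc), extract the region index of the updated slot (the Ubfx), and set that region's bit in the page's dirty-flag word. A C++ sketch of the computation; the constants below are assumptions chosen for illustration, not values taken from this commit:

#include <cstdint>
#include <cstdio>

const int kPageSizeBits = 13;    // assumed 8K pages, for illustration
const int kRegionSizeLog2 = 8;   // assumed 256-byte regions, for illustration

int main() {
  uintptr_t object = 0x40002a08;  // made-up heap object address
  uintptr_t offset = 0x14;        // offset of the field just written

  uintptr_t slot = object + offset;  // add(scratch0, object, offset)
  // Bfc(object, 0, kPageSizeBits): page start address.
  uintptr_t page = object & ~(((uintptr_t)1 << kPageSizeBits) - 1);
  // Ubfx(scratch0, ...): region number of the slot within its page.
  uintptr_t region = (slot >> kRegionSizeLog2) &
      (((uintptr_t)1 << (kPageSizeBits - kRegionSizeLog2)) - 1);

  uint32_t dirty_flags = 0;              // ldr(scratch1, [page, #DirtyFlag])
  dirty_flags |= (uint32_t)1 << region;  // orr(scratch1, scratch1, 1 << region)
  printf("page %#lx, region %lu, dirty flags %#x\n",
         (unsigned long)page, (unsigned long)region, dirty_flags);
  return 0;
}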

15  deps/v8/src/arm/macro-assembler-arm.h

@@ -100,6 +100,7 @@ class MacroAssembler: public Assembler {
             Condition cond = al);
   void Sbfx(Register dst, Register src, int lsb, int width,
             Condition cond = al);
+  void Bfc(Register dst, int lsb, int width, Condition cond = al);

   void Call(Label* target);
   void Move(Register dst, Handle<Object> value);

@@ -127,13 +128,19 @@ class MacroAssembler: public Assembler {
   // For the page containing |object| mark the region covering [object+offset]
   // dirty. The object address must be in the first 8K of an allocated page.
-  void RecordWriteHelper(Register object, Register offset, Register scratch);
+  void RecordWriteHelper(Register object,
+                         Operand offset,
+                         Register scratch0,
+                         Register scratch1);

   // For the page containing |object| mark the region covering [object+offset]
   // dirty. The object address must be in the first 8K of an allocated page.
-  // The 'scratch' register is used in the implementation and all 3 registers
+  // The 'scratch' registers are used in the implementation and all 3 registers
   // are clobbered by the operation, as well as the ip register.
-  void RecordWrite(Register object, Register offset, Register scratch);
+  void RecordWrite(Register object,
+                   Operand offset,
+                   Register scratch0,
+                   Register scratch1);

   // Push two registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {

@@ -372,6 +379,7 @@ class MacroAssembler: public Assembler {
   void AllocateHeapNumber(Register result,
                           Register scratch1,
                           Register scratch2,
+                          Register heap_number_map,
                           Label* gc_required);

   // ---------------------------------------------------------------------------

@@ -551,6 +559,7 @@ class MacroAssembler: public Assembler {
   // Calls Abort(msg) if the condition cc is not satisfied.
   // Use --debug_code to enable.
   void Assert(Condition cc, const char* msg);
+  void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);

   // Like Assert(), but always enabled.
   void Check(Condition cc, const char* msg);
9
deps/v8/src/arm/simulator-arm.cc

@@ -1859,7 +1859,9 @@ void Simulator::DecodeType01(Instr* instr) {
           SetNZFlags(alu_out);
           SetCFlag(shifter_carry_out);
         } else {
-          UNIMPLEMENTED();
+          // Format(instr, "movw'cond 'rd, 'imm").
+          alu_out = instr->ImmedMovwMovtField();
+          set_register(rd, alu_out);
         }
         break;
       }
@@ -1888,7 +1890,10 @@ void Simulator::DecodeType01(Instr* instr) {
           SetCFlag(!BorrowFrom(rn_val, shifter_operand));
           SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
         } else {
-          UNIMPLEMENTED();
+          // Format(instr, "movt'cond 'rd, 'imm").
+          alu_out = (get_register(rd) & 0xffff) |
+                    (instr->ImmedMovwMovtField() << 16);
+          set_register(rd, alu_out);
         }
         break;
       }
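For reference, ARMv7 movw writes a 16-bit immediate into the low half of a register (clearing the high half) and movt writes the high half while preserving the low half; the simulator's new arms implement exactly that. A standalone sketch of the semantics in plain C++ (names hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t rd = 0;
      rd = 0x1234u;                            // movw rd, #0x1234 (high half cleared)
      rd = (rd & 0xffffu) | (0xabcdu << 16);   // movt rd, #0xabcd (low half kept)
      std::printf("0x%08x\n", rd);             // prints 0xabcd1234
      return 0;
    }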
8
deps/v8/src/arm/stub-cache-arm.cc

@@ -336,9 +336,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     __ b(eq, &exit);
     // Update the write barrier for the array address.
-    // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, Operand(offset));
-    __ RecordWrite(receiver_reg, name_reg, scratch);
+    // Pass the now unused name_reg as a scratch register.
+    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -352,8 +351,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ mov(name_reg, Operand(offset));
-    __ RecordWrite(scratch, name_reg, receiver_reg);
+    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
   }
   // Return the value (register r0).
1
deps/v8/src/bootstrapper.cc

@@ -1462,6 +1462,7 @@ bool Genesis::InstallExtensions(Handle<Context> global_context,
   }
   if (FLAG_expose_gc) InstallExtension("v8/gc");
+  if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
   if (extensions == NULL) return true;
   // Install required extensions
8
deps/v8/src/checks.h

@@ -155,9 +155,9 @@ static inline void CheckNonEqualsHelper(const char* file,
 static inline void CheckEqualsHelper(const char* file,
                                      int line,
                                      const char* expected_source,
-                                     void* expected,
+                                     const void* expected,
                                      const char* value_source,
-                                     void* value) {
+                                     const void* value) {
   if (expected != value) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
@@ -170,9 +170,9 @@ static inline void CheckEqualsHelper(const char* file,
 static inline void CheckNonEqualsHelper(const char* file,
                                         int line,
                                         const char* expected_source,
-                                        void* expected,
+                                        const void* expected,
                                         const char* value_source,
-                                        void* value) {
+                                        const void* value) {
   if (expected == value) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
              expected_source, value_source, value);
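The const overloads matter because CHECK_EQ/CHECK_NE on pointers previously failed to compile when either side was a pointer-to-const. A minimal standalone illustration (hypothetical helper mirroring the signature change):

    #include <cassert>

    // Mirrors the fixed helper: const void* parameters let both const and
    // non-const pointers bind without a cast.
    static inline void CheckEqualsHelper(const void* expected, const void* value) {
      assert(expected == value);
    }

    int main() {
      int x = 42;
      const int* p = &x;        // pointer-to-const
      int* q = &x;              // non-const pointer
      CheckEqualsHelper(p, q);  // compiles now; with plain void*, p would not convert
      return 0;
    }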
6
deps/v8/src/debug-debugger.js

@@ -295,7 +295,6 @@ ScriptBreakPoint.prototype.update_positions = function(line, column) {
 }
-
 ScriptBreakPoint.prototype.hit_count = function() {
   return this.hit_count_;
 };
@@ -389,7 +388,10 @@ ScriptBreakPoint.prototype.set = function (script) {
   // Create a break point object and set the break point.
   break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
   break_point.setIgnoreCount(this.ignoreCount());
-  %SetScriptBreakPoint(script, pos, break_point);
+  pos = %SetScriptBreakPoint(script, pos, break_point);
+  if (!IS_UNDEFINED(pos)) {
+    this.actual_location = script.locationFromPosition(pos);
+  }
   return break_point;
 };
8
deps/v8/src/debug.cc

@@ -1028,8 +1028,8 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
 void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
-                          int source_position,
-                          Handle<Object> break_point_object) {
+                          Handle<Object> break_point_object,
+                          int* source_position) {
   HandleScope scope;
   if (!EnsureDebugInfo(shared)) {
@@ -1043,9 +1043,11 @@ void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
   // Find the break point and change it.
   BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
-  it.FindBreakLocationFromPosition(source_position);
+  it.FindBreakLocationFromPosition(*source_position);
   it.SetBreakPoint(break_point_object);
+  *source_position = it.position();
   // At least one active break point now.
   ASSERT(debug_info->GetBreakPointCount() > 0);
 }
4
deps/v8/src/debug.h

@@ -230,8 +230,8 @@ class Debug {
   static Object* Break(Arguments args);
   static void SetBreakPoint(Handle<SharedFunctionInfo> shared,
-                            int source_position,
-                            Handle<Object> break_point_object);
+                            Handle<Object> break_point_object,
+                            int* source_position);
   static void ClearBreakPoint(Handle<Object> break_point_object);
   static void ClearAllBreakPoints();
   static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
114
deps/v8/src/execution.cc

@@ -679,7 +679,7 @@ Object* Execution::HandleStackGuardInterrupt() {
 // --- G C   E x t e n s i o n ---

-const char* GCExtension::kSource = "native function gc();";
+const char* const GCExtension::kSource = "native function gc();";

 v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
@@ -695,7 +695,115 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
 }

-static GCExtension kGCExtension;
-v8::DeclareExtension kGCExtensionDeclaration(&kGCExtension);
+static GCExtension gc_extension;
+static v8::DeclareExtension gc_extension_declaration(&gc_extension);
+
+// --- E x t e r n a l i z e S t r i n g   E x t e n s i o n ---
+
+template <typename Char, typename Base>
+class SimpleStringResource : public Base {
+ public:
+  // Takes ownership of |data|.
+  SimpleStringResource(Char* data, size_t length)
+      : data_(data),
+        length_(length) {}
+
+  virtual ~SimpleStringResource() { delete data_; }
+
+  virtual const Char* data() const { return data_; }
+
+  virtual size_t length() const { return length_; }
+
+ private:
+  Char* const data_;
+  const size_t length_;
+};
+
+typedef SimpleStringResource<char, v8::String::ExternalAsciiStringResource>
+    SimpleAsciiStringResource;
+typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
+    SimpleTwoByteStringResource;
+
+const char* const ExternalizeStringExtension::kSource =
+    "native function externalizeString();"
+    "native function isAsciiString();";
+
+v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
+    v8::Handle<v8::String> str) {
+  if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
+    return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
+  } else {
+    ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
+    return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
+  }
+}
+
+v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
+    const v8::Arguments& args) {
+  if (args.Length() < 1 || !args[0]->IsString()) {
+    return v8::ThrowException(v8::String::New(
+        "First parameter to externalizeString() must be a string."));
+  }
+  bool force_two_byte = false;
+  if (args.Length() >= 2) {
+    if (args[1]->IsBoolean()) {
+      force_two_byte = args[1]->BooleanValue();
+    } else {
+      return v8::ThrowException(v8::String::New(
+          "Second parameter to externalizeString() must be a boolean."));
+    }
+  }
+  bool result = false;
+  Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
+  if (string->IsExternalString()) {
+    return v8::ThrowException(v8::String::New(
+        "externalizeString() can't externalize twice."));
+  }
+  if (string->IsAsciiRepresentation() && !force_two_byte) {
+    char* data = new char[string->length()];
+    String::WriteToFlat(*string, data, 0, string->length());
+    SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
+        data, string->length());
+    result = string->MakeExternal(resource);
+    if (result && !string->IsSymbol()) {
+      i::ExternalStringTable::AddString(*string);
+    }
+  } else {
+    uc16* data = new uc16[string->length()];
+    String::WriteToFlat(*string, data, 0, string->length());
+    SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
+        data, string->length());
+    result = string->MakeExternal(resource);
+    if (result && !string->IsSymbol()) {
+      i::ExternalStringTable::AddString(*string);
+    }
+  }
+  if (!result) {
+    return v8::ThrowException(v8::String::New("externalizeString() failed."));
+  }
+  return v8::Undefined();
+}
+
+v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
+    const v8::Arguments& args) {
+  if (args.Length() != 1 || !args[0]->IsString()) {
+    return v8::ThrowException(v8::String::New(
+        "isAsciiString() requires a single string argument."));
+  }
+  return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
+      v8::True() : v8::False();
+}
+
+static ExternalizeStringExtension externalize_extension;
+static v8::DeclareExtension externalize_extension_declaration(
+    &externalize_extension);

 } }  // namespace v8::internal
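A hedged sketch of how an embedder might exercise the new extension, assuming a build of this tree and the v8 2.x embedding API (flag spelling per flag-definitions.h below; requesting "v8/externalize" by name in the ExtensionConfiguration should also work without the flag):

    #include <v8.h>

    int main() {
      // Enable the extension's flag before creating a context (assumption:
      // SetFlagsFromString is available as in this era's v8.h).
      const char flags[] = "--expose-externalize-string";
      v8::V8::SetFlagsFromString(flags, sizeof(flags) - 1);

      v8::HandleScope scope;
      const char* names[] = { "v8/externalize" };
      v8::ExtensionConfiguration config(1, names);
      v8::Persistent<v8::Context> context = v8::Context::New(&config);
      v8::Context::Scope context_scope(context);

      // externalizeString() morphs the string in place; isAsciiString()
      // then reports the (possibly two-byte) representation.
      v8::Script::Compile(v8::String::New(
          "var s = Array(100).join('x');"   // long, flat, non-symbol string
          "externalizeString(s, true);"     // force the two-byte path
          "isAsciiString(s);"))->Run();

      context.Dispose();
      return 0;
    }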
13
deps/v8/src/execution.h

@@ -316,10 +316,21 @@ class GCExtension : public v8::Extension {
       v8::Handle<v8::String> name);
   static v8::Handle<v8::Value> GC(const v8::Arguments& args);
  private:
-  static const char* kSource;
+  static const char* const kSource;
 };

+class ExternalizeStringExtension : public v8::Extension {
+ public:
+  ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
+  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+      v8::Handle<v8::String> name);
+  static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
+  static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
+ private:
+  static const char* const kSource;
+};
+
 } }  // namespace v8::internal

 #endif  // V8_EXECUTION_H_
4
deps/v8/src/flag-definitions.h

@@ -123,6 +123,8 @@ DEFINE_bool(enable_armv7, true,
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
 DEFINE_bool(expose_gc, false, "expose gc extension")
+DEFINE_bool(expose_externalize_string, false,
+            "expose externalize string extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
 DEFINE_bool(disable_native_files, false, "disable builtin natives files")
@@ -191,7 +193,7 @@ DEFINE_bool(trace_gc_verbose, false,
             "print more details following each garbage collection")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
-DEFINE_bool(flush_code, false,
+DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again before full gc")

 // v8.cc
8
deps/v8/src/heap-profiler.cc

@@ -326,20 +326,27 @@ HeapProfiler::~HeapProfiler() {
   delete snapshots_;
 }

+#endif  // ENABLE_LOGGING_AND_PROFILING

 void HeapProfiler::Setup() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
   if (singleton_ == NULL) {
     singleton_ = new HeapProfiler();
   }
+#endif
 }

 void HeapProfiler::TearDown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
   delete singleton_;
   singleton_ = NULL;
+#endif
 }

+#ifdef ENABLE_LOGGING_AND_PROFILING

 HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) {
   ASSERT(singleton_ != NULL);
   return singleton_->TakeSnapshotImpl(name);
@@ -353,6 +360,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {

 HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
+  Heap::CollectAllGarbage(false);
   HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
   HeapSnapshotGenerator generator(result);
   generator.GenerateSnapshot();
7
deps/v8/src/heap-profiler.h

@@ -38,12 +38,16 @@ namespace internal {
 class HeapSnapshot;
 class HeapSnapshotsCollection;

+#endif

 // The HeapProfiler writes data to the log files, which can be postprocessed
 // to generate .hp files for use by the GHC/Valgrind tool hp2ps.
 class HeapProfiler {
  public:
   static void Setup();
   static void TearDown();
+#ifdef ENABLE_LOGGING_AND_PROFILING
   static HeapSnapshot* TakeSnapshot(const char* name);
   static HeapSnapshot* TakeSnapshot(String* name);
   static int GetSnapshotsCount();
@@ -68,9 +72,12 @@ class HeapProfiler {
   unsigned next_snapshot_uid_;

   static HeapProfiler* singleton_;
+#endif  // ENABLE_LOGGING_AND_PROFILING
 };

+#ifdef ENABLE_LOGGING_AND_PROFILING

 // JSObjectsCluster describes a group of JS objects that are
 // considered equivalent in terms of a particular profile.
 class JSObjectsCluster BASE_EMBEDDED {
53
deps/v8/src/heap.cc

@@ -1929,6 +1929,18 @@ Object* Heap::AllocateConsString(String* first, String* second) {
     return Failure::OutOfMemoryException();
   }

+  bool is_ascii_data_in_two_byte_string = false;
+  if (!is_ascii) {
+    // At least one of the strings uses two-byte representation so we
+    // can't use the fast case code for short ascii strings below, but
+    // we can try to save memory if all chars actually fit in ascii.
+    is_ascii_data_in_two_byte_string =
+        first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
+    if (is_ascii_data_in_two_byte_string) {
+      Counters::string_add_runtime_ext_to_ascii.Increment();
+    }
+  }
+
   // If the resulting string is small make a flat string.
   if (length < String::kMinNonFlatLength) {
     ASSERT(first->IsFlat());
@@ -1955,22 +1967,13 @@ Object* Heap::AllocateConsString(String* first, String* second) {
       for (int i = 0; i < second_length; i++) *dest++ = src[i];
       return result;
     } else {
-      // For short external two-byte strings we check whether they can
-      // be represented using ascii.
-      if (!first_is_ascii) {
-        first_is_ascii = first->IsExternalTwoByteStringWithAsciiChars();
-      }
-      if (first_is_ascii && !second_is_ascii) {
-        second_is_ascii = second->IsExternalTwoByteStringWithAsciiChars();
-      }
-      if (first_is_ascii && second_is_ascii) {
+      if (is_ascii_data_in_two_byte_string) {
         Object* result = AllocateRawAsciiString(length);
         if (result->IsFailure()) return result;
         // Copy the characters into the new object.
         char* dest = SeqAsciiString::cast(result)->GetChars();
         String::WriteToFlat(first, dest, 0, first_length);
         String::WriteToFlat(second, dest + first_length, 0, second_length);
-        Counters::string_add_runtime_ext_to_ascii.Increment();
         return result;
       }
@@ -1984,7 +1987,8 @@ Object* Heap::AllocateConsString(String* first, String* second) {
     }
   }

-  Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
+  Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
+      cons_ascii_string_map() : cons_string_map();
   Object* result = Allocate(map, NEW_SPACE);
   if (result->IsFailure()) return result;
@@ -2070,7 +2074,23 @@ Object* Heap::AllocateExternalStringFromTwoByte(
     return Failure::OutOfMemoryException();
   }

-  Map* map = Heap::external_string_map();
+  // For small strings we check whether the resource contains only
+  // ascii characters.  If yes, we use a different string map.
+  bool is_ascii = true;
+  if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
+    is_ascii = false;
+  } else {
+    const uc16* data = resource->data();
+    for (size_t i = 0; i < length; i++) {
+      if (data[i] > String::kMaxAsciiCharCode) {
+        is_ascii = false;
+        break;
+      }
+    }
+  }
+
+  Map* map = is_ascii ?
+      Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
   Object* result = Allocate(map, NEW_SPACE);
   if (result->IsFailure()) return result;
@@ -2244,6 +2264,12 @@ static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
   ThreadManager::IterateArchivedThreads(&threadvisitor);
   if (threadvisitor.FoundCode()) return;

+  // Check that there are heap allocated locals in the scopeinfo. If
+  // there is, we are potentially using eval and need the scopeinfo
+  // for variable resolution.
+  if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->code()))
+    return;
+
   HandleScope scope;
   // Compute the lazy compilable version of the code.
   function_info->set_code(*ComputeLazyCompile(function_info->length()));
@@ -2853,6 +2879,9 @@ Map* Heap::SymbolMapForString(String* string) {
   if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
   if (map == external_string_map()) return external_symbol_map();
   if (map == external_ascii_string_map()) return external_ascii_symbol_map();
+  if (map == external_string_with_ascii_data_map()) {
+    return external_symbol_with_ascii_data_map();
+  }

   // No match found.
   return NULL;
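The new external-string path scans the two-byte payload once at allocation time and picks the "with ascii data" map when every code unit fits in seven bits. A standalone sketch of that check in plain C++ (kMaxAsciiCharCode = 127 mirrors String::kMaxAsciiCharCode, an assumption):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef uint16_t uc16;

    // Mirrors the check in AllocateExternalStringFromTwoByte: a two-byte
    // buffer counts as "ascii data" only if every code unit is <= 127.
    static bool HasOnlyAsciiData(const uc16* data, size_t length) {
      const uc16 kMaxAsciiCharCode = 127;
      for (size_t i = 0; i < length; i++) {
        if (data[i] > kMaxAsciiCharCode) return false;
      }
      return true;
    }

    int main() {
      const uc16 ascii[] = { 'v', '8' };
      const uc16 wide[] = { 'v', 0x2603 };  // U+2603 SNOWMAN
      std::printf("%d %d\n", HasOnlyAsciiData(ascii, 2),
                  HasOnlyAsciiData(wide, 2));  // prints "1 0"
      return 0;
    }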
2
deps/v8/src/heap.h

@@ -69,10 +69,12 @@ class ZoneScopeInfo;
   V(Map, cons_symbol_map, ConsSymbolMap) \
   V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
   V(Map, external_symbol_map, ExternalSymbolMap) \
+  V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
   V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
   V(Map, cons_string_map, ConsStringMap) \
   V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
   V(Map, external_string_map, ExternalStringMap) \
+  V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
   V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
   V(Map, undetectable_string_map, UndetectableStringMap) \
   V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
4
deps/v8/src/ia32/assembler-ia32-inl.h

@@ -43,10 +43,6 @@
 namespace v8 {
 namespace internal {

-Condition NegateCondition(Condition cc) {
-  return static_cast<Condition>(cc ^ 1);
-}
-
 // The modes possibly affected by apply must be in kApplyMask.
 void RelocInfo::apply(intptr_t delta) {
16
deps/v8/src/ia32/assembler-ia32.cc

@@ -378,6 +378,11 @@ void Assembler::Align(int m) {
 }

+void Assembler::CodeTargetAlign() {
+  Align(16);  // Preferred alignment of jump targets on ia32.
+}
+
 void Assembler::cpuid() {
   ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
@@ -2154,17 +2159,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
 }

-void Assembler::comisd(XMMRegister dst, XMMRegister src) {
-  ASSERT(CpuFeatures::IsEnabled(SSE2));
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  EMIT(0x66);
-  EMIT(0x0F);
-  EMIT(0x2F);
-  emit_sse_operand(dst, src);
-}
-
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
10
deps/v8/src/ia32/assembler-ia32.h

@@ -146,7 +146,10 @@ enum Condition {
 // Negation of the default no_condition (-1) results in a non-default
 // no_condition value (-2). As long as tests for no_condition check
 // for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+inline Condition NegateCondition(Condition cc) {
+  return static_cast<Condition>(cc ^ 1);
+}

 // Corresponds to transposing the operands of a comparison.
 inline Condition ReverseCondition(Condition cc) {
@@ -172,12 +175,14 @@ inline Condition ReverseCondition(Condition cc) {
   };
 }

+
 enum Hint {
   no_hint = 0,
   not_taken = 0x2e,
   taken = 0x3e
 };

+
 // The result of negating a hint is as if the corresponding condition
 // were negated by NegateCondition. That is, no_hint is mapped to
 // itself and not_taken and taken are mapped to each other.
@@ -502,6 +507,8 @@ class Assembler : public Malloced {
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();

   // Stack
   void pushad();
@@ -779,7 +786,6 @@ class Assembler : public Malloced {
   void xorpd(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);

-  void comisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, XMMRegister src);
   void movmskpd(Register dst, XMMRegister src);
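The xor-with-1 trick in NegateCondition works because ia32 condition codes come in adjacent true/false pairs (e.g. 0x4 equal, 0x5 not_equal), so flipping the low bit negates the condition. A standalone sketch with a hypothetical subset of the enum:

    #include <cstdio>

    // Subset of the ia32 condition codes; each pair differs only in bit 0.
    enum Condition {
      equal = 4,
      not_equal = 5,
      less = 12,
      greater_equal = 13
    };

    inline Condition NegateCondition(Condition cc) {
      return static_cast<Condition>(cc ^ 1);  // flip the low bit
    }

    int main() {
      std::printf("%d %d\n", NegateCondition(equal), NegateCondition(less));
      return 0;  // prints "5 13": not_equal and greater_equal
    }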
28
deps/v8/src/ia32/codegen-ia32.cc

@@ -604,6 +604,10 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
       RegisterFile empty_regs;
       SetFrame(clone, &empty_regs);
       __ bind(&allocation_failed);
+      if (!CpuFeatures::IsSupported(SSE2)) {
+        // Pop the value from the floating point stack.
+        __ fstp(0);
+      }
       unsafe_bailout_->Jump();

       done.Bind(value);
@@ -2991,7 +2995,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
                               &not_numbers);
     LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
                               &not_numbers);
-    __ comisd(xmm0, xmm1);
+    __ ucomisd(xmm0, xmm1);
   } else {
     Label check_right, compare;
@@ -7306,7 +7310,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
       // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
       __ addsd(xmm2, xmm3);
       // xmm2 now has 0.5.
-      __ comisd(xmm2, xmm1);
+      __ ucomisd(xmm2, xmm1);
       call_runtime.Branch(not_equal);
       // Calculates square root.
       __ movsd(xmm1, xmm0);
@@ -11592,7 +11596,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
     CpuFeatures::Scope fscope(SSE2);
     __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
    __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
-    __ comisd(xmm0, xmm1);
+    __ ucomisd(xmm0, xmm1);
   } else {
     __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
     __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
@@ -11817,7 +11821,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
       CpuFeatures::Scope use_cmov(CMOV);

       FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
-      __ comisd(xmm0, xmm1);
+      __ ucomisd(xmm0, xmm1);

       // Don't base result on EFLAGS when a NaN is involved.
       __ j(parity_even, &unordered, not_taken);
@@ -12848,7 +12852,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // If result is not supposed to be flat allocate a cons string object. If both
   // strings are ascii the result is an ascii cons string.
-  Label non_ascii, allocated;
+  Label non_ascii, allocated, ascii_data;
   __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -12857,6 +12861,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ test(ecx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
+  __ bind(&ascii_data);
   // Allocate an acsii cons string.
   __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
   __ bind(&allocated);
@@ -12871,6 +12876,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
+  // At least one of the strings is two-byte. Check whether it happens
+  // to contain only ascii characters.
+  // ecx: first instance type AND second instance type.
+  // edi: second instance type.
+  __ test(ecx, Immediate(kAsciiDataHintMask));
+  __ j(not_zero, &ascii_data);
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ xor_(edi, Operand(ecx));
+  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+  __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+  __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
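On the comisd-to-ucomisd switch: the two instructions set EFLAGS identically and both mark unordered operands via PF=1, but comisd also raises the invalid-operation exception on quiet NaNs while ucomisd only does so for signaling NaNs, which is what JS comparison semantics want. Every site above already guards with j(parity_even, ...). A plain-C++ reminder of the unordered case:

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = std::nan("");  // quiet NaN
      double b = 1.0;
      // Every ordered comparison involving NaN is false; this is the C-level
      // analogue of ucomisd reporting "unordered" (ZF=PF=CF=1).
      std::printf("%d %d %d\n", a < b, a == b, std::isunordered(a, b));
      return 0;  // prints "0 0 1"
    }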
7
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -816,8 +816,13 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
   __ push(other);
   __ push(receiver);  // receiver
   __ push(reg);  // holder
-  __ mov(other, Immediate(callback_handle));
-  __ push(FieldOperand(other, AccessorInfo::kDataOffset));  // data
+  // Push data from AccessorInfo.
+  if (Heap::InNewSpace(callback_handle->data())) {
+    __ mov(other, Immediate(callback_handle));
+    __ push(FieldOperand(other, AccessorInfo::kDataOffset));
+  } else {
+    __ push(Immediate(Handle<Object>(callback_handle->data())));
+  }
   __ push(name_reg);  // name
   // Save a pointer to where we pushed the arguments pointer.
   // This will be passed as the const AccessorInfo& to the C++ callback.
22
deps/v8/src/ic.cc

@@ -734,6 +734,28 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
       if (PatchInlinedLoad(address(), map, offset)) {
         set_target(megamorphic_stub());
         return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+#ifdef DEBUG
+        if (FLAG_trace_ic) {
+          PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
+        }
+      } else {
+        if (FLAG_trace_ic) {
+          PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
+                 *name->ToCString());
+        }
+      }
+    } else {
+      if (FLAG_trace_ic) {
+        PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
+               *name->ToCString());
+      }
+    }
+  } else {
+    if (FLAG_use_ic && state == PREMONOMORPHIC) {
+      if (FLAG_trace_ic) {
+        PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
+               *name->ToCString());
+#endif
       }
     }
   }
6
deps/v8/src/jsregexp.cc

@@ -1747,9 +1747,11 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
       if ((mask & char_mask) == char_mask) need_mask = false;
       mask &= char_mask;
     } else {
-      // For 2-character preloads in ASCII mode we also use a 16 bit load with
-      // zero extend.
+      // For 2-character preloads in ASCII mode or 1-character preloads in
+      // TWO_BYTE mode we also use a 16 bit load with zero extend.
       if (details->characters() == 2 && compiler->ascii()) {
+        if ((mask & 0x7f7f) == 0x7f7f) need_mask = false;
+      } else if (details->characters() == 1 && !compiler->ascii()) {
         if ((mask & 0xffff) == 0xffff) need_mask = false;
       } else {
         if (mask == 0xffffffff) need_mask = false;
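Two ascii characters loaded as one 16-bit unit carry only seven significant bits each, so a quick-check mask already covering 0x7f7f cannot reject anything further; in TWO_BYTE mode a single character fills all 16 bits, hence the 0xffff test. A standalone sketch of the packing in plain C++:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Two ascii chars packed little-endian into one 16-bit load.
      uint16_t pair = static_cast<uint16_t>('a') |
                      (static_cast<uint16_t>('b') << 8);
      // Ascii code units never set bit 7, so masking with 0x7f7f
      // loses nothing for a 2-character ascii preload.
      std::printf("0x%04x 0x%04x\n", pair, pair & 0x7f7f);  // identical values
      return 0;
    }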
2
deps/v8/src/objects-debug.cc

@@ -552,12 +552,14 @@ static const char* TypeToString(InstanceType type) {
     case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
     case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
     case EXTERNAL_ASCII_SYMBOL_TYPE:
+    case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
     case ASCII_STRING_TYPE: return "ASCII_STRING";
     case STRING_TYPE: return "TWO_BYTE_STRING";
     case CONS_STRING_TYPE:
     case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
     case EXTERNAL_ASCII_STRING_TYPE:
+    case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
19
deps/v8/src/objects-inl.h

@@ -237,31 +237,20 @@ bool StringShape::IsSymbol() {
 bool String::IsAsciiRepresentation() {
   uint32_t type = map()->instance_type();
-  if ((type & kStringRepresentationMask) == kConsStringTag &&
-      ConsString::cast(this)->second()->length() == 0) {
-    return ConsString::cast(this)->first()->IsAsciiRepresentation();
-  }
   return (type & kStringEncodingMask) == kAsciiStringTag;
 }

 bool String::IsTwoByteRepresentation() {
   uint32_t type = map()->instance_type();
-  if ((type & kStringRepresentationMask) == kConsStringTag &&
-      ConsString::cast(this)->second()->length() == 0) {
-    return ConsString::cast(this)->first()->IsTwoByteRepresentation();
-  }
   return (type & kStringEncodingMask) == kTwoByteStringTag;
 }

-bool String::IsExternalTwoByteStringWithAsciiChars() {
-  if (!IsExternalTwoByteString()) return false;
-  const uc16* data = ExternalTwoByteString::cast(this)->resource()->data();
-  for (int i = 0, len = length(); i < len; i++) {
-    if (data[i] > kMaxAsciiCharCode) return false;
-  }
-  return true;
+bool String::HasOnlyAsciiChars() {
+  uint32_t type = map()->instance_type();
+  return (type & kStringEncodingMask) == kAsciiStringTag ||
+         (type & kAsciiDataHintMask) == kAsciiDataHintTag;
 }
18
deps/v8/src/objects.cc

@@ -678,6 +678,9 @@ Object* String::SlowTryFlatten(PretenureFlag pretenure) {

 bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
+  // Externalizing twice leaks the external resouce, so it's
+  // prohibited by the API.
+  ASSERT(!this->IsExternalString());
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
@@ -697,13 +700,16 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
     return false;
   }
   ASSERT(size >= ExternalString::kSize);
+  bool is_ascii = this->IsAsciiRepresentation();
   bool is_symbol = this->IsSymbol();
   int length = this->length();
   int hash_field = this->hash_field();

   // Morph the object to an external string by adjusting the map and
   // reinitializing the fields.
-  this->set_map(Heap::external_string_map());
+  this->set_map(is_ascii ?
+                Heap::external_string_with_ascii_data_map() :
+                Heap::external_string_map());
   ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
   self->set_length(length);
   self->set_hash_field(hash_field);
@@ -713,7 +719,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   if (is_symbol) {
     self->Hash();  // Force regeneration of the hash value.
     // Now morph this external string into a external symbol.
-    this->set_map(Heap::external_symbol_map());
+    this->set_map(is_ascii ?
+                  Heap::external_symbol_with_ascii_data_map() :
+                  Heap::external_symbol_map());
   }

   // Fill the remainder of the string with dead wood.
@@ -8147,7 +8155,7 @@ Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
 template<typename Shape, typename Key>
 Object* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
-  int entry = FindEntry(key);
+  int entry = this->FindEntry(key);

   // If the entry is present set the value;
   if (entry != Dictionary<Shape, Key>::kNotFound) {
@@ -8172,7 +8180,7 @@ Object* Dictionary<Shape, Key>::Add(Key key,
                                     Object* value,
                                     PropertyDetails details) {
   // Valdate key is absent.
-  SLOW_ASSERT((FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
+  SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
   // Check whether the dictionary should be extended.
   Object* obj = EnsureCapacity(1, key);
   if (obj->IsFailure()) return obj;
@@ -8231,7 +8239,7 @@ Object* NumberDictionary::AddNumberEntry(uint32_t key,
                                          Object* value,
                                          PropertyDetails details) {
   UpdateMaxNumberKey(key);
-  SLOW_ASSERT(FindEntry(key) == kNotFound);
+  SLOW_ASSERT(this->FindEntry(key) == kNotFound);
   return Add(key, value, details);
 }
52
deps/v8/src/objects.h

@@ -320,6 +320,10 @@ enum PropertyNormalizationMode {
     ExternalTwoByteString::kSize, \
     external_symbol, \
     ExternalSymbol) \
+  V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
+    ExternalTwoByteString::kSize, \
+    external_symbol_with_ascii_data, \
+    ExternalSymbolWithAsciiData) \
   V(EXTERNAL_ASCII_SYMBOL_TYPE, \
     ExternalAsciiString::kSize, \
     external_ascii_symbol, \
@@ -344,6 +348,10 @@ enum PropertyNormalizationMode {
     ExternalTwoByteString::kSize, \
     external_string, \
     ExternalString) \
+  V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
+    ExternalTwoByteString::kSize, \
+    external_string_with_ascii_data, \
+    ExternalStringWithAsciiData) \
   V(EXTERNAL_ASCII_STRING_TYPE, \
     ExternalAsciiString::kSize, \
     external_ascii_string, \
@@ -412,6 +420,11 @@ enum StringRepresentationTag {
 };
 const uint32_t kIsConsStringMask = 0x1;

+// If bit 7 is clear, then bit 3 indicates whether this two-byte
+// string actually contains ascii data.
+const uint32_t kAsciiDataHintMask = 0x08;
+const uint32_t kAsciiDataHintTag = 0x08;
+
 // A ConsString with an empty string as the right side is a candidate
 // for being shortcut by the garbage collector unless it is a
@@ -427,18 +440,22 @@ const uint32_t kShortcutTypeTag = kConsStringTag;

 enum InstanceType {
   // String types.
-  SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
+  SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
   ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
-  CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
+  CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
   CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
-  EXTERNAL_SYMBOL_TYPE = kSymbolTag | kExternalStringTag,
+  EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
+  EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
+      kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
   EXTERNAL_ASCII_SYMBOL_TYPE =
       kAsciiStringTag | kSymbolTag | kExternalStringTag,
-  STRING_TYPE = kSeqStringTag,
+  STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
   ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
-  CONS_STRING_TYPE = kConsStringTag,
+  CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
   CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
-  EXTERNAL_STRING_TYPE = kExternalStringTag,
+  EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
+  EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+      kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
   EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
   PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
@@ -474,10 +491,12 @@ enum InstanceType {
   TYPE_SWITCH_INFO_TYPE,
   SCRIPT_TYPE,
   CODE_CACHE_TYPE,
-#ifdef ENABLE_DEBUGGER_SUPPORT
+  // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
+  // is defined. However as include/v8.h contain some of the instance type
+  // constants always having them avoids them getting different numbers
+  // depending on whether ENABLE_DEBUGGER_SUPPORT is defined or not.
   DEBUG_INFO_TYPE,
   BREAK_POINT_INFO_TYPE,
-#endif
   FIXED_ARRAY_TYPE,
   SHARED_FUNCTION_INFO_TYPE,
@@ -511,6 +530,11 @@ enum InstanceType {
 };

+STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
+STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
+STATIC_CHECK(PROXY_TYPE == Internals::kProxyType);
+
 enum CompareResult {
   LESS = -1,
   EQUAL = 0,
@@ -4069,12 +4093,14 @@ class String: public HeapObject {
   inline bool IsAsciiRepresentation();
   inline bool IsTwoByteRepresentation();

-  // Check whether this string is an external two-byte string that in
-  // fact contains only ascii characters.
+  // Returns whether this string has ascii chars, i.e. all of them can
+  // be ascii encoded.  This might be the case even if the string is
+  // two-byte.  Such strings may appear when the embedder prefers
+  // two-byte external representations even for ascii data.
   //
-  // Such strings may appear when the embedder prefers two-byte
-  // representations even for ascii data.
-  inline bool IsExternalTwoByteStringWithAsciiChars();
+  // NOTE: this should be considered only a hint.  False negatives are
+  // possible.
+  inline bool HasOnlyAsciiChars();

   // Get and set individual two byte chars in the string.
   inline void Set(int index, uint16_t value);
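The string instance type is a small bit field (encoding, representation, symbol-ness, and now the ascii-data hint in bit 3), so HasOnlyAsciiChars is two mask tests. A standalone sketch using the constants from this patch; the kStringEncodingMask/kAsciiStringTag values are assumptions consistent with this era of V8:

    #include <cstdint>
    #include <cstdio>

    const uint32_t kStringEncodingMask = 0x04;  // assumption
    const uint32_t kAsciiStringTag = 0x04;      // assumption
    const uint32_t kTwoByteStringTag = 0x00;    // assumption
    const uint32_t kAsciiDataHintMask = 0x08;   // from the patch
    const uint32_t kAsciiDataHintTag = 0x08;    // from the patch

    // Mirrors String::HasOnlyAsciiChars(): true for ascii strings, and for
    // two-byte strings whose map carries the ascii-data hint.
    static bool HasOnlyAsciiChars(uint32_t type) {
      return (type & kStringEncodingMask) == kAsciiStringTag ||
             (type & kAsciiDataHintMask) == kAsciiDataHintTag;
    }

    int main() {
      uint32_t two_byte = kTwoByteStringTag;                  // plain two-byte
      uint32_t hinted = kTwoByteStringTag | kAsciiDataHintTag;
      std::printf("%d %d\n", HasOnlyAsciiChars(two_byte),
                  HasOnlyAsciiChars(hinted));  // prints "0 1"
      return 0;
    }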
47
deps/v8/src/profile-generator.cc

@@ -818,7 +818,7 @@ HeapGraphEdge::HeapGraphEdge(Type type,
                              HeapEntry* from,
                              HeapEntry* to)
     : type_(type), name_(name), from_(from), to_(to) {
-  ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+  ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
 }
@@ -845,26 +845,30 @@ HeapEntry::~HeapEntry() {
 }

-void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
-  HeapGraphEdge* edge =
-      new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry);
+void HeapEntry::AddEdge(HeapGraphEdge* edge) {
   children_.Add(edge);
-  entry->retainers_.Add(edge);
+  edge->to()->retainers_.Add(edge);
+}
+
+void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
+  AddEdge(
+      new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry));
 }

 void HeapEntry::SetElementReference(int index, HeapEntry* entry) {
-  HeapGraphEdge* edge = new HeapGraphEdge(index, this, entry);
-  children_.Add(edge);
-  entry->retainers_.Add(edge);
+  AddEdge(new HeapGraphEdge(index, this, entry));
+}
+
+void HeapEntry::SetInternalReference(const char* name, HeapEntry* entry) {
+  AddEdge(new HeapGraphEdge(HeapGraphEdge::INTERNAL, name, this, entry));
 }

 void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) {
-  HeapGraphEdge* edge =
-      new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry);
-  children_.Add(edge);
-  entry->retainers_.Add(edge);
+  AddEdge(new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry));
 }
@@ -1074,7 +1078,7 @@ void HeapEntry::CutEdges() {
 void HeapEntry::Print(int max_depth, int indent) {
-  OS::Print("%6d %6d %6d", self_size_, TotalSize(), NonSharedTotalSize());
+  OS::Print("%6d %6d %6d ", self_size_, TotalSize(), NonSharedTotalSize());
   if (type_ != STRING) {
     OS::Print("%s %.40s\n", TypeAsString(), name_);
   } else {
@@ -1100,6 +1104,9 @@ void HeapEntry::Print(int max_depth, int indent) {
       case HeapGraphEdge::ELEMENT:
         OS::Print("  %*c %d: ", indent, ' ', edge->index());
         break;
+      case HeapGraphEdge::INTERNAL:
+        OS::Print("  %*c $%s: ", indent, ' ', edge->name());
+        break;
      case HeapGraphEdge::PROPERTY:
        OS::Print("  %*c %s: ", indent, ' ', edge->name());
        break;
@@ -1145,6 +1152,9 @@ void HeapGraphPath::Print() {
       case HeapGraphEdge::ELEMENT:
         OS::Print("[%d] ", edge->index());
         break;
+      case HeapGraphEdge::INTERNAL:
+        OS::Print("[$%s] ", edge->name());
+        break;
       case HeapGraphEdge::PROPERTY:
         OS::Print("[%s] ", edge->name());
         break;
@@ -1318,6 +1328,16 @@ void HeapSnapshot::SetElementReference(HeapEntry* parent,
 }

+void HeapSnapshot::SetInternalReference(HeapEntry* parent,
+                                        const char* reference_name,
+                                        Object* child) {
+  HeapEntry* child_entry = GetEntry(child);
+  if (child_entry != NULL) {
+    parent->SetInternalReference(reference_name, child_entry);
+  }
+}
+
 void HeapSnapshot::SetPropertyReference(HeapEntry* parent,
                                         String* reference_name,
                                         Object* child) {
@@ -1546,6 +1566,7 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
         snapshot_->SetClosureReference(entry, local_name, context->get(idx));
       }
     }
+    snapshot_->SetInternalReference(entry, "code", func->shared());
   }
 }
9
deps/v8/src/profile-generator.h

@@ -431,7 +431,8 @@ class HeapGraphEdge {
   enum Type {
     CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE,
     ELEMENT = v8::HeapGraphEdge::ELEMENT,
-    PROPERTY = v8::HeapGraphEdge::PROPERTY
+    PROPERTY = v8::HeapGraphEdge::PROPERTY,
+    INTERNAL = v8::HeapGraphEdge::INTERNAL
   };

   HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
@@ -443,7 +444,7 @@ class HeapGraphEdge {
     return index_;
   }
   const char* name() const {
-    ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+    ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
     return name_;
   }
   HeapEntry* from() const { return from_; }
@@ -533,6 +534,7 @@ class HeapEntry {
   void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
   void SetClosureReference(const char* name, HeapEntry* entry);
   void SetElementReference(int index, HeapEntry* entry);
+  void SetInternalReference(const char* name, HeapEntry* entry);
   void SetPropertyReference(const char* name, HeapEntry* entry);
   void SetAutoIndexReference(HeapEntry* entry);
@@ -542,6 +544,7 @@ class HeapEntry {
   void Print(int max_depth, int indent);

  private:
+  void AddEdge(HeapGraphEdge* edge);
   int CalculateTotalSize();
   int CalculateNonSharedTotalSize();
   void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
@@ -641,6 +644,8 @@ class HeapSnapshot {
   void SetClosureReference(
       HeapEntry* parent, String* reference_name, Object* child);
   void SetElementReference(HeapEntry* parent, int index, Object* child);
+  void SetInternalReference(
+      HeapEntry* parent, const char* reference_name, Object* child);
   void SetPropertyReference(
       HeapEntry* parent, String* reference_name, Object* child);

47
deps/v8/src/runtime.cc

@ -4946,16 +4946,6 @@ static Object* ConvertCaseHelper(String* s,
} }
static inline SeqAsciiString* TryGetSeqAsciiString(String* s) {
if (!s->IsFlat() || !s->IsAsciiRepresentation()) return NULL;
if (s->IsConsString()) {
ASSERT(ConsString::cast(s)->second()->length() == 0);
return SeqAsciiString::cast(ConsString::cast(s)->first());
}
return SeqAsciiString::cast(s);
}
namespace { namespace {
struct ToLowerTraits { struct ToLowerTraits {
@ -5002,7 +4992,7 @@ static Object* ConvertCase(
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) { unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
NoHandleAllocation ha; NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]); CONVERT_CHECKED(String, s, args[0]);
s->TryFlatten(); s = s->TryFlattenGetString();
const int length = s->length(); const int length = s->length();
// Assume that the string is not empty; we need this assumption later // Assume that the string is not empty; we need this assumption later
@ -5014,13 +5004,12 @@ static Object* ConvertCase(
// character is also ascii. This is currently the case, but it // character is also ascii. This is currently the case, but it
// might break in the future if we implement more context and locale // might break in the future if we implement more context and locale
// dependent upper/lower conversions. // dependent upper/lower conversions.
SeqAsciiString* seq_ascii = TryGetSeqAsciiString(s); if (s->IsSeqAsciiString()) {
if (seq_ascii != NULL) {
Object* o = Heap::AllocateRawAsciiString(length); Object* o = Heap::AllocateRawAsciiString(length);
if (o->IsFailure()) return o; if (o->IsFailure()) return o;
SeqAsciiString* result = SeqAsciiString::cast(o); SeqAsciiString* result = SeqAsciiString::cast(o);
bool has_changed_character = ConvertTraits::ConvertAscii( bool has_changed_character = ConvertTraits::ConvertAscii(
result->GetChars(), seq_ascii->GetChars(), length); result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
return has_changed_character ? result : s; return has_changed_character ? result : s;
} }
@ -5564,7 +5553,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
if (first->IsString()) return first; if (first->IsString()) return first;
} }
bool ascii = special->IsAsciiRepresentation(); bool ascii = special->HasOnlyAsciiChars();
int position = 0; int position = 0;
for (int i = 0; i < array_length; i++) { for (int i = 0; i < array_length; i++) {
int increment = 0; int increment = 0;
@ -5605,7 +5594,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
String* element = String::cast(elt); String* element = String::cast(elt);
int element_length = element->length(); int element_length = element->length();
increment = element_length; increment = element_length;
if (ascii && !element->IsAsciiRepresentation()) { if (ascii && !element->HasOnlyAsciiChars()) {
ascii = false; ascii = false;
} }
} else { } else {
@ -9061,7 +9050,7 @@ static Object* Runtime_SetFunctionBreakPoint(Arguments args) {
Handle<Object> break_point_object_arg = args.at<Object>(2); Handle<Object> break_point_object_arg = args.at<Object>(2);
// Set break point. // Set break point.
Debug::SetBreakPoint(shared, source_position, break_point_object_arg); Debug::SetBreakPoint(shared, break_point_object_arg, &source_position);
return Heap::undefined_value(); return Heap::undefined_value();
} }
@ -9081,8 +9070,6 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
// The current candidate for the source position: // The current candidate for the source position:
int target_start_position = RelocInfo::kNoPosition; int target_start_position = RelocInfo::kNoPosition;
Handle<SharedFunctionInfo> target; Handle<SharedFunctionInfo> target;
// The current candidate for the last function in script:
Handle<SharedFunctionInfo> last;
while (!done) { while (!done) {
HeapIterator iterator; HeapIterator iterator;
for (HeapObject* obj = iterator.next(); for (HeapObject* obj = iterator.next();
@@ -9123,26 +9110,13 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
} }
} }
} }
// Keep track of the last function in the script.
if (last.is_null() ||
shared->end_position() > last->start_position()) {
last = shared;
}
} }
} }
} }
// Make sure some candidate is selected.
if (target.is_null()) { if (target.is_null()) {
if (!last.is_null()) {
// Position after the last function - use last.
target = last;
} else {
// Unable to find function - possibly script without any function.
return Heap::undefined_value(); return Heap::undefined_value();
} }
}
// If the candidate found is compiled we are done. NOTE: when lazy // If the candidate found is compiled we are done. NOTE: when lazy
// compilation of inner functions is introduced some additional checking // compilation of inner functions is introduced some additional checking
@@ -9159,8 +9133,9 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
} }
// Change the state of a break point in a script. NOTE: Regarding performance // Changes the state of a break point in a script and returns source position
// see the NOTE for GetScriptFromScriptData. // where break point was set. NOTE: Regarding performance see the NOTE for
// GetScriptFromScriptData.
// args[0]: script to set break point in // args[0]: script to set break point in
// args[1]: number: break source position (within the script source) // args[1]: number: break source position (within the script source)
// args[2]: number: break point object // args[2]: number: break point object
@@ -9188,7 +9163,9 @@ static Object* Runtime_SetScriptBreakPoint(Arguments args) {
} else { } else {
position = source_position - shared->start_position(); position = source_position - shared->start_position();
} }
Debug::SetBreakPoint(shared, position, break_point_object_arg); Debug::SetBreakPoint(shared, break_point_object_arg, &position);
position += shared->start_position();
return Smi::FromInt(position);
} }
return Heap::undefined_value(); return Heap::undefined_value();
} }
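Both call sites now share one in/out convention: the requested position goes in by pointer, Debug::SetBreakPoint adjusts it to the position actually used, and the runtime function reports that back as a smi. A condensed sketch of the contract, using only names from the hunks above:

// position is function-relative on entry; SetBreakPoint may move it to
// the nearest breakable location and writes the adjusted value back.
int position = source_position - shared->start_position();
Debug::SetBreakPoint(shared, break_point_object_arg, &position);
return Smi::FromInt(position + shared->start_position());  // script-relative again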

3
deps/v8/src/scanner.cc

@@ -200,6 +200,7 @@ void ExternalStringUTF16Buffer<StringType, CharType>::SeekForward(int pos) {
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Keyword Matcher // Keyword Matcher
KeywordMatcher::FirstState KeywordMatcher::first_states_[] = { KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
{ "break", KEYWORD_PREFIX, Token::BREAK }, { "break", KEYWORD_PREFIX, Token::BREAK },
{ NULL, C, Token::ILLEGAL }, { NULL, C, Token::ILLEGAL },
@@ -335,7 +336,7 @@ void KeywordMatcher::Step(uc32 input) {
// Scanner // Scanner
Scanner::Scanner(ParserMode pre) Scanner::Scanner(ParserMode pre)
: stack_overflow_(false), is_pre_parsing_(pre == PREPARSE) { } : is_pre_parsing_(pre == PREPARSE), stack_overflow_(false) { }
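The swap is not cosmetic: C++ runs mem-initializers in member declaration order, and after the field reshuffle in scanner.h below, is_pre_parsing_ is declared before stack_overflow_. Keeping the list in declaration order avoids g++'s -Wreorder warning. A minimal illustration (hypothetical class):

class Example {
 public:
  // -Wreorder warns here: first_ is initialized before second_ no matter
  // how the list is written, because initialization follows declaration order.
  Example() : second_(2), first_(1) {}
 private:
  int first_;
  int second_;
};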
void Scanner::Initialize(Handle<String> source, void Scanner::Initialize(Handle<String> source,

109
deps/v8/src/scanner.h

@@ -154,7 +154,12 @@ class KeywordMatcher {
// *: Actually "future reserved keywords". These are the only ones we // *: Actually "future reserved keywords". These are the only ones we
// recognized, the remaining are allowed as identifiers. // recognized, the remaining are allowed as identifiers.
public: public:
KeywordMatcher() : state_(INITIAL), token_(Token::IDENTIFIER) {} KeywordMatcher()
: state_(INITIAL),
token_(Token::IDENTIFIER),
keyword_(NULL),
counter_(0),
keyword_token_(Token::ILLEGAL) {}
Token::Value token() { return token_; } Token::Value token() { return token_; }
@@ -206,17 +211,6 @@ class KeywordMatcher {
// State map for first keyword character range. // State map for first keyword character range.
static FirstState first_states_[kFirstCharRangeLength]; static FirstState first_states_[kFirstCharRangeLength];
// Current state.
State state_;
// Token for currently added characters.
Token::Value token_;
// Matching a specific keyword string (there is only one possible valid
// keyword with the current prefix).
const char* keyword_;
int counter_;
Token::Value keyword_token_;
// If input equals keyword's character at position, continue matching keyword // If input equals keyword's character at position, continue matching keyword
// from that position. // from that position.
inline bool MatchKeywordStart(uc32 input, inline bool MatchKeywordStart(uc32 input,
@@ -246,15 +240,26 @@ class KeywordMatcher {
char match, char match,
State new_state, State new_state,
Token::Value keyword_token) { Token::Value keyword_token) {
if (input == match) { // Matched "do". if (input != match) {
return false;
}
state_ = new_state; state_ = new_state;
token_ = keyword_token; token_ = keyword_token;
return true; return true;
} }
return false;
}
void Step(uc32 input); void Step(uc32 input);
// Current state.
State state_;
// Token for currently added characters.
Token::Value token_;
// Matching a specific keyword string (there is only one possible valid
// keyword with the current prefix).
const char* keyword_;
int counter_;
Token::Value keyword_token_;
}; };
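With every field now initialized in the constructor, the matcher can be driven safely from any starting state. A hedged usage sketch (AddChar is assumed to be the public wrapper that forwards to Step() while a match is still possible):

KeywordMatcher matcher;
for (const char* p = "break"; *p != '\0'; p++) {
  matcher.AddChar(*p);             // advances the keyword state machine
}
Token::Value t = matcher.token();  // Token::BREAK here; any non-keyword
                                   // input would leave Token::IDENTIFIER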
@@ -362,37 +367,6 @@ class Scanner {
static const int kNoEndPosition = 1; static const int kNoEndPosition = 1;
private: private:
void Init(Handle<String> source,
unibrow::CharacterStream* stream,
int start_position, int end_position,
ParserLanguage language);
// Different UTF16 buffers used to pull characters from. Based on input one of
// these will be initialized as the actual data source.
CharacterStreamUTF16Buffer char_stream_buffer_;
ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
two_byte_string_buffer_;
ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
// Source. Will point to one of the buffers declared above.
UTF16Buffer* source_;
// Used to convert the source string into a character stream when a stream
// is not passed to the scanner.
SafeStringInputBuffer safe_string_input_buffer_;
// Buffer to hold literal values (identifiers, strings, numbers)
// using 0-terminated UTF-8 encoding.
UTF8Buffer literal_buffer_1_;
UTF8Buffer literal_buffer_2_;
bool stack_overflow_;
static StaticResource<Utf8Decoder> utf8_decoder_;
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
// The current and look-ahead token. // The current and look-ahead token.
struct TokenDesc { struct TokenDesc {
Token::Value token; Token::Value token;
@@ -400,11 +374,10 @@ class Scanner {
UTF8Buffer* literal_buffer; UTF8Buffer* literal_buffer;
}; };
TokenDesc current_; // desc for current token (as returned by Next()) void Init(Handle<String> source,
TokenDesc next_; // desc for next token (one token look-ahead) unibrow::CharacterStream* stream,
bool has_line_terminator_before_next_; int start_position, int end_position,
bool is_pre_parsing_; ParserLanguage language);
bool is_parsing_json_;
// Literal buffer support // Literal buffer support
void StartLiteral(); void StartLiteral();
@@ -426,6 +399,7 @@ class Scanner {
return SkipJavaScriptWhiteSpace(); return SkipJavaScriptWhiteSpace();
} }
} }
bool SkipJavaScriptWhiteSpace(); bool SkipJavaScriptWhiteSpace();
bool SkipJsonWhiteSpace(); bool SkipJsonWhiteSpace();
Token::Value SkipSingleLineComment(); Token::Value SkipSingleLineComment();
@@ -460,11 +434,13 @@ class Scanner {
// the integer part is zero), and may include an exponent part (e.g., "e-10"). // the integer part is zero), and may include an exponent part (e.g., "e-10").
// Hexadecimal and octal numbers are not allowed. // Hexadecimal and octal numbers are not allowed.
Token::Value ScanJsonNumber(); Token::Value ScanJsonNumber();
// A JSON string (production JSONString) is a subset of valid JavaScript string // A JSON string (production JSONString) is a subset of valid JavaScript string
// literals. The string must only be double-quoted (not single-quoted), and // literals. The string must only be double-quoted (not single-quoted), and
// the only allowed backslash-escapes are ", /, \, b, f, n, r, t and // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
// four-digit hex escapes (uXXXX). Any other use of backslashes is invalid. // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
Token::Value ScanJsonString(); Token::Value ScanJsonString();
// Used to recognize one of the literals "true", "false", or "null". These // Used to recognize one of the literals "true", "false", or "null". These
// are the only valid JSON identifiers (productions JSONBooleanLiteral, // are the only valid JSON identifiers (productions JSONBooleanLiteral,
// JSONNullLiteral). // JSONNullLiteral).
@@ -489,6 +465,37 @@ class Scanner {
// Decodes a unicode escape-sequence which is part of an identifier. // Decodes a unicode escape-sequence which is part of an identifier.
// If the escape sequence cannot be decoded the result is kBadRune. // If the escape sequence cannot be decoded the result is kBadRune.
uc32 ScanIdentifierUnicodeEscape(); uc32 ScanIdentifierUnicodeEscape();
TokenDesc current_; // desc for current token (as returned by Next())
TokenDesc next_; // desc for next token (one token look-ahead)
bool has_line_terminator_before_next_;
bool is_pre_parsing_;
bool is_parsing_json_;
// Different UTF16 buffers used to pull characters from. Based on input one of
// these will be initialized as the actual data source.
CharacterStreamUTF16Buffer char_stream_buffer_;
ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
two_byte_string_buffer_;
ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
// Source. Will point to one of the buffers declared above.
UTF16Buffer* source_;
// Used to convert the source string into a character stream when a stream
// is not passed to the scanner.
SafeStringInputBuffer safe_string_input_buffer_;
// Buffer to hold literal values (identifiers, strings, numbers)
// using 0-terminated UTF-8 encoding.
UTF8Buffer literal_buffer_1_;
UTF8Buffer literal_buffer_2_;
bool stack_overflow_;
static StaticResource<Utf8Decoder> utf8_decoder_;
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
}; };
} } // namespace v8::internal } } // namespace v8::internal

12
deps/v8/src/scopeinfo.cc

@@ -407,6 +407,18 @@ int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) {
} }
template<class Allocator>
bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Code* code) {
if (code->sinfo_size() > 0) {
Object** p = ContextEntriesAddr(code);
int n; // number of context slots;
ReadInt(p, &n);
return n > 0;
}
return false;
}
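The query only decodes the leading context-slot count from the serialized scope info, so it is cheap enough to call per function, presumably from the debugger code in debug.cc touched by this commit. A sketch of the intended use (default allocator; the exact call site is an assumption):

Code* code = shared->code();
if (ScopeInfo<>::HasHeapAllocatedLocals(code)) {
  // At least one local escaped to a heap-allocated context, so the
  // debugger must materialize context slots, not just stack slots.
}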
template<class Allocator> template<class Allocator>
int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) { int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
ASSERT(name->IsSymbol()); ASSERT(name->IsSymbol());

3
deps/v8/src/scopeinfo.h

@@ -112,6 +112,9 @@ class ScopeInfo BASE_EMBEDDED {
// Return the number of context slots for code. // Return the number of context slots for code.
static int NumberOfContextSlots(Code* code); static int NumberOfContextSlots(Code* code);
// Return true if this has context slots besides MIN_CONTEXT_SLOTS.
static bool HasHeapAllocatedLocals(Code* code);
// Lookup support for scope info embedded in Code objects. Returns // Lookup support for scope info embedded in Code objects. Returns
// the stack slot index for a given slot name if the slot is // the stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be a symbol // present; otherwise returns a value < 0. The name must be a symbol

1
deps/v8/src/stub-cache.cc

@@ -1105,6 +1105,7 @@ Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
Code::Kind kind = Code::ExtractKindFromFlags(flags); Code::Kind kind = Code::ExtractKindFromFlags(flags);
USE(kind);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG), PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
code, code->arguments_count())); code, code->arguments_count()));
} }
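When profiling support is compiled out, PROFILE(...) expands to nothing and kind becomes an unused variable; USE(kind) is V8's conventional way to silence the resulting warning. Roughly how the helper is defined (sketch of the globals.h idiom of this vintage):

// Consumes (and ignores) a value so warnings-as-errors builds don't flag it.
template <typename T>
static inline void USE(T) { }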

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script. // cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2 #define MAJOR_VERSION 2
#define MINOR_VERSION 2 #define MINOR_VERSION 2
#define BUILD_NUMBER 18 #define BUILD_NUMBER 19
#define PATCH_LEVEL 0 #define PATCH_LEVEL 0
#define CANDIDATE_VERSION false #define CANDIDATE_VERSION false

5
deps/v8/src/x64/assembler-x64-inl.h

@@ -35,16 +35,11 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Implementation of Assembler // Implementation of Assembler
void Assembler::emitl(uint32_t x) { void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x; Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t); pc_ += sizeof(uint32_t);

25
deps/v8/src/x64/assembler-x64.cc

@@ -382,6 +382,11 @@ void Assembler::Align(int m) {
} }
void Assembler::CodeTargetAlign() {
Align(16); // Preferred alignment of jump targets on x64.
}
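Align(16) pads the instruction stream with nops until the pc offset reaches the next multiple of 16, the jump-target alignment the x64 optimization manuals recommend. A usage sketch (the loop label is hypothetical):

masm->CodeTargetAlign();   // pad to the next 16-byte boundary
masm->bind(&loop_header);  // backward jumps now land on an aligned target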
void Assembler::bind_to(Label* L, int pos) { void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once. ASSERT(!L->is_bound()); // Label may only be bound once.
last_pc_ = NULL; last_pc_ = NULL;
@@ -1148,6 +1153,15 @@ void Assembler::incl(const Operand& dst) {
} }
void Assembler::incl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_modrm(0, dst);
}
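This is the standard FF /0 encoding of INC r/m32, with a REX prefix emitted only when the register requires one. Worked byte sequences under the usual ModRM layout:

// incl(rax): ModRM(mod=11, /0, rax) = 0xC0  ->  FF C0
// incl(r8):  r8 needs REX.B (0x41)          ->  41 FF C0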
void Assembler::int3() { void Assembler::int3() {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@@ -2738,17 +2752,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
} }
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
emit(0x2f);
emit_sse_operand(dst, src);
}
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;

10
deps/v8/src/x64/assembler-x64.h

@@ -215,7 +215,10 @@ enum Condition {
// Negation of the default no_condition (-1) results in a non-default // Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check // no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected. // for condition < 0, this will work as expected.
inline Condition NegateCondition(Condition cc); inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
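x86 condition encodings come in adjacent pairs whose low bit selects the negated test, which is why a single XOR suffices. Worked values:

// equal = 0x4 (ZF)  -> NegateCondition -> not_equal   = 0x5
// below = 0x2 (CF)  -> NegateCondition -> above_equal = 0x3
// no_condition (-1) -> -1 ^ 1 == -2, still negative as the comment above requires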
// Corresponds to transposing the operands of a comparison. // Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cc) { inline Condition ReverseCondition(Condition cc) {
@@ -241,6 +244,7 @@ inline Condition ReverseCondition(Condition cc) {
}; };
} }
enum Hint { enum Hint {
no_hint = 0, no_hint = 0,
not_taken = 0x2e, not_taken = 0x2e,
@@ -495,6 +499,8 @@ class Assembler : public Malloced {
// possible to align the pc offset to a multiple // possible to align the pc offset to a multiple
// of m. m must be a power of 2. // of m. m must be a power of 2.
void Align(int m); void Align(int m);
// Aligns code on a boundary that is optimal for jump targets on this platform. // Aligns code on a boundary that is optimal for jump targets on this platform.
void CodeTargetAlign();
// Stack // Stack
void pushfq(); void pushfq();
@@ -761,6 +767,7 @@ class Assembler : public Malloced {
void incq(Register dst); void incq(Register dst);
void incq(const Operand& dst); void incq(const Operand& dst);
void incl(Register dst);
void incl(const Operand& dst); void incl(const Operand& dst);
void lea(Register dst, const Operand& src); void lea(Register dst, const Operand& src);
@@ -1122,7 +1129,6 @@ class Assembler : public Malloced {
void xorpd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src);
void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field. // The first argument is the reg field, the second argument is the r/m field.

533
deps/v8/src/x64/codegen-x64.cc

@@ -262,63 +262,23 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
class FloatingPointHelper : public AllStatic { class FloatingPointHelper : public AllStatic {
public: public:
// Code pattern for loading a floating point value. Input value must // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
// be either a smi or a heap number object (fp value). Requirements: // If the operands are not both numbers, jump to not_numbers.
// operand on TOS+1. Returns operand as floating point number on FPU // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// stack. // NumberOperands assumes both are smis or heap numbers.
static void LoadFloatOperand(MacroAssembler* masm, Register scratch); static void LoadSSE2SmiOperands(MacroAssembler* masm);
static void LoadSSE2NumberOperands(MacroAssembler* masm);
// Code pattern for loading a floating point value. Input value must static void LoadSSE2UnknownOperands(MacroAssembler* masm,
// be either a smi or a heap number object (fp value). Requirements: Label* not_numbers);
// operand in src register. Returns operand as floating point number
// in XMM register. May destroy src register.
static void LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst);
// Code pattern for loading a possible number into a XMM register.
// If the contents of src is not a number, control branches to
// the Label not_number. If contents of src is a smi or a heap number
// object (fp value), it is loaded into the XMM register as a double.
// The register src is not changed, and src may not be kScratchRegister.
static void LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst,
Label *not_number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
// operand_1 in rdx, operand_2 in rax; Returns operands as
// floating point numbers in XMM registers.
static void LoadFloatOperands(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2);
// Similar to LoadFloatOperands, assumes that the operands are smis.
static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2);
// Code pattern for loading floating point values onto the fp stack.
// Input values must be either smi or heap number objects (fp values).
// Requirements:
// Register version: operands in registers lhs and rhs.
// Stack version: operands on TOS+1 and TOS+2.
// Returns operands as floating point numbers on fp stack.
static void LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs);
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in rax, operand_2 in rdx; falls through on float or smi
// operands, jumps to the non_float label otherwise.
static void CheckNumberOperands(MacroAssembler* masm,
Label* non_float);
// Takes the operands in rdx and rax and loads them as integers in rax // Takes the operands in rdx and rax and loads them as integers in rax
// and rcx. // and rcx.
static void LoadAsIntegers(MacroAssembler* masm, static void LoadAsIntegers(MacroAssembler* masm,
Label* operand_conversion_failure); Label* operand_conversion_failure,
Register heap_number_map);
// As above, but we know the operands to be numbers. In that case,
// conversion can't fail.
static void LoadNumbersAsIntegers(MacroAssembler* masm);
}; };
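All three loaders share one register contract, operands in rdx/rax and doubles out in xmm0/xmm1, so call sites can follow them directly with an SSE2 op. A sketch of the typical comparison pattern used later in this file:

Label not_numbers;
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &not_numbers);
__ ucomisd(xmm0, xmm1);           // quiet compare: sets PF instead of faulting on NaN
__ j(parity_even, &not_numbers);  // unordered result means a NaN was involved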
@@ -4429,7 +4389,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3. // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3); __ addsd(xmm2, xmm3);
// xmm2 now has 0.5. // xmm2 now has 0.5.
__ comisd(xmm2, xmm1); __ ucomisd(xmm2, xmm1);
call_runtime.Branch(not_equal); call_runtime.Branch(not_equal);
// Calculates square root. // Calculates square root.
@@ -4769,8 +4729,8 @@ void DeferredSearchCache::Generate() {
__ cmpq(ArrayElement(cache_, dst_), key_); __ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &first_loop); __ j(not_equal, &first_loop);
__ Integer32ToSmi(scratch_, dst_); __ Integer32ToSmiField(
__ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_); FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ movq(dst_, ArrayElement(cache_, dst_, 1)); __ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label()); __ jmp(exit_label());
@@ -4791,8 +4751,8 @@ void DeferredSearchCache::Generate() {
__ cmpq(ArrayElement(cache_, dst_), key_); __ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &second_loop); __ j(not_equal, &second_loop);
__ Integer32ToSmi(scratch_, dst_); __ Integer32ToSmiField(
__ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_); FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ movq(dst_, ArrayElement(cache_, dst_, 1)); __ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label()); __ jmp(exit_label());
@@ -4814,50 +4774,50 @@ void DeferredSearchCache::Generate() {
// cache miss this optimization would hardly matter much. // cache miss this optimization would hardly matter much.
// Check if we could add new entry to cache. // Check if we could add new entry to cache.
__ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset)); __ SmiToInteger32(r9,
__ SmiCompare(rbx, r9); FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
__ cmpl(rbx, r9);
__ j(greater, &add_new_entry); __ j(greater, &add_new_entry);
// Check if we could evict entry after finger. // Check if we could evict entry after finger.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); __ SmiToInteger32(rdx,
__ SmiToInteger32(rdx, rdx); FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ SmiToInteger32(rbx, rbx); __ addl(rdx, kEntrySizeImm);
__ addq(rdx, kEntrySizeImm);
Label forward; Label forward;
__ cmpq(rbx, rdx); __ cmpl(rbx, rdx);
__ j(greater, &forward); __ j(greater, &forward);
// Need to wrap over the cache. // Need to wrap over the cache.
__ movl(rdx, kEntriesIndexImm); __ movl(rdx, kEntriesIndexImm);
__ bind(&forward); __ bind(&forward);
__ Integer32ToSmi(r9, rdx); __ movl(r9, rdx);
__ jmp(&update_cache); __ jmp(&update_cache);
__ bind(&add_new_entry); __ bind(&add_new_entry);
// r9 holds cache size as smi. // r9 holds cache size as int32.
__ SmiToInteger32(rdx, r9); __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
__ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize)); __ Integer32ToSmiField(
__ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx); FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
// Update the cache itself. // Update the cache itself.
// rdx holds the index as int. // r9 holds the index as int32.
// r9 holds the index as smi.
__ bind(&update_cache); __ bind(&update_cache);
__ pop(rbx); // restore the key __ pop(rbx); // restore the key
__ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9); __ Integer32ToSmiField(
FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
// Store key. // Store key.
__ movq(ArrayElement(rcx, rdx), rbx); __ movq(ArrayElement(rcx, r9), rbx);
__ RecordWrite(rcx, 0, rbx, r9); __ RecordWrite(rcx, 0, rbx, r9);
// Store value. // Store value.
__ pop(rcx); // restore the cache. __ pop(rcx); // restore the cache.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); __ SmiToInteger32(rdx,
__ SmiAddConstant(rdx, rdx, Smi::FromInt(1)); FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ movq(r9, rdx); __ incl(rdx);
__ SmiToInteger32(rdx, rdx); // Backup rax, because the RecordWrite macro clobbers its arguments.
__ movq(rbx, rax); __ movq(rbx, rax);
__ movq(ArrayElement(rcx, rdx), rbx); __ movq(ArrayElement(rcx, rdx), rax);
__ RecordWrite(rcx, 0, rbx, r9); __ RecordWrite(rcx, 0, rbx, rdx);
if (!dst_.is(rax)) { if (!dst_.is(rax)) {
__ movq(dst_, rax); __ movq(dst_, rax);
@@ -6512,7 +6472,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
&not_numbers); &not_numbers);
LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side, LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
&not_numbers); &not_numbers);
__ comisd(xmm0, xmm1); __ ucomisd(xmm0, xmm1);
// Bail out if a NaN is involved. // Bail out if a NaN is involved.
not_numbers.Branch(parity_even, left_side, right_side); not_numbers.Branch(parity_even, left_side, right_side);
@@ -8551,18 +8511,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: RegExp data (FixedArray) // rcx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset)); __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
__ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP)); __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
__ j(not_equal, &runtime); __ j(not_equal, &runtime);
// rcx: RegExp data (FixedArray) // rcx: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer. // Check that the number of captures fit in the static offsets vector buffer.
__ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset)); __ SmiToInteger32(rdx,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. // Calculate number of capture registers (number_of_captures + 1) * 2.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1); __ leal(rdx, Operand(rdx, rdx, times_1, 2));
__ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
// Check that the static offsets vector buffer is large enough. // Check that the static offsets vector buffer is large enough.
__ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize)); __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
__ j(above, &runtime); __ j(above, &runtime);
// rcx: RegExp data (FixedArray) // rcx: RegExp data (FixedArray)
@@ -8572,17 +8532,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(rax, &runtime); __ JumpIfSmi(rax, &runtime);
Condition is_string = masm->IsObjectStringType(rax, rbx, rbx); Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
__ j(NegateCondition(is_string), &runtime); __ j(NegateCondition(is_string), &runtime);
// Get the length of the string to rbx.
__ movq(rbx, FieldOperand(rax, String::kLengthOffset));
// rbx: Length of subject string as smi // rax: Subject string.
// rcx: RegExp data (FixedArray) // rcx: RegExp data (FixedArray).
// rdx: Number of capture registers // rdx: Number of capture registers.
// Check that the third argument is a positive smi less than the string // Check that the third argument is a positive smi less than the string
// length. A negative value will be greater (unsigned comparison). // length. A negative value will be greater (unsigned comparison).
__ movq(rax, Operand(rsp, kPreviousIndexOffset)); __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
__ JumpIfNotSmi(rax, &runtime); __ JumpIfNotSmi(rbx, &runtime);
__ SmiCompare(rax, rbx); __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
__ j(above_equal, &runtime); __ j(above_equal, &runtime);
// rcx: RegExp data (FixedArray) // rcx: RegExp data (FixedArray)
@@ -8600,8 +8558,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the // Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add. // additional information. Ensure no overflow in add.
ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
__ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ SmiToInteger32(rax, rax);
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax); __ cmpl(rdx, rax);
__ j(greater, &runtime); __ j(greater, &runtime);
@@ -8674,8 +8631,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r12: code // r12: code
// Load used arguments before starting to push arguments for call to native // Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height. // RegExp code to avoid handling changing stack height.
__ movq(rbx, Operand(rsp, kPreviousIndexOffset)); __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
__ SmiToInteger64(rbx, rbx); // Previous index from smi.
// rax: subject string // rax: subject string
// rbx: previous index // rbx: previous index
@@ -8787,10 +8743,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&success); __ bind(&success);
__ movq(rax, Operand(rsp, kJSRegExpOffset)); __ movq(rax, Operand(rsp, kJSRegExpOffset));
__ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset)); __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset)); __ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. // Calculate number of capture registers (number_of_captures + 1) * 2.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1); __ leal(rdx, Operand(rax, rax, times_1, 2));
__ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
// rdx: Number of capture registers // rdx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray. // Load last_match_info which is still known to be a fast case JSArray.
@@ -8877,9 +8833,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It // Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry. // contains two elements (number and string) for each cache entry.
__ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); __ SmiToInteger32(
// Divide smi tagged length by two. mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1); __ shrl(mask, Immediate(1));
__ subq(mask, Immediate(1)); // Make mask. __ subq(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the // Calculate the entry in the number string cache. The hash value in the
@@ -8909,15 +8865,14 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
CpuFeatures::Scope fscope(SSE2); CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
__ comisd(xmm0, xmm1); __ ucomisd(xmm0, xmm1);
__ j(parity_even, not_found); // Bail out if NaN is involved. __ j(parity_even, not_found); // Bail out if NaN is involved.
__ j(not_equal, not_found); // The cache did not contain this value. __ j(not_equal, not_found); // The cache did not contain this value.
__ jmp(&load_result_from_cache); __ jmp(&load_result_from_cache);
} }
__ bind(&is_smi); __ bind(&is_smi);
__ movq(scratch, object); __ SmiToInteger32(scratch, object);
__ SmiToInteger32(scratch, scratch);
GenerateConvertHashCodeToIndex(masm, scratch, mask); GenerateConvertHashCodeToIndex(masm, scratch, mask);
Register index = scratch; Register index = scratch;
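The cache is a power-of-two FixedArray of (number, string) pairs, so half its length is the entry count and one less than that is the hash mask; ucomisd then keeps NaN keys out of the cache via the parity flag. Worked numbers (hypothetical cache size):

// FixedArray length 128  ->  64 number/string entries
//   mask = (128 >> 1) - 1 = 63        // hash & 63 selects an entry
// ucomisd(xmm0, xmm1) with a NaN      // PF=1 -> parity_even -> not_found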
@@ -9111,12 +9066,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (include_number_compare_) { if (include_number_compare_) {
Label non_number_comparison; Label non_number_comparison;
Label unordered; Label unordered;
FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0, FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
&non_number_comparison); __ ucomisd(xmm0, xmm1);
FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
&non_number_comparison);
__ comisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved. // Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered); __ j(parity_even, &unordered);
@@ -9344,29 +9295,30 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ j(equal, &adaptor_frame); __ j(equal, &adaptor_frame);
// Get the length from the frame. // Get the length from the frame.
__ movq(rcx, Operand(rsp, 1 * kPointerSize)); __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
__ jmp(&try_allocate); __ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer. // Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame); __ bind(&adaptor_frame);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); __ SmiToInteger32(rcx,
__ movq(Operand(rsp, 1 * kPointerSize), rcx); Operand(rdx,
ArgumentsAdaptorFrameConstants::kLengthOffset));
// Space on stack must already hold a smi.
__ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
// Do not clobber the length index for the indexing operation since // Do not clobber the length index for the indexing operation since
// it is used to compute the size for allocation later. // it is used to compute the size for allocation later.
SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2); __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
__ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
__ movq(Operand(rsp, 2 * kPointerSize), rdx); __ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Try the new space allocation. Start out with computing the size of // Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array. // the arguments object and the elements array.
Label add_arguments_object; Label add_arguments_object;
__ bind(&try_allocate); __ bind(&try_allocate);
__ testq(rcx, rcx); __ testl(rcx, rcx);
__ j(zero, &add_arguments_object); __ j(zero, &add_arguments_object);
index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2); __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
__ bind(&add_arguments_object); __ bind(&add_arguments_object);
__ addq(rcx, Immediate(Heap::kArgumentsObjectSize)); __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of both objects in one go. // Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -9378,10 +9330,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(rdi, Operand(rdi, offset)); __ movq(rdi, Operand(rdi, offset));
// Copy the JS object part. // Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
__ movq(kScratchRegister, FieldOperand(rdi, i)); __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
__ movq(FieldOperand(rax, i), kScratchRegister); __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
} __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
__ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
__ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
__ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
// Setup the callee in-object property. // Setup the callee in-object property.
ASSERT(Heap::arguments_callee_index == 0); ASSERT(Heap::arguments_callee_index == 0);
@@ -9395,7 +9350,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// If there are no actual arguments, we're done. // If there are no actual arguments, we're done.
Label done; Label done;
__ testq(rcx, rcx); __ SmiTest(rcx);
__ j(zero, &done); __ j(zero, &done);
// Get the parameters pointer from the stack and untag the length. // Get the parameters pointer from the stack and untag the length.
@@ -9417,7 +9372,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
__ addq(rdi, Immediate(kPointerSize)); __ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize)); __ subq(rdx, Immediate(kPointerSize));
__ decq(rcx); __ decl(rcx);
__ j(not_zero, &loop); __ j(not_zero, &loop);
// Return and remove the on-stack parameters. // Return and remove the on-stack parameters.
@@ -9968,86 +9923,73 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
} }
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
Register number) { __ SmiToInteger32(kScratchRegister, rdx);
Label load_smi, done; __ cvtlsi2sd(xmm0, kScratchRegister);
__ SmiToInteger32(kScratchRegister, rax);
__ JumpIfSmi(number, &load_smi); __ cvtlsi2sd(xmm1, kScratchRegister);
__ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
__ SmiToInteger32(number, number);
__ push(number);
__ fild_s(Operand(rsp, 0));
__ pop(number);
__ bind(&done);
} }
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
Register src, Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
XMMRegister dst) { // Load operand in rdx into xmm0.
Label load_smi, done; __ JumpIfSmi(rdx, &load_smi_rdx);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ JumpIfSmi(src, &load_smi); // Load operand in rax into xmm1.
__ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset)); __ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done); __ jmp(&done);
__ bind(&load_smi); __ bind(&load_smi_rdx);
__ SmiToInteger32(src, src); __ SmiToInteger32(kScratchRegister, rdx);
__ cvtlsi2sd(dst, src); __ cvtlsi2sd(xmm0, kScratchRegister);
__ JumpIfNotSmi(rax, &load_nonsmi_rax);
__ bind(&done);
}
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst,
Label* not_number) {
Label load_smi, done;
ASSERT(!src.is(kScratchRegister));
__ JumpIfSmi(src, &load_smi);
__ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
__ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
__ j(not_equal, not_number);
__ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi); __ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, src); __ SmiToInteger32(kScratchRegister, rax);
__ cvtlsi2sd(dst, kScratchRegister); __ cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done); __ bind(&done);
} }
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
XMMRegister dst1, Label* not_numbers) {
XMMRegister dst2) { Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
__ movq(kScratchRegister, rdx); // Load operand in rdx into xmm0, or branch to not_numbers.
LoadFloatOperand(masm, kScratchRegister, dst1); __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ movq(kScratchRegister, rax); __ JumpIfSmi(rdx, &load_smi_rdx);
LoadFloatOperand(masm, kScratchRegister, dst2); __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
} __ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm, __ bind(&load_smi_rdx);
XMMRegister dst1,
XMMRegister dst2) {
__ SmiToInteger32(kScratchRegister, rdx); __ SmiToInteger32(kScratchRegister, rdx);
__ cvtlsi2sd(dst1, kScratchRegister); __ cvtlsi2sd(xmm0, kScratchRegister);
__ JumpIfNotSmi(rax, &load_nonsmi_rax);
__ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax); __ SmiToInteger32(kScratchRegister, rax);
__ cvtlsi2sd(dst2, kScratchRegister); __ cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
} }
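The untag-then-convert pairs rely on the x64 smi layout, where the 32-bit payload lives in the upper half of the word (kSmiShift == 32) and the low bit carries the zero tag. Worked bits, assuming that layout:

// smi(7) = 0x0000000700000000     // payload << 32, tag bit clear
// SmiToInteger32(scratch, rax)    // arithmetic shift right by 32 -> 7
// cvtlsi2sd(xmm1, scratch)        // -> 7.0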
// Input: rdx, rax are the left and right objects of a bit op. // Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op. // Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure) { Label* conversion_failure,
Register heap_number_map) {
// Check float operands. // Check float operands.
Label arg1_is_object, check_undefined_arg1; Label arg1_is_object, check_undefined_arg1;
Label arg2_is_object, check_undefined_arg2; Label arg2_is_object, check_undefined_arg2;
@@ -10065,8 +10007,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ jmp(&load_arg2); __ jmp(&load_arg2);
__ bind(&arg1_is_object); __ bind(&arg1_is_object);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &check_undefined_arg1); __ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in rcx. // Get the untagged integer version of the edx heap number in rcx.
IntegerConvert(masm, rdx, rdx); IntegerConvert(masm, rdx, rdx);
@@ -10087,8 +10028,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ jmp(&done); __ jmp(&done);
__ bind(&arg2_is_object); __ bind(&arg2_is_object);
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &check_undefined_arg2); __ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx. // Get the untagged integer version of the eax heap number in ecx.
IntegerConvert(masm, rcx, rax); IntegerConvert(masm, rcx, rax);
@@ -10097,51 +10037,35 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
} }
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, // Input: rdx, rax are the left and right objects of a bit op.
Register lhs, // Output: rax, rcx are left and right integers for a bit op.
Register rhs) { void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
Label load_smi_lhs, load_smi_rhs, done_load_lhs, done; if (FLAG_debug_code) {
__ JumpIfSmi(lhs, &load_smi_lhs); // Both arguments can not be smis. That case is handled by smi-only code.
__ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset)); Label ok;
__ bind(&done_load_lhs); __ JumpIfNotBothSmi(rax, rdx, &ok);
__ Abort("Both arguments smi but not handled by smi-code.");
__ JumpIfSmi(rhs, &load_smi_rhs); __ bind(&ok);
__ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset)); }
__ jmp(&done); // Check float operands.
Label done;
__ bind(&load_smi_lhs); Label rax_is_object;
__ SmiToInteger64(kScratchRegister, lhs); Label rdx_is_object;
__ push(kScratchRegister);
__ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_lhs);
__ bind(&load_smi_rhs);
__ SmiToInteger64(kScratchRegister, rhs);
__ push(kScratchRegister);
__ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ bind(&done);
}
__ JumpIfNotSmi(rdx, &rdx_is_object);
__ SmiToInteger32(rdx, rdx);
void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm, __ bind(&rax_is_object);
Label* non_float) { IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
Label test_other, done; __ jmp(&done);
// Test if both operands are numbers (heap_numbers or smis).
// If not, jump to label non_float.
__ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
__ j(not_equal, non_float); // The argument in rdx is not a number.
__ bind(&test_other); __ bind(&rdx_is_object);
__ JumpIfSmi(rax, &done); // argument in rax is OK IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
__ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map()); __ JumpIfNotSmi(rax, &rax_is_object);
__ j(not_equal, non_float); // The argument in rax is not a number. __ SmiToInteger32(rcx, rax);
// Fall-through: Both operands are numbers.
__ bind(&done); __ bind(&done);
__ movl(rax, rdx);
} }
@@ -10451,15 +10375,15 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
} }
// left is rdx, right is rax. // left is rdx, right is rax.
__ AllocateHeapNumber(rbx, rcx, slow); __ AllocateHeapNumber(rbx, rcx, slow);
FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5); FloatingPointHelper::LoadSSE2SmiOperands(masm);
switch (op_) { switch (op_) {
case Token::ADD: __ addsd(xmm4, xmm5); break; case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm4, xmm5); break; case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm4, xmm5); break; case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm4, xmm5); break; case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
__ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4); __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rbx); __ movq(rax, rbx);
GenerateReturn(masm); GenerateReturn(masm);
} }
@@ -10522,22 +10446,23 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_floats; Label not_floats;
// rax: y // rax: y
// rdx: x // rdx: x
if (static_operands_type_.IsNumber() && FLAG_debug_code) { ASSERT(!static_operands_type_.IsSmi());
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers. // Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(rdx); __ AbortIfNotNumber(rdx);
__ AbortIfNotNumber(rax); __ AbortIfNotNumber(rax);
}
FloatingPointHelper::LoadSSE2NumberOperands(masm);
} else { } else {
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime); FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
} }
// Fast-case: Both operands are numbers.
// xmm4 and xmm5 are volatile XMM registers.
FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
switch (op_) { switch (op_) {
case Token::ADD: __ addsd(xmm4, xmm5); break; case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm4, xmm5); break; case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm4, xmm5); break; case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm4, xmm5); break; case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
// Allocate a heap number, if needed. // Allocate a heap number, if needed.
@@ -10572,7 +10497,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break; break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4); __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm); GenerateReturn(masm);
__ bind(&not_floats); __ bind(&not_floats);
if (runtime_operands_type_ == BinaryOpIC::DEFAULT && if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
@@ -10597,34 +10522,52 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR: case Token::SAR:
case Token::SHL: case Token::SHL:
case Token::SHR: { case Token::SHR: {
Label skip_allocation, non_smi_result; Label skip_allocation, non_smi_shr_result;
FloatingPointHelper::LoadAsIntegers(masm, &call_runtime); Register heap_number_map = r9;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(rdx);
__ AbortIfNotNumber(rax);
}
FloatingPointHelper::LoadNumbersAsIntegers(masm);
} else {
FloatingPointHelper::LoadAsIntegers(masm,
&call_runtime,
heap_number_map);
}
switch (op_) { switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break; case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break; case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break; case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl_cl(rax); break; case Token::SAR: __ sarl_cl(rax); break;
case Token::SHL: __ shll_cl(rax); break; case Token::SHL: __ shll_cl(rax); break;
case Token::SHR: __ shrl_cl(rax); break; case Token::SHR: {
default: UNREACHABLE(); __ shrl_cl(rax);
}
if (op_ == Token::SHR) {
// Check if result is negative. This can only happen for a shift // Check if result is negative. This can only happen for a shift
// by zero, which also doesn't update the sign flag. // by zero.
__ testl(rax, rax); __ testl(rax, rax);
__ j(negative, &non_smi_result); __ j(negative, &non_smi_shr_result);
break;
}
default: UNREACHABLE();
} }
__ JumpIfNotValidSmiValue(rax, &non_smi_result);
// Tag smi result, if possible, and return. STATIC_ASSERT(kSmiValueSize == 32);
// Tag smi result and return.
__ Integer32ToSmi(rax, rax); __ Integer32ToSmi(rax, rax);
GenerateReturn(masm); GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in // All bit-ops except SHR return a signed int32 that can be
// a HeapNumber. // returned immediately as a smi.
if (op_ != Token::SHR && non_smi_result.is_linked()) { // We might need to allocate a HeapNumber if we shift a negative
__ bind(&non_smi_result); // number right by zero (i.e., convert to UInt32).
if (op_ == Token::SHR) {
ASSERT(non_smi_shr_result.is_linked());
__ bind(&non_smi_shr_result);
// Allocate a heap number if needed. // Allocate a heap number if needed.
__ movsxlq(rbx, rax); // rbx: sign extended 32-bit result __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
switch (mode_) { switch (mode_) {
case OVERWRITE_LEFT: case OVERWRITE_LEFT:
case OVERWRITE_RIGHT: case OVERWRITE_RIGHT:
@@ -10635,22 +10578,33 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(rax, &skip_allocation); __ JumpIfNotSmi(rax, &skip_allocation);
// Fall through! // Fall through!
case NO_OVERWRITE: case NO_OVERWRITE:
__ AllocateHeapNumber(rax, rcx, &call_runtime); // Allocate heap number in new space.
// Not using AllocateHeapNumber macro in order to reuse
// already loaded heap_number_map.
__ AllocateInNewSpace(HeapNumber::kSize,
rax,
rcx,
no_reg,
&call_runtime,
TAG_OBJECT);
// Set the map.
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ bind(&skip_allocation); __ bind(&skip_allocation);
break; break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
// Store the result in the HeapNumber and return. // Store the result in the HeapNumber and return.
__ movq(Operand(rsp, 1 * kPointerSize), rbx); __ cvtqsi2sd(xmm0, rbx);
__ fild_s(Operand(rsp, 1 * kPointerSize)); __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
GenerateReturn(masm); GenerateReturn(masm);
} }
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
}
break; break;
} }
default: UNREACHABLE(); break; default: UNREACHABLE(); break;
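Only SHR can produce a value that is not a valid smi here: with a 32-bit smi payload every signed int32 fits (the STATIC_ASSERT above), but a logical right shift of a negative number by zero yields a uint32 above INT32_MAX. Worked case for -1 >>> 0 under JavaScript semantics:

// rax = 0xFFFFFFFF after shrl_cl   // bit 31 set -> testl takes the slow path
// movl(rbx, rax)                   // zero-extends: rbx = 0x00000000FFFFFFFF
// cvtqsi2sd(xmm0, rbx)             // signed 64-bit convert -> 4294967295.0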
@@ -10683,7 +10637,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_strings, both_strings, not_string1, string1, string1_smi2; Label not_strings, both_strings, not_string1, string1, string1_smi2;
// If this stub has already generated FP-specific code then the arguments // If this stub has already generated FP-specific code then the arguments
// are already in rdx, rax // are already in rdx and rax.
if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
GenerateLoadArguments(masm); GenerateLoadArguments(masm);
} }
@@ -10832,19 +10786,13 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ push(rax); __ push(rax);
// Push this stub's key. // Push this stub's key.
__ movq(rax, Immediate(MinorKey())); __ Push(Smi::FromInt(MinorKey()));
__ Integer32ToSmi(rax, rax);
__ push(rax);
// Although the operation and the type info are encoded into the key, // Although the operation and the type info are encoded into the key,
// the encoding is opaque, so push them too. // the encoding is opaque, so push them too.
__ movq(rax, Immediate(op_)); __ Push(Smi::FromInt(op_));
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ movq(rax, Immediate(runtime_operands_type_)); __ Push(Smi::FromInt(runtime_operands_type_));
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rcx); __ push(rcx);
@@ -11212,16 +11160,17 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object. If // If result is not supposed to be flat, allocate a cons string object. If
// both strings are ascii the result is an ascii cons string. // both strings are ascii the result is an ascii cons string.
// rax: first string // rax: first string
// ebx: length of resulting flat string // rbx: length of resulting flat string
// rdx: second string // rdx: second string
// r8: instance type of first string // r8: instance type of first string
// r9: instance type of second string // r9: instance type of second string
Label non_ascii, allocated; Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8); __ movl(rcx, r8);
__ and_(rcx, r9); __ and_(rcx, r9);
ASSERT(kStringEncodingMask == kAsciiStringTag); ASSERT(kStringEncodingMask == kAsciiStringTag);
__ testl(rcx, Immediate(kAsciiStringTag)); __ testl(rcx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii); __ j(zero, &non_ascii);
__ bind(&ascii_data);
// Allocate an ascii cons string. // Allocate an ascii cons string.
__ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime); __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
__ bind(&allocated); __ bind(&allocated);
@@ -11235,6 +11184,18 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(&Counters::string_add_native, 1); __ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize); __ ret(2 * kPointerSize);
__ bind(&non_ascii); __ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
// to contain only ascii characters.
// rcx: first instance type AND second instance type.
// r8: first instance type.
// r9: second instance type.
__ testb(rcx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
__ xor_(r8, r9);
ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
__ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
__ j(equal, &ascii_data);
// Allocate a two byte cons string. // Allocate a two byte cons string.
__ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime); __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
__ jmp(&allocated); __ jmp(&allocated);
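The new `ascii_data` path admits an ascii cons even when one operand is a two-byte string, provided the instance types carry the ascii-data hint. The xor/and/cmp triple covers the remaining case without one branch per flag. A hedged sketch of the full predicate; the flag values below are illustrative stand-ins for V8's kAsciiStringTag and kAsciiDataHintTag:

```cpp
#include <cassert>

// Illustrative flag bits; V8's actual constants differ, but the
// predicate is the same.
constexpr int kAsciiStringTag   = 1 << 2;
constexpr int kAsciiDataHintTag = 1 << 3;

// Result may be an ascii cons if both inputs are ascii, both carry the
// ascii-data hint, or the tag and the hint each appear in exactly one
// of the operands.
bool CanMakeAsciiCons(int type1, int type2) {
  if (type1 & type2 & kAsciiStringTag) return true;    // both ascii
  if (type1 & type2 & kAsciiDataHintTag) return true;  // both hinted
  // xor isolates flags present in exactly one operand; requiring both
  // bits means one side contributes the tag and the other the hint.
  int diff = (type1 ^ type2) & (kAsciiStringTag | kAsciiDataHintTag);
  return diff == (kAsciiStringTag | kAsciiDataHintTag);
}

int main() {
  assert(CanMakeAsciiCons(kAsciiStringTag, kAsciiDataHintTag));
  assert(!CanMakeAsciiCons(0, kAsciiStringTag));
}
```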
@ -11242,7 +11203,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Handle creating a flat result. First check that both strings are not // Handle creating a flat result. First check that both strings are not
// external strings. // external strings.
// rax: first string // rax: first string
// ebx: length of resulting flat string as smi // rbx: length of resulting flat string as smi
// rdx: second string // rdx: second string
// r8: instance type of first string // r8: instance type of first string
// r9: instance type of second string // r9: instance type of second string
@ -11258,7 +11219,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(equal, &string_add_runtime); __ j(equal, &string_add_runtime);
// Now check if both strings are ascii strings. // Now check if both strings are ascii strings.
// rax: first string // rax: first string
// ebx: length of resulting flat string // rbx: length of resulting flat string
// rdx: second string // rdx: second string
// r8: instance type of first string // r8: instance type of first string
// r9: instance type of second string // r9: instance type of second string

59
deps/v8/src/x64/ic-x64.cc

@ -893,19 +893,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- rdx : receiver // -- rdx : receiver
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
Label slow, fast, array, extra, check_pixel_array; Label slow, slow_with_tagged_index, fast, array, extra, check_pixel_array;
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow); __ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver. // Get the map from the receiver.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need // Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks. // to do this because this generic stub does not perform map checks.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset), __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded)); Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow); __ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi. // Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow); __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
__ SmiToInteger32(rcx, rcx);
__ CmpInstanceType(rbx, JS_ARRAY_TYPE); __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array); __ j(equal, &array);
@ -916,27 +917,30 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Object case: Check key against length in the elements array. // Object case: Check key against length in the elements array.
// rax: value // rax: value
// rdx: JSObject // rdx: JSObject
// rcx: index (as a smi) // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary). // Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex); Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array); __ j(not_equal, &check_pixel_array);
__ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset)); __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value // rax: value
// rbx: FixedArray // rbx: FixedArray
// rcx: index (as a smi) // rcx: index
__ j(below, &fast); __ j(above, &fast);
// Slow case: call runtime. // Slow case: call runtime.
__ bind(&slow); __ bind(&slow);
__ Integer32ToSmi(rcx, rcx);
__ bind(&slow_with_tagged_index);
GenerateRuntimeSetProperty(masm); GenerateRuntimeSetProperty(masm);
// Never returns here.
// Check whether the elements is a pixel array. // Check whether the elements is a pixel array.
// rax: value // rax: value
// rdx: receiver // rdx: receiver
// rbx: receiver's elements array // rbx: receiver's elements array
// rcx: index (as a smi), zero-extended. // rcx: index, zero-extended.
__ bind(&check_pixel_array); __ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex); Heap::kPixelArrayMapRootIndex);
@ -944,21 +948,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the value is a smi. If a conversion is needed call into the // Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp. // runtime to convert and clamp.
__ JumpIfNotSmi(rax, &slow); __ JumpIfNotSmi(rax, &slow);
__ SmiToInteger32(rdi, rcx); __ cmpl(rcx, FieldOperand(rbx, PixelArray::kLengthOffset));
__ cmpl(rdi, FieldOperand(rbx, PixelArray::kLengthOffset));
__ j(above_equal, &slow); __ j(above_equal, &slow);
// No more bailouts to slow case on this path, so key not needed. // No more bailouts to slow case on this path, so key not needed.
__ SmiToInteger32(rcx, rax); __ SmiToInteger32(rdi, rax);
{ // Clamp the value to [0..255]. { // Clamp the value to [0..255].
Label done; Label done;
__ testl(rcx, Immediate(0xFFFFFF00)); __ testl(rdi, Immediate(0xFFFFFF00));
__ j(zero, &done); __ j(zero, &done);
__ setcc(negative, rcx); // 1 if negative, 0 if positive. __ setcc(negative, rdi); // 1 if negative, 0 if positive.
__ decb(rcx); // 0 if negative, 255 if positive. __ decb(rdi); // 0 if negative, 255 if positive.
__ bind(&done); __ bind(&done);
} }
__ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset)); __ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
__ movb(Operand(rbx, rdi, times_1, 0), rcx); __ movb(Operand(rbx, rcx, times_1, 0), rdi);
__ ret(0); __ ret(0);
// Extra capacity case: Check if there is extra capacity to // Extra capacity case: Check if there is extra capacity to
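For the pixel-array store above, the value is clamped to [0..255] with a testl/setcc/decb sequence rather than a pair of compares and branches: setcc(negative) yields 1 for a negative input, and decrementing that byte gives 0x00, while a non-negative out-of-range input becomes 0xFF. A C++ sketch of the same trick (illustrative, not the generated code):

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the branch-light clamp used for pixel stores.
uint8_t ClampToByte(int32_t v) {
  if ((v & 0xFFFFFF00) == 0) return static_cast<uint8_t>(v);  // already 0..255
  uint8_t r = (v < 0) ? 1 : 0;          // setcc(negative): 1 if negative
  return static_cast<uint8_t>(r - 1);   // decb: 0x00 if negative, 0xFF if not
}

int main() {
  assert(ClampToByte(-7) == 0);
  assert(ClampToByte(300) == 255);
  assert(ClampToByte(42) == 42);
}
```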
@ -968,14 +971,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rax: value // rax: value
// rdx: receiver (a JSArray) // rdx: receiver (a JSArray)
// rbx: receiver's elements array (a FixedArray) // rbx: receiver's elements array (a FixedArray)
// rcx: index (as a smi) // rcx: index
// flags: smicompare (rdx.length(), rbx) // flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array __ j(not_equal, &slow); // do not leave holes in the array
__ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset)); __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(above_equal, &slow); __ j(below_equal, &slow);
// Increment index to get new length. // Increment index to get new length.
__ SmiAddConstant(rdi, rcx, Smi::FromInt(1)); __ leal(rdi, Operand(rcx, 1));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi); __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast); __ jmp(&fast);
// Array case: Get the length and the elements array from the JS // Array case: Get the length and the elements array from the JS
@ -984,7 +987,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&array); __ bind(&array);
// rax: value // rax: value
// rdx: receiver (a JSArray) // rdx: receiver (a JSArray)
// rcx: index (as a smi) // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex); Heap::kFixedArrayMapRootIndex);
@ -992,26 +995,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check the key against the length in the array, compute the // Check the key against the length in the array, compute the
// address to store into and fall through to fast case. // address to store into and fall through to fast case.
__ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rcx); __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
__ j(below_equal, &extra); __ j(below_equal, &extra);
// Fast case: Do the store. // Fast case: Do the store.
__ bind(&fast); __ bind(&fast);
// rax: value // rax: value
// rbx: receiver's elements array (a FixedArray) // rbx: receiver's elements array (a FixedArray)
// rcx: index (as a smi) // rcx: index
Label non_smi_value; Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value); __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
__ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
rax); rax);
__ JumpIfNotSmi(rax, &non_smi_value);
__ ret(0); __ ret(0);
__ bind(&non_smi_value); __ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite. // Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address. // Update write barrier for the elements array address.
SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
__ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
rax);
__ movq(rdx, rax); __ movq(rdx, rax);
__ RecordWriteNonSmi(rbx, 0, rdx, rcx); __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
__ ret(0); __ ret(0);
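With the key held untagged in rcx, the fast path can address the element with an ordinary `times_pointer_size` scaled operand, and `SmiCompareInteger32` can compare it against the array length by reading only the smi's payload half. A sketch of the effective-address arithmetic behind `FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize)`; the constants are illustrative stand-ins for V8's x64 layout:

```cpp
#include <cstdint>

// Effective-address sketch for an untagged-index element store.
constexpr int kPointerSize   = 8;
constexpr int kHeapObjectTag = 1;   // heap pointers carry a 1 tag
constexpr int kHeaderSize    = 16;  // FixedArray header: map + length

uint8_t* ElementAddress(uint8_t* tagged_elements, uint32_t index) {
  // FieldOperand subtracts the heap-object tag, then skips the header
  // and scales the zero-extended index by the pointer size.
  return tagged_elements - kHeapObjectTag + kHeaderSize +
         static_cast<uint64_t>(index) * kPointerSize;
}
```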

167
deps/v8/src/x64/macro-assembler-x64.cc

@ -35,6 +35,7 @@
#include "macro-assembler-x64.h" #include "macro-assembler-x64.h"
#include "serialize.h" #include "serialize.h"
#include "debug.h" #include "debug.h"
#include "heap.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -96,8 +97,8 @@ void MacroAssembler::RecordWriteHelper(Register object,
// Compute number of region covering addr. See Page::GetRegionNumberForAddress // Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details. // method for more details.
and_(addr, Immediate(Page::kPageAlignmentMask));
shrl(addr, Immediate(Page::kRegionSizeLog2)); shrl(addr, Immediate(Page::kRegionSizeLog2));
andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
// Set dirty mark for region. // Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr); bts(Operand(object, Page::kDirtyFlagOffset), addr);
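The reordered pair computes the same region number: shifting first and then masking with the pre-shifted page mask is equivalent to masking to the page and then shifting. A quick check of the identity, with illustrative page and region sizes rather than V8's actual Page constants:

```cpp
#include <cassert>
#include <cstdint>

// Identity check for the region-number computation; sizes are illustrative.
constexpr uintptr_t kPageAlignmentMask = (1u << 13) - 1;  // e.g. 8K pages
constexpr int kRegionSizeLog2 = 8;                        // e.g. 256-byte regions

int main() {
  uintptr_t addr = 0xABCDE0;
  uintptr_t shifted_then_masked =
      (addr >> kRegionSizeLog2) & (kPageAlignmentMask >> kRegionSizeLog2);
  uintptr_t masked_then_shifted =
      (addr & kPageAlignmentMask) >> kRegionSizeLog2;
  assert(shifted_then_masked == masked_then_shifted);
}
```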
@ -106,25 +107,25 @@ void MacroAssembler::RecordWriteHelper(Register object,
// For page containing |object| mark region covering [object+offset] dirty. // For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored. // object is the object being stored into, value is the object being stored.
// If offset is zero, then the smi_index register contains the array index into // If offset is zero, then the index register contains the array index into
// the elements array represented as a smi. Otherwise it can be used as a // the elements array represented as a zero-extended int32. Otherwise it can be
// scratch register. // used as a scratch register.
// All registers are clobbered by the operation. // All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object, void MacroAssembler::RecordWrite(Register object,
int offset, int offset,
Register value, Register value,
Register smi_index) { Register index) {
// The compiled code assumes that record write doesn't change the // The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered // context register, so we check that none of the clobbered
// registers are rsi. // registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
// First, check if a write barrier is even needed. The tests below // First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen. // catch stores of Smis and stores into young gen.
Label done; Label done;
JumpIfSmi(value, &done); JumpIfSmi(value, &done);
RecordWriteNonSmi(object, offset, value, smi_index); RecordWriteNonSmi(object, offset, value, index);
bind(&done); bind(&done);
// Clobber all input registers when running with the debug-code flag // Clobber all input registers when running with the debug-code flag
@ -135,7 +136,7 @@ void MacroAssembler::RecordWrite(Register object,
if (FLAG_debug_code) { if (FLAG_debug_code) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
} }
} }
@ -143,7 +144,7 @@ void MacroAssembler::RecordWrite(Register object,
void MacroAssembler::RecordWriteNonSmi(Register object, void MacroAssembler::RecordWriteNonSmi(Register object,
int offset, int offset,
Register scratch, Register scratch,
Register smi_index) { Register index) {
Label done; Label done;
if (FLAG_debug_code) { if (FLAG_debug_code) {
@ -151,6 +152,16 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
JumpIfNotSmi(object, &okay); JumpIfNotSmi(object, &okay);
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
bind(&okay); bind(&okay);
if (offset == 0) {
// index must be int32.
Register tmp = index.is(rax) ? rbx : rax;
push(tmp);
movl(tmp, index);
cmpq(tmp, index);
Check(equal, "Index register for RecordWrite must be untagged int32.");
pop(tmp);
}
} }
// Test that the object address is not in the new space. We cannot // Test that the object address is not in the new space. We cannot
@ -163,16 +174,15 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
ASSERT(IsAligned(offset, kPointerSize) || ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize)); IsAligned(offset + kHeapObjectTag, kPointerSize));
Register dst = smi_index; Register dst = index;
if (offset != 0) { if (offset != 0) {
lea(dst, Operand(object, offset)); lea(dst, Operand(object, offset));
} else { } else {
// array access: calculate the destination address in the same manner as // array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. // KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object, lea(dst, FieldOperand(object,
index.reg, index,
index.scale, times_pointer_size,
FixedArray::kHeaderSize)); FixedArray::kHeaderSize));
} }
RecordWriteHelper(object, dst, scratch); RecordWriteHelper(object, dst, scratch);
@ -184,7 +194,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
if (FLAG_debug_code) { if (FLAG_debug_code) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
} }
} }
@ -446,13 +456,8 @@ void MacroAssembler::Set(Register dst, int64_t x) {
void MacroAssembler::Set(const Operand& dst, int64_t x) { void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (x == 0) { if (is_int32(x)) {
xor_(kScratchRegister, kScratchRegister);
movq(dst, kScratchRegister);
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x))); movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
movl(dst, Immediate(static_cast<uint32_t>(x)));
} else { } else {
movq(kScratchRegister, x, RelocInfo::NONE); movq(kScratchRegister, x, RelocInfo::NONE);
movq(dst, kScratchRegister); movq(dst, kScratchRegister);
@ -485,6 +490,23 @@ void MacroAssembler::Integer32ToSmi(Register dst,
} }
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
if (FLAG_debug_code) {
testb(dst, Immediate(0x01));
Label ok;
j(zero, &ok);
if (allow_stub_calls()) {
Abort("Integer32ToSmiField writing to non-smi location");
} else {
int3();
}
bind(&ok);
}
ASSERT(kSmiShift % kBitsPerByte == 0);
movl(Operand(dst, kSmiShift / kBitsPerByte), src);
}
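`Integer32ToSmiField` leans on the same layout assumption: with the payload in the upper 32 bits, storing the int32 at byte offset kSmiShift / kBitsPerByte (4, on little-endian x64) rewrites the payload while the already-zero low half keeps the smi tag intact; the debug check first verifies the field already holds a smi. A sketch of the store, assuming little-endian and kSmiShift == 32:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Sketch of Integer32ToSmiField's store (assumes little-endian x64 and
// kSmiShift == 32; not the real MacroAssembler).
void Integer32ToSmiField(uint64_t* field, int32_t value) {
  assert((*field & 1) == 0);  // debug check: location already holds a smi
  // Write only the upper 32-bit half: the smi payload.
  std::memcpy(reinterpret_cast<uint8_t*>(field) + 4, &value, sizeof(value));
}

int main() {
  uint64_t field = 0;
  Integer32ToSmiField(&field, -5);
  assert(static_cast<int64_t>(field) >> 32 == -5);
}
```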
void MacroAssembler::Integer64PlusConstantToSmi(Register dst, void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
Register src, Register src,
int constant) { int constant) {
@ -520,6 +542,11 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
} }
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
}
void MacroAssembler::SmiTest(Register src) { void MacroAssembler::SmiTest(Register src) {
testq(src, src); testq(src, src);
} }
@ -556,6 +583,11 @@ void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
} }
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src, Register src,
int power) { int power) {
@ -696,15 +728,12 @@ void MacroAssembler::SmiAdd(Register dst,
movq(dst, src1); movq(dst, src1);
addq(dst, src2); addq(dst, src2);
} }
Assert(no_overflow, "Smi addition onverflow"); Assert(no_overflow, "Smi addition overflow");
} else if (dst.is(src1)) { } else if (dst.is(src1)) {
addq(dst, src2); movq(kScratchRegister, src1);
Label smi_result; addq(kScratchRegister, src2);
j(no_overflow, &smi_result); j(overflow, on_not_smi_result);
// Restore src1. movq(dst, kScratchRegister);
subq(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
} else { } else {
movq(dst, src1); movq(dst, src1);
addq(dst, src2); addq(dst, src2);
@ -727,15 +756,11 @@ void MacroAssembler::SmiSub(Register dst,
movq(dst, src1); movq(dst, src1);
subq(dst, src2); subq(dst, src2);
} }
Assert(no_overflow, "Smi substraction onverflow"); Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) { } else if (dst.is(src1)) {
cmpq(dst, src2);
j(overflow, on_not_smi_result);
subq(dst, src2); subq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
// Restore src1.
addq(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
} else { } else {
movq(dst, src1); movq(dst, src1);
subq(dst, src2); subq(dst, src2);
@ -757,15 +782,12 @@ void MacroAssembler::SmiSub(Register dst,
movq(dst, src1); movq(dst, src1);
subq(dst, src2); subq(dst, src2);
} }
Assert(no_overflow, "Smi substraction onverflow"); Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) { } else if (dst.is(src1)) {
subq(dst, src2); movq(kScratchRegister, src1);
Label smi_result; subq(kScratchRegister, src2);
j(no_overflow, &smi_result); j(overflow, on_not_smi_result);
// Restore src1. movq(src1, kScratchRegister);
addq(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
} else { } else {
movq(dst, src1); movq(dst, src1);
subq(dst, src2); subq(dst, src2);
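The rewritten dst == src1 cases all follow one pattern: perform the arithmetic in kScratchRegister, jump to on_not_smi_result if the overflow flag is set, and only then commit the result to dst. This leaves the source operands untouched on the bailout path and drops the old add-then-undo sequence. The shape in portable C++, using the GCC/Clang overflow builtin for brevity:

```cpp
#include <cstdint>
#include <optional>

// Pattern sketch: compute into a scratch location, commit only on success,
// so the inputs are still intact if we bail out to on_not_smi_result.
std::optional<int64_t> SmiAddSketch(int64_t src1, int64_t src2) {
  int64_t scratch;
  if (__builtin_add_overflow(src1, src2, &scratch)) {
    return std::nullopt;  // bailout: src1 and src2 unmodified
  }
  return scratch;  // movq(dst, kScratchRegister)
}
```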
@ -883,12 +905,9 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(!dst.is(kScratchRegister)); ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant); Move(kScratchRegister, constant);
addq(dst, kScratchRegister); addq(kScratchRegister, dst);
Label result_ok; j(overflow, on_not_smi_result);
j(no_overflow, &result_ok); movq(dst, kScratchRegister);
subq(dst, kScratchRegister);
jmp(on_not_smi_result);
bind(&result_ok);
} else { } else {
Move(dst, constant); Move(dst, constant);
addq(dst, src); addq(dst, src);
@ -910,10 +929,12 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
} else { } else {
// Subtract by adding the negative, to do it in two operations. // Subtract by adding the negative, to do it in two operations.
if (constant->value() == Smi::kMinValue) { if (constant->value() == Smi::kMinValue) {
Move(kScratchRegister, constant); Move(dst, constant);
movq(dst, src); // Adding and subtracting the min-value gives the same result; it only
subq(dst, kScratchRegister); // differs in the overflow flag, which we don't check here.
addq(dst, src);
} else { } else {
// Subtract by adding the negation.
Move(dst, Smi::FromInt(-constant->value())); Move(dst, Smi::FromInt(-constant->value()));
addq(dst, src); addq(dst, src);
} }
@ -931,21 +952,32 @@ void MacroAssembler::SmiSubConstant(Register dst,
} }
} else if (dst.is(src)) { } else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister)); ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
subq(dst, kScratchRegister);
Label sub_success;
j(no_overflow, &sub_success);
addq(src, kScratchRegister);
jmp(on_not_smi_result);
bind(&sub_success);
} else {
if (constant->value() == Smi::kMinValue) { if (constant->value() == Smi::kMinValue) {
// Subtracting min-value from any non-negative value will overflow.
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
Move(kScratchRegister, constant); Move(kScratchRegister, constant);
movq(dst, src);
subq(dst, kScratchRegister); subq(dst, kScratchRegister);
} else {
// Subtract by adding the negation.
Move(kScratchRegister, Smi::FromInt(-constant->value()));
addq(kScratchRegister, dst);
j(overflow, on_not_smi_result); j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
}
} else {
if (constant->value() == Smi::kMinValue) {
// Subtracting min-value from any non-negative value will overflow.
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
Move(dst, constant);
// Adding and subtracting the min-value gives the same result; it only
// differs in the overflow flag, which we don't check here.
addq(dst, src);
} else { } else {
// Subtract by adding the negation.
Move(dst, Smi::FromInt(-(constant->value()))); Move(dst, Smi::FromInt(-(constant->value())));
addq(dst, src); addq(dst, src);
j(overflow, on_not_smi_result); j(overflow, on_not_smi_result);
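Both kMinValue special cases rest on two's-complement identities: the negation of the minimum value is not representable (it wraps back to itself), so "subtract by adding the negation" is unusable for exactly that constant, and adding the min-value produces the same bit pattern as subtracting it, differing only in the overflow flag. Demonstrated here with 32-bit payloads; the smi payload width is platform-dependent, but the identities are not:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // 32-bit payloads for illustration only.
  uint32_t min = 0x80000000u;  // bit pattern of the most negative int32
  uint32_t x = 12345;
  assert(x + min == x - min);  // same bits mod 2^32; only the flags differ
  assert(0u - min == min);     // negating the min value wraps to itself
}
```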
@ -1695,6 +1727,17 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
} }
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
cmpq(src, kScratchRegister);
Check(equal, message);
}
Condition MacroAssembler::IsObjectStringType(Register heap_object, Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map, Register map,
Register instance_type) { Register instance_type) {

11
deps/v8/src/x64/macro-assembler-x64.h

@ -203,6 +203,9 @@ class MacroAssembler: public Assembler {
// NOTICE: Destroys the dst register even if unsuccessful! // NOTICE: Destroys the dst register even if unsuccessful!
void Integer32ToSmi(Register dst, Register src, Label* on_overflow); void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
// Stores an integer32 value into a memory field that already holds a smi.
void Integer32ToSmiField(const Operand& dst, Register src);
// Adds constant to src and tags the result as a smi. // Adds constant to src and tags the result as a smi.
// Result must be a valid smi. // Result must be a valid smi.
void Integer64PlusConstantToSmi(Register dst, Register src, int constant); void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
@ -214,6 +217,7 @@ class MacroAssembler: public Assembler {
// Convert smi to 64-bit integer (sign extended if necessary). // Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src); void SmiToInteger64(Register dst, Register src);
void SmiToInteger64(Register dst, const Operand& src);
// Multiply a positive smi's integer value by a power of two. // Multiply a positive smi's integer value by a power of two.
// Provides result as 64-bit integer value. // Provides result as 64-bit integer value.
@ -234,6 +238,8 @@ class MacroAssembler: public Assembler {
void SmiCompare(Register dst, const Operand& src); void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src); void SmiCompare(const Operand& dst, Register src);
void SmiCompare(const Operand& dst, Smi* src); void SmiCompare(const Operand& dst, Smi* src);
// Compare the int32 in src register to the value of the smi stored at dst.
void SmiCompareInteger32(const Operand& dst, Register src);
// Sets sign and zero flags depending on value of smi in register. // Sets sign and zero flags depending on value of smi in register.
void SmiTest(Register src); void SmiTest(Register src);
@ -550,6 +556,11 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a smi. Used in debug code. // Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object); void AbortIfNotSmi(Register object);
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message);
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Exception handling // Exception handling

39
deps/v8/src/x64/stub-cache-x64.cc

@ -985,30 +985,30 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements; Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length. // Get the array's length into rax and calculate new length.
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset)); __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue); STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
__ SmiAddConstant(rax, rax, Smi::FromInt(argc)); __ addl(rax, Immediate(argc));
// Get the element's length into rcx. // Get the element's length into rcx.
__ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset)); __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// Check if we could survive without allocation. // Check if we could survive without allocation.
__ SmiCompare(rax, rcx); __ cmpl(rax, rcx);
__ j(greater, &attempt_to_grow_elements); __ j(greater, &attempt_to_grow_elements);
// Save new length. // Save new length.
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax); __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Push the element. // Push the element.
__ movq(rcx, Operand(rsp, argc * kPointerSize)); __ movq(rcx, Operand(rsp, argc * kPointerSize));
SmiIndex index =
masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
__ lea(rdx, FieldOperand(rbx, __ lea(rdx, FieldOperand(rbx,
index.reg, index.scale, rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize)); FixedArray::kHeaderSize - argc * kPointerSize));
__ movq(Operand(rdx, 0), rcx); __ movq(Operand(rdx, 0), rcx);
// Check if value is a smi. // Check if value is a smi.
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ JumpIfNotSmi(rcx, &with_write_barrier); __ JumpIfNotSmi(rcx, &with_write_barrier);
__ bind(&exit); __ bind(&exit);
@ -1020,6 +1020,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
RecordWriteStub stub(rbx, rdx, rcx); RecordWriteStub stub(rbx, rdx, rcx);
__ CallStub(&stub); __ CallStub(&stub);
__ ret((argc + 1) * kPointerSize); __ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements); __ bind(&attempt_to_grow_elements);
@ -1034,9 +1035,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rcx, Operand(rcx, 0)); __ movq(rcx, Operand(rcx, 0));
// Check if it's the end of elements. // Check if it's the end of elements.
index = masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
__ lea(rdx, FieldOperand(rbx, __ lea(rdx, FieldOperand(rbx,
index.reg, index.scale, rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize)); FixedArray::kHeaderSize - argc * kPointerSize));
__ cmpq(rdx, rcx); __ cmpq(rdx, rcx);
__ j(not_equal, &call_builtin); __ j(not_equal, &call_builtin);
@ -1064,8 +1064,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes. // Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset), __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta)); Smi::FromInt(kAllocationDelta));
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax); __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Elements are in new space, so write barrier is not required. // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize); __ ret((argc + 1) * kPointerSize);
@ -1128,28 +1129,26 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ j(not_equal, &miss); __ j(not_equal, &miss);
// Get the array's length into rcx and calculate new length. // Get the array's length into rcx and calculate new length.
__ movq(rcx, FieldOperand(rdx, JSArray::kLengthOffset)); __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
__ SmiSubConstant(rcx, rcx, Smi::FromInt(1)); __ subl(rcx, Immediate(1));
__ SmiTest(rcx);
__ j(negative, &return_undefined); __ j(negative, &return_undefined);
// Get the last element. // Get the last element.
__ Move(r9, Factory::the_hole_value()); __ Move(r9, Factory::the_hole_value());
SmiIndex index =
masm()->SmiToIndex(r8, rcx, times_pointer_size);
__ movq(rax, FieldOperand(rbx, __ movq(rax, FieldOperand(rbx,
index.reg, index.scale, rcx, times_pointer_size,
FixedArray::kHeaderSize)); FixedArray::kHeaderSize));
// Check if element is already the hole. // Check if element is already the hole.
__ cmpq(rax, r9); __ cmpq(rax, r9);
// If so, call slow-case to also check prototypes for value.
__ j(equal, &call_builtin); __ j(equal, &call_builtin);
// Set the array's length. // Set the array's length.
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rcx); __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
// Fill with the hole and return original value.. // Fill with the hole and return original value.
__ movq(FieldOperand(rbx, __ movq(FieldOperand(rbx,
index.reg, index.scale, rcx, times_pointer_size,
FixedArray::kHeaderSize), FixedArray::kHeaderSize),
r9); r9);
__ ret((argc + 1) * kPointerSize); __ ret((argc + 1) * kPointerSize);

18
deps/v8/src/x64/virtual-frame-x64.cc

@ -961,16 +961,18 @@ void VirtualFrame::SyncRange(int begin, int end) {
// Sync elements below the range if they have not been materialized // Sync elements below the range if they have not been materialized
// on the stack. // on the stack.
int start = Min(begin, stack_pointer_ + 1); int start = Min(begin, stack_pointer_ + 1);
int end_or_stack_pointer = Min(stack_pointer_, end);
// Emit normal push instructions for elements above stack pointer
// and use mov instructions if we are below stack pointer.
int i = start;
// If positive we have to adjust the stack pointer. while (i <= end_or_stack_pointer) {
int delta = end - stack_pointer_;
if (delta > 0) {
stack_pointer_ = end;
__ subq(rsp, Immediate(delta * kPointerSize));
}
for (int i = start; i <= end; i++) {
if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i); if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
i++;
}
while (i <= end) {
SyncElementByPushing(i);
i++;
} }
} }
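The rewritten SyncRange splits the range at the current stack pointer: elements at or below it are written with plain moves, while elements above it are materialized by pushing, which grows the stack one slot at a time instead of the old single rsp adjustment followed by stores. A control-flow sketch over a hypothetical frame type (not V8's VirtualFrame):

```cpp
#include <algorithm>
#include <vector>

// Control-flow sketch of the new SyncRange. Elements at or below the
// stack pointer are stored with mov; elements above it are pushed.
struct FrameSketch {
  std::vector<bool> synced;
  int stack_pointer;

  void SyncElementBelowStackPointer(int i) { synced[i] = true; }  // mov
  void SyncElementByPushing(int i) { synced[i] = true; ++stack_pointer; }

  void SyncRange(int begin, int end) {
    int start = std::min(begin, stack_pointer + 1);
    int end_or_stack_pointer = std::min(stack_pointer, end);
    int i = start;
    while (i <= end_or_stack_pointer) {  // below rsp: mov if needed
      if (!synced[i]) SyncElementBelowStackPointer(i);
      i++;
    }
    while (i <= end) {                   // above rsp: push each slot
      SyncElementByPushing(i);
      i++;
    }
  }
};
```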

8
deps/v8/test/cctest/test-api.cc

@ -10175,7 +10175,13 @@ v8::Handle<Value> AnalyzeStackInNativeCode(const v8::Arguments& args) {
stackTrace->GetFrame(0)); stackTrace->GetFrame(0));
checkStackFrame(origin, "baz", 8, 3, false, true, checkStackFrame(origin, "baz", 8, 3, false, true,
stackTrace->GetFrame(1)); stackTrace->GetFrame(1));
checkStackFrame(NULL, "", 1, 1, true, false, #ifdef ENABLE_DEBUGGER_SUPPORT
bool is_eval = true;
#else // ENABLE_DEBUGGER_SUPPORT
bool is_eval = false;
#endif // ENABLE_DEBUGGER_SUPPORT
checkStackFrame(NULL, "", 1, 1, is_eval, false,
stackTrace->GetFrame(2)); stackTrace->GetFrame(2));
// The last frame is an anonymous function that has the initial call to foo. // The last frame is an anonymous function that has the initial call to foo.
checkStackFrame(origin, "", 10, 1, false, false, checkStackFrame(origin, "", 10, 1, false, false,

53
deps/v8/test/cctest/test-debug.cc

@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_DEBUGGER_SUPPORT
#include <stdlib.h> #include <stdlib.h>
#include "v8.h" #include "v8.h"
@ -192,8 +194,9 @@ static int SetBreakPoint(Handle<v8::internal::JSFunction> fun, int position) {
static int break_point = 0; static int break_point = 0;
Handle<v8::internal::SharedFunctionInfo> shared(fun->shared()); Handle<v8::internal::SharedFunctionInfo> shared(fun->shared());
Debug::SetBreakPoint( Debug::SetBreakPoint(
shared, position, shared,
Handle<Object>(v8::internal::Smi::FromInt(++break_point))); Handle<Object>(v8::internal::Smi::FromInt(++break_point)),
&position);
return break_point; return break_point;
} }
@ -2027,6 +2030,51 @@ TEST(ScriptBreakPointLine) {
} }
// Test top level script break points set on lines.
TEST(ScriptBreakPointLineTopLevel) {
v8::HandleScope scope;
DebugLocalContext env;
env.ExposeDebug();
v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
v8::Undefined());
v8::Local<v8::String> script = v8::String::New(
"function f() {\n"
" a = 1; // line 1\n"
"}\n"
"a = 2; // line 3\n");
v8::Local<v8::Function> f;
{
v8::HandleScope scope;
v8::Script::Compile(script, v8::String::New("test.html"))->Run();
}
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
Heap::CollectAllGarbage(false);
SetScriptBreakPointByNameFromJS("test.html", 3, -1);
// Call f and check that there were no break points.
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
// Recompile and run script and check that break point was hit.
break_point_hit_count = 0;
v8::Script::Compile(script, v8::String::New("test.html"))->Run();
CHECK_EQ(1, break_point_hit_count);
// Call f and check that there are still no break points.
break_point_hit_count = 0;
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
CHECK_EQ(0, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
}
// Test that it is possible to remove the last break point for a function // Test that it is possible to remove the last break point for a function
// inside the break handling of that break point. // inside the break handling of that break point.
TEST(RemoveBreakPointInBreak) { TEST(RemoveBreakPointInBreak) {
@ -6569,3 +6617,4 @@ TEST(DebugEventContext) {
CheckDebuggerUnloaded(); CheckDebuggerUnloaded();
} }
#endif // ENABLE_DEBUGGER_SUPPORT

27
deps/v8/test/cctest/test-disasm-arm.cc

@ -269,6 +269,33 @@ TEST(Type0) {
COMPARE(mvn(r6, Operand(-1), LeaveCC, ne), COMPARE(mvn(r6, Operand(-1), LeaveCC, ne),
"13a06000 movne r6, #0"); "13a06000 movne r6, #0");
// mov -> movw.
if (CpuFeatures::IsSupported(ARMv7)) {
COMPARE(mov(r5, Operand(0x01234), LeaveCC, ne),
"13015234 movwne r5, #4660");
// We only disassemble one instruction so the eor instruction is not here.
COMPARE(eor(r5, r4, Operand(0x1234), LeaveCC, ne),
"1301c234 movwne ip, #4660");
// Movw can't do setcc so we don't get that here. Mov immediate with setcc
// is pretty strange anyway.
COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
"159fc000 ldrne ip, [pc, #+0]");
// We only disassemble one instruction so the eor instruction is not here.
// The eor does the setcc so we get a movw here.
COMPARE(eor(r5, r4, Operand(0x1234), SetCC, ne),
"1301c234 movwne ip, #4660");
COMPARE(movt(r5, 0x4321, ne),
"13445321 movtne r5, #17185");
COMPARE(movw(r5, 0xabcd, eq),
"030a5bcd movweq r5, #43981");
}
// Eor doesn't have an eor-negative variant, but we can do an mvn followed by
// an eor to get the same effect.
COMPARE(eor(r5, r4, Operand(0xffffff34), SetCC, ne),
"13e0c0cb mvnne ip, #203");
// and <-> bic. // and <-> bic.
COMPARE(and_(r3, r5, Operand(0xfc03ffff)), COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
"e3c537ff bic r3, r5, #66846720"); "e3c537ff bic r3, r5, #66846720");

4
deps/v8/test/cctest/test-disasm-ia32.cc

@ -276,9 +276,11 @@ TEST(DisasmIa320) {
__ jmp(&L1); __ jmp(&L1);
__ jmp(Operand(ebx, ecx, times_4, 10000)); __ jmp(Operand(ebx, ecx, times_4, 10000));
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference after_break_target = ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget()); ExternalReference(Debug_Address::AfterBreakTarget());
__ jmp(Operand::StaticVariable(after_break_target)); __ jmp(Operand::StaticVariable(after_break_target));
#endif // ENABLE_DEBUGGER_SUPPORT
__ jmp(ic, RelocInfo::CODE_TARGET); __ jmp(ic, RelocInfo::CODE_TARGET);
__ nop(); __ nop();
@ -375,7 +377,7 @@ TEST(DisasmIa320) {
__ divsd(xmm1, xmm0); __ divsd(xmm1, xmm0);
__ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000)); __ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1); __ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
__ comisd(xmm0, xmm1); __ ucomisd(xmm0, xmm1);
// 128 bit move instructions. // 128 bit move instructions.
__ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000)); __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));

2
deps/v8/test/cctest/test-func-name-inference.cc

@ -81,6 +81,7 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
int func_pos = Runtime::StringMatch(script_src, func_pos_str, 0); int func_pos = Runtime::StringMatch(script_src, func_pos_str, 0);
CHECK_NE(0, func_pos); CHECK_NE(0, func_pos);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Obtain SharedFunctionInfo for the function. // Obtain SharedFunctionInfo for the function.
Object* shared_func_info_ptr = Object* shared_func_info_ptr =
Runtime::FindSharedFunctionInfoInScript(i_script, func_pos); Runtime::FindSharedFunctionInfoInScript(i_script, func_pos);
@ -92,6 +93,7 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
SmartPointer<char> inferred_name = SmartPointer<char> inferred_name =
shared_func_info->inferred_name()->ToCString(); shared_func_info->inferred_name()->ToCString();
CHECK_EQ(ref_inferred_name, *inferred_name); CHECK_EQ(ref_inferred_name, *inferred_name);
#endif // ENABLE_DEBUGGER_SUPPORT
} }

147
deps/v8/test/cctest/test-heap-profiler.cc

@ -428,6 +428,40 @@ class NamedEntriesDetector {
} // namespace } // namespace
static const v8::HeapGraphNode* GetGlobalObject(
const v8::HeapSnapshot* snapshot) {
CHECK_EQ(1, snapshot->GetHead()->GetChildrenCount());
return snapshot->GetHead()->GetChild(0)->GetToNode();
}
static const v8::HeapGraphNode* GetProperty(const v8::HeapGraphNode* node,
v8::HeapGraphEdge::Type type,
const char* name) {
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = node->GetChild(i);
v8::String::AsciiValue prop_name(prop->GetName());
if (prop->GetType() == type && strcmp(name, *prop_name) == 0)
return prop->GetToNode();
}
return NULL;
}
static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = node->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
if (node->GetType() == v8::HeapGraphNode::STRING) {
v8::String::AsciiValue node_name(node->GetName());
if (strcmp(contents, *node_name) == 0) return true;
}
}
return false;
}
TEST(HeapSnapshot) { TEST(HeapSnapshot) {
v8::HandleScope scope; v8::HandleScope scope;
@ -458,53 +492,20 @@ TEST(HeapSnapshot) {
"var c2 = new C2(a2);"); "var c2 = new C2(a2);");
const v8::HeapSnapshot* snapshot_env2 = const v8::HeapSnapshot* snapshot_env2 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("env2")); v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
const v8::HeapGraphNode* global_env2; const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
if (i::Snapshot::IsEnabled()) {
// In case if snapshots are enabled, there will present a
// vanilla deserealized global object, without properties
// added by the test code.
CHECK_EQ(2, snapshot_env2->GetHead()->GetChildrenCount());
// Choose the global object of a bigger size.
const v8::HeapGraphNode* node0 =
snapshot_env2->GetHead()->GetChild(0)->GetToNode();
const v8::HeapGraphNode* node1 =
snapshot_env2->GetHead()->GetChild(1)->GetToNode();
global_env2 = node0->GetTotalSize() > node1->GetTotalSize() ?
node0 : node1;
} else {
CHECK_EQ(1, snapshot_env2->GetHead()->GetChildrenCount());
global_env2 = snapshot_env2->GetHead()->GetChild(0)->GetToNode();
}
// Verify that the JS global object of env2 doesn't have '..1' // Verify that the JS global object of env2 doesn't have '..1'
// properties, but has '..2' properties. // properties, but has '..2' properties.
bool has_a1 = false, has_b1_1 = false, has_b1_2 = false, has_c1 = false; CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a1"));
bool has_a2 = false, has_b2_1 = false, has_b2_2 = false, has_c2 = false; CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_1"));
// This will be needed further. CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_2"));
const v8::HeapGraphNode* a2_node = NULL; CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c1"));
for (int i = 0, count = global_env2->GetChildrenCount(); i < count; ++i) { const v8::HeapGraphNode* a2_node =
const v8::HeapGraphEdge* prop = global_env2->GetChild(i); GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a2");
v8::String::AsciiValue prop_name(prop->GetName()); CHECK_NE(NULL, a2_node);
if (strcmp("a1", *prop_name) == 0) has_a1 = true; CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_1"));
if (strcmp("b1_1", *prop_name) == 0) has_b1_1 = true; CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_2"));
if (strcmp("b1_2", *prop_name) == 0) has_b1_2 = true; CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c2"));
if (strcmp("c1", *prop_name) == 0) has_c1 = true;
if (strcmp("a2", *prop_name) == 0) {
has_a2 = true;
a2_node = prop->GetToNode();
}
if (strcmp("b2_1", *prop_name) == 0) has_b2_1 = true;
if (strcmp("b2_2", *prop_name) == 0) has_b2_2 = true;
if (strcmp("c2", *prop_name) == 0) has_c2 = true;
}
CHECK(!has_a1);
CHECK(!has_b1_1);
CHECK(!has_b1_2);
CHECK(!has_c1);
CHECK(has_a2);
CHECK(has_b2_1);
CHECK(has_b2_2);
CHECK(has_c2);
// Verify that anything related to '[ABC]1' is not reachable. // Verify that anything related to '[ABC]1' is not reachable.
NamedEntriesDetector det; NamedEntriesDetector det;
@ -565,4 +566,62 @@ TEST(HeapSnapshot) {
CHECK(has_b2_2_x_ref); CHECK(has_b2_2_x_ref);
} }
TEST(HeapSnapshotCodeObjects) {
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
env->Enter();
CompileAndRunScript(
"function lazy(x) { return x - 1; }\n"
"function compiled(x) { return x + 1; }\n"
"compiled(1)");
const v8::HeapSnapshot* snapshot =
v8::HeapProfiler::TakeSnapshot(v8::String::New("code"));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* compiled =
GetProperty(global, v8::HeapGraphEdge::PROPERTY, "compiled");
CHECK_NE(NULL, compiled);
CHECK_EQ(v8::HeapGraphNode::CLOSURE, compiled->GetType());
const v8::HeapGraphNode* lazy =
GetProperty(global, v8::HeapGraphEdge::PROPERTY, "lazy");
CHECK_NE(NULL, lazy);
CHECK_EQ(v8::HeapGraphNode::CLOSURE, lazy->GetType());
// Find references to code.
const v8::HeapGraphNode* compiled_code =
GetProperty(compiled, v8::HeapGraphEdge::INTERNAL, "code");
CHECK_NE(NULL, compiled_code);
const v8::HeapGraphNode* lazy_code =
GetProperty(lazy, v8::HeapGraphEdge::INTERNAL, "code");
CHECK_NE(NULL, lazy_code);
// Verify that non-compiled code doesn't contain references to "x"
// literal, while compiled code does.
bool compiled_references_x = false, lazy_references_x = false;
for (int i = 0, count = compiled_code->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = compiled_code->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
if (node->GetType() == v8::HeapGraphNode::CODE) {
if (HasString(node, "x")) {
compiled_references_x = true;
break;
}
}
}
for (int i = 0, count = lazy_code->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = lazy_code->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
if (node->GetType() == v8::HeapGraphNode::CODE) {
if (HasString(node, "x")) {
lazy_references_x = true;
break;
}
}
}
CHECK(compiled_references_x);
CHECK(!lazy_references_x);
}
#endif // ENABLE_LOGGING_AND_PROFILING #endif // ENABLE_LOGGING_AND_PROFILING

4
deps/v8/test/cctest/test-liveedit.cc

@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_DEBUGGER_SUPPORT
#include <stdlib.h> #include <stdlib.h>
#include "v8.h" #include "v8.h"
@ -172,3 +174,5 @@ TEST(LiveEditDiffer) {
CompareStrings("abbabababababaaabbabababababbabbbbbbbababa", CompareStrings("abbabababababaaabbabababababbabbbbbbbababa",
"bbbbabababbbabababbbabababababbabbababa"); "bbbbabababbbabababbbabababababbabbababa");
} }
#endif // ENABLE_DEBUGGER_SUPPORT

10
deps/v8/test/cctest/test-serialize.cc

@ -98,9 +98,11 @@ static int make_code(TypeCode type, int id) {
} }
#ifdef ENABLE_DEBUGGER_SUPPORT
static int register_code(int reg) { static int register_code(int reg) {
return Debug::k_register_address << kDebugIdShift | reg; return Debug::k_register_address << kDebugIdShift | reg;
} }
#endif // ENABLE_DEBUGGER_SUPPORT
TEST(ExternalReferenceEncoder) { TEST(ExternalReferenceEncoder) {
@ -113,8 +115,10 @@ TEST(ExternalReferenceEncoder) {
Encode(encoder, Runtime::kAbort)); Encode(encoder, Runtime::kAbort));
CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty), CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty),
Encode(encoder, IC_Utility(IC::kLoadCallbackProperty))); Encode(encoder, IC_Utility(IC::kLoadCallbackProperty)));
#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(DEBUG_ADDRESS, register_code(3)), CHECK_EQ(make_code(DEBUG_ADDRESS, register_code(3)),
Encode(encoder, Debug_Address(Debug::k_register_address, 3))); Encode(encoder, Debug_Address(Debug::k_register_address, 3)));
#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function_prototype = ExternalReference keyed_load_function_prototype =
ExternalReference(&Counters::keyed_load_function_prototype); ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype), CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
@ -131,8 +135,10 @@ TEST(ExternalReferenceEncoder) {
ExternalReference::address_of_real_stack_limit(); ExternalReference::address_of_real_stack_limit();
CHECK_EQ(make_code(UNCLASSIFIED, 5), CHECK_EQ(make_code(UNCLASSIFIED, 5),
encoder.Encode(real_stack_limit_address.address())); encoder.Encode(real_stack_limit_address.address()));
#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(UNCLASSIFIED, 15), CHECK_EQ(make_code(UNCLASSIFIED, 15),
encoder.Encode(ExternalReference::debug_break().address())); encoder.Encode(ExternalReference::debug_break().address()));
#endif // ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(UNCLASSIFIED, 10), CHECK_EQ(make_code(UNCLASSIFIED, 10),
encoder.Encode(ExternalReference::new_space_start().address())); encoder.Encode(ExternalReference::new_space_start().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 3), CHECK_EQ(make_code(UNCLASSIFIED, 3),
@ -150,8 +156,10 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort))); decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort)));
CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)), CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)),
decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty))); decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty)));
#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(AddressOf(Debug_Address(Debug::k_register_address, 3)), CHECK_EQ(AddressOf(Debug_Address(Debug::k_register_address, 3)),
decoder.Decode(make_code(DEBUG_ADDRESS, register_code(3)))); decoder.Decode(make_code(DEBUG_ADDRESS, register_code(3))));
#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function = ExternalReference keyed_load_function =
ExternalReference(&Counters::keyed_load_function_prototype); ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(keyed_load_function.address(), CHECK_EQ(keyed_load_function.address(),
@ -164,8 +172,10 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(make_code(UNCLASSIFIED, 4))); decoder.Decode(make_code(UNCLASSIFIED, 4)));
CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(), CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(),
decoder.Decode(make_code(UNCLASSIFIED, 5))); decoder.Decode(make_code(UNCLASSIFIED, 5)));
#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::debug_break().address(), CHECK_EQ(ExternalReference::debug_break().address(),
decoder.Decode(make_code(UNCLASSIFIED, 15))); decoder.Decode(make_code(UNCLASSIFIED, 15)));
#endif // ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::new_space_start().address(), CHECK_EQ(ExternalReference::new_space_start().address(),
decoder.Decode(make_code(UNCLASSIFIED, 10))); decoder.Decode(make_code(UNCLASSIFIED, 10)));
} }

15
deps/v8/test/mjsunit/apply.js

@ -112,12 +112,25 @@ function al() {
return arguments.length + arguments[arguments.length - 1]; return arguments.length + arguments[arguments.length - 1];
} }
var stack_corner_case_failure = false;
for (var j = 1; j < 0x40000000; j <<= 1) { for (var j = 1; j < 0x40000000; j <<= 1) {
try { try {
var a = new Array(j); var a = new Array(j);
a[j - 1] = 42; a[j - 1] = 42;
assertEquals(42 + j, al.apply(345, a)); assertEquals(42 + j, al.apply(345, a));
} catch (e) { } catch (e) {
if (e.toString().indexOf("Maximum call stack size exceeded") != -1) {
// For some combinations of build settings, it may be the case that the
// stack here is just tall enough to contain the array whose size is
// specified by j but is not tall enough to contain the activation
// record for the apply call. Allow one such corner case through,
// checking that the length check will do the right thing for an array
// the next size up.
assertEquals(false, stack_corner_case_failure);
stack_corner_case_failure = true;
continue;
}
assertTrue(e.toString().indexOf("Function.prototype.apply") != -1, assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,
"exception does not contain Function.prototype.apply: " + "exception does not contain Function.prototype.apply: " +
e.toString()); e.toString());
@ -127,7 +140,7 @@ for (var j = 1; j < 0x40000000; j <<= 1) {
a = new Array(j); a = new Array(j);
a[j - 1] = 42; a[j - 1] = 42;
al.apply(345, a); al.apply(345, a);
assertUnreachable("Apply of arrray with length " + a.length + assertUnreachable("Apply of array with length " + a.length +
" should have thrown"); " should have thrown");
} catch (e) { } catch (e) {
assertTrue(e.toString().indexOf("Function.prototype.apply") != -1, assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,

8
deps/v8/test/mjsunit/debug-setbreakpoint.js

@ -116,7 +116,7 @@ function listener(event, exec_state, event_data, data) {
mirror = debug.MakeMirror(o.a); mirror = debug.MakeMirror(o.a);
testArguments(dcp, '{"type":"handle","target":' + mirror.handle() + '}', true, false); testArguments(dcp, '{"type":"handle","target":' + mirror.handle() + '}', true, false);
testArguments(dcp, '{"type":"script","target":"sourceUrlScript","line":1}', true, true); testArguments(dcp, '{"type":"script","target":"sourceUrlScript","line":0}', true, true);
// Indicate that all was processed. // Indicate that all was processed.
listenerComplete = true; listenerComplete = true;
@ -134,6 +134,7 @@ function f() {
}; };
function g() { function g() {
// Comment.
f(); f();
}; };
@ -184,3 +185,8 @@ Debug.setListener(breakListener);
sourceUrlFunc(); sourceUrlFunc();
assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL"); assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL");
// Set a break point on a line with the comment, and check that the actual position
// is the next line after the comment.
var number = Debug.setScriptBreakPointById(g_script_id, g_line + 1);
assertEquals(g_line + 2, Debug.findBreakPoint(number).actual_location.line);

3
deps/v8/test/mjsunit/mjsunit.status

@ -34,9 +34,6 @@ bugs: FAIL
# too long to run in debug mode on ARM. # too long to run in debug mode on ARM.
fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm) fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
# Issue 494: new snapshot code breaks mjsunit/apply on mac debug snapshot.
apply: PASS, FAIL if ($system == macos && $mode == debug)
big-object-literal: PASS, SKIP if ($arch == arm) big-object-literal: PASS, SKIP if ($arch == arm)
# Issue 488: this test sometimes times out. # Issue 488: this test sometimes times out.

56
deps/v8/test/mjsunit/regress/regress-747.js

@ -0,0 +1,56 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose_gc
// This test makes sure that we do flush code with heap allocated locals.
// This can be a problem if eval is used within the scope.
// See: http://code.google.com/p/v8/issues/detail?id=747
(function() {
var x = 42;
this.callEval = function() {eval('x');};
})();
try {
callEval();
} catch (e) {
assertUnreachable();
}
gc();
gc();
gc();
gc();
gc();
gc();
try {
callEval();
} catch (e) {
assertUnreachable();
}

95
deps/v8/test/mjsunit/string-externalize.js

@ -0,0 +1,95 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-externalize-string
var size = 1024;
function test() {
var str = "";
// Build an ascii cons string.
for (var i = 0; i < size; i++) {
str += String.fromCharCode(i & 0x7f);
}
assertTrue(isAsciiString(str));
var twoByteExternalWithAsciiData =
"AA" + (function() { return "A"; })();
externalizeString(twoByteExternalWithAsciiData, true /* force two-byte */);
assertFalse(isAsciiString(twoByteExternalWithAsciiData));
var realTwoByteExternalString =
"\u1234\u1234" + (function() { return "\u1234"; })();
externalizeString(realTwoByteExternalString);
assertFalse(isAsciiString(realTwoByteExternalString));
assertTrue(isAsciiString(["a", twoByteExternalWithAsciiData].join("")));
// Appending a two-byte string that contains only ascii chars should
// still produce an ascii cons.
var str1 = str + twoByteExternalWithAsciiData;
assertTrue(isAsciiString(str1));
// Force flattening of the string.
var old_length = str1.length - twoByteExternalWithAsciiData.length;
for (var i = 0; i < old_length; i++) {
assertEquals(String.fromCharCode(i & 0x7f), str1[i]);
}
for (var i = old_length; i < str1.length; i++) {
assertEquals("A", str1[i]);
}
// Flattened string should still be ascii.
assertTrue(isAsciiString(str1));
// Lower-casing an ascii string should produce ascii.
assertTrue(isAsciiString(str1.toLowerCase()));
assertFalse(isAsciiString(["a", realTwoByteExternalString].join("")));
// Appending a real two-byte string should produce a two-byte cons.
var str2 = str + realTwoByteExternalString;
assertFalse(isAsciiString(str2));
// Force flattening of the string.
old_length = str2.length - realTwoByteExternalString.length;
for (var i = 0; i < old_length; i++) {
assertEquals(String.fromCharCode(i & 0x7f), str2[i]);
}
for (var i = old_length; i < str2.length; i++) {
assertEquals("\u1234", str2[i]);
}
// Flattened string should still be two-byte.
assertFalse(isAsciiString(str2));
}
// Run the test many times to ensure IC-s don't break things.
for (var i = 0; i < 10; i++) {
test();
}