Upgrade to V8 2.2.13

v0.7.4-release
Ryan Dahl, 15 years ago
commit f86a214357
79 changed files (lines changed per file):

  1. deps/v8/ChangeLog (12)
  2. deps/v8/SConstruct (19)
  3. deps/v8/src/arm/assembler-arm.cc (90)
  4. deps/v8/src/arm/assembler-arm.h (15)
  5. deps/v8/src/arm/builtins-arm.cc (45)
  6. deps/v8/src/arm/codegen-arm-inl.h (24)
  7. deps/v8/src/arm/codegen-arm.cc (958)
  8. deps/v8/src/arm/codegen-arm.h (67)
  9. deps/v8/src/arm/constants-arm.h (7)
  10. deps/v8/src/arm/disasm-arm.cc (44)
  11. deps/v8/src/arm/full-codegen-arm.cc (205)
  12. deps/v8/src/arm/ic-arm.cc (109)
  13. deps/v8/src/arm/jump-target-arm.cc (11)
  14. deps/v8/src/arm/macro-assembler-arm.cc (82)
  15. deps/v8/src/arm/macro-assembler-arm.h (21)
  16. deps/v8/src/arm/simulator-arm.cc (45)
  17. deps/v8/src/arm/stub-cache-arm.cc (366)
  18. deps/v8/src/arm/virtual-frame-arm.cc (97)
  19. deps/v8/src/arm/virtual-frame-arm.h (24)
  20. deps/v8/src/builtins.cc (6)
  21. deps/v8/src/codegen.h (286)
  22. deps/v8/src/flag-definitions.h (1)
  23. deps/v8/src/full-codegen.cc (72)
  24. deps/v8/src/full-codegen.h (5)
  25. deps/v8/src/globals.h (9)
  26. deps/v8/src/heap-inl.h (56)
  27. deps/v8/src/heap.cc (632)
  28. deps/v8/src/heap.h (113)
  29. deps/v8/src/ia32/builtins-ia32.cc (23)
  30. deps/v8/src/ia32/codegen-ia32.cc (569)
  31. deps/v8/src/ia32/codegen-ia32.h (76)
  32. deps/v8/src/ia32/full-codegen-ia32.cc (202)
  33. deps/v8/src/ia32/ic-ia32.cc (103)
  34. deps/v8/src/ia32/macro-assembler-ia32.cc (106)
  35. deps/v8/src/ia32/macro-assembler-ia32.h (6)
  36. deps/v8/src/ia32/stub-cache-ia32.cc (504)
  37. deps/v8/src/ia32/virtual-frame-ia32.h (2)
  38. deps/v8/src/jump-target-heavy.cc (29)
  39. deps/v8/src/jump-target-light.cc (5)
  40. deps/v8/src/macros.py (10)
  41. deps/v8/src/mark-compact.cc (193)
  42. deps/v8/src/mark-compact.h (62)
  43. deps/v8/src/objects-debug.cc (3)
  44. deps/v8/src/objects-inl.h (110)
  45. deps/v8/src/objects.cc (21)
  46. deps/v8/src/objects.h (214)
  47. deps/v8/src/platform-freebsd.cc (6)
  48. deps/v8/src/platform-linux.cc (3)
  49. deps/v8/src/profile-generator.cc (6)
  50. deps/v8/src/runtime.cc (138)
  51. deps/v8/src/runtime.h (1)
  52. deps/v8/src/spaces-inl.h (224)
  53. deps/v8/src/spaces.cc (444)
  54. deps/v8/src/spaces.h (278)
  55. deps/v8/src/string.js (39)
  56. deps/v8/src/stub-cache.h (8)
  57. deps/v8/src/v8.cc (4)
  58. deps/v8/src/v8natives.js (20)
  59. deps/v8/src/version.cc (2)
  60. deps/v8/src/virtual-frame-light-inl.h (20)
  61. deps/v8/src/x64/assembler-x64.h (2)
  62. deps/v8/src/x64/builtins-x64.cc (44)
  63. deps/v8/src/x64/codegen-x64.cc (705)
  64. deps/v8/src/x64/codegen-x64.h (39)
  65. deps/v8/src/x64/full-codegen-x64.cc (196)
  66. deps/v8/src/x64/ic-x64.cc (131)
  67. deps/v8/src/x64/macro-assembler-x64.cc (169)
  68. deps/v8/src/x64/macro-assembler-x64.h (16)
  69. deps/v8/src/x64/stub-cache-x64.cc (392)
  70. deps/v8/src/x64/virtual-frame-x64.h (2)
  71. deps/v8/test/cctest/test-api.cc (37)
  72. deps/v8/test/cctest/test-assembler-arm.cc (36)
  73. deps/v8/test/cctest/test-disasm-arm.cc (46)
  74. deps/v8/test/cctest/test-heap.cc (26)
  75. deps/v8/test/cctest/test-spaces.cc (22)
  76. deps/v8/test/es5conform/es5conform.status (8)
  77. deps/v8/test/mjsunit/get-own-property-descriptor.js (64)
  78. deps/v8/test/mjsunit/string-charat.js (228)
  79. deps/v8/test/mjsunit/string-index.js (24)

deps/v8/ChangeLog (12)

@ -1,3 +1,15 @@
2010-05-31: Version 2.2.13
Implement Object.getOwnPropertyDescriptor for element indices and
strings (issue 599).
Fix bug for windows 64 bit C calls from generated code.
Add new scons flag unalignedaccesses for arm builds.
Performance improvements on all platforms.
2010-05-26: Version 2.2.12
Allowed accessors to be defined on objects rather than just object

deps/v8/SConstruct (19)

@ -204,10 +204,16 @@ LIBRARY_FLAGS = {
'LINKFLAGS': ['-m32']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
'unalignedaccesses:on' : {
'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=1']
},
'unalignedaccesses:off' : {
'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=0']
}
},
'simulator:arm': {
'CCFLAGS': ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'],
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:mips': {
@ -733,6 +739,11 @@ SIMPLE_OPTIONS = {
'default': 'none',
'help': 'build with simulator'
},
'unalignedaccesses': {
'values': ['default', 'on', 'off'],
'default': 'default',
'help': 'set whether the ARM target supports unaligned accesses'
},
'disassembler': {
'values': ['on', 'off'],
'default': 'off',
@ -851,6 +862,10 @@ def VerifyOptions(env):
Abort("Shared Object soname not applicable for static library.")
if env['os'] != 'win32' and env['pgo'] != 'off':
Abort("Profile guided optimization only supported on Windows.")
if not (env['arch'] == 'arm' or env['simulator'] == 'arm') and ('unalignedaccesses' in ARGUMENTS):
print env['arch']
print env['simulator']
Abort("Option unalignedaccesses only supported for the ARM architecture.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %

deps/v8/src/arm/assembler-arm.cc (90)

@ -903,20 +903,6 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
// Data-processing instructions.
// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
// Instruction details available in ARM DDI 0406A, A8-464.
// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
const Operand& src3, Condition cond) {
ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
emit(cond | 0x3F*B21 | src3.imm32_*B16 |
dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
}
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
@ -1106,6 +1092,82 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
// Bitfield manipulation instructions.
// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
// ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
Register src,
int lsb,
int width,
Condition cond) {
// v7 and above.
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
lsb*B7 | B6 | B4 | src.code());
}
// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
// sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
Register src,
int lsb,
int width,
Condition cond) {
// v7 and above.
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
lsb*B7 | B6 | B4 | src.code());
}
// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
// v7 and above.
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
// bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
Register src,
int lsb,
int width,
Condition cond) {
// v7 and above.
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
src.code());
}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));

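Note on the new bitfield emitters above: ubfx, sbfx, bfc and bfi reduce to ordinary mask-and-shift arithmetic on 32-bit values. The following is a minimal C++ sketch of their semantics, mirroring the simulator changes later in this commit; the free-standing helper functions are illustrative only and are not V8 code.

#include <cassert>
#include <cstdint>

// Unsigned bitfield extract: width bits starting at lsb, zero-extended.
uint32_t ubfx(uint32_t src, int lsb, int width) {
  uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);
  return (src >> lsb) & mask;
}

// Signed bitfield extract: width bits starting at lsb, sign-extended.
int32_t sbfx(uint32_t src, int lsb, int width) {
  uint32_t shifted = src << (32 - lsb - width);
  return static_cast<int32_t>(shifted) >> (32 - width);
}

// Bitfield clear: zero width bits at lsb, preserving the rest.
uint32_t bfc(uint32_t dst, int lsb, int width) {
  uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);
  return dst & ~(mask << lsb);
}

// Bitfield insert: copy the low width bits of src into dst at position lsb.
uint32_t bfi(uint32_t dst, uint32_t src, int lsb, int width) {
  uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);
  return (dst & ~(mask << lsb)) | ((src & mask) << lsb);
}

int main() {
  assert(ubfx(0xabcd1234u, 8, 8) == 0x12u);
  assert(sbfx(0x0000ff00u, 8, 8) == -1);
  assert(bfc(0xffffffffu, 4, 8) == 0xfffff00fu);
  assert(bfi(0x00000000u, 0x5u, 4, 4) == 0x50u);
  return 0;
}

With these instructions available, MacroAssembler::GetLeastBitsFromSmi can extract the untagged low bits of a smi in a single ubfx, as seen in the macro-assembler-arm.cc changes further down.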
deps/v8/src/arm/assembler-arm.h (15)

@ -671,8 +671,6 @@ class Assembler : public Malloced {
void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
// Data-processing instructions
void ubfx(Register dst, Register src1, const Operand& src2,
const Operand& src3, Condition cond = al);
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@ -759,6 +757,19 @@ class Assembler : public Malloced {
void clz(Register dst, Register src, Condition cond = al); // v5 and above
// Bitfield manipulation instructions. v7 and above.
void ubfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void bfc(Register dst, int lsb, int width, Condition cond = al);
void bfi(Register dst, Register src, int lsb, int width,
Condition cond = al);
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);

deps/v8/src/arm/builtins-arm.cc (45)

@ -138,7 +138,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Clear the heap tag on the elements array.
__ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
// Initialize the FixedArray and fill it with holes. FixedArray length is not
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array (untagged)
@ -146,7 +146,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ mov(scratch3, Operand(initial_capacity));
__ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
@ -243,23 +243,23 @@ static void AllocateJSArray(MacroAssembler* masm,
__ and_(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
// Initialize the fixed array and fill it with holes. FixedArray length is not
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// elements_array_storage: elements array (untagged)
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
// Convert array_size from smi to value.
__ mov(array_size,
Operand(array_size, ASR, kSmiTagSize));
ASSERT(kSmiTag == 0);
__ tst(array_size, array_size);
// Length of the FixedArray is the number of pre-allocated elements if
// the actual JSArray has length 0 and the size of the JSArray for non-empty
// JSArrays. The length of a FixedArray is not stored as a smi.
__ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
// JSArrays. The length of a FixedArray is stored as a smi.
__ mov(array_size,
Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
LeaveCC,
eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@ -267,10 +267,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Calculate elements array and elements array end.
// result: JSObject
// elements_array_storage: elements array element storage
// array_size: size of elements array
// array_size: smi-tagged size of elements array
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(elements_array_end,
elements_array_storage,
Operand(array_size, LSL, kPointerSizeLog2));
Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@ -543,7 +544,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
// r7: undefined
// r7: undefined value
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);
@ -555,14 +556,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
// r7: undefined
// r7: undefined value
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
// r7: undefined
// r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
@ -572,7 +573,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
// r3: object size
// r4: JSObject (not tagged)
// r7: undefined
// r7: undefined value
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@ -588,7 +589,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
// r7: undefined
// r7: undefined value
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
{ Label loop, entry;
@ -611,7 +612,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
// r7: undefined
// r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
@ -633,7 +634,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: start of next object
// r7: undefined
// r7: undefined value
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace(
r0,
@ -648,13 +649,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
// r7: undefined
// r7: undefined value
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset);
__ str(r3, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ mov(r0, Operand(r3, LSL, kSmiTagSize));
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
// r1: constructor function
@ -1047,6 +1049,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r2, r0); // Check formal and actual parameter counts.

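Many of the edits in this file (and in ic-arm.cc and full-codegen-arm.cc below) follow from FixedArray lengths, and the formal parameter count read above, now being handled as smis. A short C++ sketch of 32-bit smi tagging and of the scaled addressing used above follows; the constants are the standard 32-bit V8 values and the helper names are illustrative only, not V8 code.

#include <cstdint>

const int kSmiTagSize = 1;        // low bit is the smi tag (tag value 0)
const int kPointerSizeLog2 = 2;   // 4-byte pointers on ARM/ia32

int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
int32_t SmiToInt(int32_t smi)     { return smi >> kSmiTagSize; }

// A smi index is already value * 2, so scaling by
// (kPointerSizeLog2 - kSmiTagSize) yields value * kPointerSize without
// untagging first (see the LSL operands in AllocateJSArray and the keyed ICs).
uintptr_t ElementAddress(uintptr_t elements, int32_t smi_index) {
  return elements + (static_cast<uintptr_t>(smi_index)
                     << (kPointerSizeLog2 - kSmiTagSize));
}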
deps/v8/src/arm/codegen-arm-inl.h (24)

@ -36,30 +36,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
void CodeGenerator::LoadConditionAndSpill(Expression* expression,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control) {
LoadCondition(expression, true_target, false_target, force_control);
}
void CodeGenerator::LoadAndSpill(Expression* expression) {
ASSERT(VirtualFrame::SpilledScope::is_spilled());
Load(expression);
}
void CodeGenerator::VisitAndSpill(Statement* statement) {
Visit(statement);
}
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
VisitStatements(statements);
}
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }

deps/v8/src/arm/codegen-arm.cc (958)

File diff suppressed because it is too large

deps/v8/src/arm/codegen-arm.h (67)

@ -101,6 +101,11 @@ class Reference BASE_EMBEDDED {
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference afterwards get then dup it now. Otherwise mark
// it as used.
inline void DupIfPersist();
private:
CodeGenerator* cgen_;
Expression* expression_;
@ -252,16 +257,6 @@ class CodeGenerator: public AstVisitor {
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
inline void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void Generate(CompilationInfo* info);
@ -299,19 +294,6 @@ class CodeGenerator: public AstVisitor {
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
inline void LoadAndSpill(Expression* expression);
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
inline void LoadConditionAndSpill(Expression* expression,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
@ -445,10 +427,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateCharFromCode(ZoneList<Expression*>* args);
void GenerateStringCharFromCode(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -693,38 +678,6 @@ class GenericBinaryOpStub : public CodeStub {
class StringHelper : public AllStatic {
public:
// Generates fast code for getting a char code out of a string
// object at the given index. May bail out for four reasons (in the
// listed order):
// * Receiver is not a string (receiver_not_string label).
// * Index is not a smi (index_not_smi label).
// * Index is out of range (index_out_of_range).
// * Some other reason (slow_case label). In this case it's
// guaranteed that the above conditions are not violated,
// e.g. it's safe to assume the receiver is a string and the
// index is a non-negative smi < length.
// When successful, object, index, and scratch are clobbered.
// Otherwise, scratch and result are clobbered.
static void GenerateFastCharCodeAt(MacroAssembler* masm,
Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_smi,
Label* index_out_of_range,
Label* slow_case);
// Generates code for creating a one-char string from the given char
// code. May do a runtime call, so any register can be clobbered
// and, if the given invoke flag specifies a call, an internal frame
// is required. In tail call mode the result must be r0 register.
static void GenerateCharFromCode(MacroAssembler* masm,
Register code,
Register scratch,
Register result,
InvokeFlag flag);
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersLong adds too much

deps/v8/src/arm/constants-arm.h (7)

@ -66,10 +66,15 @@
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
// Simulator should support ARM5 instructions.
// Simulator should support ARM5 instructions and unaligned access by default.
#if !defined(__arm__)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
# ifndef CAN_USE_UNALIGNED_ACCESSES
# define CAN_USE_UNALIGNED_ACCESSES 1
# endif
#endif
#if CAN_USE_UNALIGNED_ACCESSES

deps/v8/src/arm/disasm-arm.cc (44)

@ -401,6 +401,20 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
PrintCondition(instr);
return 4;
}
case 'f': { // 'f: bitfield instructions - v7 and above.
uint32_t lsbit = instr->Bits(11, 7);
uint32_t width = instr->Bits(20, 16) + 1;
if (instr->Bit(21) == 0) {
// BFC/BFI:
// Bits 20-16 represent most-significant bit. Convert to width.
width -= lsbit;
ASSERT(width > 0);
}
ASSERT((width + lsbit) <= 32);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d, #%d", lsbit, width);
return 1;
}
case 'h': { // 'h: halfword operation for extra loads and stores
if (instr->HasH()) {
Print("h");
@ -446,16 +460,6 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Field());
return 5;
} else if ((format[3] == '1') && (format[4] == '6')) {
ASSERT(STRING_STARTS_WITH(format, "off16to20"));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Bits(20, 16) +1);
return 9;
} else if (format[3] == '7') {
ASSERT(STRING_STARTS_WITH(format, "off7to11"));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->ShiftAmountField());
return 8;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
@ -882,10 +886,26 @@ void Decoder::DecodeType3(Instr* instr) {
case 3: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = widthminus1 + lsbit;
if (msbit <= 31) {
Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
if (instr->Bit(22)) {
Format(instr, "ubfx'cond 'rd, 'rm, 'f");
} else {
Format(instr, "sbfx'cond 'rd, 'rm, 'f");
}
} else {
UNREACHABLE();
}
} else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
if (msbit >= lsbit) {
if (instr->RmField() == 15) {
Format(instr, "bfc'cond 'rd, 'f");
} else {
Format(instr, "bfi'cond 'rd, 'rm, 'f");
}
} else {
UNREACHABLE();
}

deps/v8/src/arm/full-codegen-arm.cc (205)

@ -917,7 +917,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Setup the four remaining stack slots.
__ push(r0); // Map.
__ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r1, Operand(r1, LSL, kSmiTagSize));
__ mov(r0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
@ -928,7 +927,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(r1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
__ Push(r1, r0);
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r1, Operand(r1, LSL, kSmiTagSize));
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
@ -1829,76 +1827,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
Handle<String> name = expr->name();
if (strcmp("_IsSmi", *name->ToCString()) == 0) {
EmitIsSmi(expr->arguments());
} else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
EmitIsNonNegativeSmi(expr->arguments());
} else if (strcmp("_IsObject", *name->ToCString()) == 0) {
EmitIsObject(expr->arguments());
} else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
EmitIsUndetectableObject(expr->arguments());
} else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
EmitIsFunction(expr->arguments());
} else if (strcmp("_IsArray", *name->ToCString()) == 0) {
EmitIsArray(expr->arguments());
} else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
EmitIsRegExp(expr->arguments());
} else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
EmitIsConstructCall(expr->arguments());
} else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
EmitObjectEquals(expr->arguments());
} else if (strcmp("_Arguments", *name->ToCString()) == 0) {
EmitArguments(expr->arguments());
} else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
EmitArgumentsLength(expr->arguments());
} else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
EmitClassOf(expr->arguments());
} else if (strcmp("_Log", *name->ToCString()) == 0) {
EmitLog(expr->arguments());
} else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
EmitRandomHeapNumber(expr->arguments());
} else if (strcmp("_SubString", *name->ToCString()) == 0) {
EmitSubString(expr->arguments());
} else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
EmitRegExpExec(expr->arguments());
} else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
EmitValueOf(expr->arguments());
} else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
EmitSetValueOf(expr->arguments());
} else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
EmitNumberToString(expr->arguments());
} else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
EmitCharFromCode(expr->arguments());
} else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
EmitFastCharCodeAt(expr->arguments());
} else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
EmitStringAdd(expr->arguments());
} else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
EmitStringCompare(expr->arguments());
} else if (strcmp("_MathPow", *name->ToCString()) == 0) {
EmitMathPow(expr->arguments());
} else if (strcmp("_MathSin", *name->ToCString()) == 0) {
EmitMathSin(expr->arguments());
} else if (strcmp("_MathCos", *name->ToCString()) == 0) {
EmitMathCos(expr->arguments());
} else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
EmitMathSqrt(expr->arguments());
} else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
EmitCallFunction(expr->arguments());
} else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
EmitRegExpConstructResult(expr->arguments());
} else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
EmitSwapElements(expr->arguments());
} else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
EmitGetFromCache(expr->arguments());
} else {
UNREACHABLE();
}
}
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@ -2349,49 +2277,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label slow_case, done;
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ tst(r0, Operand(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
__ b(nz, &slow_case);
__ mov(r1, Operand(Factory::single_character_string_cache()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code.
__ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ cmp(r1, r2);
__ b(eq, &slow_case);
__ mov(r0, r1);
__ b(&done);
Label done;
StringCharFromCodeGenerator generator(r0, r1);
generator.GenerateFast(masm_);
__ jmp(&done);
__ bind(&slow_case);
__ push(r0);
__ CallRuntime(Runtime::kCharFromCode, 1);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, r0);
Apply(context_, r1);
}
void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
// TODO(fsc): Port the complete implementation from the classic back-end.
// Move the undefined value into the result register, which will
// trigger the slow case.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
Apply(context_, r0);
void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForValue(args->at(0), kStack);
VisitForValue(args->at(1), kAccumulator);
Register object = r1;
Register index = r0;
Register scratch = r2;
Register result = r3;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharCodeAtGenerator generator(object,
index,
scratch,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
__ jmp(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
__ LoadRoot(result, Heap::kNanValueRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
// Load the undefined value into the result register, which will
// trigger conversion.
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, result);
}
void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForValue(args->at(0), kStack);
VisitForValue(args->at(1), kAccumulator);
Register object = r1;
Register index = r0;
Register scratch1 = r2;
Register scratch2 = r3;
Register result = r0;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharAtGenerator generator(object,
index,
scratch1,
scratch2,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
__ jmp(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
__ LoadRoot(result, Heap::kEmptyStringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
__ mov(result, Operand(Smi::FromInt(0)));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, result);
}
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());

deps/v8/src/arm/ic-arm.cc (109)

@ -163,11 +163,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// t1 - used to hold the capacity mask of the dictionary
//
@ -235,7 +235,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ ldr(t0, FieldMemOperand(t2, kValueOffset));
__ ldr(key, FieldMemOperand(t2, kValueOffset));
}
@ -579,7 +579,13 @@ static inline bool IsInlinedICSite(Address address,
}
Address address_after_nop = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop = Assembler::instr_at(address_after_nop);
ASSERT(Assembler::IsBranch(instr_after_nop));
// There may be some reg-reg move and frame merging code to skip over before
// the branch back from the DeferredReferenceGetKeyedValue code to the inlined
// code.
while (!Assembler::IsBranch(instr_after_nop)) {
address_after_nop += Assembler::kInstrSize;
instr_after_nop = Assembler::instr_at(address_after_nop);
}
// Find the end of the inlined code for handling the load.
int b_offset =
@ -743,9 +749,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Untag key into r2..
__ mov(r2, Operand(key, ASR, kSmiTagSize));
// Get the elements array of the object.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
@ -754,12 +757,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset));
__ cmp(r2, r3);
__ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ cmp(key, Operand(r3));
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));
// The key is a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
@ -770,7 +775,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ bind(&check_pixel_array);
@ -778,6 +782,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
__ mov(r2, Operand(key, ASR, kSmiTagSize));
__ cmp(r2, ip);
__ b(hs, &slow);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
@ -788,14 +793,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
__ mov(r0, r2);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@ -808,70 +812,39 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r0 : key (index)
// -- r1 : receiver
// -----------------------------------
Label miss;
Label index_not_smi;
Label index_out_of_range;
Label slow_char_code;
Label got_char_code;
Register object = r1;
Register receiver = r1;
Register index = r0;
Register code = r2;
Register scratch = r3;
Register scratch1 = r2;
Register scratch2 = r3;
Register result = r0;
StringCharAtGenerator char_at_generator(receiver,
index,
scratch1,
scratch2,
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ Ret();
StringHelper::GenerateFastCharCodeAt(masm,
object,
index,
scratch,
code,
&miss, // When not a string.
&index_not_smi,
&index_out_of_range,
&slow_char_code);
// If we didn't bail out, code register contains smi tagged char
// code.
__ bind(&got_char_code);
StringHelper::GenerateCharFromCode(masm, code, scratch, r0, JUMP_FUNCTION);
#ifdef DEBUG
__ Abort("Unexpected fall-through from char from code tail call");
#endif
// Check if key is a heap number.
__ bind(&index_not_smi);
__ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true);
// Push receiver and key on the stack (now that we know they are a
// string and a number), and call runtime.
__ bind(&slow_char_code);
__ EnterInternalFrame();
__ Push(object, index);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
ASSERT(!code.is(r0));
__ mov(code, r0);
__ LeaveInternalFrame();
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
// Check if the runtime call returned NaN char code. If yes, return
// undefined. Otherwise, we can continue.
if (FLAG_debug_code) {
__ BranchOnSmi(code, &got_char_code);
__ ldr(scratch, FieldMemOperand(code, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, ip);
__ Assert(eq, "StringCharCodeAt must return smi or heap number");
}
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ cmp(code, scratch);
__ b(ne, &got_char_code);
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
__ bind(&miss);
GenerateGeneric(masm);
GenerateMiss(masm);
}
@ -1283,11 +1256,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r4, Operand(key, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ cmp(key, Operand(ip));
__ b(lo, &fast);
// Slow case, handle jump to runtime.
@ -1333,9 +1304,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Condition code from comparing key and array length is still available.
__ b(ne, &slow); // Only support writing to array[array.length].
// Check for room in the elements backing store.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
// Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
// Calculate key + 1 as smi.
ASSERT_EQ(0, kSmiTag);

deps/v8/src/arm/jump-target-arm.cc (11)

@ -69,18 +69,15 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
if (entry_frame_set_) {
// Backward branch. We have an expected frame to merge to on the
// backward edge.
if (cc == al) {
cgen()->frame()->MergeTo(&entry_frame_);
} else {
// We can't do conditional merges yet so you have to ensure that all
// conditional branches to the JumpTarget have the same virtual frame.
ASSERT(cgen()->frame()->Equals(&entry_frame_));
}
cgen()->frame()->MergeTo(&entry_frame_, cc);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
__ b(cc, &entry_label_);
if (cc == al) {
cgen()->DeleteFrame();
}
}

deps/v8/src/arm/macro-assembler-arm.cc (82)

@ -183,15 +183,18 @@ void MacroAssembler::Drop(int count, Condition cond) {
}
void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch,
Condition cond) {
if (scratch.is(no_reg)) {
eor(reg1, reg1, Operand(reg2));
eor(reg2, reg2, Operand(reg1));
eor(reg1, reg1, Operand(reg2));
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
} else {
mov(scratch, reg1);
mov(reg1, reg2);
mov(reg2, scratch);
mov(scratch, reg1, LeaveCC, cond);
mov(reg1, reg2, LeaveCC, cond);
mov(reg2, scratch, LeaveCC, cond);
}
}
@ -252,63 +255,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
const int kRSetWordShift = 3;
Label fast;
mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
// Compute the bit offset in the remembered set.
// object: heap object pointer (with tag)
// offset: offset to store location from the object
mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
and_(scratch, object, Operand(ip)); // offset into page of the object
add(offset, scratch, Operand(offset)); // add offset into the object
mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
// Calculate region number.
add(offset, object, Operand(offset)); // Add offset into the object.
and_(offset, offset, Operand(ip)); // Offset into page of the object.
mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
// Compute the page address from the heap object pointer.
// object: heap object pointer (with tag)
// offset: bit offset of store position in the remembered set
// Calculate page address.
bic(object, object, Operand(ip));
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
// object: page start
// offset: bit offset of store position in the remembered set
cmp(offset, Operand(Page::kPageSize / kPointerSize));
b(lt, &fast);
// Adjust the bit offset to be relative to the start of the extra
// remembered set and the start address to be the address of the extra
// remembered set.
sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
// Load the array length into 'scratch' and multiply by four to get the
// size in bytes of the elements.
ldr(scratch, MemOperand(object, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
// Add the page header (including remembered set), array header, and array
// body size to the page address.
add(object, object, Operand(Page::kObjectStartOffset
+ FixedArray::kHeaderSize));
add(object, object, Operand(scratch));
bind(&fast);
// Get address of the rset word.
// object: start of the remembered set (page start for the fast case)
// offset: bit offset of store position in the remembered set
bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
add(object, object, Operand(scratch, LSR, kRSetWordShift));
// Get bit offset in the rset word.
// object: address of remembered set word
// offset: bit offset of store position
and_(offset, offset, Operand(kBitsPerInt - 1));
ldr(scratch, MemOperand(object));
// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, offset));
str(scratch, MemOperand(object));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
@ -336,7 +297,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
Label done;
// First, test that the object is not in the new space. We cannot set
// remembered set bits in the new space.
// region marks for new space pages.
InNewSpace(object, scratch, eq, &done);
// Record the actual write.
@ -664,6 +625,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
@ -1328,7 +1290,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(ARMv7)) {
ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
and_(dst, dst, Operand((1 << num_least_bits) - 1));

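The RecordWriteHelper rewrite above replaces remembered-set bit bookkeeping with per-page dirty region marks: the written slot's offset within its page selects a region, and that region's bit is set in a dirty-flags word kept in the page header. Below is a loose C++ sketch of the arithmetic; the constant names match the diff, but their values and the dirty-word layout are assumptions made only to keep the sketch self-contained.

#include <cstdint>

const uintptr_t kPageAlignmentMask = (8u * 1024) - 1;  // assumed 8K pages
const int kRegionSizeLog2 = 8;                         // assumed 256-byte regions
const int kDirtyFlagOffset = 0;                        // assumed header layout

void MarkRegionDirty(uintptr_t object, uintptr_t offset) {
  // Region number: offset of the written slot within its page.
  uintptr_t region = ((object + offset) & kPageAlignmentMask) >> kRegionSizeLog2;
  // Page start: clear the low bits of the (tagged) object pointer.
  uintptr_t page = object & ~kPageAlignmentMask;
  // Set the region's bit in the page's dirty-regions word.
  uint32_t* dirty = reinterpret_cast<uint32_t*>(page + kDirtyFlagOffset);
  *dirty |= 1u << region;
}

The updated comments in macro-assembler-arm.h below state the same contract: the object address must lie in the first 8K of an allocated page.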
deps/v8/src/arm/macro-assembler-arm.h (21)

@ -88,7 +88,10 @@ class MacroAssembler: public Assembler {
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Swap(Register reg1,
Register reg2,
Register scratch = no_reg,
Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
@ -114,16 +117,14 @@ class MacroAssembler: public Assembler {
Label* branch);
// Set the remebered set bit for an offset into an
// object. RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object, Register offset, Register scracth);
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object, Register offset, Register scratch);
// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
// of an allocated page. The 'scratch' register is used in the
// implementation and all 3 registers are clobbered by the operation, as
// well as the ip register.
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
// The 'scratch' register is used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
void RecordWrite(Register object, Register offset, Register scratch);
// Push two registers. Pushes leftmost register first (to highest address).

deps/v8/src/arm/simulator-arm.cc (45)

@ -2031,7 +2031,6 @@ void Simulator::DecodeType2(Instr* instr) {
void Simulator::DecodeType3(Instr* instr) {
ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
int rd = instr->RdField();
int rn = instr->RnField();
int32_t rn_val = get_register(rn);
@ -2058,17 +2057,47 @@ void Simulator::DecodeType3(Instr* instr) {
break;
}
case 3: {
// UBFX.
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = widthminus1 + lsbit;
if (msbit <= 31) {
uint32_t rm_val =
static_cast<uint32_t>(get_register(instr->RmField()));
uint32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdField(), extr_val);
if (instr->Bit(22)) {
// ubfx - unsigned bitfield extract.
uint32_t rm_val =
static_cast<uint32_t>(get_register(instr->RmField()));
uint32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdField(), extr_val);
} else {
// sbfx - signed bitfield extract.
int32_t rm_val = get_register(instr->RmField());
int32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdField(), extr_val);
}
} else {
UNREACHABLE();
}
return;
} else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
if (msbit >= lsbit) {
// bfc or bfi - bitfield clear/insert.
uint32_t rd_val =
static_cast<uint32_t>(get_register(instr->RdField()));
uint32_t bitcount = msbit - lsbit + 1;
uint32_t mask = (1 << bitcount) - 1;
rd_val &= ~(mask << lsbit);
if (instr->RmField() != 15) {
// bfi - bitfield insert.
uint32_t rm_val =
static_cast<uint32_t>(get_register(instr->RmField()));
rm_val &= mask;
rd_val |= rm_val << lsbit;
}
set_register(instr->RdField(), rd_val);
} else {
UNREACHABLE();
}

deps/v8/src/arm/stub-cache-arm.cc (366)

@ -426,191 +426,6 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
}
class LoadInterceptorCompiler BASE_EMBEDDED {
public:
explicit LoadInterceptorCompiler(Register name) : name_(name) {}
void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
Register receiver,
Register holder,
Register scratch1,
Register scratch2,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
Label* miss_label) {
AccessorInfo* callback = NULL;
bool optimize = false;
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
if (lookup->type() == FIELD) {
optimize = true;
} else if (lookup->type() == CALLBACKS) {
Object* callback_object = lookup->GetCallbackObject();
if (callback_object->IsAccessorInfo()) {
callback = AccessorInfo::cast(callback_object);
optimize = callback->getter() != NULL;
}
}
if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
miss_label);
return;
}
// Note: starting a frame here makes GC aware of pointers pushed below.
__ EnterInternalFrame();
__ push(receiver);
__ Push(holder, name_);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
__ LeaveInternalFrame();
__ Ret();
__ bind(&interceptor_failed);
__ pop(name_);
__ pop(holder);
__ pop(receiver);
__ LeaveInternalFrame();
if (lookup->type() == FIELD) {
// We found FIELD property in prototype chain of interceptor's holder.
// Check that the maps from interceptor's holder to field's holder
// haven't changed...
holder = stub_compiler->CheckPrototypes(interceptor_holder,
holder,
lookup->holder(),
scratch1,
scratch2,
name,
miss_label);
// ... and retrieve a field from field's holder.
stub_compiler->GenerateFastPropertyLoad(masm,
r0,
holder,
lookup->holder(),
lookup->GetFieldIndex());
__ Ret();
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Prepare for tail call: push receiver to stack.
Label cleanup;
__ push(receiver);
// Check that the maps from interceptor's holder to callback's holder
// haven't changed.
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);
// Continue tail call preparation: push remaining parameters.
__ push(holder);
__ Move(holder, Handle<AccessorInfo>(callback));
__ push(holder);
__ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
__ Push(scratch1, name_);
// Tail call to runtime.
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
// Clean up code: we pushed receiver and need to remove it.
__ bind(&cleanup);
__ pop(scratch2);
}
}
void CompileRegular(MacroAssembler* masm,
Register receiver,
Register holder,
Register scratch,
JSObject* interceptor_holder,
Label* miss_label) {
PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}
private:
Register name_;
};
static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
StubCompiler* stub_compiler,
MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
stub_compiler->CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
if (lookup->IsProperty() && lookup->IsCacheable()) {
compiler->CompileCacheable(masm,
stub_compiler,
receiver,
reg,
scratch1,
scratch2,
holder,
lookup,
name,
miss);
} else {
compiler->CompileRegular(masm,
receiver,
reg,
scratch2,
holder,
miss);
}
}
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@ -770,9 +585,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
depth1, miss);
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@ -785,9 +600,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
scratch2, name, depth2, miss);
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
scratch2, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
// safe to omit it here, as if present, it should be fetched
// by the previous CheckPrototypes.
ASSERT(depth2 == kInvalidProtoDepth);
}
// Invoke function.
if (can_do_fast_api_call) {
@ -1015,7 +838,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@ -1023,18 +846,133 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
LoadInterceptorCompiler compiler(name_reg);
CompileLoadInterceptor(&compiler,
this,
masm(),
object,
holder,
name,
lookup,
receiver,
scratch1,
scratch2,
miss);
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
if (lookup->IsProperty() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo() &&
AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
compile_followup_inline = true;
}
}
if (compile_followup_inline) {
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
__ EnterInternalFrame();
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg);
} else {
__ Push(holder_reg, name_reg);
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
name_reg,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
__ LeaveInternalFrame();
__ Ret();
__ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
__ LeaveInternalFrame();
// Check that the maps from the interceptor's holder to the lookup's holder
// haven't changed, and load the lookup's holder into the |holder_reg| register.
if (interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
lookup->holder(),
scratch1,
scratch2,
name,
miss);
}
if (lookup->type() == FIELD) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
lookup->holder(), lookup->GetFieldIndex());
__ Ret();
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
__ Move(scratch2, Handle<AccessorInfo>(callback));
// holder_reg is either receiver or scratch1.
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
__ Push(receiver, holder_reg, scratch2);
__ ldr(scratch1,
FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
__ Push(scratch1, name_reg);
} else {
__ push(receiver);
__ ldr(scratch1,
FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
__ Push(holder_reg, scratch2, scratch1, name_reg);
}
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}
}
@ -1204,6 +1142,26 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}
Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// TODO(722): implement this.
return Heap::undefined_value();
}
Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// TODO(722): implement this.
return Heap::undefined_value();
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,

97
deps/v8/src/arm/virtual-frame-arm.cc

@ -40,10 +40,8 @@ namespace internal {
#define __ ACCESS_MASM(masm())
void VirtualFrame::PopToR1R0() {
VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is in r0 and r1.
where_to_go.top_of_stack_state_ = R0_R1_TOS;
MergeTo(&where_to_go);
MergeTOSTo(R0_R1_TOS);
// Pop the two registers off the stack so they are detached from the frame.
element_count_ -= 2;
top_of_stack_state_ = NO_TOS_REGISTERS;
@ -51,10 +49,8 @@ void VirtualFrame::PopToR1R0() {
void VirtualFrame::PopToR1() {
VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is only in r1.
where_to_go.top_of_stack_state_ = R1_TOS;
MergeTo(&where_to_go);
MergeTOSTo(R1_TOS);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
top_of_stack_state_ = NO_TOS_REGISTERS;
@ -62,100 +58,98 @@ void VirtualFrame::PopToR1() {
void VirtualFrame::PopToR0() {
VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is only in r0.
where_to_go.top_of_stack_state_ = R0_TOS;
MergeTo(&where_to_go);
MergeTOSTo(R0_TOS);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::MergeTo(VirtualFrame* expected) {
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
MergeTOSTo(expected->top_of_stack_state_);
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTOSTo(
VirtualFrame::TopOfStack expected_top_of_stack_state) {
VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
__ pop(r0);
__ pop(r0, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
__ pop(r1);
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
__ pop(r0);
__ pop(r1);
__ pop(r0, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
__ pop(r1);
__ pop(r0);
__ pop(r1, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
__ push(r0);
__ push(r0, cond);
break;
case CASE_NUMBER(R0_TOS, R0_TOS):
break;
case CASE_NUMBER(R0_TOS, R1_TOS):
__ mov(r1, r0);
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_TOS, R0_R1_TOS):
__ pop(r1);
__ pop(r1, cond);
break;
case CASE_NUMBER(R0_TOS, R1_R0_TOS):
__ mov(r1, r0);
__ pop(r0);
__ mov(r1, r0, LeaveCC, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
__ push(r1);
__ push(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R0_TOS):
__ mov(r0, r1);
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_TOS, R1_TOS):
break;
case CASE_NUMBER(R1_TOS, R0_R1_TOS):
__ mov(r0, r1);
__ pop(r1);
__ mov(r0, r1, LeaveCC, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R1_R0_TOS):
__ pop(r0);
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
__ Push(r1, r0);
__ Push(r1, r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
__ push(r1);
__ push(r1, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R1_TOS):
__ push(r1);
__ mov(r1, r0);
__ push(r1, cond);
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
break;
case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
__ Swap(r0, r1, ip);
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
__ Push(r0, r1);
__ Push(r0, r1, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
__ push(r0);
__ mov(r0, r1);
__ push(r0, cond);
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_TOS):
__ push(r0);
__ push(r0, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
__ Swap(r0, r1, ip);
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
break;
@ -163,7 +157,16 @@ void VirtualFrame::MergeTOSTo(
UNREACHABLE();
#undef CASE_NUMBER
}
top_of_stack_state_ = expected_top_of_stack_state;
// A conditional merge will be followed by a conditional branch and the
// fall-through code will have an unchanged virtual frame state. If the
// merge is unconditional ('al'ways) then it might be followed by a fall
// through. We need to update the virtual frame state to match the code we
// are falling into. The final case is an unconditional merge followed by an
// unconditional branch, in which case it doesn't matter what we do to the
// virtual frame state, because the virtual frame will be invalidated.
if (cond == al) {
top_of_stack_state_ = expected_top_of_stack_state;
}
}
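The comment above is the key subtlety of MergeTOSTo: code is always emitted, but the tracked top-of-stack state may only be committed when the merge is unconditional. A rough standalone C++ sketch of that idea (my own model with made-up state names and printed pseudo-instructions, not V8 code):

#include <cstdio>

// Hypothetical model of the ARM virtual frame's top-of-stack states.
enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS };
enum Condition { al, eq };  // 'al' = always, i.e. unconditional

struct FrameModel {
  TopOfStack tos = NO_TOS_REGISTERS;

  // Emit (here: print) the instructions that move us to the expected state.
  void MergeTOSTo(TopOfStack expected, Condition cond) {
    if (tos == NO_TOS_REGISTERS && expected == R0_TOS) std::puts("pop r0");
    if (tos == R0_TOS && expected == NO_TOS_REGISTERS) std::puts("push r0");
    if (tos == R0_TOS && expected == R1_TOS) std::puts("mov r1, r0");
    // ... remaining state pairs elided in this sketch ...
    // Only an unconditional merge changes the state seen by fall-through code.
    if (cond == al) tos = expected;
  }
};

int main() {
  FrameModel frame;
  frame.MergeTOSTo(R0_TOS, eq);  // conditional: code emitted, state unchanged
  frame.MergeTOSTo(R0_TOS, al);  // unconditional: fall-through state is R0_TOS
  return frame.tos == R0_TOS ? 0 : 1;
}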
@ -264,7 +267,8 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
void VirtualFrame::CallJSFunction(int arg_count) {
// InvokeFunction requires function in r1.
EmitPop(r1);
PopToR1();
SpillAll();
// +1 for receiver.
Forget(arg_count + 1);
@ -277,7 +281,7 @@ void VirtualFrame::CallJSFunction(int arg_count) {
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
ASSERT(SpilledScope::is_spilled());
SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
@ -285,6 +289,7 @@ void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
@ -622,7 +627,17 @@ void VirtualFrame::EnsureOneFreeTOSRegister() {
void VirtualFrame::EmitPush(Register reg) {
element_count_++;
if (reg.is(cp)) {
// If we are pushing cp then we are about to make a call and things have to
// be pushed to the physical stack. There's nothing to be gained by moving
// to a TOS register and then pushing that, so we might as well push to the
// physical stack immediately.
MergeTOSTo(NO_TOS_REGISTERS);
__ push(reg);
return;
}
if (SpilledScope::is_spilled()) {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
__ push(reg);
return;
}

24
deps/v8/src/arm/virtual-frame-arm.h

@ -107,14 +107,14 @@ class VirtualFrame : public ZoneObject {
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
inline CodeGenerator* cgen();
inline CodeGenerator* cgen() const;
inline MacroAssembler* masm();
// The number of elements on the virtual frame.
int element_count() { return element_count_; }
int element_count() const { return element_count_; }
// The height of the virtual expression stack.
inline int height();
inline int height() const;
bool is_used(int num) {
switch (num) {
@ -162,7 +162,7 @@ class VirtualFrame : public ZoneObject {
// Spill all values from the frame to memory.
void SpillAll();
void AssertIsSpilled() {
void AssertIsSpilled() const {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
@ -184,7 +184,7 @@ class VirtualFrame : public ZoneObject {
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected);
void MergeTo(const VirtualFrame* expected, Condition cond = al);
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
@ -426,13 +426,13 @@ class VirtualFrame : public ZoneObject {
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
inline int parameter_count();
inline int local_count();
inline int parameter_count() const;
inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
inline int frame_pointer();
inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
@ -448,10 +448,10 @@ class VirtualFrame : public ZoneObject {
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
inline int local0_index();
inline int local0_index() const;
// The index of the base of the expression stack.
inline int expression_base_index();
inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
@ -469,9 +469,9 @@ class VirtualFrame : public ZoneObject {
// Emit instructions to get the top of stack state from where we are to where
// we want to be.
void MergeTOSTo(TopOfStack expected_state);
void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
inline bool Equals(VirtualFrame* other);
inline bool Equals(const VirtualFrame* other);
friend class JumpTarget;
};

6
deps/v8/src/builtins.cc

@ -305,7 +305,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
// remembered sets.
// region dirty marks.
ASSERT(Heap::new_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
@ -322,7 +322,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
ASSERT_EQ(elms->address() + to_trim * kPointerSize,
(elms + to_trim * kPointerSize)->address());
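To make the left-trim trick above concrete, here is a standalone word-level model (my own sketch with hypothetical map constants, not V8 code): the array is laid out as [map, length, elements...]; trimming writes a filler over the vacated prefix and re-creates the header to_trim words further in, so the kept elements never move.

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical word-level layout: slot 0 = map, slot 1 = length, then elements.
const intptr_t kFixedArrayMap = 0x1001;  // stand-in "map" values for the sketch
const intptr_t kFillerMap = 0x1002;

// Returns the index of the new header; nothing at or after it moves.
size_t LeftTrim(std::vector<intptr_t>& words, size_t to_trim) {
  intptr_t len = words[1];
  assert(static_cast<intptr_t>(to_trim) < len);
  // Stamp a filler map over the start of the vacated prefix (a real filler
  // object would also encode the size of the freed region).
  words[0] = kFillerMap;
  // The new header lives to_trim words further in, in front of the kept elements.
  words[to_trim] = kFixedArrayMap;
  words[to_trim + 1] = len - static_cast<intptr_t>(to_trim);
  return to_trim;
}

int main() {
  // [map, length=4, 10, 20, 30, 40]
  std::vector<intptr_t> arr = {kFixedArrayMap, 4, 10, 20, 30, 40};
  size_t start = LeftTrim(arr, 2);
  assert(arr[start] == kFixedArrayMap);
  assert(arr[start + 1] == 2);   // new length
  assert(arr[start + 2] == 30);  // first kept element
  return 0;
}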
@ -500,7 +500,7 @@ BUILTIN(ArrayShift) {
if (Heap::new_space()->Contains(elms)) {
// As elms still in the same space they used to be (new space),
// there is no need to update remembered set.
// there is no need to update region dirty mark.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.

286
deps/v8/src/codegen.h

@ -110,8 +110,9 @@ namespace internal {
F(ClassOf, 1, 1) \
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
F(FastCharCodeAt, 2, 1) \
F(CharFromCode, 1, 1) \
F(StringCharCodeAt, 2, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
F(ObjectEquals, 2, 1) \
F(Log, 3, 1) \
F(RandomHeapNumber, 0, 1) \
@ -179,6 +180,111 @@ class CodeGeneratorScope BASE_EMBEDDED {
};
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
class FrameRegisterState {
public:
// Captures the current state of the given frame.
explicit FrameRegisterState(VirtualFrame* frame);
// Saves the state in the stack.
void Save(MacroAssembler* masm) const;
// Restores the state from the stack.
void Restore(MacroAssembler* masm) const;
private:
// Constants indicating special actions. They should not be multiples
// of kPointerSize so they will not collide with valid offsets from
// the frame pointer.
static const int kIgnore = -1;
static const int kPush = 1;
// This flag is ored with a valid offset from the frame pointer, so
// it should fit in the low zero bits of a valid offset.
static const int kSyncedFlag = 2;
int registers_[RegisterAllocator::kNumRegisters];
};
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
class FrameRegisterState {
public:
inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
inline const VirtualFrame* frame() const { return &frame_; }
private:
VirtualFrame frame_;
};
#else
#error Unsupported target architecture.
#endif
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
virtual ~RuntimeCallHelper() {}
virtual void BeforeCall(MacroAssembler* masm) const = 0;
virtual void AfterCall(MacroAssembler* masm) const = 0;
protected:
RuntimeCallHelper() {}
private:
DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
};
// RuntimeCallHelper implementation that saves/restores state of a
// virtual frame.
class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
public:
// Does not take ownership of |frame_state|.
explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
: frame_state_(frame_state) {}
virtual void BeforeCall(MacroAssembler* masm) const;
virtual void AfterCall(MacroAssembler* masm) const;
private:
const FrameRegisterState* frame_state_;
};
// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
// newly created internal frame before/after the runtime call.
class ICRuntimeCallHelper : public RuntimeCallHelper {
public:
ICRuntimeCallHelper() {}
virtual void BeforeCall(MacroAssembler* masm) const;
virtual void AfterCall(MacroAssembler* masm) const;
};
// Trivial RuntimeCallHelper implementation.
class NopRuntimeCallHelper : public RuntimeCallHelper {
public:
NopRuntimeCallHelper() {}
virtual void BeforeCall(MacroAssembler* masm) const {}
virtual void AfterCall(MacroAssembler* masm) const {}
};
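A standalone sketch of the helper interface declared above (hypothetical Assembler type, not the real MacroAssembler): BeforeCall/AfterCall bracket a runtime call, and each subclass decides what state to set up and tear down around it.

#include <cstdio>

struct Assembler { void Emit(const char* s) { std::puts(s); } };

// Mirrors the shape of RuntimeCallHelper: subclasses decide what, if
// anything, must be preserved around the call.
class CallHelper {
 public:
  virtual ~CallHelper() {}
  virtual void BeforeCall(Assembler* masm) const = 0;
  virtual void AfterCall(Assembler* masm) const = 0;
};

// Analogue of ICRuntimeCallHelper: wrap the call in a fresh internal frame.
class FrameEnteringHelper : public CallHelper {
 public:
  void BeforeCall(Assembler* masm) const override { masm->Emit("enter frame"); }
  void AfterCall(Assembler* masm) const override { masm->Emit("leave frame"); }
};

// Analogue of NopRuntimeCallHelper: nothing to do.
class NopHelper : public CallHelper {
 public:
  void BeforeCall(Assembler*) const override {}
  void AfterCall(Assembler*) const override {}
};

void GenerateSlowCase(Assembler* masm, const CallHelper& helper) {
  helper.BeforeCall(masm);
  masm->Emit("call runtime");
  helper.AfterCall(masm);
}

int main() {
  Assembler masm;
  FrameEnteringHelper ic_helper;
  GenerateSlowCase(&masm, ic_helper);  // enter frame / call runtime / leave frame
  NopHelper nop;
  GenerateSlowCase(&masm, nop);        // just: call runtime
  return 0;
}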
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
@ -209,6 +315,8 @@ class DeferredCode: public ZoneObject {
inline void Branch(Condition cc);
void BindExit() { masm_->bind(&exit_label_); }
const FrameRegisterState* frame_state() const { return &frame_state_; }
void SaveRegisters();
void RestoreRegisters();
@ -216,28 +324,13 @@ class DeferredCode: public ZoneObject {
MacroAssembler* masm_;
private:
// Constants indicating special actions. They should not be multiples
// of kPointerSize so they will not collide with valid offsets from
// the frame pointer.
static const int kIgnore = -1;
static const int kPush = 1;
// This flag is ored with a valid offset from the frame pointer, so
// it should fit in the low zero bits of a valid offset.
static const int kSyncedFlag = 2;
int statement_position_;
int position_;
Label entry_label_;
Label exit_label_;
// C++ doesn't allow zero length arrays, so we make the array length 1 even
// if we don't need it.
static const int kRegistersArrayLength =
(RegisterAllocator::kNumRegisters == 0) ?
1 : RegisterAllocator::kNumRegisters;
int registers_[kRegistersArrayLength];
FrameRegisterState frame_state_;
#ifdef DEBUG
const char* comment_;
@ -611,6 +704,163 @@ class ToBooleanStub: public CodeStub {
};
enum StringIndexFlags {
// Accepts smis or heap numbers.
STRING_INDEX_IS_NUMBER,
// Accepts smis or heap numbers that are valid array indices
// (ECMA-262 15.4). Invalid indices are reported as being out of
// range.
STRING_INDEX_IS_ARRAY_INDEX
};
// Generates code implementing String.prototype.charCodeAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch| and |result| are clobbered.
class StringCharCodeAtGenerator {
public:
StringCharCodeAtGenerator(Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_number,
Label* index_out_of_range,
StringIndexFlags index_flags)
: object_(object),
index_(index),
scratch_(scratch),
result_(result),
receiver_not_string_(receiver_not_string),
index_not_number_(index_not_number),
index_out_of_range_(index_out_of_range),
index_flags_(index_flags) {
ASSERT(!scratch_.is(object_));
ASSERT(!scratch_.is(index_));
ASSERT(!scratch_.is(result_));
ASSERT(!result_.is(object_));
ASSERT(!result_.is(index_));
}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
Register object_;
Register index_;
Register scratch_;
Register result_;
Label* receiver_not_string_;
Label* index_not_number_;
Label* index_out_of_range_;
StringIndexFlags index_flags_;
Label call_runtime_;
Label index_not_smi_;
Label got_smi_index_;
Label exit_;
DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
};
// Generates code for creating a one-char string from a char code.
class StringCharFromCodeGenerator {
public:
StringCharFromCodeGenerator(Register code,
Register result)
: code_(code),
result_(result) {
ASSERT(!code_.is(result_));
}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
Register code_;
Register result_;
Label slow_case_;
Label exit_;
DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
};
// Generates code implementing String.prototype.charAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
class StringCharAtGenerator {
public:
StringCharAtGenerator(Register object,
Register index,
Register scratch1,
Register scratch2,
Register result,
Label* receiver_not_string,
Label* index_not_number,
Label* index_out_of_range,
StringIndexFlags index_flags)
: char_code_at_generator_(object,
index,
scratch1,
scratch2,
receiver_not_string,
index_not_number,
index_out_of_range,
index_flags),
char_from_code_generator_(scratch2, result) {}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
StringCharCodeAtGenerator char_code_at_generator_;
StringCharFromCodeGenerator char_from_code_generator_;
DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
};
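Behaviourally, StringCharAtGenerator is the composition of the two generators above: charAt(i) is charCodeAt(i) fed into char-from-code, with the same bail-out conditions. A small standalone sketch of that contract (plain C++ semantics only, not the generated assembly):

#include <cassert>
#include <optional>
#include <string>

// Fast cases only, as in the generators: bail out (nullopt) otherwise.
std::optional<int> CharCodeAtFast(const std::string& s, int index) {
  if (index < 0 || index >= static_cast<int>(s.size())) return std::nullopt;
  return static_cast<unsigned char>(s[index]);
}

std::optional<std::string> CharFromCodeFast(int code) {
  if (code < 0 || code > 0x7F) return std::nullopt;  // one-byte chars only here
  return std::string(1, static_cast<char>(code));
}

// charAt is the composition of the two fast paths.
std::optional<std::string> CharAtFast(const std::string& s, int index) {
  if (auto code = CharCodeAtFast(s, index)) return CharFromCodeFast(*code);
  return std::nullopt;
}

int main() {
  assert(CharAtFast("abc", 1).value() == "b");
  assert(!CharAtFast("abc", 5).has_value());  // out of range -> slow/bail-out path
  return 0;
}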
} // namespace internal
} // namespace v8

1
deps/v8/src/flag-definitions.h

@ -333,7 +333,6 @@ DEFINE_bool(code_stats, false, "report code statistics after GC")
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
DEFINE_bool(print_rset, false, "print remembered sets before GC")
// ic.cc
DEFINE_bool(trace_ic, false, "trace inline cache state transitions")

72
deps/v8/src/full-codegen.cc

@ -571,6 +571,78 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
}
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
Handle<String> name = expr->name();
if (strcmp("_IsSmi", *name->ToCString()) == 0) {
EmitIsSmi(expr->arguments());
} else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
EmitIsNonNegativeSmi(expr->arguments());
} else if (strcmp("_IsObject", *name->ToCString()) == 0) {
EmitIsObject(expr->arguments());
} else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
EmitIsUndetectableObject(expr->arguments());
} else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
EmitIsFunction(expr->arguments());
} else if (strcmp("_IsArray", *name->ToCString()) == 0) {
EmitIsArray(expr->arguments());
} else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
EmitIsRegExp(expr->arguments());
} else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
EmitIsConstructCall(expr->arguments());
} else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
EmitObjectEquals(expr->arguments());
} else if (strcmp("_Arguments", *name->ToCString()) == 0) {
EmitArguments(expr->arguments());
} else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
EmitArgumentsLength(expr->arguments());
} else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
EmitClassOf(expr->arguments());
} else if (strcmp("_Log", *name->ToCString()) == 0) {
EmitLog(expr->arguments());
} else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
EmitRandomHeapNumber(expr->arguments());
} else if (strcmp("_SubString", *name->ToCString()) == 0) {
EmitSubString(expr->arguments());
} else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
EmitRegExpExec(expr->arguments());
} else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
EmitValueOf(expr->arguments());
} else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
EmitSetValueOf(expr->arguments());
} else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
EmitNumberToString(expr->arguments());
} else if (strcmp("_StringCharFromCode", *name->ToCString()) == 0) {
EmitStringCharFromCode(expr->arguments());
} else if (strcmp("_StringCharCodeAt", *name->ToCString()) == 0) {
EmitStringCharCodeAt(expr->arguments());
} else if (strcmp("_StringCharAt", *name->ToCString()) == 0) {
EmitStringCharAt(expr->arguments());
} else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
EmitStringAdd(expr->arguments());
} else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
EmitStringCompare(expr->arguments());
} else if (strcmp("_MathPow", *name->ToCString()) == 0) {
EmitMathPow(expr->arguments());
} else if (strcmp("_MathSin", *name->ToCString()) == 0) {
EmitMathSin(expr->arguments());
} else if (strcmp("_MathCos", *name->ToCString()) == 0) {
EmitMathCos(expr->arguments());
} else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
EmitMathSqrt(expr->arguments());
} else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
EmitCallFunction(expr->arguments());
} else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
EmitRegExpConstructResult(expr->arguments());
} else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
EmitSwapElements(expr->arguments());
} else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
EmitGetFromCache(expr->arguments());
} else {
UNREACHABLE();
}
}
void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
Label eval_right, done;

5
deps/v8/src/full-codegen.h

@ -388,8 +388,9 @@ class FullCodeGenerator: public AstVisitor {
void EmitValueOf(ZoneList<Expression*>* arguments);
void EmitSetValueOf(ZoneList<Expression*>* arguments);
void EmitNumberToString(ZoneList<Expression*>* arguments);
void EmitCharFromCode(ZoneList<Expression*>* arguments);
void EmitFastCharCodeAt(ZoneList<Expression*>* arguments);
void EmitStringCharFromCode(ZoneList<Expression*>* arguments);
void EmitStringCharCodeAt(ZoneList<Expression*>* arguments);
void EmitStringCharAt(ZoneList<Expression*>* arguments);
void EmitStringCompare(ZoneList<Expression*>* arguments);
void EmitStringAdd(ZoneList<Expression*>* arguments);
void EmitLog(ZoneList<Expression*>* arguments);

9
deps/v8/src/globals.h

@ -303,7 +303,6 @@ class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class Array;
class JSArray;
class JSFunction;
class JSObject;
@ -544,16 +543,16 @@ enum StateTag {
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
#define MAP_SIZE_ALIGN(value) \
// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
#define MAP_POINTER_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
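A tiny worked example of the round-up pattern used by the alignment macros above (standalone sketch; the 8-byte alignment is assumed purely for illustration, not V8's actual value):

#include <cassert>
#include <cstdint>

// Assumed alignment for the example only.
const intptr_t kExampleAlignment = 8;
const intptr_t kExampleAlignmentMask = kExampleAlignment - 1;

// Same shape as OBJECT_POINTER_ALIGN: round value up to the next multiple
// of the alignment by adding the mask and clearing the low bits.
inline intptr_t ExampleAlign(intptr_t value) {
  return (value + kExampleAlignmentMask) & ~kExampleAlignmentMask;
}

int main() {
  assert(ExampleAlign(13) == 16);  // 13 rounds up to the next multiple of 8
  assert(ExampleAlign(16) == 16);  // already-aligned values are unchanged
  return 0;
}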
// The expression OFFSET_OF(type, field) computes the byte-offset

56
deps/v8/src/heap-inl.h

@ -184,7 +184,7 @@ void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
Page::SetRSet(address, offset);
Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
@ -195,7 +195,7 @@ void Heap::RecordWrites(Address address, int start, int len) {
offset < start + len * kPointerSize;
offset += kPointerSize) {
SLOW_ASSERT(Contains(address + offset));
Page::SetRSet(address, offset);
Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
}
@ -234,13 +234,40 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
CopyWords(dst, src, byte_size / kPointerSize);
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
byte_size / kPointerSize);
}
void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
Page* page = Page::FromAddress(dst);
uint32_t marks = page->GetRegionMarks();
for (int remaining = byte_size / kPointerSize;
remaining > 0;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
if (Heap::InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
dst += kPointerSize;
src += kPointerSize;
}
page->SetRegionMarks(marks);
}
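The region dirty marks updated above replace the old remembered-set bits: each page is divided into fixed-size regions with one dirty bit per region, and storing a new-space pointer into a slot sets the bit for the region containing that slot. A standalone model with made-up geometry (not V8's actual page or region sizes):

#include <cassert>
#include <cstdint>

// Hypothetical geometry: an 8K page split into 32 regions of 256 bytes,
// so a single 32-bit word of marks covers a whole page.
const uintptr_t kPageSize = 8 * 1024;
const uintptr_t kRegionSize = 256;
const uintptr_t kPageAlignmentMask = kPageSize - 1;

struct PageModel {
  uint32_t region_marks = 0;  // one dirty bit per region

  static uintptr_t RegionIndex(uintptr_t addr) {
    return (addr & kPageAlignmentMask) / kRegionSize;
  }
  uint32_t RegionMaskForAddress(uintptr_t addr) const {
    return 1u << RegionIndex(addr);
  }
  void MarkRegionDirty(uintptr_t addr) {
    region_marks |= RegionMaskForAddress(addr);
  }
};

int main() {
  PageModel page;
  uintptr_t page_start = 0x40000;            // 8K-aligned in this model
  page.MarkRegionDirty(page_start + 0x104);  // slot falls in region 1
  assert(page.region_marks == (1u << 1));
  page.MarkRegionDirty(page_start + 0x7F0);  // slot falls in region 7
  assert(page.region_marks == ((1u << 1) | (1u << 7)));
  return 0;
}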
void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
int size_in_words = byte_size / kPointerSize;
@ -250,10 +277,12 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
((OffsetFrom(reinterpret_cast<Address>(src)) -
OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
Object** end = src + size_in_words;
Object** src_slot = reinterpret_cast<Object**>(src);
Object** dst_slot = reinterpret_cast<Object**>(dst);
Object** end_slot = src_slot + size_in_words;
while (src != end) {
*dst++ = *src++;
while (src_slot != end_slot) {
*dst_slot++ = *src_slot++;
}
} else {
memmove(dst, src, byte_size);
@ -261,6 +290,17 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
}
void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
ASSERT((dst >= (src + byte_size)) ||
((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));
CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(InFromSpace(object));

632
deps/v8/src/heap.cc

@ -326,13 +326,6 @@ void Heap::GarbageCollectionPrologue() {
}
if (FLAG_gc_verbose) Print();
if (FLAG_print_rset) {
// Not all spaces have remembered set bits that we care about.
old_pointer_space_->PrintRSet();
map_space_->PrintRSet();
lo_space_->PrintRSet();
}
#endif
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@ -519,9 +512,8 @@ void Heap::ReserveSpace(
Heap::CollectGarbage(cell_space_size, CELL_SPACE);
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for the remembered
// set and a series of large-object allocations that are only just larger
// than the page size.
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
@ -572,6 +564,25 @@ void Heap::ClearJSFunctionResultCaches() {
}
#ifdef DEBUG
enum PageWatermarkValidity {
ALL_VALID,
ALL_INVALID
};
static void VerifyPageWatermarkValidity(PagedSpace* space,
PageWatermarkValidity validity) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
bool expected_value = (validity == ALL_VALID);
while (it.has_next()) {
Page* page = it.next();
ASSERT(page->IsWatermarkValid() == expected_value);
}
}
#endif
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer) {
@ -816,6 +827,20 @@ void Heap::Scavenge() {
gc_state_ = SCAVENGE;
Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif
// We do not update the allocation watermark of the top page during linear
// allocation, to avoid overhead. So to maintain the watermark invariant
// we have to manually cache the watermark and mark the top page as having an
// invalid watermark. This guarantees that dirty region iteration will use a
// correct watermark even if a linear allocation happens.
old_pointer_space_->FlushTopPageWatermark();
map_space_->FlushTopPageWatermark();
// Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin"));
@ -858,9 +883,17 @@ void Heap::Scavenge() {
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
IterateDirtyRegions(old_pointer_space_,
&IteratePointersInDirtyRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);
IterateDirtyRegions(map_space_,
&IteratePointersInDirtyMapsRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);
lo_space_->IterateDirtyRegions(&ScavengePointer);
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
@ -963,9 +996,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Copy the from-space object to its new location (given by the
// forwarding address) and fix its map.
HeapObject* target = source->map_word().ToForwardingAddress();
CopyBlock(reinterpret_cast<Object**>(target->address()),
reinterpret_cast<Object**>(source->address()),
source->SizeFromMap(map));
int size = source->SizeFromMap(map);
CopyBlock(target->address(), source->address(), size);
target->set_map(map);
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@ -973,8 +1005,10 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
target->Iterate(scavenge_visitor);
UpdateRSet(target);
ASSERT(!target->IsMap());
IterateAndMarkPointersToNewSpace(target->address(),
target->address() + size,
&ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
@ -985,117 +1019,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
void Heap::ClearRSetRange(Address start, int size_in_bytes) {
uint32_t start_bit;
Address start_word_address =
Page::ComputeRSetBitPosition(start, 0, &start_bit);
uint32_t end_bit;
Address end_word_address =
Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
0,
&end_bit);
// We want to clear the bits in the starting word starting with the
// first bit, and in the ending word up to and including the last
// bit. Build a pair of bitmasks to do that.
uint32_t start_bitmask = start_bit - 1;
uint32_t end_bitmask = ~((end_bit << 1) - 1);
// If the start address and end address are the same, we mask that
// word once, otherwise mask the starting and ending word
// separately and all the ones in between.
if (start_word_address == end_word_address) {
Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
} else {
Memory::uint32_at(start_word_address) &= start_bitmask;
Memory::uint32_at(end_word_address) &= end_bitmask;
start_word_address += kIntSize;
memset(start_word_address, 0, end_word_address - start_word_address);
}
}
class UpdateRSetVisitor: public ObjectVisitor {
public:
void VisitPointer(Object** p) {
UpdateRSet(p);
}
void VisitPointers(Object** start, Object** end) {
// Update a store into slots [start, end), used (a) to update remembered
// set when promoting a young object to old space or (b) to rebuild
// remembered sets after a mark-compact collection.
for (Object** p = start; p < end; p++) UpdateRSet(p);
}
private:
void UpdateRSet(Object** p) {
// The remembered set should not be set. It should be clear for objects
// newly copied to old space, and it is cleared before rebuilding in the
// mark-compact collector.
ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
if (Heap::InNewSpace(*p)) {
Page::SetRSet(reinterpret_cast<Address>(p), 0);
}
}
};
int Heap::UpdateRSet(HeapObject* obj) {
ASSERT(!InNewSpace(obj));
// Special handling of fixed arrays to iterate the body based on the start
// address and offset. Just iterating the pointers as in UpdateRSetVisitor
// will not work because Page::SetRSet needs to have the start of the
// object for large object pages.
if (obj->IsFixedArray()) {
FixedArray* array = FixedArray::cast(obj);
int length = array->length();
for (int i = 0; i < length; i++) {
int offset = FixedArray::kHeaderSize + i * kPointerSize;
ASSERT(!Page::IsRSetSet(obj->address(), offset));
if (Heap::InNewSpace(array->get(i))) {
Page::SetRSet(obj->address(), offset);
}
}
} else if (!obj->IsCode()) {
// Skip code object, we know it does not contain inter-generational
// pointers.
UpdateRSetVisitor v;
obj->Iterate(&v);
}
return obj->Size();
}
void Heap::RebuildRSets() {
// By definition, we do not care about remembered set bits in code,
// data, or cell spaces.
map_space_->ClearRSet();
RebuildRSets(map_space_);
old_pointer_space_->ClearRSet();
RebuildRSets(old_pointer_space_);
Heap::lo_space_->ClearRSet();
RebuildRSets(lo_space_);
}
void Heap::RebuildRSets(PagedSpace* space) {
HeapObjectIterator it(space);
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Heap::UpdateRSet(obj);
}
void Heap::RebuildRSets(LargeObjectSpace* space) {
LargeObjectIterator it(space);
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Heap::UpdateRSet(obj);
}
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
bool should_record = false;
@ -1121,9 +1044,7 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
HeapObject* target,
int size) {
// Copy the content of source to target.
CopyBlock(reinterpret_cast<Object**>(target->address()),
reinterpret_cast<Object**>(source->address()),
size);
CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
@ -1178,21 +1099,30 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
if (object_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
// Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space
// object.
HeapObject* target = HeapObject::cast(result);
promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target));
// Give the space allocated for the result a proper map by
// treating it as a free list node (not linked into the free
// list).
FreeListNode* node = FreeListNode::FromAddress(target->address());
node->set_size(object_size);
if (object->IsFixedArray()) {
// Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space
// object.
promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target));
// Give the space allocated for the result a proper map by
// treating it as a free list node (not linked into the free
// list).
FreeListNode* node = FreeListNode::FromAddress(target->address());
node->set_size(object_size);
*p = target;
} else {
// In large object space only fixed arrays might possibly contain
// intergenerational references.
// All other objects can be copied immediately and not revisited.
*p = MigrateObject(object, target, object_size);
}
*p = target;
tracer()->increment_promoted_objects_size(object_size);
return;
}
@ -1682,7 +1612,7 @@ bool Heap::CreateInitialObjects() {
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
if (obj->IsFailure()) return false;
hidden_symbol_ = String::cast(obj);
@ -1918,6 +1848,9 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
share->set_num_literals(0);
share->set_end_position(0);
share->set_function_token_position(0);
return result;
}
@ -2179,8 +2112,8 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
: lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
reinterpret_cast<Array*>(result)->set_length(length);
reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@ -2195,8 +2128,8 @@ Object* Heap::AllocateByteArray(int length) {
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
reinterpret_cast<Array*>(result)->set_length(length);
reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@ -2312,9 +2245,7 @@ Object* Heap::CopyCode(Code* code) {
// Copy code object.
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
CopyBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
CopyBlock(new_addr, old_addr, obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
@ -2460,8 +2391,8 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Copy the content. The arguments boilerplate doesn't have any
// fields that point to new space so it's safe to skip the write
// barrier here.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
reinterpret_cast<Object**>(boilerplate->address()),
CopyBlock(HeapObject::cast(result)->address(),
boilerplate->address(),
kArgumentsObjectSize);
// Set the two properties.
@ -2683,8 +2614,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (clone->IsFailure()) return clone;
Address clone_address = HeapObject::cast(clone)->address();
CopyBlock(reinterpret_cast<Object**>(clone_address),
reinterpret_cast<Object**>(source->address()),
CopyBlock(clone_address,
source->address(),
object_size);
// Update write barrier for all fields that lie beyond the header.
RecordWrites(clone_address,
@ -2696,8 +2627,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
ASSERT(Heap::InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
reinterpret_cast<Object**>(source->address()),
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
object_size);
}
@ -2968,8 +2899,8 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
reinterpret_cast<Array*>(result)->set_length(0);
reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@ -2994,9 +2925,7 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
if (obj->IsFailure()) return obj;
if (Heap::InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
CopyBlock(reinterpret_cast<Object**>(dst->address()),
reinterpret_cast<Object**>(src->address()),
FixedArray::SizeFor(len));
CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
return obj;
}
HeapObject::cast(obj)->set_map(src->map());
@ -3017,8 +2946,8 @@ Object* Heap::AllocateFixedArray(int length) {
Object* result = AllocateRawFixedArray(length);
if (!result->IsFailure()) {
// Initialize header.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
FixedArray* array = reinterpret_cast<FixedArray*>(result);
array->set_map(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(undefined_value()));
@ -3045,27 +2974,10 @@ Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
space = LO_SPACE;
}
// Specialize allocation for the space.
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
// We cannot use Heap::AllocateRaw() because it will not properly
// allocate extra remembered set bits if always_allocate() is true and
// new space allocation fails.
result = new_space_.AllocateRaw(size);
if (result->IsFailure() && always_allocate()) {
if (size <= MaxObjectSizeInPagedSpace()) {
result = old_pointer_space_->AllocateRaw(size);
} else {
result = lo_space_->AllocateRawFixedArray(size);
}
}
} else if (space == OLD_POINTER_SPACE) {
result = old_pointer_space_->AllocateRaw(size);
} else {
ASSERT(space == LO_SPACE);
result = lo_space_->AllocateRawFixedArray(size);
}
return result;
AllocationSpace retry_space =
(size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
return AllocateRaw(size, space, retry_space);
}
@ -3113,7 +3025,7 @@ Object* Heap::AllocateUninitializedFixedArray(int length) {
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(hash_table_map());
reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@ -3365,6 +3277,49 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
static void DummyScavengePointer(HeapObject** p) {
}
static void VerifyPointersUnderWatermark(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* page = it.next();
Address start = page->ObjectAreaStart();
Address end = page->AllocationWatermark();
Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
start,
end,
visit_dirty_region,
&DummyScavengePointer);
}
}
static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
LargeObjectIterator it(space);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
if (object->IsFixedArray()) {
Address slot_address = object->address();
Address end = object->address() + object->Size();
while (slot_address < end) {
HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
// When we are not in GC, the Heap::InNewSpace() predicate
// checks that pointers which satisfy it point into
// the active semispace.
Heap::InNewSpace(*slot);
slot_address += kPointerSize;
}
}
}
}
void Heap::Verify() {
ASSERT(HasBeenSetup());
@ -3373,14 +3328,23 @@ void Heap::Verify() {
new_space_.Verify();
VerifyPointersAndRSetVisitor rset_visitor;
old_pointer_space_->Verify(&rset_visitor);
map_space_->Verify(&rset_visitor);
VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
old_pointer_space_->Verify(&dirty_regions_visitor);
map_space_->Verify(&dirty_regions_visitor);
VerifyPointersVisitor no_rset_visitor;
old_data_space_->Verify(&no_rset_visitor);
code_space_->Verify(&no_rset_visitor);
cell_space_->Verify(&no_rset_visitor);
VerifyPointersUnderWatermark(old_pointer_space_,
&IteratePointersInDirtyRegion);
VerifyPointersUnderWatermark(map_space_,
&IteratePointersInDirtyMapsRegion);
VerifyPointersUnderWatermark(lo_space_);
VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
VerifyPointersVisitor no_dirty_regions_visitor;
old_data_space_->Verify(&no_dirty_regions_visitor);
code_space_->Verify(&no_dirty_regions_visitor);
cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
}
@ -3433,65 +3397,253 @@ void Heap::ZapFromSpace() {
#endif // DEBUG
int Heap::IterateRSetRange(Address object_start,
Address object_end,
Address rset_start,
ObjectSlotCallback copy_object_func) {
Address object_address = object_start;
Address rset_address = rset_start;
int set_bits_count = 0;
// Loop over all the pointers in [object_start, object_end).
while (object_address < object_end) {
uint32_t rset_word = Memory::uint32_at(rset_address);
if (rset_word != 0) {
uint32_t result_rset = rset_word;
for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
// Do not dereference pointers at or past object_end.
if ((rset_word & bitmask) != 0 && object_address < object_end) {
Object** object_p = reinterpret_cast<Object**>(object_address);
if (Heap::InNewSpace(*object_p)) {
copy_object_func(reinterpret_cast<HeapObject**>(object_p));
}
// If this pointer does not need to be remembered anymore, clear
// the remembered set bit.
if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
set_bits_count++;
}
object_address += kPointerSize;
bool Heap::IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address slot_address = start;
bool pointers_to_new_space_found = false;
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
pointers_to_new_space_found = true;
}
// Update the remembered set if it has changed.
if (result_rset != rset_word) {
Memory::uint32_at(rset_address) = result_rset;
}
slot_address += kPointerSize;
}
return pointers_to_new_space_found;
}
// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
Address page = Page::FromAddress(addr)->ObjectAreaStart();
return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}
// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
return page + ((addr - page) / Map::kSize * Map::kSize);
}
static bool IteratePointersInDirtyMaps(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
ASSERT(MapStartAlign(start) == start);
ASSERT(MapEndAlign(end) == end);
Address map_address = start;
bool pointers_to_new_space_found = false;
while (map_address < end) {
ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
ASSERT(Memory::Object_at(map_address)->IsMap());
Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)) {
pointers_to_new_space_found = true;
}
map_address += Map::kSize;
}
return pointers_to_new_space_found;
}
bool Heap::IteratePointersInDirtyMapsRegion(
Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address map_aligned_start = MapStartAlign(start);
Address map_aligned_end = MapEndAlign(end);
bool contains_pointers_to_new_space = false;
if (map_aligned_start != start) {
Address prev_map = map_aligned_start - Map::kSize;
ASSERT(Memory::Object_at(prev_map)->IsMap());
Address pointer_fields_start =
Max(start, prev_map + Map::kPointerFieldsBeginOffset);
Address pointer_fields_end =
Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
}
contains_pointers_to_new_space =
IteratePointersInDirtyMaps(map_aligned_start,
map_aligned_end,
copy_object_func)
|| contains_pointers_to_new_space;
if (map_aligned_end != end) {
ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
Address pointer_fields_end =
Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
}
return contains_pointers_to_new_space;
}
void Heap::IterateAndMarkPointersToNewSpace(Address start,
Address end,
ObjectSlotCallback callback) {
Address slot_address = start;
Page* page = Page::FromAddress(start);
uint32_t marks = page->GetRegionMarks();
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
callback(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
marks |= page->GetRegionMaskForAddress(slot_address);
}
}
slot_address += kPointerSize;
}
page->SetRegionMarks(marks);
}
uint32_t Heap::IterateDirtyRegions(
uint32_t marks,
Address area_start,
Address area_end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func) {
uint32_t newmarks = 0;
uint32_t mask = 1;
if (area_start >= area_end) {
return newmarks;
}
Address region_start = area_start;
// area_start does not necessarily coincide with start of the first region.
// Thus to calculate the beginning of the next region we have to align
// area_start by Page::kRegionSize.
Address second_region =
reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
~Page::kRegionAlignmentMask);
// Next region might be beyond area_end.
Address region_end = Min(second_region, area_end);
if (marks & mask) {
if (visit_dirty_region(region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}
mask <<= 1;
// Iterate subsequent regions which lie fully inside [area_start, area_end).
region_start = region_end;
region_end = region_start + Page::kRegionSize;
while (region_end <= area_end) {
if (marks & mask) {
if (visit_dirty_region(region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}
region_start = region_end;
region_end = region_start + Page::kRegionSize;
mask <<= 1;
}
if (region_start != area_end) {
// A small piece of the area is left unvisited because area_end does not
// coincide with a region end. Check whether the region covering the last
// part of the area is dirty.
if (marks & mask) {
if (visit_dirty_region(region_start, area_end, copy_object_func)) {
newmarks |= mask;
}
} else {
// No bits in the word were set. This is the common case.
object_address += kPointerSize * kBitsPerInt;
}
rset_address += kIntSize;
}
return set_bits_count;
return newmarks;
}
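A simplified standalone walk of a dirty-marks word, same idea as the function above but assuming the area is region-aligned and using a made-up region size (my own sketch, not V8 code): visit only regions whose bit is set, and keep a bit set only if the visitor still finds new-space pointers in that region.

#include <cassert>
#include <cstdint>

const uintptr_t kRegionSize = 256;  // assumed region size for this sketch

// Visit each dirty region of [area_start, area_end) and return updated marks.
// 'still_dirty(start, end)' reports whether the region must stay dirty.
template <typename Visitor>
uint32_t WalkDirtyRegions(uint32_t marks, uintptr_t area_start,
                          uintptr_t area_end, Visitor still_dirty) {
  uint32_t newmarks = 0;
  uint32_t mask = 1;
  // Unlike the real code, this sketch does not handle a partial first or
  // last region: area_start is assumed to be region-aligned.
  for (uintptr_t start = area_start; start < area_end;
       start += kRegionSize, mask <<= 1) {
    uintptr_t end = start + kRegionSize < area_end ? start + kRegionSize
                                                   : area_end;
    if ((marks & mask) && still_dirty(start, end)) newmarks |= mask;
  }
  return newmarks;
}

int main() {
  // Regions 0 and 2 dirty; pretend only region 2 still has new-space pointers.
  uint32_t marks = (1u << 0) | (1u << 2);
  uint32_t result = WalkDirtyRegions(
      marks, 0, 4 * kRegionSize,
      [](uintptr_t start, uintptr_t) { return start >= 2 * kRegionSize; });
  assert(result == (1u << 2));
  return 0;
}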
void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
ASSERT(space == old_pointer_space_ || space == map_space_);
static void* paged_rset_histogram = StatsTable::CreateHistogram(
"V8.RSetPaged",
0,
Page::kObjectAreaSize / kPointerSize,
30);
void Heap::IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func,
ExpectedPageWatermarkState expected_page_watermark_state) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* page = it.next();
int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
page->RSetStart(), copy_object_func);
if (paged_rset_histogram != NULL) {
StatsTable::AddHistogramSample(paged_rset_histogram, count);
uint32_t marks = page->GetRegionMarks();
if (marks != Page::kAllRegionsCleanMarks) {
Address start = page->ObjectAreaStart();
// Do not try to visit pointers beyond page allocation watermark.
// Page can contain garbage pointers there.
Address end;
if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
page->IsWatermarkValid()) {
end = page->AllocationWatermark();
} else {
end = page->CachedAllocationWatermark();
}
ASSERT(space == old_pointer_space_ ||
(space == map_space_ &&
((page->ObjectAreaStart() - end) % Map::kSize == 0)));
page->SetRegionMarks(IterateDirtyRegions(marks,
start,
end,
visit_dirty_region,
copy_object_func));
}
// Mark page watermark as invalid to maintain watermark validity invariant.
// See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
page->InvalidateWatermark(true);
}
}

113
deps/v8/src/heap.h

@ -206,6 +206,10 @@ class HeapStats;
typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
typedef bool (*DirtyRegionCallback)(Address start,
Address end,
ObjectSlotCallback copy_object_func);
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@ -740,17 +744,54 @@ class Heap : public AllStatic {
// Iterates over all the other roots in the heap.
static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
WATERMARK_CAN_BE_INVALID
};
// For each dirty region on a page in use from an old space call
// visit_dirty_region callback.
// If either visit_dirty_region or callback can cause an allocation
// in old space and change the allocation watermark, then
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having invalid watermark upon
// iteration completion.
static void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
ExpectedPageWatermarkState expected_page_watermark_state);
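To make the watermark rule above concrete, here is a minimal standalone sketch of the decision it describes (names and types are illustrative, not the V8 API; the real logic appears in the heap.cc hunk earlier in this diff):

#include <cstdint>

// Decide how far into a page it is safe to scan for pointers: up to the
// current allocation watermark when it is known to be valid, otherwise only
// up to the watermark cached before the current phase started.
enum WatermarkExpectation { EXPECT_VALID, MAY_BE_INVALID };

inline uintptr_t ScanLimitSketch(WatermarkExpectation expected,
                                 bool watermark_valid,
                                 uintptr_t current_watermark,
                                 uintptr_t cached_watermark) {
  if (expected == EXPECT_VALID || watermark_valid) {
    return current_watermark;   // slots below this are fully initialized
  }
  return cached_watermark;      // beyond this the page may hold garbage
}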
// Interpret marks as a bitvector of dirty marks for regions of size
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// the memory interval from start to end. For each dirty region call the
// visit_dirty_region callback. Returns the updated bitvector of dirty marks.
static uint32_t IterateDirtyRegions(uint32_t marks,
Address start,
Address end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);
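As a reading aid for the declaration above, a standalone sketch of the bitvector walk it describes (illustrative C++, not the V8 implementation; the first-region alignment handling done in heap.cc is omitted and the visitor signature is simplified):

#include <cstdint>

// One bit of 'marks' covers one region-sized slice of [start, end); a visitor
// returning true keeps that region's bit dirty in the returned bitvector.
typedef bool (*DirtyRegionVisitorSketch)(uintptr_t start, uintptr_t end);

uint32_t WalkDirtyRegionsSketch(uint32_t marks, uintptr_t start, uintptr_t end,
                                uintptr_t region_size,
                                DirtyRegionVisitorSketch visit) {
  uint32_t new_marks = 0;
  uint32_t bit = 1;
  for (uintptr_t region_start = start; region_start < end;
       region_start += region_size, bit <<= 1) {
    uintptr_t region_end = region_start + region_size;
    if (region_end > end) region_end = end;       // last region may be partial
    if ((marks & bit) != 0 && visit(region_start, region_end)) {
      new_marks |= bit;
    }
  }
  return new_marks;
}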
// Iterate pointers to new space found in memory interval from start to end.
// Update dirty marks for page containing start address.
static void IterateAndMarkPointersToNewSpace(Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Returns true if pointers to new space were found.
static bool IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback callback);
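A standalone sketch of what such a dirty-region visitor does (illustrative only; in V8 the slot callback is an ObjectSlotCallback such as the scavenger's pointer updater):

#include <cstdint>
#include <cstddef>

// Walk a [start, end) interval of pointer-sized slots and report whether any
// slot holds a value that lies in new space.
inline bool ScanRegionForNewSpacePointersSketch(
    uintptr_t start, uintptr_t end,
    bool (*in_new_space)(uintptr_t value),
    void (*visit_slot)(uintptr_t* slot)) {
  bool found = false;
  for (uintptr_t addr = start; addr < end; addr += sizeof(uintptr_t)) {
    uintptr_t* slot = reinterpret_cast<uintptr_t*>(addr);
    if (in_new_space(*slot)) {
      visit_slot(slot);
      found = true;
    }
  }
  return found;
}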
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Returns true if pointers to new space were found.
static bool IteratePointersInDirtyMapsRegion(Address start,
Address end,
ObjectSlotCallback callback);
// Iterates a range of remembered set addresses starting with rset_start
// corresponding to the range of allocated pointers
// [object_start, object_end).
// Returns the number of bits that were set.
static int IterateRSetRange(Address object_start,
Address object_end,
Address rset_start,
ObjectSlotCallback copy_object_func);
// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
@ -852,17 +893,6 @@ class Heap : public AllStatic {
static void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
// Clear a range of remembered set addresses corresponding to the object
// area address 'start' with size 'size_in_bytes', e.g., when adding blocks
// to the free list.
static void ClearRSetRange(Address start, int size_in_bytes);
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
// Update an old object's remembered set
static int UpdateRSet(HeapObject* obj);
// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();
@ -955,11 +985,19 @@ class Heap : public AllStatic {
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Object** dst, Object** src, int byte_size);
static inline void CopyBlock(Address dst, Address src, int byte_size);
static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Object** dst, Object** src, int byte_size);
static inline void MoveBlock(Address dst, Address src, int byte_size);
static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
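The copy/move helpers above assume pointer-size-aligned sizes; a minimal sketch of that contract (illustrative; the real versions are optimized inlines, presumably in heap-inl.h, and the *ToOldSpaceAndUpdateRegionMarks variants additionally dirty the regions they write):

#include <cassert>
#include <cstdint>
#include <cstddef>

// Copy byte_size bytes as whole words; copying word by word is what makes it
// possible to update a region dirty mark per written slot.
inline void CopyBlockSketch(uintptr_t* dst, const uintptr_t* src,
                            int byte_size) {
  assert(byte_size % static_cast<int>(sizeof(uintptr_t)) == 0);
  int words = byte_size / static_cast<int>(sizeof(uintptr_t));
  for (int i = 0; i < words; i++) {
    dst[i] = src[i];
  }
}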
// Check the new space expansion criteria and expand semispaces if they were hit.
static void CheckNewSpaceExpansionCriteria();
@ -1207,12 +1245,6 @@ class Heap : public AllStatic {
static void ReportStatisticsAfterGC();
#endif
// Rebuild remembered set in an old space.
static void RebuildRSets(PagedSpace* space);
// Rebuild remembered set in the large object space.
static void RebuildRSets(LargeObjectSpace* space);
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@ -1301,11 +1333,11 @@ class LinearAllocationScope {
#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
@ -1320,10 +1352,11 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
@ -1332,7 +1365,9 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
if (Heap::InNewSpace(object)) {
ASSERT(Heap::InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
}

23
deps/v8/src/ia32/builtins-ia32.cc

@ -226,8 +226,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edx: number of elements
// ecx: start of next object
__ mov(eax, Factory::fixed_array_map());
__ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
__ SmiTag(edx);
__ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
// Initialize the fields to undefined.
// ebx: JSObject
@ -548,6 +549,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(ebx);
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
@ -752,15 +754,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
__ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
Factory::fixed_array_map());
__ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(initial_capacity)));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@ -847,23 +849,22 @@ static void AllocateJSArray(MacroAssembler* masm,
__ lea(elements_array, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
// Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
__ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
Factory::fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
if (fill_with_hole) {
__ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ mov(eax, Factory::the_hole_value());
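Several hunks in this file store FixedArray lengths as smis and index element arrays with times_2 on a smi key. A small standalone sketch of the ia32 smi arithmetic involved (illustrative helpers, not V8's Smi class):

#include <cassert>
#include <cstdint>

// ia32 smi encoding: kSmiTag == 0 and kSmiTagSize == 1, so a smi is the value
// shifted left by one bit.
inline int32_t SmiTagSketch(int32_t value) { return value << 1; }
inline int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }

int main() {
  int32_t length = 7;
  int32_t tagged = SmiTagSketch(length);
  assert((tagged & 1) == 0);                // the tag bit of a smi is zero
  assert(SmiUntagSketch(tagged) == length);
  // Scaling a smi by times_2 yields index * 4, i.e. index * kPointerSize on
  // ia32, which is why FieldOperand(elements, key, times_2, ...) works.
  assert(tagged * 2 == length * 4);
  return 0;
}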

569
deps/v8/src/ia32/codegen-ia32.cc

@ -46,12 +46,12 @@
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
// -------------------------------------------------------------------------
// Platform-specific FrameRegisterState functions.
void FrameRegisterState::Save(MacroAssembler* masm) const {
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int action = registers_[i];
if (action == kPush) {
@ -63,7 +63,7 @@ void DeferredCode::SaveRegisters() {
}
void FrameRegisterState::Restore(MacroAssembler* masm) const {
// Restore registers in reverse order due to the stack.
for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
int action = registers_[i];
@ -77,6 +77,45 @@ void DeferredCode::RestoreRegisters() {
}
#undef __
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
void DeferredCode::SaveRegisters() {
frame_state_.Save(masm_);
}
void DeferredCode::RestoreRegisters() {
frame_state_.Restore(masm_);
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
frame_state_->Save(masm);
}
void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
frame_state_->Restore(masm);
}
void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveInternalFrame();
}
// -------------------------------------------------------------------------
// CodeGenState implementation.
@ -4198,7 +4237,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(eax); // <- slot 3
frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@ -4210,7 +4248,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@ -6020,29 +6057,67 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
// This generates code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It can handle flat, 8 and 16 bit characters and cons strings where the
// answer is found in the left hand branch of the cons. The slow case will
// flatten the string, which will ensure that the answer is in the left hand
// side the next time around.
class DeferredStringCharCodeAt : public DeferredCode {
public:
DeferredStringCharCodeAt(Register object,
Register index,
Register scratch,
Register result)
: result_(result),
char_code_at_generator_(object,
index,
scratch,
result,
&need_conversion_,
&need_conversion_,
&index_out_of_range_,
STRING_INDEX_IS_NUMBER) {}
StringCharCodeAtGenerator* fast_case_generator() {
return &char_code_at_generator_;
}
virtual void Generate() {
VirtualFrameRuntimeCallHelper call_helper(frame_state());
char_code_at_generator_.GenerateSlow(masm(), call_helper);
__ bind(&need_conversion_);
// Move the undefined value into the result register, which will
// trigger conversion.
__ Set(result_, Immediate(Factory::undefined_value()));
__ jmp(exit_label());
__ bind(&index_out_of_range_);
// When the index is out of range, the spec requires us to return
// NaN.
__ Set(result_, Immediate(Factory::nan_value()));
__ jmp(exit_label());
}
private:
Register result_;
Label need_conversion_;
Label index_out_of_range_;
StringCharCodeAtGenerator char_code_at_generator_;
};
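The fast path emitted by this generator (see GenerateFast later in this file) only handles sequential strings and cons strings whose right-hand side is empty; anything else bails out so the runtime can flatten the string. A standalone sketch of that decision logic (not V8 code; the string layout is heavily simplified):

#include <cstdint>
#include <cstddef>
#include <vector>

struct SketchString {
  bool is_cons = false;
  SketchString* first = nullptr;     // left branch of a cons string
  SketchString* second = nullptr;    // right branch of a cons string
  std::vector<uint16_t> chars;       // characters of a flat string
};

// Returns false to signal one of the bail-out paths (non-flat string or index
// out of range); returns true with *out set on the fast path.
inline bool FastCharCodeAtSketch(const SketchString* s, size_t index,
                                 uint16_t* out) {
  if (s->is_cons) {
    if (s->second->is_cons || !s->second->chars.empty()) return false;
    s = s->first;                    // the answer lives in the left branch
    if (s->is_cons) return false;    // first part is still not flat
  }
  if (index >= s->chars.size()) return false;
  *out = s->chars[index];
  return true;
}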
// This generates code that performs a String.prototype.charCodeAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
object.ToRegister();
index.ToRegister();
// We might mutate the object register.
frame_->Spill(object.reg());
frame_->Spill(index.reg());
// We need two extra registers.
Result result = allocator()->Allocate();
@ -6050,33 +6125,40 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
// There is no virtual frame effect from here up to the final result
// push.
Label slow_case;
Label exit;
StringHelper::GenerateFastCharCodeAt(masm_,
object.reg(),
index.reg(),
scratch.reg(),
result.reg(),
&slow_case,
&slow_case,
&slow_case,
&slow_case);
__ jmp(&exit);
__ bind(&slow_case);
// Move the undefined value into the result register, which will
// trigger the slow case.
__ Set(result.reg(), Immediate(Factory::undefined_value()));
__ bind(&exit);
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(object.reg(),
index.reg(),
scratch.reg(),
result.reg());
deferred->fast_case_generator()->GenerateFast(masm_);
deferred->BindExit();
frame_->Push(&result);
}
class DeferredStringCharFromCode : public DeferredCode {
public:
DeferredStringCharFromCode(Register code,
Register result)
: char_from_code_generator_(code, result) {}
StringCharFromCodeGenerator* fast_case_generator() {
return &char_from_code_generator_;
}
virtual void Generate() {
VirtualFrameRuntimeCallHelper call_helper(frame_state());
char_from_code_generator_.GenerateSlow(masm(), call_helper);
}
private:
StringCharFromCodeGenerator char_from_code_generator_;
};
// Generates code for creating a one-char string from a char code.
void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
Load(args->at(0));
@ -6085,16 +6167,97 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
code.ToRegister();
ASSERT(code.is_valid());
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
code.reg(), result.reg());
deferred->fast_case_generator()->GenerateFast(masm_);
deferred->BindExit();
frame_->Push(&result);
}
class DeferredStringCharAt : public DeferredCode {
public:
DeferredStringCharAt(Register object,
Register index,
Register scratch1,
Register scratch2,
Register result)
: result_(result),
char_at_generator_(object,
index,
scratch1,
scratch2,
result,
&need_conversion_,
&need_conversion_,
&index_out_of_range_,
STRING_INDEX_IS_NUMBER) {}
StringCharAtGenerator* fast_case_generator() {
return &char_at_generator_;
}
virtual void Generate() {
VirtualFrameRuntimeCallHelper call_helper(frame_state());
char_at_generator_.GenerateSlow(masm(), call_helper);
__ bind(&need_conversion_);
// Move smi zero into the result register, which will trigger
// conversion.
__ Set(result_, Immediate(Smi::FromInt(0)));
__ jmp(exit_label());
__ bind(&index_out_of_range_);
// When the index is out of range, the spec requires us to return
// the empty string.
__ Set(result_, Immediate(Factory::empty_string()));
__ jmp(exit_label());
}
private:
Register result_;
Label need_conversion_;
Label index_out_of_range_;
StringCharAtGenerator char_at_generator_;
};
// This generates code that performs a String.prototype.charAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateStringCharAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
object.ToRegister();
index.ToRegister();
// We might mutate the object register.
frame_->Spill(object.reg());
// We need three extra registers.
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
Result scratch1 = allocator()->Allocate();
ASSERT(scratch1.is_valid());
Result scratch2 = allocator()->Allocate();
ASSERT(scratch2.is_valid());
DeferredStringCharAt* deferred =
new DeferredStringCharAt(object.reg(),
index.reg(),
scratch1.reg(),
scratch2.reg(),
result.reg());
deferred->fast_case_generator()->GenerateFast(masm_);
deferred->BindExit();
frame_->Push(&result);
}
@ -6600,9 +6763,9 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
// Set length.
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
__ SmiUntag(ecx);
__ mov(edx, Immediate(Factory::the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
@ -6706,7 +6869,6 @@ void DeferredSearchCache::Generate() {
// Check if we could add new entry to cache.
__ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
__ j(greater, &add_new_entry);
@ -6904,12 +7066,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// (or them and test against Smi mask.)
__ mov(tmp2.reg(), tmp1.reg());
__ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
__ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
__ bind(&done);
deferred->BindExit();
@ -8608,13 +8766,8 @@ Result CodeGenerator::EmitKeyedLoad() {
key.ToRegister();
receiver.ToRegister();
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
key.reg());
@ -8647,19 +8800,20 @@ Result CodeGenerator::EmitKeyedLoad() {
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);
// Check that the key is within bounds.
__ cmp(key.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// Load and check that the result is not the hole.
// Key holds a smi.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
__ mov(elements.reg(),
FieldOperand(elements.reg(),
key.reg(),
times_2,
FixedArray::kHeaderSize));
result = elements;
__ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
@ -8744,7 +8898,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// Check whether it is possible to omit the write barrier. If the elements
// array is in new space or the value written is a smi we can safely update
// the elements array without write barrier.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@ -9014,7 +9168,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
@ -10977,9 +11132,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ test(ecx, Operand(ecx));
__ j(zero, &done);
// Get the parameters pointer from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@ -10988,6 +11142,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
__ SmiUntag(ecx);
// Copy the fixed array slots.
Label loop;
@ -11116,6 +11272,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
__ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
__ cmp(edx, Operand(eax));
__ j(greater, &runtime);
@ -11359,7 +11516,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
__ sub(Operand(mask), Immediate(1)); // Make mask.
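The single shift above folds two steps into one; a sketch of the equivalent arithmetic (illustrative helper, assuming the cache capacity is a power of two):

#include <cstdint>

// The cache's FixedArray length is smi-tagged and counts slots; each entry
// occupies two slots (number, string), so mask = (untagged length / 2) - 1.
inline int32_t NumberStringCacheMaskSketch(int32_t smi_tagged_length) {
  const int kSmiTagSizeSketch = 1;   // one tag bit on ia32
  int32_t entries = smi_tagged_length >> (kSmiTagSizeSketch + 1);
  return entries - 1;
}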
// Calculate the entry in the number string cache. The hash value in the
@ -11450,12 +11607,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
void RecordWriteStub::Generate(MacroAssembler* masm) {
masm->RecordWriteHelper(object_, addr_, scratch_);
masm->ret(0);
}
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@ -12390,152 +12541,204 @@ const char* CompareStub::GetName() {
}
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
ASSERT(kSmiTag == 0);
__ test(object, Immediate(kSmiTagMask));
__ j(zero, receiver_not_string);
__ test(object_, Immediate(kSmiTagMask));
__ j(zero, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ mov(result, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ test(result, Immediate(kIsNotStringMask));
__ j(not_zero, receiver_not_string);
__ test(result_, Immediate(kIsNotStringMask));
__ j(not_zero, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
ASSERT(kSmiTag == 0);
__ test(index, Immediate(kSmiTagMask));
__ j(not_zero, index_not_smi);
__ test(index_, Immediate(kSmiTagMask));
__ j(not_zero, &index_not_smi_);
// Check for index out of range.
__ cmp(index, FieldOperand(object, String::kLengthOffset));
__ j(above_equal, index_out_of_range);
// Put smi-tagged index into scratch register.
__ mov(scratch_, index_);
__ bind(&got_smi_index_);
__ bind(&try_again_with_new_string);
// ----------- S t a t e -------------
// -- object : string to access
// -- result : instance type of the string
// -- scratch : non-negative index < length
// -----------------------------------
// Check for index out of range.
__ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
__ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0);
__ test(result, Immediate(kStringRepresentationMask));
__ j(not_zero, &not_a_flat_string);
// Check for 1-byte or 2-byte string.
ASSERT(kAsciiStringTag != 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // index is smi (powered by 2).
__ movzx_w(result, FieldOperand(object,
index, times_1,
SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
__ test(result_, Immediate(kStringRepresentationMask));
__ j(zero, &flat_string);
// Handle non-flat strings.
__ bind(&not_a_flat_string);
__ and_(result, kStringRepresentationMask);
__ cmp(result, kConsStringTag);
__ j(not_equal, slow_case);
__ test(result_, Immediate(kIsConsStringMask));
__ j(zero, &call_runtime_);
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ mov(result, FieldOperand(object, ConsString::kSecondOffset));
__ cmp(Operand(result), Factory::empty_string());
__ j(not_equal, slow_case);
__ cmp(FieldOperand(object_, ConsString::kSecondOffset),
Immediate(Factory::empty_string()));
__ j(not_equal, &call_runtime_);
// Get the first of the two strings and load its instance type.
__ mov(object, FieldOperand(object, ConsString::kFirstOffset));
__ mov(result, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
__ jmp(&try_again_with_new_string);
__ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime.
ASSERT(kSeqStringTag == 0);
__ test(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_);
// ASCII string.
__ bind(&ascii_string);
// Put untagged index into scratch register.
__ mov(scratch, index);
__ SmiUntag(scratch);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
ASSERT(kAsciiStringTag != 0);
__ test(result_, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movzx_w(result_, FieldOperand(object_,
scratch_, times_1, // Scratch is smi-tagged.
SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
// ASCII string.
// Load the byte into the result register.
__ movzx_b(result, FieldOperand(object,
scratch, times_1,
SeqAsciiString::kHeaderSize));
__ bind(&ascii_string);
__ SmiUntag(scratch_);
__ movzx_b(result_, FieldOperand(object_,
scratch_, times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ SmiTag(result);
__ SmiTag(result_);
__ bind(&exit_);
}
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
__ push(result_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
if (!scratch_.is(eax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ mov(scratch_, eax);
}
__ pop(result_);
__ pop(index_);
__ pop(object_);
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
ASSERT(kSmiTag == 0);
__ test(scratch_, Immediate(kSmiTagMask));
__ j(not_zero, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
// Call runtime. We get here when the receiver is a string and the
// index is a number, but the code for getting the actual character
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
__ Abort("Unexpected fallthrough from CharCodeAt slow case");
}
// -------------------------------------------------------------------------
// StringCharFromCodeGenerator
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ test(code_,
Immediate(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
__ j(not_zero, &slow_case_, not_taken);
__ Set(result_, Immediate(Factory::single_character_string_cache()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code.
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
__ cmp(result_, Factory::undefined_value());
__ j(equal, &slow_case_, not_taken);
__ bind(&exit_);
}
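The test at the top of GenerateFast checks two conditions with one mask: the value must be a smi and, once untagged, must not exceed String::kMaxAsciiCharCode. A standalone sketch of the trick (the 0x7f limit is an assumption for the sketch):

#include <cstdint>

inline bool IsAsciiCharCodeSmiSketch(uint32_t value) {
  const uint32_t kSmiTagMaskSketch = 1u;
  const uint32_t kMaxAsciiCharCodeSketch = 0x7fu;
  // Any set bit outside the low (smi-tagged) ASCII range fails the test.
  const uint32_t mask = kSmiTagMaskSketch | (~kMaxAsciiCharCodeSketch << 1);
  return (value & mask) == 0;        // zero means: smi and within range
}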
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
__ Abort("Unexpected fallthrough from CharFromCode slow case");
}
// -------------------------------------------------------------------------
// StringCharAtGenerator
void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
char_code_at_generator_.GenerateFast(masm);
char_from_code_generator_.GenerateFast(masm);
}
void StringCharAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}

76
deps/v8/src/ia32/codegen-ia32.h

@ -38,8 +38,10 @@ namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
class FrameRegisterState;
class RegisterAllocator;
class RegisterFile;
class RuntimeCallHelper;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@ -621,10 +623,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateStringCharFromCode(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -910,37 +915,6 @@ class GenericBinaryOpStub: public CodeStub {
class StringHelper : public AllStatic {
public:
// Generates fast code for getting a char code out of a string
// object at the given index. May bail out for four reasons (in the
// listed order):
// * Receiver is not a string (receiver_not_string label).
// * Index is not a smi (index_not_smi label).
// * Index is out of range (index_out_of_range).
// * Some other reason (slow_case label). In this case it's
// guaranteed that the above conditions are not violated,
// e.g. it's safe to assume the receiver is a string and the
// index is a non-negative smi < length.
// When successful, object, index, and scratch are clobbered.
// Otherwise, scratch and result are clobbered.
static void GenerateFastCharCodeAt(MacroAssembler* masm,
Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_smi,
Label* index_out_of_range,
Label* slow_case);
// Generates code for creating a one-char string from the given char
// code. May do a runtime call, so any register can be clobbered
// and, if the given invoke flag specifies a call, an internal frame
// is required. In tail call mode the result must be eax register.
static void GenerateCharFromCode(MacroAssembler* masm,
Register code,
Register result,
InvokeFlag flag);
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
@ -1083,42 +1057,6 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register addr, Register scratch)
: object_(object), addr_(addr), scratch_(scratch) { }
void Generate(MacroAssembler* masm);
private:
Register object_;
Register addr_;
Register scratch_;
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
object_.code(), addr_.code(), scratch_.code());
}
#endif
// Minor key encoding in 12 bits. 4 bits for each of the three
// registers (object, address and scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
AddressBits::encode(addr_.code()) |
ScratchBits::encode(scratch_.code());
}
};
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_

202
deps/v8/src/ia32/full-codegen-ia32.cc

@ -1009,7 +1009,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
__ push(eax); // Enumeration cache length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
__ jmp(&loop);
@ -1019,7 +1018,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
__ push(eax);
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
@ -1904,76 +1902,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
Handle<String> name = expr->name();
if (strcmp("_IsSmi", *name->ToCString()) == 0) {
EmitIsSmi(expr->arguments());
} else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
EmitIsNonNegativeSmi(expr->arguments());
} else if (strcmp("_IsObject", *name->ToCString()) == 0) {
EmitIsObject(expr->arguments());
} else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
EmitIsUndetectableObject(expr->arguments());
} else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
EmitIsFunction(expr->arguments());
} else if (strcmp("_IsArray", *name->ToCString()) == 0) {
EmitIsArray(expr->arguments());
} else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
EmitIsRegExp(expr->arguments());
} else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
EmitIsConstructCall(expr->arguments());
} else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
EmitObjectEquals(expr->arguments());
} else if (strcmp("_Arguments", *name->ToCString()) == 0) {
EmitArguments(expr->arguments());
} else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
EmitArgumentsLength(expr->arguments());
} else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
EmitClassOf(expr->arguments());
} else if (strcmp("_Log", *name->ToCString()) == 0) {
EmitLog(expr->arguments());
} else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
EmitRandomHeapNumber(expr->arguments());
} else if (strcmp("_SubString", *name->ToCString()) == 0) {
EmitSubString(expr->arguments());
} else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
EmitRegExpExec(expr->arguments());
} else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
EmitValueOf(expr->arguments());
} else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
EmitSetValueOf(expr->arguments());
} else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
EmitNumberToString(expr->arguments());
} else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
EmitCharFromCode(expr->arguments());
} else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
EmitFastCharCodeAt(expr->arguments());
} else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
EmitStringAdd(expr->arguments());
} else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
EmitStringCompare(expr->arguments());
} else if (strcmp("_MathPow", *name->ToCString()) == 0) {
EmitMathPow(expr->arguments());
} else if (strcmp("_MathSin", *name->ToCString()) == 0) {
EmitMathSin(expr->arguments());
} else if (strcmp("_MathCos", *name->ToCString()) == 0) {
EmitMathCos(expr->arguments());
} else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
EmitMathSqrt(expr->arguments());
} else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
EmitCallFunction(expr->arguments());
} else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
EmitRegExpConstructResult(expr->arguments());
} else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
EmitSwapElements(expr->arguments());
} else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
EmitGetFromCache(expr->arguments());
} else {
UNREACHABLE();
}
}
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@ -2432,50 +2360,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label done;
StringCharFromCodeGenerator generator(eax, ebx);
generator.GenerateFast(masm_);
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, ebx);
}
void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForValue(args->at(0), kStack);
VisitForValue(args->at(1), kAccumulator);
Register object = ebx;
Register index = eax;
Register scratch = ecx;
Register result = edx;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharCodeAtGenerator generator(object,
index,
scratch,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
__ jmp(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
__ Set(result, Immediate(Factory::nan_value()));
__ jmp(&done);
__ bind(&need_conversion);
// Move the undefined value into the result register, which will
// trigger conversion.
__ Set(result, Immediate(Factory::undefined_value()));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, result);
}
void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForValue(args->at(0), kStack);
VisitForValue(args->at(1), kAccumulator);
Register object = ebx;
Register index = eax;
Register scratch1 = ecx;
Register scratch2 = edx;
Register result = eax;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharAtGenerator generator(object,
index,
scratch1,
scratch2,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
__ jmp(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
__ Set(result, Immediate(Factory::empty_string()));
__ jmp(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
__ Set(result, Immediate(Smi::FromInt(0)));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, result);
}
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());

103
deps/v8/src/ia32/ic-ia32.cc

@ -304,7 +304,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
@ -329,18 +329,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
// Get the elements array of the object.
__ bind(&index_smi);
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
// Check that the key (index) is within bounds.
__ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Fast case: Do the load.
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
__ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
@ -352,9 +351,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_pixel_array);
// Check whether the elements is a pixel array.
// edx: receiver
// ebx: untagged index
// eax: key
// ecx: elements
__ mov(ebx, eax);
__ SmiUntag(ebx);
__ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
@ -485,9 +485,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(ebx, String::kArrayIndexValueMask);
__ shr(ebx, String::kHashShift - kSmiTagSize);
__ mov(eax, ebx);
__ jmp(&index_smi);
}
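A standalone sketch of the hash-field manipulation above: the cached array index sits above kHashShift, and shifting right by kHashShift - kSmiTagSize leaves the result already smi-tagged (mask and shift values are passed in here because the real constants are not shown in this diff):

#include <cstdint>

inline uint32_t SmiTaggedArrayIndexSketch(uint32_t hash_field,
                                          uint32_t array_index_value_mask,
                                          int hash_shift,
                                          int smi_tag_size) {
  return (hash_field & array_index_value_mask) >> (hash_shift - smi_tag_size);
}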
@ -498,60 +502,29 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label miss;
Label index_out_of_range;
Register receiver = edx;
Register index = eax;
Register scratch1 = ebx;
Register scratch2 = ecx;
Register result = eax;
StringCharAtGenerator char_at_generator(receiver,
index,
scratch1,
scratch2,
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ ret(0);
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::undefined_value()));
__ ret(0);
@ -792,9 +765,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast, taken);
// Slow case: call runtime.
@ -804,7 +775,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
// eax: value
// ecx: key
// ecx: key (a smi)
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
@ -840,13 +811,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
__ j(not_equal, &slow, not_taken); // do not leave holes in the array
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow, not_taken);
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS

106
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -60,49 +60,17 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
and_(object, ~Page::kPageAlignmentMask);
// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
and_(addr, Page::kPageAlignmentMask);
shr(addr, Page::kRegionSizeLog2);
// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
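In C terms the helper above reduces to two masks and one bit set (a sketch of the logic, not the emitted code; the Page constants are passed in as parameters):

#include <cstdint>

// Mark the region of 'object's page that covers 'addr' as dirty.
inline void RecordWriteHelperSketch(uintptr_t object, uintptr_t addr,
                                    uintptr_t page_alignment_mask,
                                    int region_size_log2,
                                    int dirty_flag_offset) {
  uintptr_t page = object & ~page_alignment_mask;            // page start
  uint32_t region = static_cast<uint32_t>(
      (addr & page_alignment_mask) >> region_size_log2);     // region index
  uint32_t* dirty = reinterpret_cast<uint32_t*>(page + dirty_flag_offset);
  *dirty |= (1u << region);                                  // set dirty mark
}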
@ -130,7 +98,7 @@ void MacroAssembler::InNewSpace(Register object,
}
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@ -142,9 +110,8 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
// Skip barrier if writing a smi.
@ -160,47 +127,19 @@ void MacroAssembler::RecordWrite(Register object, int offset,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
Register dst = scratch;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// Array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
}
RecordWriteHelper(object, dst, value);
bind(&done);
@ -1384,6 +1323,7 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
lea(edx, FieldOperand(edx, Code::kHeaderSize));

6
deps/v8/src/ia32/macro-assembler-ia32.h

@ -59,8 +59,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
// Set the remembered set bit for an address which points into an
// object. RecordWriteHelper only works if the object is not in new
// For page containing |object| mark region covering |addr| dirty.
// RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@ -73,7 +73,7 @@ class MacroAssembler: public Assembler {
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
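The new comment describes the card-marking style barrier that replaces the remembered-set bit: the write barrier marks the region of the page covering the written slot as dirty. A rough sketch of that idea, with page size, region size, and bitmap layout chosen purely for illustration (they are not V8's actual constants):

#include <cstdint>

const uintptr_t kPageSize = 8 * 1024;           // assumed page size
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const int kRegionSizeLog2 = 8;                  // assumed 256-byte regions

// Mark the region covering |slot_address| dirty in its page's bitmap.
void MarkRegionDirty(uintptr_t slot_address, uint32_t* page_dirty_bits) {
  uintptr_t offset_in_page = slot_address & kPageAlignmentMask;
  int region = static_cast<int>(offset_in_page >> kRegionSizeLog2);
  page_dirty_bits[region / 32] |= 1u << (region % 32);
}

A scavenge then only has to scan the dirty regions of old-space pages for pointers into new space, which is the IterateDirtyRegions traversal the later heap and mark-compact hunks switch to.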

504
deps/v8/src/ia32/stub-cache-ia32.cc

@ -300,203 +300,6 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
}
template <class Compiler>
static void CompileLoadInterceptor(Compiler* compiler,
StubCompiler* stub_compiler,
MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// Check that the maps haven't changed.
Register reg =
stub_compiler->CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
if (lookup->IsProperty() && lookup->IsCacheable()) {
compiler->CompileCacheable(masm,
stub_compiler,
receiver,
reg,
scratch1,
scratch2,
holder,
lookup,
name,
miss);
} else {
compiler->CompileRegular(masm,
receiver,
reg,
scratch2,
holder,
miss);
}
}
class LoadInterceptorCompiler BASE_EMBEDDED {
public:
explicit LoadInterceptorCompiler(Register name) : name_(name) {}
void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
Register receiver,
Register holder,
Register scratch1,
Register scratch2,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
Label* miss_label) {
AccessorInfo* callback = NULL;
bool optimize = false;
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
if (lookup->type() == FIELD) {
optimize = true;
} else if (lookup->type() == CALLBACKS) {
Object* callback_object = lookup->GetCallbackObject();
if (callback_object->IsAccessorInfo()) {
callback = AccessorInfo::cast(callback_object);
optimize = callback->getter() != NULL;
}
}
if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
miss_label);
return;
}
// Note: starting a frame here makes GC aware of pointers pushed below.
__ EnterInternalFrame();
if (lookup->type() == CALLBACKS) {
__ push(receiver);
}
__ push(holder);
__ push(name_);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ cmp(eax, Factory::no_interceptor_result_sentinel());
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
__ bind(&interceptor_failed);
__ pop(name_);
__ pop(holder);
if (lookup->type() == CALLBACKS) {
__ pop(receiver);
}
__ LeaveInternalFrame();
if (lookup->type() == FIELD) {
// We found FIELD property in prototype chain of interceptor's holder.
// Check that the maps from interceptor's holder to field's holder
// haven't changed...
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
miss_label);
// ... and retrieve a field from field's holder.
stub_compiler->GenerateFastPropertyLoad(masm, eax,
holder, lookup->holder(),
lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Prepare for tail call: push receiver to stack after return address.
Label cleanup;
__ pop(scratch2); // return address
__ push(receiver);
__ push(scratch2);
// Check that the maps from interceptor's holder to callback's holder
// haven't changed.
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);
// Continue tail call preparation: push remaining parameters after
// return address.
__ pop(scratch2); // return address
__ push(holder);
__ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
__ push(holder);
__ push(FieldOperand(holder, AccessorInfo::kDataOffset));
__ push(name_);
__ push(scratch2); // restore return address
// Tail call to runtime.
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
// Clean up code: we pushed receiver after return address and
// need to remove it from there.
__ bind(&cleanup);
__ pop(scratch1); // return address.
__ pop(scratch2); // receiver.
__ push(scratch1);
}
}
void CompileRegular(MacroAssembler* masm,
Register receiver,
Register holder,
Register scratch,
JSObject* interceptor_holder,
Label* miss_label) {
__ pop(scratch); // save old return address
PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ push(scratch); // restore old return address
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}
private:
Register name_;
};
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@ -683,9 +486,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
depth1, miss);
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@ -698,10 +501,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(),
scratch1, scratch2, name,
depth2, miss);
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
scratch2, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
// safe to omit it here, as if present, it should be fetched
// by the previous CheckPrototypes.
ASSERT(depth2 == kInvalidProtoDepth);
}
// Invoke function.
if (can_do_fast_api_call) {
@ -1060,7 +870,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@ -1068,18 +878,130 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
LoadInterceptorCompiler compiler(name_reg);
CompileLoadInterceptor(&compiler,
this,
masm(),
object,
holder,
name,
lookup,
receiver,
scratch1,
scratch2,
miss);
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
if (lookup->IsProperty() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo() &&
AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
compile_followup_inline = true;
}
}
if (compile_followup_inline) {
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
__ EnterInternalFrame();
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ push(receiver);
}
__ push(holder_reg);
__ push(name_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
name_reg,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ cmp(eax, Factory::no_interceptor_result_sentinel());
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
__ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
__ LeaveInternalFrame();
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into holder_reg.
if (interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
lookup->holder(),
scratch1,
scratch2,
name,
miss);
}
if (lookup->type() == FIELD) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), eax, holder_reg,
lookup->holder(), lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
__ pop(scratch2); // return address
__ push(receiver);
__ push(holder_reg);
__ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
__ push(holder_reg);
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
__ push(name_reg);
__ push(scratch2); // restore return address
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg =
CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
__ pop(scratch2); // save old return address
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
__ push(scratch2); // restore old return address
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}
}
@ -1206,7 +1128,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@ -1216,7 +1138,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiTag(ecx);
// Check if we could survive without allocation.
__ cmp(eax, Operand(ecx));
@ -1234,17 +1155,16 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &with_rset_update);
__ j(not_zero, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_rset_update);
__ bind(&with_write_barrier);
__ InNewSpace(ebx, ecx, equal, &exit);
RecordWriteStub stub(ebx, edx, ecx);
__ CallStub(&stub);
__ RecordWriteHelper(ebx, edx, ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@ -1284,10 +1204,10 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
Immediate(kAllocationDelta));
Immediate(Smi::FromInt(kAllocationDelta)));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
// Elements are in new space, so no remembered set updates are necessary.
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
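The length increment above changes from a raw integer to Smi::FromInt(kAllocationDelta) because FixedArray lengths are now stored smi-tagged (see the SMI_ACCESSORS change in objects-inl.h below). A small sketch of why adding smi-encoded values stays consistent, assuming the 32-bit encoding with tag 0 and shift 1:

#include <cassert>
#include <cstdint>

constexpr int32_t SmiFromInt(int32_t value) { return value << 1; }
constexpr int32_t SmiToInt(int32_t smi) { return smi >> 1; }

int main() {
  int32_t length_field = SmiFromInt(4);   // current length 4, smi-encoded
  const int kAllocationDelta = 4;         // illustrative growth amount
  // (a << 1) + (b << 1) == (a + b) << 1, so adding a smi-encoded delta keeps
  // the field a valid smi for the new length.
  length_field += SmiFromInt(kAllocationDelta);
  assert(SmiToInt(length_field) == 8);
  return 0;
}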
@ -1389,6 +1309,140 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}
Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
const int argc = arguments().immediate();
Label miss;
Label index_out_of_range;
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
Register receiver = ebx;
Register index = ecx;
Register scratch = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
__ Set(index, Immediate(Factory::undefined_value()));
}
StringCharCodeAtGenerator char_code_at_generator(receiver,
index,
scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
ICRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::nan_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
Handle<Code> ic = ComputeCallMiss(argc);
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}
Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
const int argc = arguments().immediate();
Label miss;
Label index_out_of_range;
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
Register receiver = eax;
Register index = ecx;
Register scratch1 = ebx;
Register scratch2 = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
__ Set(index, Immediate(Factory::undefined_value()));
}
StringCharAtGenerator char_at_generator(receiver,
index,
scratch1,
scratch2,
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
char_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::empty_string()));
__ ret((argc + 1) * kPointerSize);
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
Handle<Code> ic = ComputeCallMiss(argc);
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,

2
deps/v8/src/ia32/virtual-frame-ia32.h

@ -615,7 +615,7 @@ class VirtualFrame: public ZoneObject {
inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;
friend class FrameRegisterState;
friend class JumpTarget;
};

29
deps/v8/src/jump-target-heavy.cc

@ -332,22 +332,10 @@ void JumpTarget::ComputeEntryFrame() {
}
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
#endif
FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
// Copy the register locations from the code generator's frame.
// These are the registers that will be spilled on entry to the
// deferred code and restored on exit.
VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
int sp_offset = frame->fp_relative(frame->stack_pointer_);
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int loc = frame->register_location(i);
@ -423,4 +411,19 @@ void BreakTarget::Branch(Condition cc, Hint hint) {
}
}
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()),
frame_state_(CodeGeneratorScope::Current()->frame()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
#endif
}
} } // namespace v8::internal

5
deps/v8/src/jump-target-light.cc

@ -37,14 +37,15 @@ namespace internal {
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
position_(masm_->current_position()),
frame_state_(*CodeGeneratorScope::Current()->frame()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
comment_ = "";
#endif
}

10
deps/v8/src/macros.py

@ -159,3 +159,13 @@ macro LAST_INPUT(array) = ((array)[2]);
macro CAPTURE(index) = (3 + (index));
const CAPTURE0 = 3;
const CAPTURE1 = 4;
# PropertyDescriptor return value indices - must match
# PropertyDescriptorIndices in runtime.cc.
const IS_ACCESSOR_INDEX = 0;
const VALUE_INDEX = 1;
const GETTER_INDEX = 2;
const SETTER_INDEX = 3;
const WRITABLE_INDEX = 4;
const ENUMERABLE_INDEX = 5;
const CONFIGURABLE_INDEX = 6;
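These constants have to track the layout of the array the runtime hands back for property descriptors; a sketch of the matching C++ side (the enum is an assumption modeled on the comment above, not copied from runtime.cc):

// Must mirror the IS_ACCESSOR_INDEX .. CONFIGURABLE_INDEX constants above.
enum PropertyDescriptorIndices {
  IS_ACCESSOR_INDEX = 0,
  VALUE_INDEX = 1,
  GETTER_INDEX = 2,
  SETTER_INDEX = 3,
  WRITABLE_INDEX = 4,
  ENUMERABLE_INDEX = 5,
  CONFIGURABLE_INDEX = 6,
  DESCRIPTOR_SIZE = 7
};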

193
deps/v8/src/mark-compact.cc

@ -84,9 +84,6 @@ void MarkCompactCollector::CollectGarbage() {
UpdatePointers();
RelocateObjects();
RebuildRSets();
} else {
SweepSpaces();
}
@ -121,14 +118,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef DEBUG
if (compacting_collection_) {
// We will write bookkeeping information to the remembered set area
// starting now.
Page::set_rset_state(Page::NOT_IN_USE);
}
#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@ -150,7 +139,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
void MarkCompactCollector::Finish() {
#ifdef DEBUG
ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
// The stub cache is not traversed during GC; clear the cache to
@ -244,8 +233,8 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
// Since we don't have the object's start, it is impossible to update the
// remembered set. Therefore, we only replace the string with its left
// substring when the remembered set does not change.
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
@ -776,6 +765,7 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
Heap::lo_space()->FreeUnmarkedObjects();
}
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
MapWord metamap = object->map_word();
@ -783,6 +773,7 @@ bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
return metamap.ToMap()->instance_type() == MAP_TYPE;
}
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
// Iterate over the map space, setting map transitions that go from
@ -1078,13 +1069,18 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
// first word of object without any encoding. If object is dead we are writing
// NULL as a forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead objects during traversal of remembered set for
// map space because remembered set bits corresponding to dead maps are cleared
// later during map space sweeping.
static void MigrateObject(Address dst, Address src, int size) {
Heap::CopyBlock(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
size);
// to encounter pointers to dead objects during traversal of dirty regions;
// we should clear them to avoid encountering them during the next dirty
// regions iteration.
static void MigrateObject(Address dst,
Address src,
int size,
bool to_old_space) {
if (to_old_space) {
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
} else {
Heap::CopyBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
}
@ -1131,6 +1127,7 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
}
};
// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
@ -1142,10 +1139,13 @@ static void UpdatePointerToNewGen(HeapObject** p) {
Address new_addr = Memory::Address_at(old_addr);
// Object pointed by *p is dead. Update is not required.
if (new_addr == NULL) return;
*p = HeapObject::FromAddress(new_addr);
if (new_addr == NULL) {
// We encountered pointer to a dead object. Clear it so we will
// not visit it again during next iteration of dirty regions.
*p = NULL;
} else {
*p = HeapObject::FromAddress(new_addr);
}
}
@ -1163,8 +1163,7 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = Heap::lo_space()->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
Heap::UpdateRSet(target);
MigrateObject(target->address(), object->address(), object_size, true);
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@ -1177,10 +1176,10 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
if (target_space == Heap::old_pointer_space()) {
Heap::UpdateRSet(target);
}
MigrateObject(target->address(),
object->address(),
object_size,
target_space == Heap::old_pointer_space());
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@ -1222,14 +1221,16 @@ static void SweepNewSpace(NewSpace* space) {
continue;
}
// Promotion either failed or not required.
// Copy the content of the object.
// Promotion failed. Just migrate object to another semispace.
Object* target = space->AllocateRaw(size);
// Allocation cannot fail at this point: semispaces are of equal size.
ASSERT(!target->IsFailure());
MigrateObject(HeapObject::cast(target)->address(), current, size);
MigrateObject(HeapObject::cast(target)->address(),
current,
size,
false);
} else {
size = object->Size();
Memory::Address_at(current) = NULL;
@ -1255,9 +1256,12 @@ static void SweepNewSpace(NewSpace* space) {
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
// Update pointers in old spaces.
Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
Heap::IterateDirtyRegions(Heap::old_pointer_space(),
&Heap::IteratePointersInDirtyRegion,
&UpdatePointerToNewGen,
Heap::WATERMARK_SHOULD_BE_VALID);
Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
HeapObjectIterator cell_iterator(Heap::cell_space());
@ -1323,7 +1327,10 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
dealloc(free_start, static_cast<int>(current - free_start), true);
dealloc(free_start,
static_cast<int>(current - free_start),
true,
false);
is_previous_alive = true;
}
} else {
@ -1353,7 +1360,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
dealloc(free_start, size_in_bytes, false);
dealloc(free_start, size_in_bytes, false, true);
}
}
} else {
@ -1367,7 +1374,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// If there is a free ending area on one of the previous pages we have to
// deallocate that area and put it on the free list.
if (last_free_size > 0) {
dealloc(last_free_start, last_free_size, true);
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
dealloc(last_free_start, last_free_size, true, true);
last_free_start = NULL;
last_free_size = 0;
}
@ -1398,7 +1407,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
dealloc(last_free_start, last_free_size, false);
dealloc(last_free_start, last_free_size, false, true);
new_allocation_top = last_free_start;
}
@ -1421,34 +1430,36 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
Heap::ClearRSetRange(start, size_in_bytes);
bool add_to_freelist,
bool last_on_page) {
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a, add_to_freelist);
@ -1458,13 +1469,13 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a, add_to_freelist);
@ -1563,20 +1574,6 @@ class MapCompact {
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
}
void FinishMapSpace() {
// Iterate through to space and finish move.
MapIterator it;
HeapObject* o = it.next();
for (; o != first_map_to_evacuate_; o = it.next()) {
ASSERT(o != NULL);
Map* map = reinterpret_cast<Map*>(o);
ASSERT(!map->IsMarked());
ASSERT(!map->IsOverflowed());
ASSERT(map->IsMap());
Heap::UpdateRSet(map);
}
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
ASSERT(space != Heap::map_space());
@ -1669,9 +1666,9 @@ class MapCompact {
ASSERT(Map::kSize % 4 == 0);
Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
reinterpret_cast<Object**>(map_to_evacuate->address()),
Map::kSize);
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
map_to_evacuate->address(),
Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
@ -1756,6 +1753,12 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
&UpdatePointerToNewGen,
Heap::WATERMARK_SHOULD_BE_VALID);
int live_maps_size = Heap::map_space()->Size();
int live_maps = live_maps_size / Map::kSize;
ASSERT(live_map_objects_size_ == live_maps_size);
@ -1766,7 +1769,6 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
map_compact.FinishMapSpace();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@ -2039,9 +2041,8 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
Page* forwarded_page = Page::FromAddress(first_forwarded);
int forwarded_offset = forwarded_page->Offset(first_forwarded);
// Find end of allocation of in the page of first_forwarded.
Address mc_top = forwarded_page->mc_relocation_top;
int mc_top_offset = forwarded_page->Offset(mc_top);
// Find end of allocation in the page of first_forwarded.
int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
// Check if current object's forward pointer is in the same page
// as the first live object's forwarding pointer
@ -2058,7 +2059,7 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
offset += Page::kObjectStartOffset;
ASSERT_PAGE_OFFSET(offset);
ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
return next_page->OffsetToAddress(offset);
}
@ -2103,16 +2104,12 @@ void MarkCompactCollector::RelocateObjects() {
// Flip from and to spaces
Heap::new_space()->Flip();
Heap::new_space()->MCCommitRelocationInfo();
// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);
Heap::new_space()->MCCommitRelocationInfo();
#ifdef DEBUG
// It is safe to write to the remembered sets as remembered sets on a
// page-by-page basis after committing the m-c forwarding pointer.
Page::set_rset_state(Page::IN_USE);
#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
@ -2139,9 +2136,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
Map::kSize);
Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
Map::kSize);
}
#ifdef DEBUG
@ -2198,9 +2195,13 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
if (space == Heap::old_data_space()) {
Heap::MoveBlock(new_addr, old_addr, obj_size);
} else {
Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
obj_size);
}
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@ -2245,9 +2246,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
Heap::MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@ -2283,9 +2282,13 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
// New and old addresses cannot overlap.
Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
Heap::CopyBlock(new_addr, old_addr, obj_size);
} else {
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
obj_size);
}
#ifdef DEBUG
if (FLAG_gc_verbose) {
@ -2302,18 +2305,6 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}
// -------------------------------------------------------------------------
// Phase 5: rebuild remembered sets
void MarkCompactCollector::RebuildRSets() {
#ifdef DEBUG
ASSERT(state_ == RELOCATE_OBJECTS);
state_ = REBUILD_RSETS;
#endif
Heap::RebuildRSets();
}
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {

62
deps/v8/src/mark-compact.h

@ -41,7 +41,8 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// no attempt to add area to free list is made.
typedef void (*DeallocateFunction)(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
// Forward declarations.
@ -131,8 +132,7 @@ class MarkCompactCollector: public AllStatic {
SWEEP_SPACES,
ENCODE_FORWARDING_ADDRESSES,
UPDATE_POINTERS,
RELOCATE_OBJECTS,
REBUILD_RSETS
RELOCATE_OBJECTS
};
// The current stage of the collector.
@ -269,22 +269,22 @@ class MarkCompactCollector: public AllStatic {
// written to their map word's offset in the inactive
// semispace.
//
// Bookkeeping data is written to the remembered-set area of
// Bookkeeping data is written to the page header of
// each paged-space page that contains live objects after
// compaction:
//
// The 3rd word of the page (first word of the remembered
// set) contains the relocation top address, the address of
// the first word after the end of the last live object in
// the page after compaction.
// The allocation watermark field is used to track the
// relocation top address, the address of the first word
// after the end of the last live object in the page after
// compaction.
//
// The 4th word contains the zero-based index of the page in
// its space. This word is only used for map space pages, in
// The Page::mc_page_index field contains the zero-based index of the
// page in its space. This word is only used for map space pages, in
// order to encode the map addresses in 21 bits to free 11
// bits per map word for the forwarding address.
//
// The 5th word contains the (nonencoded) forwarding address
// of the first live object in the page.
// The Page::mc_first_forwarded field contains the (nonencoded)
// forwarding address of the first live object in the page.
//
// In both the new space and the paged spaces, a linked list
// of live regions is constructed (linked through
@ -319,23 +319,28 @@ class MarkCompactCollector: public AllStatic {
// generation.
static void DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
@ -349,9 +354,7 @@ class MarkCompactCollector: public AllStatic {
//
// After: All pointers in live objects, including encoded map
// pointers, are updated to point to their target's new
// location. The remembered set area of each paged-space
// page containing live objects still contains bookkeeping
// information.
// location.
friend class UpdatingVisitor; // helper for updating visited objects
@ -373,13 +376,9 @@ class MarkCompactCollector: public AllStatic {
// Phase 4: Relocating objects.
//
// Before: Pointers to live objects are updated to point to their
// target's new location. The remembered set area of each
// paged-space page containing live objects still contains
// bookkeeping information.
// target's new location.
//
// After: Objects have been moved to their new addresses. The
// remembered set area of each paged-space page containing
// live objects still contains bookkeeping information.
// After: Objects have been moved to their new addresses.
// Relocates objects in all spaces.
static void RelocateObjects();
@ -408,17 +407,6 @@ class MarkCompactCollector: public AllStatic {
// Copy a new object.
static int RelocateNewObject(HeapObject* obj);
// -----------------------------------------------------------------------
// Phase 5: Rebuilding remembered sets.
//
// Before: The heap is in a normal state except that remembered sets
// in the paged spaces are not correct.
//
// After: The heap is in a normal state.
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
#ifdef DEBUG
// -----------------------------------------------------------------------
// Debugging variables, functions and classes

3
deps/v8/src/objects-debug.cc

@ -806,7 +806,8 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
CHECK_EQ(0, elements()->length());
CHECK(HasFastElements());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}

110
deps/v8/src/objects-inl.h

@ -759,7 +759,8 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
ASSERT(mode == SKIP_WRITE_BARRIER); \
ASSERT(Heap::InNewSpace(object) || \
!Heap::InNewSpace(READ_FIELD(object, offset)) || \
Page::IsRSetSet(object->address(), offset)); \
Page::FromAddress(object->address())-> \
IsRegionDirty(object->address() + offset)); \
}
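The assertion now consults the page's region dirty mark instead of a remembered-set bit. Restated as plain code (an illustrative paraphrase, not the macro itself), a store may skip the write barrier only when no unrecorded old-to-new pointer can result:

// Safe to skip when the host object is itself in new space, when the stored
// value is not in new space, or when the covering region is already dirty.
bool SkipWriteBarrierIsSafe(bool host_in_new_space,
                            bool value_in_new_space,
                            bool region_already_dirty) {
  return host_in_new_space || !value_in_new_space || region_already_dirty;
}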
#define READ_DOUBLE_FIELD(p, offset) \
@ -1045,6 +1046,10 @@ Address MapWord::ToEncodedAddress() {
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
void HeapObject::VerifySmiField(int offset) {
ASSERT(READ_FIELD(this, offset)->IsSmi());
}
#endif
@ -1064,7 +1069,7 @@ MapWord HeapObject::map_word() {
void HeapObject::set_map_word(MapWord map_word) {
// WRITE_FIELD does not update the remembered set, but there is no need
// WRITE_FIELD does not invoke write barrier, but there is no need
// here.
WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
@ -1162,16 +1167,16 @@ int HeapNumber::get_sign() {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
Array* JSObject::elements() {
HeapObject* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
// In the assert below Dictionary is covered under FixedArray.
ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
array->IsExternalArray());
return reinterpret_cast<Array*>(array);
return reinterpret_cast<HeapObject*>(array);
}
void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray());
@ -1342,15 +1347,15 @@ bool JSObject::HasFastProperties() {
}
bool Array::IndexFromObject(Object* object, uint32_t* index) {
if (object->IsSmi()) {
int value = Smi::cast(object)->value();
bool Object::ToArrayIndex(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
if (value < 0) return false;
*index = value;
return true;
}
if (object->IsHeapNumber()) {
double value = HeapNumber::cast(object)->value();
if (IsHeapNumber()) {
double value = HeapNumber::cast(this)->value();
uint32_t uint_value = static_cast<uint32_t>(value);
if (value == static_cast<double>(uint_value)) {
*index = uint_value;
@ -1665,7 +1670,11 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
}
INT_ACCESSORS(Array, length, kLengthOffset)
SMI_ACCESSORS(FixedArray, length, kLengthOffset)
SMI_ACCESSORS(ByteArray, length, kLengthOffset)
INT_ACCESSORS(PixelArray, length, kLengthOffset)
INT_ACCESSORS(ExternalArray, length, kLengthOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
@ -1678,6 +1687,9 @@ uint32_t String::hash_field() {
void String::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
#endif
}
@ -2456,22 +2468,65 @@ BOOL_ACCESSORS(SharedFunctionInfo,
try_full_codegen,
kTryFullCodegen)
INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)
INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
INT_ACCESSORS(SharedFunctionInfo, function_token_position,
SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
INT_ACCESSORS(SharedFunctionInfo, compiler_hints,
SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
INT_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
int holder::name() { \
int value = READ_INT_FIELD(this, offset); \
ASSERT(kHeapObjectTag == 1); \
ASSERT((value & kHeapObjectTag) == 0); \
return value >> 1; \
} \
void holder::set_##name(int value) { \
ASSERT(kHeapObjectTag == 1); \
ASSERT((value & 0xC0000000) == 0xC0000000 || \
(value & 0xC0000000) == 0x000000000); \
WRITE_INT_FIELD(this, \
offset, \
(value << 1) & ~kHeapObjectTag); \
}
#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
INT_ACCESSORS(holder, name, offset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, end_position, kEndPositionOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
#endif
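On 64-bit hosts two of these int fields pair up within one pointer-sized slot, so the LO accessor stores its value shifted left by one to keep the slot's low tag bit clear (kHeapObjectTag == 1, per the ASSERTs) and the GC never mistakes it for a heap pointer. A self-contained restatement of that encoding:

#include <cassert>

int EncodeLoField(int value) { return (value << 1) & ~1; }  // low bit stays 0
int DecodeLoField(int stored) { return stored >> 1; }

int main() {
  int stored = EncodeLoField(42);
  assert((stored & 1) == 0);            // does not look like a tagged pointer
  assert(DecodeLoField(stored) == 42);
  return 0;
}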
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@ -2785,7 +2840,7 @@ void JSRegExp::SetDataAt(int index, Object* value) {
JSObject::ElementsKind JSObject::GetElementsKind() {
Array* array = elements();
HeapObject* array = elements();
if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) {
@ -2908,15 +2963,20 @@ NumberDictionary* JSObject::element_dictionary() {
}
bool String::IsHashFieldComputed(uint32_t field) {
return (field & kHashNotComputedMask) == 0;
}
bool String::HasHashCode() {
return (hash_field() & kHashComputedMask) != 0;
return IsHashFieldComputed(hash_field());
}
uint32_t String::Hash() {
// Fast case: has hash code already been computed?
uint32_t field = hash_field();
if (field & kHashComputedMask) return field >> kHashShift;
if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it.
return ComputeAndSetHash();
}
@ -2989,7 +3049,7 @@ uint32_t StringHasher::GetHash() {
bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
if (IsHashFieldComputed(field) && !(field & kIsArrayIndexMask)) return false;
return SlowAsArrayIndex(index);
}
@ -3113,7 +3173,7 @@ void Map::ClearCodeCache() {
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
Array* elts = elements();
FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid

21
deps/v8/src/objects.cc

@ -2037,7 +2037,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
VMState state(EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) return NONE;
if (!result.IsEmpty()) return DONT_ENUM;
}
return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
*name_handle,
@ -4784,7 +4784,7 @@ static inline uint32_t HashSequentialString(const schar* chars, int length) {
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
ASSERT(!(hash_field() & kHashComputedMask));
ASSERT(!HasHashCode());
const int len = length();
@ -4803,7 +4803,7 @@ uint32_t String::ComputeAndSetHash() {
set_hash_field(field);
// Check the hash code is there.
ASSERT(hash_field() & kHashComputedMask);
ASSERT(HasHashCode());
uint32_t result = field >> kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@ -4858,8 +4858,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
static inline uint32_t HashField(uint32_t hash,
bool is_array_index,
int length = -1) {
uint32_t result =
(hash << String::kHashShift) | String::kHashComputedMask;
uint32_t result = (hash << String::kHashShift);
if (is_array_index) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@ -5639,7 +5638,7 @@ Object* JSObject::SetElementsLength(Object* len) {
// General slow case.
if (len->IsNumber()) {
uint32_t length;
if (Array::IndexFromObject(len, &length)) {
if (len->ToArrayIndex(&length)) {
return SetSlowElements(len);
} else {
return ArrayLengthRangeError();
@ -6063,8 +6062,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
if (IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&array_length));
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
if (index >= array_length) {
JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
@ -6202,8 +6200,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
if (ShouldConvertToFastElements()) {
uint32_t new_length = 0;
if (IsJSArray()) {
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&new_length));
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else {
new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
@ -6234,7 +6231,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
Object* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, Object* value) {
uint32_t old_len = 0;
CHECK(Array::IndexFromObject(length(), &old_len));
CHECK(length()->ToArrayIndex(&old_len));
// Check to see if we need to update the length. For now, we make
// sure that the length stays within 32-bits (unsigned).
if (index >= old_len && index != 0xffffffff) {
@ -6516,7 +6513,7 @@ bool JSObject::ShouldConvertToFastElements() {
// fast elements.
uint32_t length = 0;
if (IsJSArray()) {
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &length));
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
} else {
length = dictionary->max_number_key();
}

214
deps/v8/src/objects.h

@ -54,29 +54,28 @@
// - JSGlobalObject
// - JSBuiltinsObject
// - JSGlobalProxy
// - JSValue
// - Array
// - ByteArray
// - PixelArray
// - ExternalArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
// - ExternalUnsignedShortArray
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
// - FixedArray
// - DescriptorArray
// - HashTable
// - Dictionary
// - SymbolTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - JSValue
// - ByteArray
// - PixelArray
// - ExternalArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
// - ExternalUnsignedShortArray
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
// - FixedArray
// - DescriptorArray
// - HashTable
// - Dictionary
// - SymbolTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - String
// - SeqString
// - SeqAsciiString
@ -411,6 +410,7 @@ enum StringRepresentationTag {
kConsStringTag = 0x1,
kExternalStringTag = 0x3
};
const uint32_t kIsConsStringMask = 0x1;
// A ConsString with an empty string as the right side is a candidate
@ -676,6 +676,10 @@ class Object BASE_EMBEDDED {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype();
// Tries to convert an object to an array index. Returns true and sets
// the output parameter if it succeeds.
inline bool ToArrayIndex(uint32_t* index);
// Returns true if this is a JSValue containing a string and the index is
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
@ -1026,7 +1030,7 @@ class HeapObject: public Object {
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
// Does not update remembered sets, so should only be assigned to
// Does not invoke write barrier, so should only be assigned to
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
@ -1046,6 +1050,7 @@ class HeapObject: public Object {
void HeapObjectPrint();
void HeapObjectVerify();
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);
void PrintHeader(const char* id);
@ -1150,7 +1155,7 @@ class JSObject: public HeapObject {
};
// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case, and a Dictionary in the
// properties is a FixedArray in the fast case and a Dictionary in the
// slow case.
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
@ -1158,9 +1163,9 @@ class JSObject: public HeapObject {
inline StringDictionary* property_dictionary(); // Gets slow properties.
// [elements]: The elements (properties with names that are integers).
// elements is a FixedArray in the fast case, and a Dictionary in the slow
// case or a PixelArray in a special case.
DECL_ACCESSORS(elements, Array) // Get and set fast elements.
// elements is a FixedArray in the fast case, a Dictionary in the slow
// case, and a PixelArray or ExternalArray in special cases.
DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
@ -1594,37 +1599,13 @@ class JSObject: public HeapObject {
};
// Abstract super class for arrays. It provides length behavior.
class Array: public HeapObject {
// FixedArray describes fixed-sized arrays with element type Object*.
class FixedArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// Convert an object to an array index.
// Returns true if the conversion succeeded.
static inline bool IndexFromObject(Object* object, uint32_t* index);
// Layout descriptor.
static const int kLengthOffset = HeapObject::kHeaderSize;
protected:
// No code should use the Array class directly, only its subclasses.
// Use the kHeaderSize of the appropriate subclass, which may be aligned.
static const int kHeaderSize = kLengthOffset + kIntSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
};
// FixedArray describes fixed sized arrays where element
// type is Object*.
class FixedArray: public Array {
public:
// Setter and getter for elements.
inline Object* get(int index);
// Setter that uses write barrier.
@ -1665,7 +1646,10 @@ class FixedArray: public Array {
// Casting.
static inline FixedArray* cast(Object* obj);
static const int kHeaderSize = Array::kAlignedSize;
// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kPointerSize;
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
@ -2364,8 +2348,12 @@ class JSFunctionResultCache: public FixedArray {
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
class ByteArray: public Array {
class ByteArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
@ -2374,7 +2362,7 @@ class ByteArray: public Array {
inline int get_int(int index);
static int SizeFor(int length) {
return OBJECT_SIZE_ALIGN(kHeaderSize + length);
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
// We use byte arrays for free blocks in the heap. Given a desired size in
// bytes that is a multiple of the word size and big enough to hold a byte
@ -2402,9 +2390,12 @@ class ByteArray: public Array {
void ByteArrayVerify();
#endif
// ByteArray headers are not quadword aligned.
static const int kHeaderSize = Array::kHeaderSize;
static const int kAlignedSize = Array::kAlignedSize;
// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kPointerSize;
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
// Maximal memory consumption for a single ByteArray.
static const int kMaxSize = 512 * MB;
@ -2423,8 +2414,12 @@ class ByteArray: public Array {
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
class PixelArray: public Array {
class PixelArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// [external_pointer]: The pointer to the external memory area backing this
// pixel array.
DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.
@ -2449,9 +2444,11 @@ class PixelArray: public Array {
static const int kMaxLength = 0x3fffffff;
// PixelArray headers are not quadword aligned.
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kExternalPointerOffset =
POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
@ -2469,8 +2466,12 @@ class PixelArray: public Array {
// Out-of-range values passed to the setter are converted via a C
// cast, not clamping. Out-of-range indices cause exceptions to be
// raised rather than being silently ignored.
class ExternalArray: public Array {
class ExternalArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// [external_pointer]: The pointer to the external memory area backing this
// external array.
DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
@ -2482,9 +2483,11 @@ class ExternalArray: public Array {
static const int kMaxLength = 0x3fffffff;
// ExternalArray headers are not quadword aligned.
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kExternalPointerOffset =
POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
@ -3038,7 +3041,13 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kPadStart = kCodeCacheOffset + kPointerSize;
static const int kSize = MAP_SIZE_ALIGN(kPadStart);
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
// being contiguously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
static const int kPointerFieldsEndOffset =
Map::kCodeCacheOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@ -3350,23 +3359,64 @@ class SharedFunctionInfo: public HeapObject {
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInferredNameOffset + kPointerSize;
// Integer fields.
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize;
kFormalParameterCountOffset + kPointerSize;
static const int kNumLiteralsOffset =
kExpectedNofPropertiesOffset + kPointerSize;
static const int kStartPositionAndTypeOffset =
kNumLiteralsOffset + kPointerSize;
static const int kEndPositionOffset =
kStartPositionAndTypeOffset + kPointerSize;
static const int kFunctionTokenPositionOffset =
kEndPositionOffset + kPointerSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kPointerSize;
static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kPointerSize;
// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without map decoding during
// garbage collections.
// To avoid wasting space on 64-bit architectures we use
// the following trick: we group integer fields into pairs.
// The first integer in each pair is shifted left by 1.
// By doing this we guarantee that the LSB of each kPointerSize aligned
// word is not set and thus this word cannot be treated as a pointer
// to a HeapObject during old space traversal.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
static const int kNumLiteralsOffset =
kExpectedNofPropertiesOffset + kIntSize;
static const int kEndPositionOffset =
kNumLiteralsOffset + kIntSize;
static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
static const int kStartPositionAndTypeOffset =
kEndPositionOffset + kIntSize;
static const int kFunctionTokenPositionOffset =
kStartPositionAndTypeOffset + kIntSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kIntSize;
// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
#endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
private:
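
To make the 64-bit field-pairing trick described in the comment above concrete, here is a hedged, stand-alone sketch (plain C++, not V8 code; the field names are made up) of why shifting the first integer of each pair left by one keeps the least significant bit of every pointer-aligned word clear on a little-endian 64-bit heap:

#include <cassert>
#include <cstdint>

int main() {
  // Two 32-bit fields share one 8-byte, pointer-aligned word. The first field
  // of the pair is stored shifted left by 1 (smi style), so bit 0 of the
  // combined word is always 0. Tagged HeapObject pointers have bit 0 set
  // (kHeapObjectTag == 1), so the GC can never mistake this word for one.
  uint32_t length = 7;                  // first field of the pair
  uint32_t formal_parameter_count = 3;  // second field of the pair
  uint64_t word = (static_cast<uint64_t>(formal_parameter_count) << 32) |
                  (static_cast<uint64_t>(length) << 1);
  assert((word & 1) == 0);                          // never looks like a tag
  assert((static_cast<uint32_t>(word) >> 1) == 7);  // decode the first field
  assert(static_cast<uint32_t>(word >> 32) == 3);   // decode the second field
  return 0;
}
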
@ -4122,8 +4172,7 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHashFieldOffset = kLengthOffset + kPointerSize;
static const int kSize = kHashFieldOffset + kIntSize;
// Notice: kSize is not pointer-size aligned if pointers are 64-bit.
static const int kSize = kHashFieldOffset + kPointerSize;
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
@ -4142,7 +4191,7 @@ class String: public HeapObject {
// whether a hash code has been computed. If the hash code has been
// computed the 2nd bit tells whether the string can be used as an
// array index.
static const int kHashComputedMask = 1;
static const int kHashNotComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
static const int kNofLengthBitFields = 2;
@ -4160,9 +4209,14 @@ class String: public HeapObject {
static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
static const int kArrayIndexValueBits =
kArrayIndexHashLengthShift - kHashShift;
static const int kArrayIndexValueMask =
((1 << kArrayIndexValueBits) - 1) << kHashShift;
// Value of empty hash field indicating that the hash is not computed.
static const int kEmptyHashField = 0;
static const int kEmptyHashField = kHashNotComputedMask;
// Value of hash field containing computed hash equal to zero.
static const int kZeroHash = 0;
// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
@ -4230,6 +4284,8 @@ class String: public HeapObject {
// mutates the ConsString and might return a failure.
Object* SlowTryFlatten(PretenureFlag pretenure);
static inline bool IsHashFieldComputed(uint32_t field);
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
@ -4279,7 +4335,7 @@ class SeqAsciiString: public SeqString {
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
}
// Layout description.
@ -4331,7 +4387,7 @@ class SeqTwoByteString: public SeqString {
// Computes the size for a TwoByteString instance of a given length.
static int SizeFor(int length) {
return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
}
// Layout description.

6
deps/v8/src/platform-freebsd.cc

@ -84,6 +84,12 @@ void OS::Setup() {
}
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
__asm__ __volatile__("" : : : "memory");
*ptr = value;
}
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // FreeBSD runs on anything.
}

3
deps/v8/src/platform-linux.cc

@ -177,7 +177,8 @@ LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
#endif
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__) // don't use on a simulator
#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
// Only use on ARM hardware.
pLinuxKernelMemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");

6
deps/v8/src/profile-generator.cc

@ -572,7 +572,8 @@ int CpuProfilesCollection::TokenToIndex(int security_token_id) {
List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
int security_token_id) {
const int index = TokenToIndex(security_token_id);
profiles_by_token_.AddBlock(NULL, profiles_by_token_.length() - index + 1);
const int lists_to_add = index - profiles_by_token_.length() + 1;
if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
List<CpuProfile*>* unabridged_list =
profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
const int current_count = unabridged_list->length();
@ -580,7 +581,8 @@ List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
profiles_by_token_[index] = new List<CpuProfile*>(current_count);
}
List<CpuProfile*>* list = profiles_by_token_[index];
list->AddBlock(NULL, current_count - list->length());
const int profiles_to_add = current_count - list->length();
if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
return list;
}

138
deps/v8/src/runtime.cc

@ -291,7 +291,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = SetProperty(boilerplate, name, value, NONE);
} else if (Array::IndexFromObject(*key, &element_index)) {
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
result = SetElement(boilerplate, element_index, value);
} else {
@ -569,6 +569,18 @@ static void GetOwnPropertyImplementation(JSObject* obj,
}
// Enumerator used as indices into the array returned from GetOwnProperty
enum PropertyDescriptorIndices {
IS_ACCESSOR_INDEX,
VALUE_INDEX,
GETTER_INDEX,
SETTER_INDEX,
WRITABLE_INDEX,
ENUMERABLE_INDEX,
CONFIGURABLE_INDEX,
DESCRIPTOR_SIZE
};
// Returns an array with the property description:
// if args[1] is not a property on args[0]
// returns undefined
@ -579,18 +591,63 @@ static void GetOwnPropertyImplementation(JSObject* obj,
static Object* Runtime_GetOwnProperty(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
Handle<FixedArray> elms = Factory::NewFixedArray(5);
Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
LookupResult result;
CONVERT_CHECKED(JSObject, obj, args[0]);
CONVERT_CHECKED(String, name, args[1]);
// This could be an element.
uint32_t index;
if (name->AsArrayIndex(&index)) {
if (!obj->HasLocalElement(index)) {
return Heap::undefined_value();
}
// Special handling of string objects according to ECMAScript 5 15.5.5.2.
// Note that this might be a string object with elements other than the
// actual string value. This is covered by the subsequent cases.
if (obj->IsStringObjectWithCharacterAt(index)) {
JSValue* js_value = JSValue::cast(obj);
String* str = String::cast(js_value->value());
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, str->SubString(index, index+1));
elms->set(WRITABLE_INDEX, Heap::false_value());
elms->set(ENUMERABLE_INDEX, Heap::false_value());
elms->set(CONFIGURABLE_INDEX, Heap::false_value());
return *desc;
}
// This can potentially be an element in the elements dictionary or
// a fast element.
if (obj->HasDictionaryElements()) {
NumberDictionary* dictionary = obj->element_dictionary();
int entry = dictionary->FindEntry(index);
PropertyDetails details = dictionary->DetailsAt(entry);
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
return *desc;
} else {
// Elements that are stored as array elements always have:
// writable: true, configurable: true, enumerable: true.
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, obj->GetElement(index));
elms->set(WRITABLE_INDEX, Heap::true_value());
elms->set(ENUMERABLE_INDEX, Heap::true_value());
elms->set(CONFIGURABLE_INDEX, Heap::true_value());
return *desc;
}
}
// Use recursive implementation to also traverse hidden prototypes
GetOwnPropertyImplementation(obj, name, &result);
if (!result.IsProperty())
if (!result.IsProperty()) {
return Heap::undefined_value();
}
if (result.type() == CALLBACKS) {
Object* structure = result.GetCallbackObject();
if (structure->IsProxy() || structure->IsAccessorInfo()) {
@ -598,25 +655,25 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
// an API defined callback.
Object* value = obj->GetPropertyWithCallback(
obj, structure, name, result.holder());
elms->set(0, Heap::false_value());
elms->set(1, value);
elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, value);
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
} else if (structure->IsFixedArray()) {
// __defineGetter__/__defineSetter__ callback.
elms->set(0, Heap::true_value());
elms->set(1, FixedArray::cast(structure)->get(0));
elms->set(2, FixedArray::cast(structure)->get(1));
elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
elms->set(GETTER_INDEX, FixedArray::cast(structure)->get(0));
elms->set(SETTER_INDEX, FixedArray::cast(structure)->get(1));
} else {
return Heap::undefined_value();
}
} else {
elms->set(0, Heap::false_value());
elms->set(1, result.GetLazyValue());
elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, result.GetLazyValue());
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
}
elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
elms->set(4, Heap::ToBoolean(!result.IsDontDelete()));
elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
return *desc;
}
@ -1583,7 +1640,7 @@ static Object* Runtime_SetCode(Arguments args) {
static Object* CharCodeAt(String* subject, Object* index) {
uint32_t i = 0;
if (!Array::IndexFromObject(index, &i)) return Heap::nan_value();
if (!index->ToArrayIndex(&i)) return Heap::nan_value();
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
@ -1599,7 +1656,7 @@ static Object* CharCodeAt(String* subject, Object* index) {
static Object* CharFromCode(Object* char_code) {
uint32_t code;
if (Array::IndexFromObject(char_code, &code)) {
if (char_code->ToArrayIndex(&code)) {
if (code <= 0xffff) {
return Heap::LookupSingleCharacterStringFromCode(code);
}
@ -2780,7 +2837,7 @@ static Object* Runtime_StringIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
int position = Runtime::StringMatch(sub, pat, start_index);
@ -2830,7 +2887,7 @@ static Object* Runtime_StringLastIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
uint32_t pat_length = pat->length();
uint32_t sub_length = sub->length();
@ -3657,7 +3714,7 @@ Object* Runtime::GetObjectProperty(Handle<Object> object, Handle<Object> key) {
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
return GetElementOrCharAt(object, index);
}
@ -3843,7 +3900,7 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@ -3895,7 +3952,7 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@ -3942,7 +3999,7 @@ Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
@ -4355,7 +4412,7 @@ static Object* Runtime_GetArgumentsProperty(Arguments args) {
// Try to convert the key to an index. If successful and within
// bounds, return the argument from the frame.
uint32_t index;
if (Array::IndexFromObject(args[0], &index) && index < n) {
if (args[0]->ToArrayIndex(&index) && index < n) {
return frame->GetParameter(index);
}
@ -5287,6 +5344,25 @@ static Object* Runtime_NumberToInteger(Arguments args) {
}
static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(number, args[0]);
// We do not include 0 so that we don't have to treat +0 / -0 cases.
if (number > 0 && number <= Smi::kMaxValue) {
return Smi::FromInt(static_cast<int>(number));
}
double double_value = DoubleToInteger(number);
// Map both -0 and +0 to +0.
if (double_value == 0) double_value = 0;
return Heap::NumberFromDouble(double_value);
}
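
A quick stand-alone illustration of the -0 handling above (a sketch only: std::trunc stands in for V8's DoubleToInteger, which is an assumption about its behaviour for this input):

#include <cassert>
#include <cmath>

int main() {
  double number = -0.25;
  double double_value = std::trunc(number);  // -0.0
  // Both -0 and +0 compare equal to 0, so this assignment maps -0 to +0.
  if (double_value == 0) double_value = 0;
  assert(!std::signbit(double_value));       // the sign bit is gone
  return 0;
}
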
static Object* Runtime_NumberToJSUint32(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@ -6457,8 +6533,8 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
if (obj->IsFailure()) return obj;
AssertNoAllocation no_gc;
reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
FixedArray* array = FixedArray::cast(obj);
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
array->set_map(Heap::fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@ -7747,8 +7823,8 @@ static Object* Runtime_SwapElements(Arguments args) {
Handle<Object> key2 = args.at<Object>(2);
uint32_t index1, index2;
if (!Array::IndexFromObject(*key1, &index1)
|| !Array::IndexFromObject(*key2, &index2)) {
if (!key1->ToArrayIndex(&index1)
|| !key2->ToArrayIndex(&index2)) {
return Top::ThrowIllegalOperation();
}
@ -7779,17 +7855,19 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
uint32_t index;
if (!Array::IndexFromObject(key, &index) || index >= length) {
if (!key->ToArrayIndex(&index) || index >= length) {
// Zap invalid keys.
keys->set_undefined(i);
}
}
return *Factory::NewJSArrayWithElements(keys);
} else {
ASSERT(array->HasFastElements());
Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
uint32_t actual_length =
static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
Factory::NewNumber(static_cast<double>(min_length));

1
deps/v8/src/runtime.h

@ -102,6 +102,7 @@ namespace internal {
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToJSUint32, 1, 1) \
F(NumberToJSInt32, 1, 1) \
F(NumberToSmi, 1, 1) \

224
deps/v8/src/spaces-inl.h

@ -66,99 +66,183 @@ Address Page::AllocationTop() {
}
void Page::ClearRSet() {
// This method can be called in all rset states.
memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}
// Given a 32-bit address, separate its bits into:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4
// For a 64-bit address, if it is:
// | page address | words(5) | bit offset(5) | pointer alignment (3) |
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4 + kRSetOffset.
// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
// even on the X64 architecture.
Address Page::ComputeRSetBitPosition(Address address, int offset,
uint32_t* bitmask) {
ASSERT(Page::is_rset_in_use());
Page* page = Page::FromAddress(address);
uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
kPointerSizeLog2);
*bitmask = 1 << (bit_offset % kBitsPerInt);
Address rset_address =
page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
// The remembered set address is either in the normal remembered set range
// of a page or else we have a large object page.
ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
|| page->IsLargeObjectPage());
if (rset_address >= page->RSetEnd()) {
// We have a large object page, and the remembered set address is actually
// past the end of the object.
// The first part of the remembered set is still located at the start of
// the page, but anything after kRSetEndOffset must be relocated to after
// the large object, i.e. after
// (page->ObjectAreaStart() + object size)
// We do that by adding the difference between the normal RSet's end and
// the object's end.
ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
int fixedarray_length =
FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+ Array::kLengthOffset));
rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
Address Page::AllocationWatermark() {
PagedSpace* owner = MemoryAllocator::PageOwner(this);
if (this == owner->AllocationTopPage()) {
return owner->top();
}
return rset_address;
return address() + AllocationWatermarkOffset();
}
void Page::SetRSet(Address address, int offset) {
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
Memory::uint32_at(rset_address) |= bitmask;
uint32_t Page::AllocationWatermarkOffset() {
return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
kAllocationWatermarkOffsetShift);
}
ASSERT(IsRSetSet(address, offset));
void Page::SetAllocationWatermark(Address allocation_watermark) {
if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
// When iterating intergenerational references during scavenge
// we might decide to promote an encountered young object.
// We will allocate space for such an object and put it
// into the promotion queue to process it later.
// If space for the object is allocated somewhere beyond the allocation
// watermark this might cause garbage pointers to appear under the allocation
// watermark. To avoid visiting them during dirty regions iteration,
// which might still be in progress, we store a valid allocation watermark
// value and mark this page as having an invalid watermark.
SetCachedAllocationWatermark(AllocationWatermark());
InvalidateWatermark(true);
}
flags_ = (flags_ & kFlagsMask) |
Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
ASSERT(AllocationWatermarkOffset()
== static_cast<uint32_t>(Offset(allocation_watermark)));
}
// Clears the corresponding remembered set bit for a given address.
void Page::UnsetRSet(Address address, int offset) {
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
Memory::uint32_at(rset_address) &= ~bitmask;
void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
mc_first_forwarded = allocation_watermark;
}
ASSERT(!IsRSetSet(address, offset));
Address Page::CachedAllocationWatermark() {
return mc_first_forwarded;
}
uint32_t Page::GetRegionMarks() {
return dirty_regions_;
}
bool Page::IsRSetSet(Address address, int offset) {
void Page::SetRegionMarks(uint32_t marks) {
dirty_regions_ = marks;
}
int Page::GetRegionNumberForAddress(Address addr) {
// Each page is divided into 256-byte regions. Each region has a corresponding
// dirty mark bit in the page header. A region can contain intergenerational
// references iff its dirty mark is set.
// A normal 8K page contains exactly 32 regions, so all region marks fit
// into a 32-bit integer field. To calculate a region number we just divide
// the offset inside the page by the region size.
// A large page can contain more than 32 regions. But we want to avoid
// additional write barrier code for distinguishing between large and normal
// pages, so we just ignore the fact that addr points into a large page and
// calculate the region number as if addr pointed into a normal 8K page. This
// way we get a region number modulo 32, so for large pages several regions
// might be mapped to a single dirty mark.
ASSERT_PAGE_ALIGNED(this->address());
STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
// We use masking with kPageAlignmentMask instead of Page::Offset()
// to get an offset relative to the beginning of the 8K page containing addr,
// not to the beginning of the actual page, which can be bigger than 8K.
intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}
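
A worked example of the mapping described above, assuming an 8K page (kPageAlignmentMask == 0x1FFF) and 256-byte regions (kRegionSizeLog2 == 8); the constants are restated locally so the sketch is self-contained:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPageAlignmentMask = 0x1FFF;   // 8K page, assumed
  const int kRegionSizeLog2 = 8;                 // 256-byte regions, assumed
  uintptr_t addr = 0x40003A10;                   // some slot inside a page
  uintptr_t offset = addr & kPageAlignmentMask;  // 0x1A10 == 6672
  int region = static_cast<int>(offset >> kRegionSizeLog2);
  assert(region == 26);                          // 6672 / 256 == 26
  uint32_t mask = 1u << region;                  // bit 26 of the dirty marks
  assert(mask == 0x04000000u);
  return 0;
}
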
uint32_t Page::GetRegionMaskForAddress(Address addr) {
return 1 << GetRegionNumberForAddress(addr);
}
void Page::MarkRegionDirty(Address address) {
SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}
bool Page::IsRegionDirty(Address address) {
return GetRegionMarks() & GetRegionMaskForAddress(address);
}
void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
int rstart = GetRegionNumberForAddress(start);
int rend = GetRegionNumberForAddress(end);
if (reaches_limit) {
end += 1;
}
if ((rend - rstart) == 0) {
return;
}
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
return (Memory::uint32_at(rset_address) & bitmask) != 0;
if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
|| (start == ObjectAreaStart())) {
// First region is fully covered
bitmask = 1 << rstart;
}
while (++rstart < rend) {
bitmask |= 1 << rstart;
}
if (bitmask) {
SetRegionMarks(GetRegionMarks() & ~bitmask);
}
}
void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
}
bool Page::IsWatermarkValid() {
return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
}
void Page::InvalidateWatermark(bool value) {
if (value) {
flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
} else {
flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
(watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
}
ASSERT(IsWatermarkValid() == !value);
}
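
A minimal, stand-alone sketch of the flag-flipping trick implemented by the three functions above (the bit value and names below are assumptions, not the V8 definitions): a page's watermark counts as invalid when its flag bit equals a global mark, so flipping that single global bit revalidates every previously invalidated page in O(1) without touching any page headers.

#include <cassert>
#include <cstdint>

const uint32_t WATERMARK_INVALIDATED = 1u << 3;  // assumed bit position
uint32_t watermark_invalidated_mark = WATERMARK_INVALIDATED;

struct PageSketch {
  uint32_t flags = 0;
  bool IsWatermarkValid() const {
    return (flags & WATERMARK_INVALIDATED) != watermark_invalidated_mark;
  }
  void InvalidateWatermark() {
    flags = (flags & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark;
  }
};

int main() {
  PageSketch page;
  page.InvalidateWatermark();
  assert(!page.IsWatermarkValid());
  // Flip the meaning of the global mark: the page becomes valid again.
  watermark_invalidated_mark ^= WATERMARK_INVALIDATED;
  assert(page.IsWatermarkValid());
  return 0;
}
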
bool Page::GetPageFlag(PageFlag flag) {
return (flags & flag) != 0;
return (flags_ & flag) != 0;
}
void Page::SetPageFlag(PageFlag flag, bool value) {
if (value) {
flags |= flag;
flags_ |= flag;
} else {
flags &= ~flag;
flags_ &= ~flag;
}
}
void Page::ClearPageFlags() {
flags_ = 0;
}
void Page::ClearGCFields() {
InvalidateWatermark(true);
SetAllocationWatermark(ObjectAreaStart());
if (Heap::gc_state() == Heap::SCAVENGE) {
SetCachedAllocationWatermark(ObjectAreaStart());
}
SetRegionMarks(kAllRegionsCleanMarks);
}
bool Page::WasInUseBeforeMC() {
return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}
@ -343,14 +427,6 @@ HeapObject* LargeObjectChunk::GetObject() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
int extra_rset_bits =
RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
kBitsPerInt);
return extra_rset_bits / kBitsPerByte;
}
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
Address new_top = alloc_info->top + size_in_bytes;

444
deps/v8/src/spaces.cc

@ -41,6 +41,7 @@ namespace internal {
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
// ----------------------------------------------------------------------------
// HeapObjectIterator
@ -138,13 +139,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
}
// -----------------------------------------------------------------------------
// Page
#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif
// -----------------------------------------------------------------------------
// CodeRange
@ -524,7 +518,10 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
p->InvalidateWatermark(true);
p->SetIsLargeObjectPage(false);
p->SetAllocationWatermark(p->ObjectAreaStart());
p->SetCachedAllocationWatermark(p->ObjectAreaStart());
page_addr += Page::kPageSize;
}
@ -681,6 +678,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
page_addr += Page::kPageSize;
p->InvalidateWatermark(true);
if (p->WasInUseBeforeMC()) {
*last_page_in_use = p;
}
@ -744,10 +742,10 @@ bool PagedSpace::Setup(Address start, size_t size) {
accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
// Sequentially initialize remembered sets in the newly allocated
// Sequentially clear region marks in the newly allocated
// pages and cache the current last page in the space.
for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
p->ClearRSet();
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
}
@ -794,10 +792,10 @@ void PagedSpace::Unprotect() {
#endif
void PagedSpace::ClearRSet() {
void PagedSpace::MarkAllPagesClean() {
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
it.next()->ClearRSet();
it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
}
@ -900,7 +898,8 @@ HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
// of forwarding addresses is as an offset in terms of live bytes, so we
// need quick access to the allocation top of each page to decode
// forwarding addresses.
current_page->mc_relocation_top = mc_forwarding_info_.top;
current_page->SetAllocationWatermark(mc_forwarding_info_.top);
current_page->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}
@ -928,10 +927,10 @@ bool PagedSpace::Expand(Page* last_page) {
MemoryAllocator::SetNextPage(last_page, p);
// Sequentially clear remembered set of new pages and cache the
// Sequentially clear region marks of new pages and cache the
// new last page in the space.
while (p->is_valid()) {
p->ClearRSet();
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
p = p->next_page();
}
@ -1030,16 +1029,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (above_allocation_top) {
// We don't care what's above the allocation top.
} else {
// Unless this is the last page in the space containing allocated
// objects, the allocation top should be at a constant offset from the
// object area end.
Address top = current_page->AllocationTop();
if (current_page == top_page) {
ASSERT(top == allocation_info_.top);
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
ASSERT(top == PageAllocationLimit(current_page));
}
// It should be packed with objects from the bottom to the top.
@ -1060,8 +1054,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
object->Verify();
// All the interior pointers should be contained in the heap and
// have their remembered set bits set if required as determined
// by the visitor.
// page regions covering intergenerational references should be
// marked dirty.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
@ -1120,7 +1114,7 @@ bool NewSpace::Setup(Address start, int size) {
start_ = start;
address_mask_ = ~(size - 1);
object_mask_ = address_mask_ | kHeapObjectTag;
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
allocation_info_.top = to_space_.low();
@ -1324,7 +1318,7 @@ bool SemiSpace::Setup(Address start,
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTag;
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;
@ -1634,7 +1628,7 @@ void FreeListNode::set_size(int size_in_bytes) {
// If the block is too small (eg, one or two words), to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > ByteArray::kAlignedSize) {
if (size_in_bytes > ByteArray::kHeaderSize) {
set_map(Heap::raw_unchecked_byte_array_map());
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
@ -1831,7 +1825,7 @@ FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
void FixedSizeFreeList::Reset() {
available_ = 0;
head_ = NULL;
head_ = tail_ = NULL;
}
@ -1843,8 +1837,13 @@ void FixedSizeFreeList::Free(Address start) {
ASSERT(!MarkCompactCollector::IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(object_size_);
node->set_next(head_);
head_ = node->address();
node->set_next(NULL);
if (head_ == NULL) {
tail_ = head_ = node->address();
} else {
FreeListNode::FromAddress(tail_)->set_next(node->address());
tail_ = node->address();
}
available_ += object_size_;
}
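
A hedged sketch (simplified types, not the V8 FreeListNode machinery) of the queue behaviour introduced above: freed blocks are now appended at the tail and allocation pops from the head, so the oldest freed block is reused first.

#include <cassert>
#include <cstddef>

struct Node {
  Node* next;
};

struct FifoFreeList {
  Node* head = nullptr;
  Node* tail = nullptr;

  void Free(Node* node) {
    node->next = nullptr;
    if (head == nullptr) {
      tail = head = node;
    } else {
      tail->next = node;
      tail = node;
    }
  }

  Node* Allocate() {
    Node* result = head;
    if (result != nullptr) {
      head = result->next;
      if (head == nullptr) tail = nullptr;
    }
    return result;
  }
};

int main() {
  Node a, b;
  FifoFreeList list;
  list.Free(&a);
  list.Free(&b);
  assert(list.Allocate() == &a);  // the oldest freed block comes back first
  assert(list.Allocate() == &b);
  assert(list.Allocate() == nullptr);
  return 0;
}
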
@ -1907,15 +1906,14 @@ void OldSpace::MCCommitRelocationInfo() {
Page* p = it.next();
// Space below the relocation pointer is allocated.
computed_size +=
static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
if (it.has_next()) {
// Free the space at the top of the page. We cannot use
// p->mc_relocation_top after the call to Free (because Free will clear
// remembered set bits).
// Free the space at the top of the page.
int extra_size =
static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
if (extra_size > 0) {
int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
extra_size);
// The bytes we have just "freed" to add to the free list were
// already accounted as available.
accounting_stats_.WasteBytes(wasted_bytes);
@ -1963,7 +1961,10 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
// Clean them up.
do {
first->ClearRSet();
first->InvalidateWatermark(true);
first->SetAllocationWatermark(first->ObjectAreaStart());
first->SetCachedAllocationWatermark(first->ObjectAreaStart());
first->SetRegionMarks(Page::kAllRegionsCleanMarks);
first = first->next_page();
} while (first != NULL);
@ -2003,6 +2004,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
// Current allocation top points to a page which is now in the middle
// of page list. We should move allocation top forward to the new last
// used page so various object iterators will continue to work properly.
last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
last_in_use->AllocationTop());
@ -2035,6 +2037,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
p->ObjectAreaStart());
p->SetAllocationWatermark(p->ObjectAreaStart());
Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
}
}
@ -2066,6 +2069,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
if (!reserved_page->is_valid()) return false;
}
ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&allocation_info_,
TopPageOf(allocation_info_)->next_page());
return true;
@ -2100,7 +2104,20 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
accounting_stats_.WasteBytes(wasted_bytes);
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::cast(result);
HeapObject* obj = HeapObject::cast(result);
Page* p = Page::FromAddress(obj->address());
if (obj->address() >= p->AllocationWatermark()) {
// There should be no hole between the allocation watermark
// and the allocated object address.
// Memory above the allocation watermark was not swept and
// might contain garbage pointers to new space.
ASSERT(obj->address() == p->AllocationWatermark());
p->SetAllocationWatermark(obj->address() + size_in_bytes);
}
return obj;
}
}
@ -2123,6 +2140,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
@ -2133,6 +2151,7 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
// In the fixed space free list all the free list items have the right size.
@ -2152,8 +2171,10 @@ void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
Page* next_page = current_page->next_page();
next_page->ClearGCFields();
PutRestOfCurrentPageOnFreeList(current_page);
SetAllocationInfo(&allocation_info_, current_page->next_page());
SetAllocationInfo(&allocation_info_, next_page);
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@ -2296,160 +2317,12 @@ void OldSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
// Report remembered set statistics.
int rset_marked_pointers = 0;
int rset_marked_arrays = 0;
int rset_marked_array_elements = 0;
int cross_gen_pointers = 0;
int cross_gen_array_elements = 0;
PageIterator page_it(this, PageIterator::PAGES_IN_USE);
while (page_it.has_next()) {
Page* p = page_it.next();
for (Address rset_addr = p->RSetStart();
rset_addr < p->RSetEnd();
rset_addr += kIntSize) {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
int intoff =
static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
int bitpos = intoff*kBitsPerByte + bitoff;
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
Object** obj = reinterpret_cast<Object**>(slot);
if (*obj == Heap::raw_unchecked_fixed_array_map()) {
rset_marked_arrays++;
FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
rset_marked_array_elements += fa->length();
// Manually inline FixedArray::IterateBody
Address elm_start = slot + FixedArray::kHeaderSize;
Address elm_stop = elm_start + fa->length() * kPointerSize;
for (Address elm_addr = elm_start;
elm_addr < elm_stop; elm_addr += kPointerSize) {
// Filter non-heap-object pointers
Object** elm_p = reinterpret_cast<Object**>(elm_addr);
if (Heap::InNewSpace(*elm_p))
cross_gen_array_elements++;
}
} else {
rset_marked_pointers++;
if (Heap::InNewSpace(*obj))
cross_gen_pointers++;
}
}
}
}
}
}
pct = rset_marked_pointers == 0 ?
0 : cross_gen_pointers * 100 / rset_marked_pointers;
PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
rset_marked_pointers, cross_gen_pointers, pct);
PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
PrintF(" elements %d, ", rset_marked_array_elements);
pct = rset_marked_array_elements == 0 ? 0
: cross_gen_array_elements * 100 / rset_marked_array_elements;
PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
PrintF(" total rset-marked bits %d\n",
(rset_marked_pointers + rset_marked_arrays));
pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
: (cross_gen_pointers + cross_gen_array_elements) * 100 /
(rset_marked_pointers + rset_marked_array_elements);
PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
(rset_marked_pointers + rset_marked_array_elements),
(cross_gen_pointers + cross_gen_array_elements),
pct);
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(true);
}
// Dump the range of remembered set words between [start, end) corresponding
// to the pointers starting at object_p. The allocation_top is an object
// pointer which should not be read past. This is important for large object
// pages, where some bits in the remembered set range do not correspond to
// allocated addresses.
static void PrintRSetRange(Address start, Address end, Object** object_p,
Address allocation_top) {
Address rset_address = start;
// If the range starts on an odd-numbered word (e.g., for large object extra
// remembered set ranges), print some spaces.
if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
PrintF(" ");
}
// Loop over all the words in the range.
while (rset_address < end) {
uint32_t rset_word = Memory::uint32_at(rset_address);
int bit_position = 0;
// Loop over all the bits in the word.
while (bit_position < kBitsPerInt) {
if (object_p == reinterpret_cast<Object**>(allocation_top)) {
// Print a bar at the allocation pointer.
PrintF("|");
} else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
// Do not dereference object_p past the allocation pointer.
PrintF("#");
} else if ((rset_word & (1 << bit_position)) == 0) {
// Print a dot for zero bits.
PrintF(".");
} else if (Heap::InNewSpace(*object_p)) {
// Print an X for one bits for pointers to new space.
PrintF("X");
} else {
// Print a circle for one bits for pointers to old space.
PrintF("o");
}
// Print a space after every 8th bit except the last.
if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
PrintF(" ");
}
// Advance to next bit.
bit_position++;
object_p++;
}
// Print a newline after every odd numbered word, otherwise a space.
if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
PrintF("\n");
} else {
PrintF(" ");
}
// Advance to next remembered set word.
rset_address += kIntSize;
}
}
void PagedSpace::DoPrintRSet(const char* space_name) {
PageIterator it(this, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
PrintF("%s page 0x%x:\n", space_name, p);
PrintRSetRange(p->RSetStart(), p->RSetEnd(),
reinterpret_cast<Object**>(p->ObjectAreaStart()),
p->AllocationTop());
PrintF("\n");
}
}
void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif
// -----------------------------------------------------------------------------
@ -2499,6 +2372,7 @@ void FixedSpace::MCCommitRelocationInfo() {
if (it.has_next()) {
accounting_stats_.WasteBytes(
static_cast<int>(page->ObjectAreaEnd() - page_top));
page->SetAllocationWatermark(page_top);
}
}
@ -2528,7 +2402,19 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::cast(result);
HeapObject* obj = HeapObject::cast(result);
Page* p = Page::FromAddress(obj->address());
if (obj->address() >= p->AllocationWatermark()) {
// There should be no hole between the allocation watermark
// and the allocated object address.
// Memory above the allocation watermark was not swept and
// might contain garbage pointers to new space.
ASSERT(obj->address() == p->AllocationWatermark());
p->SetAllocationWatermark(obj->address() + size_in_bytes);
}
return obj;
}
}
@ -2558,8 +2444,11 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
ASSERT(current_page->next_page()->is_valid());
ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
Page* next_page = current_page->next_page();
next_page->ClearGCFields();
current_page->SetAllocationWatermark(allocation_info_.top);
accounting_stats_.WasteBytes(page_extra_);
SetAllocationInfo(&allocation_info_, current_page->next_page());
SetAllocationInfo(&allocation_info_, next_page);
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@ -2570,51 +2459,12 @@ void FixedSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
// Report remembered set statistics.
int rset_marked_pointers = 0;
int cross_gen_pointers = 0;
PageIterator page_it(this, PageIterator::PAGES_IN_USE);
while (page_it.has_next()) {
Page* p = page_it.next();
for (Address rset_addr = p->RSetStart();
rset_addr < p->RSetEnd();
rset_addr += kIntSize) {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
int intoff =
static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
int bitpos = intoff*kBitsPerByte + bitoff;
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
Object** obj = reinterpret_cast<Object**>(slot);
rset_marked_pointers++;
if (Heap::InNewSpace(*obj))
cross_gen_pointers++;
}
}
}
}
}
pct = rset_marked_pointers == 0 ?
0 : cross_gen_pointers * 100 / rset_marked_pointers;
PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
rset_marked_pointers, cross_gen_pointers, pct);
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(false);
}
void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
#endif
@ -2793,8 +2643,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
chunk->set_size(chunk_size);
first_chunk_ = chunk;
// Set the object address and size in the page header and clear its
// remembered set.
// Initialize page header.
Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
Address object_address = page->ObjectAreaStart();
// Clear the low order bit of the second word in the page to flag it as a
@ -2802,13 +2651,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
page->SetIsLargeObjectPage(true);
page->ClearRSet();
int extra_bytes = requested_size - object_size;
if (extra_bytes > 0) {
// The extra memory for the remembered set should be cleared.
memset(object_address + object_size, 0, extra_bytes);
}
page->SetRegionMarks(Page::kAllRegionsCleanMarks);
return HeapObject::FromAddress(object_address);
}
@ -2823,8 +2666,7 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
return AllocateRawInternal(size_in_bytes,
size_in_bytes,
NOT_EXECUTABLE);
}
@ -2851,59 +2693,61 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Failure::Exception();
}
void LargeObjectSpace::ClearRSet() {
ASSERT(Page::is_rset_in_use());
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays need remembered set support.
if (object->IsFixedArray()) {
// Clear the normal remembered set region of the page;
Page* page = Page::FromAddress(object->address());
page->ClearRSet();
// Clear the extra remembered set.
int size = object->Size();
int extra_rset_bytes = ExtraRSetBytesFor(size);
memset(object->address() + size, 0, extra_rset_bytes);
}
}
}
void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
static void* lo_rset_histogram = StatsTable::CreateHistogram(
"V8.RSetLO",
0,
// Keeping this histogram's buckets the same as the paged space histogram.
Page::kObjectAreaSize / kPointerSize,
30);
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
if (object->IsFixedArray()) {
// Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
Address object_end = object->address() + object->Size();
int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
Min(page->ObjectAreaEnd(), object_end),
page->RSetStart(),
copy_object_func);
// Iterate the extra array elements.
if (object_end > page->ObjectAreaEnd()) {
count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
object_end, copy_object_func);
}
if (lo_rset_histogram != NULL) {
StatsTable::AddHistogramSample(lo_rset_histogram, count);
uint32_t marks = page->GetRegionMarks();
uint32_t newmarks = Page::kAllRegionsCleanMarks;
if (marks != Page::kAllRegionsCleanMarks) {
// For a large page a single dirty mark corresponds to several
// regions (modulo 32). So we treat a large page as a sequence of
// normal pages of size Page::kPageSize having the same dirty marks
// and subsequently iterate dirty regions on each of these pages.
Address start = object->address();
Address end = page->ObjectAreaEnd();
Address object_end = start + object->Size();
// Iterate regions of the first normal page covering object.
uint32_t first_region_number = page->GetRegionNumberForAddress(start);
newmarks |=
Heap::IterateDirtyRegions(marks >> first_region_number,
start,
end,
&Heap::IteratePointersInDirtyRegion,
copy_object) << first_region_number;
start = end;
end = start + Page::kPageSize;
while (end <= object_end) {
// Iterate next 32 regions.
newmarks |=
Heap::IterateDirtyRegions(marks,
start,
end,
&Heap::IteratePointersInDirtyRegion,
copy_object);
start = end;
end = start + Page::kPageSize;
}
if (start != object_end) {
// Iterate the last piece of an object which is less than
// Page::kPageSize.
newmarks |=
Heap::IterateDirtyRegions(marks,
start,
object_end,
&Heap::IteratePointersInDirtyRegion,
copy_object);
}
page->SetRegionMarks(newmarks);
}
}
}
@ -2995,7 +2839,7 @@ void LargeObjectSpace::Verify() {
} else if (object->IsFixedArray()) {
// We loop over fixed arrays ourselves, rather then using the visitor,
// because the visitor doesn't support the start/offset iteration
// needed for IsRSetSet.
// needed for IsRegionDirty.
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* element = array->get(j);
@ -3004,8 +2848,11 @@ void LargeObjectSpace::Verify() {
ASSERT(Heap::Contains(element_object));
ASSERT(element_object->map()->IsMap());
if (Heap::InNewSpace(element_object)) {
ASSERT(Page::IsRSetSet(object->address(),
FixedArray::kHeaderSize + j * kPointerSize));
Address array_addr = object->address();
Address element_addr = array_addr + FixedArray::kHeaderSize +
j * kPointerSize;
ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
}
}
}
@ -3046,33 +2893,6 @@ void LargeObjectSpace::CollectCodeStatistics() {
}
}
}
void LargeObjectSpace::PrintRSet() {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
if (object->IsFixedArray()) {
Page* page = Page::FromAddress(object->address());
Address allocation_top = object->address() + object->Size();
PrintF("large page 0x%x:\n", page);
PrintRSetRange(page->RSetStart(), page->RSetEnd(),
reinterpret_cast<Object**>(object->address()),
allocation_top);
int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
kBitsPerInt);
PrintF("------------------------------------------------------------"
"-----------\n");
PrintRSetRange(allocation_top,
allocation_top + extra_rset_bits / kBitsPerByte,
reinterpret_cast<Object**>(object->address()
+ Page::kObjectAreaSize),
allocation_top);
PrintF("\n");
}
}
}
#endif // DEBUG
} } // namespace v8::internal

278
deps/v8/src/spaces.h

@ -45,23 +45,46 @@ namespace internal {
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consists of a list of pages. A page has a page header, a remembered
// set area, and an object area. A page size is deliberately chosen as 8K
// bytes. The first word of a page is an opaque page header that has the
// spaces consist of a list of pages. A page has a page header and an object
// area. A page size is deliberately chosen as 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. The next 248 bytes are
// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
// remembered set bit corresponds to a pointer in the object area.
// have the allocation top address of this page. Heap objects are aligned to the
// pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged and uses the same remembered
// set implementation. Pages in large object space may be larger than 8K.
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
// method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if the page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every
// pointer-aligned word which satisfies the Heap::InNewSpace() predicate must
// be a pointer to a live heap object in new space. Thus objects in old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to map space, which is iterated in
// a special fashion. However, we still require pointer fields of dead maps to
// be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer under the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on a page should reside under the
// allocation watermark. During a scavenge the allocation watermark might be
// bumped and invalid pointers might appear below it. To avoid following them
// we store a valid watermark into a special field in the page header and set
// the page's WATERMARK_INVALIDATED flag. For details see the comments in the
// Page::SetAllocationWatermark() method body.
//
// NOTE: The mark-compact collector rebuilds the remembered set after a
// collection. It reuses first a few words of the remembered set for
// bookkeeping relocation information.
// Some assertion macros used in the debugging mode.
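The comment block above describes the dirty-region bookkeeping abstractly. The following is a minimal sketch, not code from this change (the function name and scanning loop are invented for illustration), of how a collector could visit only the dirty regions of a normal old-space page and re-clean the marks, using the Page interface declared further down in this header:

  void ScanDirtyRegionsOfPage(Page* page) {
    uint32_t marks = page->GetRegionMarks();
    if (marks == Page::kAllRegionsCleanMarks) return;   // nothing to visit
    uint32_t new_marks = Page::kAllRegionsCleanMarks;
    Address area_start = page->ObjectAreaStart();
    Address area_end = page->AllocationWatermark();     // pointers above are not trusted
    for (int region = 0; region < kBitsPerInt; region++) {
      if ((marks & (1u << region)) == 0) continue;      // region is clean, skip it
      Address start = page->address() + region * Page::kRegionSize;
      Address end = start + Page::kRegionSize;
      if (start < area_start) start = area_start;       // clamp away the page header
      if (end > area_end) end = area_end;
      bool still_dirty = false;
      for (Address slot = start; slot < end; slot += kPointerSize) {
        Object** p = reinterpret_cast<Object**>(slot);
        if (Heap::InNewSpace(*p)) {
          // Visit/update *p here; if it still points to new space afterwards,
          // the region must stay dirty for the next scavenge.
          still_dirty = true;
        }
      }
      if (still_dirty) new_marks |= (1u << region);
    }
    page->SetRegionMarks(new_marks);
  }

Large object pages reuse the same 32 marks, so there several regions can map to a single bit and the corresponding scan is coarser.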
@ -91,25 +114,13 @@ class AllocationInfo;
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size. A page is divided into
// three areas: the first two words are used for bookkeeping, the next 248
// bytes are used as remembered set, and the rest of the page is the object
// area.
//
// Pointers are aligned to the pointer size (4), only 1 bit is needed
// for a pointer in the remembered set. Given an address, its remembered set
// bit position (offset from the start of the page) is calculated by dividing
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
// address is always aligned to the 8K page size.
//
// On the 64-bit platform, we add an offset to the start of the remembered set,
// and pointers are aligned to 8-byte pointer size. This means that we need
// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
// For this reason we add an offset to get room for the Page data at the start.
// Each page starts with a header of Page::kPageHeaderSize size which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The excact encoding is described in the comments for
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
@ -150,18 +161,25 @@ class Page {
// Return the end of allocation in this page. Undefined for unused pages.
inline Address AllocationTop();
// Return the allocation watermark for the page.
// For old space pages it is guaranteed that the area under the watermark
// does not contain any garbage pointers to new space.
inline Address AllocationWatermark();
// Return the allocation watermark offset from the beginning of the page.
inline uint32_t AllocationWatermarkOffset();
inline void SetAllocationWatermark(Address allocation_watermark);
inline void SetCachedAllocationWatermark(Address allocation_watermark);
inline Address CachedAllocationWatermark();
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
// Returns the end address (exclusive) of the object area in this page.
Address ObjectAreaEnd() { return address() + Page::kPageSize; }
// Returns the start address of the remembered set area.
Address RSetStart() { return address() + kRSetStartOffset; }
// Returns the end address of the remembered set area (exclusive).
Address RSetEnd() { return address() + kRSetEndOffset; }
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@ -193,33 +211,23 @@ class Page {
}
// ---------------------------------------------------------------------
// Remembered set support
// Card marking support
// Clears remembered set in this page.
inline void ClearRSet();
static const uint32_t kAllRegionsCleanMarks = 0x0;
static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
// Return the address of the remembered set word corresponding to an
// object address/offset pair, and the bit encoded as a single-bit
// mask in the output parameter 'bitmask'.
INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
uint32_t* bitmask));
inline uint32_t GetRegionMarks();
inline void SetRegionMarks(uint32_t dirty);
// Sets the corresponding remembered set bit for a given address.
INLINE(static void SetRSet(Address address, int offset));
inline uint32_t GetRegionMaskForAddress(Address addr);
inline int GetRegionNumberForAddress(Address addr);
// Clears the corresponding remembered set bit for a given address.
static inline void UnsetRSet(Address address, int offset);
inline void MarkRegionDirty(Address addr);
inline bool IsRegionDirty(Address addr);
// Checks whether the remembered set bit for a given address is set.
static inline bool IsRSetSet(Address address, int offset);
#ifdef DEBUG
// Use a state to mark whether remembered set space can be used for other
// purposes.
enum RSetState { IN_USE, NOT_IN_USE };
static bool is_rset_in_use() { return rset_state_ == IN_USE; }
static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif
inline void ClearRegionMarks(Address start,
Address end,
bool reaches_limit);
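The region lookups declared above reduce to simple arithmetic on the page offset. A plausible sketch of their bodies, given that pages are 8K-aligned and kRegionSizeLog2 is 8 (the actual inline definitions are not part of this hunk and may differ in detail):

  int Page::GetRegionNumberForAddress(Address addr) {
    // Offset of addr inside its page, divided by the 256 byte region size;
    // addresses on large pages simply wrap around the 32 available marks.
    int offset = static_cast<int>(OffsetFrom(addr) & kPageAlignmentMask);
    return (offset >> kRegionSizeLog2) & (kBitsPerInt - 1);
  }

  uint32_t Page::GetRegionMaskForAddress(Address addr) {
    return 1u << GetRegionNumberForAddress(addr);
  }

  void Page::MarkRegionDirty(Address addr) {
    SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(addr));
  }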
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@ -227,25 +235,11 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
// The offset of the remembered set in a page, in addition to the empty bytes
// formed as the remembered bits of the remembered set itself.
#ifdef V8_TARGET_ARCH_X64
static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
#else
static const int kRSetOffset = 0;
#endif
// The end offset of the remembered set in a page
// (heaps are aligned to pointer size).
static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
kIntSize + kPointerSize;
// The start offset of the object area in a page.
// This needs to be at least (bits per uint32_t) * kBitsPerPointer,
// to align start of rset to a uint32_t address.
static const int kObjectStartOffset = 256;
// The start offset of the used part of the remembered set in a page.
static const int kRSetStartOffset = kRSetOffset +
kObjectStartOffset / kBitsPerPointer;
static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
@ -253,13 +247,65 @@ class Page {
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
static const int kDirtyFlagOffset = 2 * kPointerSize;
static const int kRegionSizeLog2 = 8;
static const int kRegionSize = 1 << kRegionSizeLog2;
static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
enum PageFlag {
IS_NORMAL_PAGE = 1 << 0,
WAS_IN_USE_BEFORE_MC = 1 << 1
WAS_IN_USE_BEFORE_MC = 1 << 1,
// Page allocation watermark was bumped by preallocation during scavenge.
// The correct watermark can be retrieved by the CachedAllocationWatermark() method.
WATERMARK_INVALIDATED = 1 << 2
};
// To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
// scavenge we just invalidate the watermark on each old space page after
// processing it. Then, at the beginning of the next scavenge, we flip the
// meaning of the WATERMARK_INVALIDATED flag and each page is again marked as
// having a valid watermark.
//
// The following invariant must hold for pages in old pointer and map spaces:
// If page is in use then page is marked as having invalid watermark at
// the beginning and at the end of any GC.
//
// This invariant guarantees that after flipping flag meaning at the
// beginning of scavenge all pages in use will be marked as having valid
// watermark.
static inline void FlipMeaningOfInvalidatedWatermarkFlag();
// Returns true if the page allocation watermark was not altered during
// scavenge.
inline bool IsWatermarkValid();
inline void InvalidateWatermark(bool value);
inline bool GetPageFlag(PageFlag flag);
inline void SetPageFlag(PageFlag flag, bool value);
inline void ClearPageFlags();
inline void ClearGCFields();
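A sketch of how the flag flipping described above can be expressed in terms of the flags_ word and the watermark_invalidated_mark_ field declared below (illustrative only; the actual inline bodies are not shown in this diff):

  void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
    // Flip which bit value means "invalidated"; no page has to be touched.
    watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
  }

  bool Page::IsWatermarkValid() {
    return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
  }

  void Page::InvalidateWatermark(bool value) {
    // Set the bit to the value that currently means "invalidated" (or to its
    // complement when re-validating).
    intptr_t mark = value ? watermark_invalidated_mark_
                          : (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
    flags_ = (flags_ & ~WATERMARK_INVALIDATED) | mark;
  }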
static const int kAllocationWatermarkOffsetShift = 3;
static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
static const uint32_t kAllocationWatermarkOffsetMask =
((1 << kAllocationWatermarkOffsetBits) - 1) <<
kAllocationWatermarkOffsetShift;
static const uint32_t kFlagsMask =
((1 << kAllocationWatermarkOffsetShift) - 1);
STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
kAllocationWatermarkOffsetBits);
// This field contains the meaning of the WATERMARK_INVALIDATED flag.
// Instead of clearing this flag from all pages we just flip
// its meaning at the beginning of a scavenge.
static intptr_t watermark_invalidated_mark_;
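Given the constants above, the flags_ word declared in the header description below packs the page flags into its low kAllocationWatermarkOffsetShift bits and the allocation watermark offset into the bits directly above them. A simplified sketch of the accessors (it ignores the caching of a still-valid watermark that SetAllocationWatermark performs during scavenge, and the special case of the current allocation-top page):

  uint32_t Page::AllocationWatermarkOffset() {
    return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
                                 kAllocationWatermarkOffsetShift);
  }

  void Page::SetAllocationWatermark(Address allocation_watermark) {
    intptr_t offset = allocation_watermark - address();
    // Keep the flag bits, overwrite the offset bits above them.
    flags_ = (flags_ & kFlagsMask) | (offset << kAllocationWatermarkOffsetShift);
  }

  Address Page::AllocationWatermark() {
    return address() + AllocationWatermarkOffset();
  }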
//---------------------------------------------------------------------------
// Page header description.
@ -279,26 +325,24 @@ class Page {
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
// For normal pages this word is used to store various page flags.
int flags;
// For normal pages this word is used to store page flags and
// offset of allocation top.
intptr_t flags_;
// The following fields may overlap with remembered set, they can only
// be used in the mark-compact collector when remembered set is not
// used.
// This field contains dirty marks for regions covering the page. Only dirty
// regions might contain intergenerational references.
// Only 32 dirty marks are supported, so for large object pages several regions
// might be mapped to a single dirty mark.
uint32_t dirty_regions_;
// The index of the page in its owner space.
int mc_page_index;
// The allocation pointer after relocating objects to this page.
Address mc_relocation_top;
// The forwarding address of the first live object in this page.
// During mark-compact collections this field contains the forwarding address
// of the first live object in this page.
// During scavenge collection this field is used to store allocation watermark
// if it is altered during scavenge.
Address mc_first_forwarded;
#ifdef DEBUG
private:
static RSetState rset_state_; // state of the remembered set
#endif
};
@ -921,8 +965,7 @@ class PagedSpace : public Space {
// Checks whether page is currently in use by this space.
bool IsUsed(Page* page);
// Clears remembered sets of pages in this space.
void ClearRSet();
void MarkAllPagesClean();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@ -936,6 +979,11 @@ class PagedSpace : public Space {
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) = 0;
void FlushTopPageWatermark() {
AllocationTopPage()->SetCachedAllocationWatermark(top());
AllocationTopPage()->InvalidateWatermark(true);
}
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
@ -990,7 +1038,8 @@ class PagedSpace : public Space {
// Writes relocation info to the top page.
void MCWriteRelocationInfoToPage() {
TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
TopPageOf(mc_forwarding_info_)->
SetAllocationWatermark(mc_forwarding_info_.top);
}
// Computes the offset of a given address in this space to the beginning
@ -1108,8 +1157,6 @@ class PagedSpace : public Space {
#ifdef DEBUG
// Returns the number of total pages in this space.
int CountTotalPages();
void DoPrintRSet(const char* space_name);
#endif
private:
@ -1702,6 +1749,9 @@ class FixedSizeFreeList BASE_EMBEDDED {
// The head of the free list.
Address head_;
// The tail of the free list.
Address tail_;
// The identity of the owning space, for building allocation Failure
// objects.
AllocationSpace owner_;
@ -1762,8 +1812,6 @@ class OldSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
protected:
@ -1828,9 +1876,6 @@ class FixedSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
protected:
@ -1899,11 +1944,11 @@ class MapSpace : public FixedSpace {
PageIterator it(this, PageIterator::ALL_PAGES);
while (pages_left-- > 0) {
ASSERT(it.has_next());
it.next()->ClearRSet();
it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
ASSERT(it.has_next());
Page* top_page = it.next();
top_page->ClearRSet();
top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
ASSERT(top_page->is_valid());
int offset = live_maps % kMapsPerPage * Map::kSize;
@ -1994,9 +2039,8 @@ class LargeObjectChunk {
public:
// Allocates a new LargeObjectChunk that contains a large object page
// (Page::kPageSize aligned) that has at least size_in_bytes (for a large
// object and possibly extra remembered set words) bytes after the object
// area start of that page. The allocated chunk size is set in the output
// parameter chunk_size.
// object) bytes after the object area start of that page.
// The allocated chunk size is set in the output parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
Executability executable);
@ -2019,16 +2063,12 @@ class LargeObjectChunk {
// Returns the object in this chunk.
inline HeapObject* GetObject();
// Given a requested size (including any extra remembered set words),
// returns the physical size of a chunk to be allocated.
// Given a requested size returns the physical size of a chunk to be
// allocated.
static int ChunkSizeFor(int size_in_bytes);
// Given a chunk size, returns the object size it can accommodate (not
// including any extra remembered set words). Used by
// LargeObjectSpace::Available. Note that this can overestimate the size
// of object that will fit in a chunk---if the object requires extra
// remembered set words (eg, for large fixed arrays), the actual object
// size for the chunk will be smaller than reported by this function.
// Given a chunk size, returns the object size it can accommodate. Used by
// LargeObjectSpace::Available.
static int ObjectSizeFor(int chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
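As a quick worked example of the sizing arithmetic above (the values follow directly from the 8K page size in this header; the rationale in the comment is an interpretation, not text from the source):

  // ObjectSizeFor(Page::kPageSize) == 0, the chunk is too small to be useful.
  // ObjectSizeFor(64 * KB) == 64 * KB - Page::kPageSize - Page::kObjectStartOffset,
  // i.e. roughly one page of every chunk is set aside, presumably to cover the
  // page alignment of the embedded large object page plus its header.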
@ -2064,8 +2104,7 @@ class LargeObjectSpace : public Space {
// Allocates a large FixedArray.
Object* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space, not including any extra
// remembered set words.
// Available bytes for objects in this space.
int Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
@ -2083,11 +2122,8 @@ class LargeObjectSpace : public Space {
// space, may be slow.
Object* FindObject(Address a);
// Clears remembered sets.
void ClearRSet();
// Iterates objects whose remembered set bits are set.
void IterateRSet(ObjectSlotCallback func);
// Iterates objects covered by dirty regions.
void IterateDirtyRegions(ObjectSlotCallback func);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@ -2114,8 +2150,6 @@ class LargeObjectSpace : public Space {
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
// Checks whether an address is in the object area in this space. It
// iterates all objects in the space. May be slow.
@ -2134,10 +2168,6 @@ class LargeObjectSpace : public Space {
int object_size,
Executability executable);
// Returns the number of extra bytes (rounded up to the nearest full word)
// required for extra_object_bytes of extra pointers (in bytes).
static inline int ExtraRSetBytesFor(int extra_object_bytes);
friend class LargeObjectIterator;
public:

39
deps/v8/src/string.js

@ -62,26 +62,21 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
var char_code = %_FastCharCodeAt(this, pos);
if (!%_IsSmi(char_code)) {
var subject = TO_STRING_INLINE(this);
var index = TO_INTEGER(pos);
if (index >= subject.length || index < 0) return "";
char_code = %StringCharCodeAt(subject, index);
var result = %_StringCharAt(this, pos);
if (%_IsSmi(result)) {
result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
}
return %_CharFromCode(char_code);
return result;
}
// ECMA-262 section 15.5.4.5
function StringCharCodeAt(pos) {
var fast_answer = %_FastCharCodeAt(this, pos);
if (%_IsSmi(fast_answer)) {
return fast_answer;
var result = %_StringCharCodeAt(this, pos);
if (!%_IsSmi(result)) {
result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
}
var subject = TO_STRING_INLINE(this);
var index = TO_INTEGER(pos);
return %StringCharCodeAt(subject, index);
return result;
}
@ -214,11 +209,7 @@ function StringMatch(regexp) {
function SubString(string, start, end) {
// Use the one character string cache.
if (start + 1 == end) {
var char_code = %_FastCharCodeAt(string, start);
if (!%_IsSmi(char_code)) {
char_code = %StringCharCodeAt(string, start);
}
return %_CharFromCode(char_code);
return %_StringCharAt(string, start);
}
return %_SubString(string, start, end);
}
@ -322,10 +313,7 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
var expansion = '$';
var position = next + 1;
if (position < length) {
var peek = %_FastCharCodeAt(string, position);
if (!%_IsSmi(peek)) {
peek = %StringCharCodeAt(string, position);
}
var peek = %_StringCharCodeAt(string, position);
if (peek == 36) { // $$
++position;
builder.add('$');
@ -343,10 +331,7 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
++position;
var n = peek - 48;
if (position < length) {
peek = %_FastCharCodeAt(string, position);
if (!%_IsSmi(peek)) {
peek = %StringCharCodeAt(string, position);
}
peek = %_StringCharCodeAt(string, position);
// $nn, 01 <= nn <= 99
if (n != 0 && peek == 48 || peek >= 49 && peek <= 57) {
var nn = n * 10 + (peek - 48);
@ -824,7 +809,7 @@ function StringFromCharCode(code) {
var n = %_ArgumentsLength();
if (n == 1) {
if (!%_IsSmi(code)) code = ToNumber(code);
return %_CharFromCode(code & 0xffff);
return %_StringCharFromCode(code & 0xffff);
}
// NOTE: This is not super-efficient, but it is necessary because we

8
deps/v8/src/stub-cache.h

@ -568,9 +568,11 @@ class KeyedStoreStubCompiler: public StubCompiler {
// a builtin function on its instance prototype (the one the generator
// is set for), and a name of a generator itself (used to build ids
// and generator function names).
#define CUSTOM_CALL_IC_GENERATORS(V) \
V(array, push, ArrayPush) \
V(array, pop, ArrayPop)
#define CUSTOM_CALL_IC_GENERATORS(V) \
V(array, push, ArrayPush) \
V(array, pop, ArrayPop) \
V(string, charCodeAt, StringCharCodeAt) \
V(string, charAt, StringCharAt)
class CallStubCompiler: public StubCompiler {

4
deps/v8/src/v8.cc

@ -149,10 +149,10 @@ void V8::TearDown() {
Top::TearDown();
Heap::TearDown();
CpuProfiler::TearDown();
Heap::TearDown();
Logger::TearDown();
is_running_ = false;

20
deps/v8/src/v8natives.js

@ -492,23 +492,23 @@ PropertyDescriptor.prototype.hasSetter = function() {
function GetOwnProperty(obj, p) {
var desc = new PropertyDescriptor();
// An array with:
// obj is a data property [false, value, Writeable, Enumerable, Configurable]
// obj is an accessor [true, Get, Set, Enumerable, Configurable]
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
var props = %GetOwnProperty(ToObject(obj), ToString(p));
if (IS_UNDEFINED(props)) return void 0;
// This is an accessor
if (props[0]) {
desc.setGet(props[1]);
desc.setSet(props[2]);
if (props[IS_ACCESSOR_INDEX]) {
desc.setGet(props[GETTER_INDEX]);
desc.setSet(props[SETTER_INDEX]);
} else {
desc.setValue(props[1]);
desc.setWritable(props[2]);
desc.setValue(props[VALUE_INDEX]);
desc.setWritable(props[WRITABLE_INDEX]);
}
desc.setEnumerable(props[3]);
desc.setConfigurable(props[4]);
desc.setEnumerable(props[ENUMERABLE_INDEX]);
desc.setConfigurable(props[CONFIGURABLE_INDEX]);
return desc;
}

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 12
#define BUILD_NUMBER 13
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

20
deps/v8/src/virtual-frame-light-inl.h

@ -60,7 +60,7 @@ VirtualFrame::VirtualFrame(VirtualFrame* original)
register_allocation_map_(original->register_allocation_map_) { }
bool VirtualFrame::Equals(VirtualFrame* other) {
bool VirtualFrame::Equals(const VirtualFrame* other) {
ASSERT(element_count() == other->element_count());
if (top_of_stack_state_ != other->top_of_stack_state_) return false;
if (register_allocation_map_ != other->register_allocation_map_) return false;
@ -99,7 +99,9 @@ VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
}
CodeGenerator* VirtualFrame::cgen() { return CodeGeneratorScope::Current(); }
CodeGenerator* VirtualFrame::cgen() const {
return CodeGeneratorScope::Current();
}
MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
@ -112,15 +114,17 @@ void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
}
int VirtualFrame::parameter_count() {
int VirtualFrame::parameter_count() const {
return cgen()->scope()->num_parameters();
}
int VirtualFrame::local_count() { return cgen()->scope()->num_stack_slots(); }
int VirtualFrame::local_count() const {
return cgen()->scope()->num_stack_slots();
}
int VirtualFrame::frame_pointer() { return parameter_count() + 3; }
int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
int VirtualFrame::context_index() { return frame_pointer() - 1; }
@ -129,7 +133,7 @@ int VirtualFrame::context_index() { return frame_pointer() - 1; }
int VirtualFrame::function_index() { return frame_pointer() - 2; }
int VirtualFrame::local0_index() { return frame_pointer() + 2; }
int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
int VirtualFrame::fp_relative(int index) {
@ -139,12 +143,12 @@ int VirtualFrame::fp_relative(int index) {
}
int VirtualFrame::expression_base_index() {
int VirtualFrame::expression_base_index() const {
return local0_index() + local_count();
}
int VirtualFrame::height() {
int VirtualFrame::height() const {
return element_count() - expression_base_index();
}

2
deps/v8/src/x64/assembler-x64.h

@ -307,7 +307,7 @@ class Operand BASE_EMBEDDED {
private:
byte rex_;
byte buf_[10];
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;

44
deps/v8/src/x64/builtins-x64.cc

@ -308,7 +308,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// (tail-call) to the code in register edx without checking arguments.
__ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ cmpq(rax, rbx);
@ -525,15 +526,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
// Initialize the FixedArray and fill it with holes. FixedArray length is not
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, JSObject::kMapOffset),
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
Factory::fixed_array_map());
__ movq(FieldOperand(scratch1, Array::kLengthOffset),
Immediate(initial_capacity));
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@ -587,7 +588,6 @@ static void AllocateJSArray(MacroAssembler* masm,
JSFunction::kPrototypeOrInitialMapOffset));
// Check whether an empty sized array is requested.
__ SmiToInteger64(array_size, array_size);
__ testq(array_size, array_size);
__ j(not_zero, &not_empty);
@ -605,10 +605,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
__ bind(&not_empty);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
SmiIndex index =
masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
times_pointer_size,
array_size,
index.scale,
index.reg,
result,
elements_array_end,
scratch,
@ -620,43 +621,41 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array
// array_size: size of array (smi)
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ Move(elements_array, Factory::empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ Integer32ToSmi(scratch, array_size);
__ movq(FieldOperand(result, JSArray::kLengthOffset), scratch);
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// elements_array_end: start of next object
// array_size: size of array
// array_size: size of array (smi)
__ lea(elements_array, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
// Initialize the fixed array. FixedArray length is not stored as a smi.
// Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array
ASSERT(kSmiTag == 0);
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
__ testq(array_size, array_size);
__ SmiTest(array_size);
__ j(not_zero, &not_empty_2);
// Length of the FixedArray is the number of pre-allocated elements even
// though the actual JSArray has length 0.
__ movq(FieldOperand(elements_array, Array::kLengthOffset),
Immediate(kPreallocatedArrayElements));
__ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
Smi::FromInt(kPreallocatedArrayElements));
__ jmp(&fill_array);
__ bind(&not_empty_2);
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
__ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@ -1039,8 +1038,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
__ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
__ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
__ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
__ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject

705
deps/v8/src/x64/codegen-x64.cc

File diff suppressed because it is too large

39
deps/v8/src/x64/codegen-x64.h

@ -571,10 +571,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateCharFromCode(ZoneList<Expression*>* args);
void GenerateStringCharFromCode(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -846,38 +849,6 @@ class GenericBinaryOpStub: public CodeStub {
class StringHelper : public AllStatic {
public:
// Generates fast code for getting a char code out of a string
// object at the given index. May bail out for four reasons (in the
// listed order):
// * Receiver is not a string (receiver_not_string label).
// * Index is not a smi (index_not_smi label).
// * Index is out of range (index_out_of_range).
// * Some other reason (slow_case label). In this case it's
// guaranteed that the above conditions are not violated,
// e.g. it's safe to assume the receiver is a string and the
// index is a non-negative smi < length.
// When successful, object, index, and scratch are clobbered.
// Otherwise, scratch and result are clobbered.
static void GenerateFastCharCodeAt(MacroAssembler* masm,
Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_smi,
Label* index_out_of_range,
Label* slow_case);
// Generates code for creating a one-char string from the given char
// code. May do a runtime call, so any register can be clobbered
// and, if the given invoke flag specifies a call, an internal frame
// is required. In tail call mode the result must be rax register.
static void GenerateCharFromCode(MacroAssembler* masm,
Register code,
Register result,
Register scratch,
InvokeFlag flag);
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much

196
deps/v8/src/x64/full-codegen-x64.cc

@ -1010,7 +1010,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rax); // Map.
__ push(rdx); // Enumeration cache.
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
__ push(rax); // Enumeration cache length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
@ -1020,7 +1019,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(Smi::FromInt(0)); // Map (0) - force slow check.
__ push(rax);
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
@ -1906,76 +1904,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
Handle<String> name = expr->name();
if (strcmp("_IsSmi", *name->ToCString()) == 0) {
EmitIsSmi(expr->arguments());
} else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
EmitIsNonNegativeSmi(expr->arguments());
} else if (strcmp("_IsObject", *name->ToCString()) == 0) {
EmitIsObject(expr->arguments());
} else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
EmitIsUndetectableObject(expr->arguments());
} else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
EmitIsFunction(expr->arguments());
} else if (strcmp("_IsArray", *name->ToCString()) == 0) {
EmitIsArray(expr->arguments());
} else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
EmitIsRegExp(expr->arguments());
} else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
EmitIsConstructCall(expr->arguments());
} else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
EmitObjectEquals(expr->arguments());
} else if (strcmp("_Arguments", *name->ToCString()) == 0) {
EmitArguments(expr->arguments());
} else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
EmitArgumentsLength(expr->arguments());
} else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
EmitClassOf(expr->arguments());
} else if (strcmp("_Log", *name->ToCString()) == 0) {
EmitLog(expr->arguments());
} else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
EmitRandomHeapNumber(expr->arguments());
} else if (strcmp("_SubString", *name->ToCString()) == 0) {
EmitSubString(expr->arguments());
} else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
EmitRegExpExec(expr->arguments());
} else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
EmitValueOf(expr->arguments());
} else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
EmitSetValueOf(expr->arguments());
} else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
EmitNumberToString(expr->arguments());
} else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
EmitCharFromCode(expr->arguments());
} else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
EmitFastCharCodeAt(expr->arguments());
} else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
EmitStringAdd(expr->arguments());
} else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
EmitStringCompare(expr->arguments());
} else if (strcmp("_MathPow", *name->ToCString()) == 0) {
EmitMathPow(expr->arguments());
} else if (strcmp("_MathSin", *name->ToCString()) == 0) {
EmitMathSin(expr->arguments());
} else if (strcmp("_MathCos", *name->ToCString()) == 0) {
EmitMathCos(expr->arguments());
} else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
EmitMathSqrt(expr->arguments());
} else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
EmitCallFunction(expr->arguments());
} else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
EmitRegExpConstructResult(expr->arguments());
} else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
EmitSwapElements(expr->arguments());
} else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
EmitGetFromCache(expr->arguments());
} else {
UNREACHABLE();
}
}
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@ -2414,46 +2342,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label slow_case, done;
// Fast case of Heap::LookupSingleCharacterStringFromCode.
__ JumpIfNotSmi(rax, &slow_case);
__ SmiToInteger32(rcx, rax);
__ cmpl(rcx, Immediate(String::kMaxAsciiCharCode));
__ j(above, &slow_case);
Label done;
StringCharFromCodeGenerator generator(rax, rbx);
generator.GenerateFast(masm_);
__ jmp(&done);
__ Move(rbx, Factory::single_character_string_cache());
__ movq(rbx, FieldOperand(rbx,
rcx,
times_pointer_size,
FixedArray::kHeaderSize));
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &slow_case);
__ movq(rax, rbx);
__ bind(&done);
Apply(context_, rbx);
}
void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForValue(args->at(0), kStack);
VisitForValue(args->at(1), kAccumulator);
Register object = rbx;
Register index = rax;
Register scratch = rcx;
Register result = rdx;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharCodeAtGenerator generator(object,
index,
scratch,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
__ jmp(&done);
__ bind(&slow_case);
__ push(rax);
__ CallRuntime(Runtime::kCharFromCode, 1);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
__ LoadRoot(result, Heap::kNanValueRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
// Move the undefined value into the result register, which will
// trigger conversion.
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, rax);
Apply(context_, result);
}
void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
// TODO(fsc): Port the complete implementation from the classic back-end.
// Move the undefined value into the result register, which will
// trigger the slow case.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
Apply(context_, rax);
void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForValue(args->at(0), kStack);
VisitForValue(args->at(1), kAccumulator);
Register object = rbx;
Register index = rax;
Register scratch1 = rcx;
Register scratch2 = rdx;
Register result = rax;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharAtGenerator generator(object,
index,
scratch1,
scratch2,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
__ jmp(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
__ LoadRoot(result, Heap::kEmptyStringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
__ Move(result, Smi::FromInt(0));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
Apply(context_, result);
}
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());

131
deps/v8/src/x64/ic-x64.cc

@ -104,8 +104,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
__ movq(r2, FieldOperand(r0, kCapacityOffset));
__ SmiToInteger32(r2, r2);
__ SmiToInteger32(r2, FieldOperand(r0, kCapacityOffset));
__ decl(r2);
// Generate an unrolled loop that performs a few probes before
@ -165,11 +164,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// r1 - used to hold the capacity mask of the dictionary
//
@ -202,8 +201,8 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ xorl(r0, r1);
// Compute capacity mask.
__ movq(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
__ SmiToInteger32(r1, r1);
__ SmiToInteger32(r1,
FieldOperand(elements, NumberDictionary::kCapacityOffset));
__ decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
@ -245,7 +244,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
__ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@ -351,7 +350,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
@ -377,23 +376,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
// Save key in rbx in case we want it for the number dictionary
// case.
__ movq(rbx, rax);
__ SmiToInteger32(rax, rax);
// Get the elements array of the object.
__ bind(&index_int);
__ bind(&index_smi);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
// Fast case: Do the load.
__ movq(rax, Operand(rcx, rax, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
__ movq(rax, FieldOperand(rcx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
@ -402,12 +401,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
// Check whether the elements is a pixel array.
// rax: untagged index
// rax: key
// rcx: elements array
__ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &check_number_dictionary);
__ SmiToInteger32(rax, rax);
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
@ -417,13 +417,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// rax: untagged index
// rbx: key
// rax: key
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rbx, rax, rdx, rdi);
__ SmiToInteger32(rbx, rax);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, rdx, rdi);
__ ret(0);
// Slow case: Load name and receiver from stack and jump to runtime.
@ -512,78 +512,46 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
__ movl(rax, rbx);
__ and_(rax, Immediate(String::kArrayIndexHashMask));
__ shrl(rax, Immediate(String::kHashShift));
__ jmp(&index_int);
// We want the smi-tagged index in rax.
__ and_(rbx, Immediate(String::kArrayIndexValueMask));
__ shr(rbx, Immediate(String::kHashShift));
__ Integer32ToSmi(rax, rbx);
__ jmp(&index_smi);
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[8] : name (index)
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
Label index_not_smi;
Label index_out_of_range;
Label slow_char_code;
Label got_char_code;
Register receiver = rdx;
Register index = rax;
Register code = rbx;
Register scratch = rcx;
Register scratch1 = rbx;
Register scratch2 = rcx;
Register result = rax;
__ movq(index, Operand(rsp, 1 * kPointerSize));
__ movq(receiver, Operand(rsp, 2 * kPointerSize));
StringHelper::GenerateFastCharCodeAt(masm,
receiver,
index,
scratch,
code,
&miss, // When not a string.
&index_not_smi,
&index_out_of_range,
&slow_char_code);
// If we didn't bail out, code register contains smi tagged char
// code.
__ bind(&got_char_code);
StringHelper::GenerateCharFromCode(masm, code, rax, scratch, JUMP_FUNCTION);
#ifdef DEBUG
__ Abort("Unexpected fall-through from char from code tail call");
#endif
// Check if key is a heap number.
__ bind(&index_not_smi);
__ CompareRoot(FieldOperand(index, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &miss);
// Push receiver and key on the stack (now that we know they are a
// string and a number), and call runtime.
__ bind(&slow_char_code);
__ EnterInternalFrame();
__ push(receiver);
__ push(index);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
ASSERT(!code.is(rax));
__ movq(code, rax);
__ LeaveInternalFrame();
StringCharAtGenerator char_at_generator(receiver,
index,
scratch1,
scratch2,
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ ret(0);
// Check if the runtime call returned NaN char code. If yes, return
// undefined. Otherwise, we can continue.
if (FLAG_debug_code) {
ASSERT(kSmiTag == 0);
__ JumpIfSmi(code, &got_char_code);
__ CompareRoot(FieldOperand(code, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ Assert(equal, "StringCharCodeAt must return smi or heap number");
}
__ CompareRoot(code, Heap::kNanValueRootIndex);
__ j(not_equal, &got_char_code);
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@ -852,9 +820,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ SmiToInteger32(rdi, rcx);
__ cmpl(rdi, FieldOperand(rbx, Array::kLengthOffset));
__ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// rax: value
// rbx: FixedArray
// rcx: index (as a smi)
@ -903,11 +869,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rcx: index (as a smi)
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
__ SmiToInteger64(rdi, rcx);
__ cmpl(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
__ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Increment and restore smi-tag.
__ Integer64PlusConstantToSmi(rdi, rdi, 1);
// Increment index to get new length.
__ SmiAddConstant(rdi, rcx, Smi::FromInt(1));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast);
@ -936,16 +901,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
__ movq(Operand(rbx, index.reg, index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
__ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
rax);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address.
SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
__ movq(Operand(rbx, index2.reg, index2.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
__ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
rax);
__ movq(rdx, rax);
__ RecordWriteNonSmi(rbx, 0, rdx, rcx);

169
deps/v8/src/x64/macro-assembler-x64.cc

@ -90,58 +90,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
Label fast;
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
ASSERT(is_int32(~Page::kPageAlignmentMask));
and_(object,
Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
Register page_start = object;
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
subq(addr, page_start);
shr(addr, Immediate(kPointerSizeLog2));
Register pointer_offset = addr;
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
j(below, &fast);
// We have a large object containing pointers. It must be a FixedArray.
// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.
// Load the array length into 'scratch'.
movl(scratch,
Operand(page_start,
Page::kObjectStartOffset + FixedArray::kLengthOffset));
Register array_length = scratch;
// Extra remembered set starts right after the large object (a FixedArray), at
// page_start + kObjectStartOffset + objectSize
// where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
lea(page_start,
Operand(page_start, array_length, times_pointer_size,
Page::kObjectStartOffset + FixedArray::kHeaderSize
- Page::kRSetEndOffset));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bind(&fast);
bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}
// Set the remembered set bit for [object+offset].
and_(object, Immediate(~Page::kPageAlignmentMask));
// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
and_(addr, Immediate(Page::kPageAlignmentMask));
shrl(addr, Immediate(Page::kRegionSizeLog2));
// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the smi_index register contains the array index into
// the elements array represented as a smi. Otherwise it can be used as a
@ -156,9 +119,8 @@ void MacroAssembler::RecordWrite(Register object,
// registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
JumpIfSmi(value, &done);
@ -191,8 +153,8 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
bind(&okay);
}
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
// Test that the object address is not in the new space. We cannot
// update page dirty marks for new space pages.
InNewSpace(object, scratch, equal, &done);
// The offset is relative to a tagged or untagged HeapObject pointer,
@ -201,48 +163,19 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
// We use optimized write barrier code if the word being written to is not in
// a large object page, or is in the first "page" of a large object page.
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
// Compute the bit offset in the remembered set, leave it in 'scratch'.
lea(scratch, Operand(object, offset));
ASSERT(is_int32(Page::kPageAlignmentMask));
and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
shr(scratch, Immediate(kPointerSizeLog2));
// Compute the page address from the heap object pointer, leave it in
// 'object' (immediate value is sign extended).
and_(object, Immediate(~Page::kPageAlignmentMask));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bts(Operand(object, Page::kRSetOffset), scratch);
Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
index.reg,
index.scale,
FixedArray::kHeaderSize));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
RecordWriteHelper(object, dst, scratch);
} else {
RecordWriteStub stub(object, dst, scratch);
CallStub(&stub);
}
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
index.reg,
index.scale,
FixedArray::kHeaderSize));
}
RecordWriteHelper(object, dst, scratch);
bind(&done);
@ -573,6 +506,11 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
}
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
}
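The new Operand overload, like the SmiCompare and SmiAddConstant changes further down, relies on the x64 smi encoding in which the 32-bit payload lives in the upper half of the word and the low 32 bits are the (zero) tag, so the payload can be read with a plain 32-bit load at byte offset kSmiShift / kBitsPerByte == 4. A concrete illustration (not code from this change):

  // With kSmiTag == 0 and kSmiShift == 32, Smi::FromInt(7) is stored in memory
  // as the 64-bit word 0x0000000700000000. On a little-endian machine the four
  // bytes at offset 4 hold exactly the integer 7, which is what
  //   movl(dst, Operand(src, kSmiShift / kBitsPerByte));
  // reads back, with no shifting required.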
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
@ -614,7 +552,7 @@ void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
cmpl(Operand(dst, kIntSize), Immediate(src->value()));
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
@ -638,6 +576,18 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
}
void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power) {
ASSERT((0 <= power) && (power < 32));
if (dst.is(src)) {
shr(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
}
Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
@ -916,7 +866,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
addl(Operand(dst, kIntSize), Immediate(constant->value()));
addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
}
}
@ -2594,7 +2544,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
movl(FieldOperand(result, String::kHashFieldOffset),
movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@ -2632,7 +2582,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
movl(FieldOperand(result, String::kHashFieldOffset),
movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@ -2691,20 +2641,27 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// On Windows stack slots are reserved by the caller for all arguments
// including the ones passed in registers. On Linux 6 arguments are passed in
// registers and the caller does not reserve stack slots for them.
// On Windows 64 stack slots are reserved by the caller for all arguments
// including the ones passed in registers, and space is always allocated for
// the four register arguments even if the function takes fewer than four
// arguments.
// On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
// and the caller does not reserve stack slots for them.
ASSERT(num_arguments >= 0);
#ifdef _WIN64
static const int kArgumentsWithoutStackSlot = 0;
static const int kMinimumStackSlots = 4;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
return num_arguments;
#else
static const int kArgumentsWithoutStackSlot = 6;
static const int kRegisterPassedArguments = 6;
if (num_arguments < kRegisterPassedArguments) return 0;
return num_arguments - kRegisterPassedArguments;
#endif
return num_arguments > kArgumentsWithoutStackSlot ?
num_arguments - kArgumentsWithoutStackSlot : 0;
}
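A quick sanity sketch of the slot counts the rewritten function produces; the helper names below are invented for illustration and are not part of the source:

  // Mirrors ArgumentStackSlotsForCFunctionCall for the two ABIs.
  int SlotsWin64(int n) { return n < 4 ? 4 : n; }      // always shadow space for 4
  int SlotsSysV(int n)  { return n < 6 ? 0 : n - 6; }  // first 6 args in registers

  // SlotsWin64(3) == 4, SlotsWin64(8) == 8
  // SlotsSysV(3)  == 0, SlotsSysV(8)  == 2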
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);

16
deps/v8/src/x64/macro-assembler-x64.h

@ -78,8 +78,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
// Set the remebered set bit for an address which points into an
// object. RecordWriteHelper only works if the object is not in new
// For page containing |object| mark region covering |addr| dirty.
// RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@ -93,7 +93,7 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* branch);
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
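A hedged sketch of the call pattern this comment describes, with illustrative register and field choices that are not taken from this diff: the pointer is stored first, then the barrier marks the region covering the slot dirty.
  __ movq(FieldOperand(rdx, JSObject::kPropertiesOffset), rax);  // store the pointer
  __ RecordWrite(rdx, JSObject::kPropertiesOffset, rax, rbx);    // mark the covering region dirty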
@ -103,7 +103,7 @@ class MacroAssembler: public Assembler {
Register value,
Register scratch);
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@ -210,6 +210,7 @@ class MacroAssembler: public Assembler {
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
void SmiToInteger32(Register dst, const Operand& src);
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
@ -220,6 +221,13 @@ class MacroAssembler: public Assembler {
Register src,
int power);
// Divide a positive smi's integer value by a power of two.
// Provides result as 32-bit integer value.
void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power);
// Simple comparison of smis.
void SmiCompare(Register dst, Register src);
void SmiCompare(Register dst, Smi* src);

392
deps/v8/src/x64/stub-cache-x64.cc

@ -375,206 +375,6 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
template <class Compiler>
static void CompileLoadInterceptor(Compiler* compiler,
StubCompiler* stub_compiler,
MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
stub_compiler->CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
if (lookup->IsProperty() && lookup->IsCacheable()) {
compiler->CompileCacheable(masm,
stub_compiler,
receiver,
reg,
scratch1,
scratch2,
holder,
lookup,
name,
miss);
} else {
compiler->CompileRegular(masm,
receiver,
reg,
scratch2,
holder,
miss);
}
}
class LoadInterceptorCompiler BASE_EMBEDDED {
public:
explicit LoadInterceptorCompiler(Register name) : name_(name) {}
void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
Register receiver,
Register holder,
Register scratch1,
Register scratch2,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
Label* miss_label) {
AccessorInfo* callback = NULL;
bool optimize = false;
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
if (lookup->type() == FIELD) {
optimize = true;
} else if (lookup->type() == CALLBACKS) {
Object* callback_object = lookup->GetCallbackObject();
if (callback_object->IsAccessorInfo()) {
callback = AccessorInfo::cast(callback_object);
optimize = callback->getter() != NULL;
}
}
if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
miss_label);
return;
}
// Note: starting a frame here makes GC aware of pointers pushed below.
__ EnterInternalFrame();
if (lookup->type() == CALLBACKS) {
__ push(receiver);
}
__ push(holder);
__ push(name_);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
__ bind(&interceptor_failed);
__ pop(name_);
__ pop(holder);
if (lookup->type() == CALLBACKS) {
__ pop(receiver);
}
__ LeaveInternalFrame();
if (lookup->type() == FIELD) {
// We found FIELD property in prototype chain of interceptor's holder.
// Check that the maps from interceptor's holder to field's holder
// haven't changed...
holder = stub_compiler->CheckPrototypes(interceptor_holder,
holder,
lookup->holder(),
scratch1,
scratch2,
name,
miss_label);
// ... and retrieve a field from field's holder.
stub_compiler->GenerateFastPropertyLoad(masm,
rax,
holder,
lookup->holder(),
lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Prepare for tail call. Push receiver to stack after return address.
Label cleanup;
__ pop(scratch2); // return address
__ push(receiver);
__ push(scratch2);
// Check that the maps from interceptor's holder to callback's holder
// haven't changed.
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);
// Continue tail call preparation: push remaining parameters after
// return address.
__ pop(scratch2); // return address
__ push(holder);
__ Move(holder, Handle<AccessorInfo>(callback));
__ push(holder);
__ push(FieldOperand(holder, AccessorInfo::kDataOffset));
__ push(name_);
__ push(scratch2); // restore return address
// Tail call to runtime.
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
// Clean up code: we pushed receiver after return address and
// need to remove it from there.
__ bind(&cleanup);
__ pop(scratch1); // return address
__ pop(scratch2); // receiver
__ push(scratch1);
}
}
void CompileRegular(MacroAssembler* masm,
Register receiver,
Register holder,
Register scratch,
JSObject* interceptor_holder,
Label* miss_label) {
__ pop(scratch); // save old return address
PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ push(scratch); // restore old return address
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}
private:
Register name_;
};
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@ -761,9 +561,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
depth1, miss);
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@ -776,10 +576,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(),
scratch1, scratch2, name,
depth2, miss);
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
scratch2, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for the API (the object that is instanceof for the signature).
// It is safe to omit it here because, if present, it will already
// have been fetched by the previous CheckPrototypes.
ASSERT(depth2 == kInvalidProtoDepth);
}
// Invoke function.
if (can_do_fast_api_call) {
@ -1148,7 +955,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@ -1156,8 +963,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ SmiAddConstant(rax, rax, Smi::FromInt(argc));
// Get the element's length into rcx.
__ movl(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rcx, rcx);
__ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ SmiCompare(rax, rcx);
@ -1176,12 +982,12 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(Operand(rdx, 0), rcx);
// Check if value is a smi.
__ JumpIfNotSmi(rcx, &with_rset_update);
__ JumpIfNotSmi(rcx, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_rset_update);
__ bind(&with_write_barrier);
__ InNewSpace(rbx, rcx, equal, &exit);
@ -1229,11 +1035,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
__ addl(FieldOperand(rbx, FixedArray::kLengthOffset),
Immediate(kAllocationDelta));
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Elements are in new space, so no remembered set updates are necessary.
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
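Two things drive this hunk: the length of the elements FixedArray is now stored as a smi, which is why the raw addl of kAllocationDelta becomes a SmiAddConstant (and why the earlier movl plus Integer32ToSmi pair collapses into a single movq); and the write barrier can be skipped because dirty-region marks only need to record pointers from old space into new space, while a store into an object that itself lives in new space is reached by the scavenger anyway.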
@ -1339,6 +1145,25 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}
Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// TODO(722): implement this.
return Heap::undefined_value();
}
Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// TODO(722): implement this.
return Heap::undefined_value();
}
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
@ -2117,9 +1942,8 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@ -2127,18 +1951,128 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
LoadInterceptorCompiler compiler(name_reg);
CompileLoadInterceptor(&compiler,
this,
masm(),
object,
holder,
name,
lookup,
receiver,
scratch1,
scratch2,
miss);
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// So far the most popular follow-ups for interceptor loads are FIELD
// and CALLBACKS, so only those are inlined; other cases may be added
// later.
bool compile_followup_inline = false;
if (lookup->IsProperty() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo() &&
AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
compile_followup_inline = true;
}
}
if (compile_followup_inline) {
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
__ EnterInternalFrame();
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ push(receiver);
}
__ push(holder_reg);
__ push(name_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see the caller
// of this method).
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
name_reg,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
__ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
__ LeaveInternalFrame();
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed, and load lookup's holder into the |holder_reg| register.
if (interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
lookup->holder(),
scratch1,
scratch2,
name,
miss);
}
if (lookup->type() == FIELD) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), rax, holder_reg,
lookup->holder(), lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
__ pop(scratch2); // return address
__ push(receiver);
__ push(holder_reg);
__ Move(holder_reg, Handle<AccessorInfo>(callback));
__ push(holder_reg);
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
__ push(name_reg);
__ push(scratch2); // restore return address
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
__ pop(scratch2); // save old return address
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
__ push(scratch2); // restore old return address
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}
}

2
deps/v8/src/x64/virtual-frame-x64.h

@ -590,7 +590,7 @@ class VirtualFrame : public ZoneObject {
inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;
friend class FrameRegisterState;
friend class JumpTarget;
};

37
deps/v8/test/cctest/test-api.cc

@ -6245,12 +6245,25 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
templ->SetAccessor(v8_str("y"), Return239);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
// Check the case when receiver and interceptor's holder
// are the same objects.
v8::Handle<Value> value = CompileRun(
"var result = 0;"
"for (var i = 0; i < 7; i++) {"
" result = o.y;"
"}");
CHECK_EQ(239, value->Int32Value());
// Check the case when interceptor's holder is in proto chain
// of receiver.
value = CompileRun(
"r = { __proto__: o };"
"var result = 0;"
"for (var i = 0; i < 7; i++) {"
" result = r.y;"
"}");
CHECK_EQ(239, value->Int32Value());
}
@ -6265,6 +6278,8 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
context->Global()->Set(v8_str("p"), templ_p->NewInstance());
// Check the case when receiver and interceptor's holder
// are the same objects.
v8::Handle<Value> value = CompileRun(
"o.__proto__ = p;"
"var result = 0;"
@ -6272,6 +6287,16 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
" result = o.x + o.y;"
"}");
CHECK_EQ(239 + 42, value->Int32Value());
// Check the case when interceptor's holder is in proto chain
// of receiver.
value = CompileRun(
"r = { __proto__: o };"
"var result = 0;"
"for (var i = 0; i < 7; i++) {"
" result = r.x + r.y;"
"}");
CHECK_EQ(239 + 42, value->Int32Value());
}
@ -7203,6 +7228,18 @@ THREADED_TEST(NullIndexedInterceptor) {
}
THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
v8::HandleScope scope;
v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
templ->InstanceTemplate()->SetNamedPropertyHandler(InterceptorLoadXICGetter);
LocalContext env;
env->Global()->Set(v8_str("obj"),
templ->GetFunction()->NewInstance());
ExpectTrue("obj.x === 42");
ExpectTrue("!obj.propertyIsEnumerable('x')");
}
static v8::Handle<Value> ParentGetter(Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();

36
deps/v8/test/cctest/test-assembler-arm.cc

@ -280,4 +280,40 @@ TEST(4) {
}
}
TEST(5) {
// Test the ARMv7 bitfield instructions.
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
// On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
__ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555
__ sbfx(r0, r0, 0, 5); // 0b11..111111110101 = -11
__ bfc(r0, 1, 3); // 0b11..111111110001 = -15
__ mov(r1, Operand(7));
__ bfi(r0, r1, 3, 3); // 0b11..111111111001 = -7
__ mov(pc, Operand(lr));
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
#endif
F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(
CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(-7, res);
}
}
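A quick arithmetic check of the expected values in this test (not part of the source): 0xAAAAAAAA has 1s in every odd bit position, so ubfx with lsb 1 and width 12 extracts 0b010101010101 = 0x555; sbfx then takes the low 5 bits 0b10101 and sign-extends them, giving 21 - 32 = -11; bfc clears bits 1 through 3, turning ...11110101 into ...11110001 = -15; and bfi inserts 0b111 from r1 at bits 3 through 5, producing ...11111001 = -7, which is the value the CHECK_EQ expects.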
#undef __

46
deps/v8/test/cctest/test-disasm-arm.cc

@ -289,3 +289,49 @@ TEST(Type1) {
VERIFY_RUN();
}
TEST(Type3) {
SETUP();
if (CpuFeatures::IsSupported(ARMv7)) {
COMPARE(ubfx(r0, r1, 5, 10),
"e7e902d1 ubfx r0, r1, #5, #10");
COMPARE(ubfx(r1, r0, 5, 10),
"e7e912d0 ubfx r1, r0, #5, #10");
COMPARE(ubfx(r0, r1, 31, 1),
"e7e00fd1 ubfx r0, r1, #31, #1");
COMPARE(ubfx(r1, r0, 31, 1),
"e7e01fd0 ubfx r1, r0, #31, #1");
COMPARE(sbfx(r0, r1, 5, 10),
"e7a902d1 sbfx r0, r1, #5, #10");
COMPARE(sbfx(r1, r0, 5, 10),
"e7a912d0 sbfx r1, r0, #5, #10");
COMPARE(sbfx(r0, r1, 31, 1),
"e7a00fd1 sbfx r0, r1, #31, #1");
COMPARE(sbfx(r1, r0, 31, 1),
"e7a01fd0 sbfx r1, r0, #31, #1");
COMPARE(bfc(r0, 5, 10),
"e7ce029f bfc r0, #5, #10");
COMPARE(bfc(r1, 5, 10),
"e7ce129f bfc r1, #5, #10");
COMPARE(bfc(r0, 31, 1),
"e7df0f9f bfc r0, #31, #1");
COMPARE(bfc(r1, 31, 1),
"e7df1f9f bfc r1, #31, #1");
COMPARE(bfi(r0, r1, 5, 10),
"e7ce0291 bfi r0, r1, #5, #10");
COMPARE(bfi(r1, r0, 5, 10),
"e7ce1290 bfi r1, r0, #5, #10");
COMPARE(bfi(r0, r1, 31, 1),
"e7df0f91 bfi r0, r1, #31, #1");
COMPARE(bfi(r1, r0, 31, 1),
"e7df1f90 bfi r1, r0, #31, #1");
}
VERIFY_RUN();
}

26
deps/v8/test/cctest/test-heap.cc

@ -177,7 +177,7 @@ TEST(HeapObjects) {
TEST(Tagging) {
InitializeVM();
int request = 24;
CHECK_EQ(request, static_cast<int>(OBJECT_SIZE_ALIGN(request)));
CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi());
CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
@ -666,14 +666,14 @@ TEST(JSArray) {
array->SetElementsLength(*length);
uint32_t int_length = 0;
CHECK(Array::IndexFromObject(*length, &int_length));
CHECK(length->ToArrayIndex(&int_length));
CHECK_EQ(*length, array->length());
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
array->SetElement(int_length, *name);
uint32_t new_int_length = 0;
CHECK(Array::IndexFromObject(array->length(), &new_int_length));
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
CHECK_EQ(array->GetElement(int_length), *name);
CHECK_EQ(array->GetElement(0), *name);
@ -830,7 +830,7 @@ TEST(LargeObjectSpaceContains) {
}
CHECK(bytes_to_page > FixedArray::kHeaderSize);
int* flags_ptr = &Page::FromAddress(next_page)->flags;
intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
Address flags_addr = reinterpret_cast<Address>(flags_ptr);
int bytes_to_allocate =
@ -888,7 +888,7 @@ TEST(Regression39128) {
// The plan: create JSObject which references objects in new space.
// Then clone this object (forcing it to go into old space) and check
// that only bits pertaining to the object are updated in remembered set.
// that region dirty marks are updated correctly.
// Step 1: prepare a map for the object. We add 1 inobject property to it.
Handle<JSFunction> object_ctor(Top::global_context()->object_function());
@ -931,7 +931,7 @@ TEST(Regression39128) {
CHECK(!object->IsFailure());
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, jsobject->elements()->length());
CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
CHECK_EQ(0, jsobject->properties()->length());
// Create a reference to object in new space in jsobject.
jsobject->FastPropertyAtPut(-1, array);
@ -951,17 +951,9 @@ TEST(Regression39128) {
}
CHECK(Heap::old_pointer_space()->Contains(clone->address()));
// Step 5: verify validity of remembered set.
// Step 5: verify validity of region dirty marks.
Address clone_addr = clone->address();
Page* page = Page::FromAddress(clone_addr);
// Check that remembered set tracks a reference from inobject property 1.
CHECK(page->IsRSetSet(clone_addr, object_size - kPointerSize));
// Probe several addresses after the object.
for (int i = 0; i < 7; i++) {
int offset = object_size + i * kPointerSize;
if (clone_addr + offset >= page->ObjectAreaEnd()) {
break;
}
CHECK(!page->IsRSetSet(clone_addr, offset));
}
// Check that region covering inobject property 1 is marked dirty.
CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
}

22
deps/v8/test/cctest/test-spaces.cc

@ -32,40 +32,32 @@
using namespace v8::internal;
static void VerifyRSet(Address page_start) {
#ifdef DEBUG
Page::set_rset_state(Page::IN_USE);
#endif
static void VerifyRegionMarking(Address page_start) {
Page* p = Page::FromAddress(page_start);
p->ClearRSet();
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
CHECK(!Page::IsRSetSet(addr, 0));
CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
}
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
Page::SetRSet(addr, 0);
Page::FromAddress(addr)->MarkRegionDirty(addr);
}
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
CHECK(Page::IsRSetSet(addr, 0));
CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
TEST(Page) {
#ifdef DEBUG
Page::set_rset_state(Page::NOT_IN_USE);
#endif
byte* mem = NewArray<byte>(2*Page::kPageSize);
CHECK(mem != NULL);
@ -90,8 +82,8 @@ TEST(Page) {
CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());
// test remember set
VerifyRSet(page_start);
// test region marking
VerifyRegionMarking(page_start);
DeleteArray(mem);
}

8
deps/v8/test/es5conform/es5conform.status

@ -200,11 +200,6 @@ chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-34: FAIL_OK
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
# getOwnPropertyDescriptor not implemented on array indices
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-b-1: FAIL_OK
# We fail this because Object.keys returns numbers for element indices
# rather than strings.
@ -260,9 +255,6 @@ chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1: FAIL_OK
# Same as 15.4.4.16-7-7
chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-7: FAIL_OK
# Uses a array index number as a property
chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-c-iii-1: FAIL_OK
chapter15/15.5: UNIMPLEMENTED
chapter15/15.6: UNIMPLEMENTED

64
deps/v8/test/mjsunit/get-own-property-descriptor.js

@ -25,15 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function get(){return x}
function set(x){this.x=x};
// This file only tests very simple descriptors that always have
// configurable, enumerable, and writable set to true.
// A range of more elaborate tests are performed in
// object-define-property.js
var obj = {x:1};
function get() { return x; }
function set(x) { this.x = x; }
var obj = {x: 1};
obj.__defineGetter__("accessor", get);
obj.__defineSetter__("accessor", set);
var a = new Array();
a[1] = 42;
obj[1] = 42;
var descIsData = Object.getOwnPropertyDescriptor(obj,'x');
var descIsData = Object.getOwnPropertyDescriptor(obj, 'x');
assertTrue(descIsData.enumerable);
assertTrue(descIsData.writable);
assertTrue(descIsData.configurable);
@ -49,3 +56,50 @@ assertTrue(descIsNotData == undefined);
var descIsNotAccessor = Object.getOwnPropertyDescriptor(obj, 'not-accessor');
assertTrue(descIsNotAccessor == undefined);
var descArray = Object.getOwnPropertyDescriptor(a, '1');
assertTrue(descArray.enumerable);
assertTrue(descArray.configurable);
assertTrue(descArray.writable);
assertEquals(descArray.value, 42);
var descObjectElement = Object.getOwnPropertyDescriptor(obj, '1');
assertTrue(descObjectElement.enumerable);
assertTrue(descObjectElement.configurable);
assertTrue(descObjectElement.writable);
assertEquals(descObjectElement.value, 42);
// String objects.
var a = new String('foobar');
for (var i = 0; i < a.length; i++) {
var descStringObject = Object.getOwnPropertyDescriptor(a, i);
assertFalse(descStringObject.enumerable);
assertFalse(descStringObject.configurable);
assertFalse(descStringObject.writable);
assertEquals(descStringObject.value, a.substring(i, i+1));
}
// Support for additional attributes on string objects.
a.x = 42;
a[10] = 'foo';
var descStringProperty = Object.getOwnPropertyDescriptor(a, 'x');
assertTrue(descStringProperty.enumerable);
assertTrue(descStringProperty.configurable);
assertTrue(descStringProperty.writable);
assertEquals(descStringProperty.value, 42);
var descStringElement = Object.getOwnPropertyDescriptor(a, '10');
assertTrue(descStringElement.enumerable);
assertTrue(descStringElement.configurable);
assertTrue(descStringElement.writable);
assertEquals(descStringElement.value, 'foo');
// Test that elements in the prototype chain are not returned.
var proto = {};
proto[10] = 42;
var objWithProto = new Array();
objWithProto.prototype = proto;
objWithProto[0] = 'bar';
var descWithProto = Object.getOwnPropertyDescriptor(objWithProto, '10');
assertEquals(undefined, descWithProto);

228
deps/v8/test/mjsunit/string-charat.js

@ -27,29 +27,52 @@
var s = "test";
assertEquals("t", s.charAt());
assertEquals("t", s.charAt("string"));
assertEquals("t", s.charAt(null));
assertEquals("t", s.charAt(void 0));
assertEquals("t", s.charAt(false));
assertEquals("e", s.charAt(true));
assertEquals("", s.charAt(-1));
assertEquals("", s.charAt(4));
assertEquals("t", s.charAt(0));
assertEquals("t", s.charAt(3));
assertEquals("t", s.charAt(NaN));
assertEquals(116, s.charCodeAt());
assertEquals(116, s.charCodeAt("string"));
assertEquals(116, s.charCodeAt(null));
assertEquals(116, s.charCodeAt(void 0));
assertEquals(116, s.charCodeAt(false));
assertEquals(101, s.charCodeAt(true));
assertEquals(116, s.charCodeAt(0));
assertEquals(116, s.charCodeAt(3));
assertEquals(116, s.charCodeAt(NaN));
assertTrue(isNaN(s.charCodeAt(-1)));
assertTrue(isNaN(s.charCodeAt(4)));
var slowIndex1 = { valueOf: function() { return 1; } };
var slowIndex2 = { toString: function() { return "2"; } };
var slowIndexOutOfRange = { valueOf: function() { return -1; } };
function basicTest() {
assertEquals("t", s.charAt());
assertEquals("t", s.charAt("string"));
assertEquals("t", s.charAt(null));
assertEquals("t", s.charAt(void 0));
assertEquals("t", s.charAt(false));
assertEquals("e", s.charAt(true));
assertEquals("", s.charAt(-1));
assertEquals("", s.charAt(4));
assertEquals("", s.charAt(slowIndexOutOfRange));
assertEquals("", s.charAt(1/0));
assertEquals("", s.charAt(-1/0));
assertEquals("t", s.charAt(0));
assertEquals("t", s.charAt(-0.0));
assertEquals("t", s.charAt(0.4));
assertEquals("e", s.charAt(slowIndex1));
assertEquals("s", s.charAt(slowIndex2));
assertEquals("t", s.charAt(3));
assertEquals("t", s.charAt(3.4));
assertEquals("t", s.charAt(NaN));
assertEquals(116, s.charCodeAt());
assertEquals(116, s.charCodeAt("string"));
assertEquals(116, s.charCodeAt(null));
assertEquals(116, s.charCodeAt(void 0));
assertEquals(116, s.charCodeAt(false));
assertEquals(101, s.charCodeAt(true));
assertEquals(116, s.charCodeAt(0));
assertEquals(116, s.charCodeAt(-0.0));
assertEquals(116, s.charCodeAt(0.4));
assertEquals(101, s.charCodeAt(slowIndex1));
assertEquals(115, s.charCodeAt(slowIndex2));
assertEquals(116, s.charCodeAt(3));
assertEquals(116, s.charCodeAt(3.4));
assertEquals(116, s.charCodeAt(NaN));
assertTrue(isNaN(s.charCodeAt(-1)));
assertTrue(isNaN(s.charCodeAt(4)));
assertTrue(isNaN(s.charCodeAt(slowIndexOutOfRange)));
assertTrue(isNaN(s.charCodeAt(1/0)));
assertTrue(isNaN(s.charCodeAt(-1/0)));
}
basicTest();
// Make sure enough of the one-char string cache is filled.
var alpha = ['@'];
@ -64,3 +87,162 @@ for (var i = 1; i < 128; i++) {
assertEquals(alpha[i], alphaStr.charAt(i));
assertEquals(String.fromCharCode(i), alphaStr.charAt(i));
}
// Test stealing String.prototype.{charAt,charCodeAt}.
var o = {
charAt: String.prototype.charAt,
charCodeAt: String.prototype.charCodeAt,
toString: function() { return "012"; },
valueOf: function() { return "should not be called"; }
};
function stealTest() {
assertEquals("0", o.charAt(0));
assertEquals("1", o.charAt(1));
assertEquals("1", o.charAt(1.4));
assertEquals("1", o.charAt(slowIndex1));
assertEquals("2", o.charAt(2));
assertEquals("2", o.charAt(slowIndex2));
assertEquals(48, o.charCodeAt(0));
assertEquals(49, o.charCodeAt(1));
assertEquals(49, o.charCodeAt(1.4));
assertEquals(49, o.charCodeAt(slowIndex1));
assertEquals(50, o.charCodeAt(2));
assertEquals(50, o.charCodeAt(slowIndex2));
assertEquals("", o.charAt(-1));
assertEquals("", o.charAt(-1.4));
assertEquals("", o.charAt(10));
assertEquals("", o.charAt(slowIndexOutOfRange));
assertTrue(isNaN(o.charCodeAt(-1)));
assertTrue(isNaN(o.charCodeAt(-1.4)));
assertTrue(isNaN(o.charCodeAt(10)));
assertTrue(isNaN(o.charCodeAt(slowIndexOutOfRange)));
}
stealTest();
// Test custom string IC-s.
for (var i = 0; i < 20; i++) {
basicTest();
stealTest();
}
var badToString = function() { return []; };
function testBadToString_charAt() {
var goodToString = o.toString;
var hasCaught = false;
var numCalls = 0;
var result;
try {
for (var i = 0; i < 20; i++) {
if (i == 10) o.toString = o.valueOf = badToString;
result = o.charAt(1);
numCalls++;
}
} catch (e) {
hasCaught = true;
} finally {
o.toString = goodToString;
}
assertTrue(hasCaught);
assertEquals("1", result);
assertEquals(10, numCalls);
}
testBadToString_charAt();
function testBadToString_charCodeAt() {
var goodToString = o.toString;
var hasCaught = false;
var numCalls = 0;
var result;
try {
for (var i = 0; i < 20; i++) {
if (i == 10) o.toString = o.valueOf = badToString;
result = o.charCodeAt(1);
numCalls++;
}
} catch (e) {
hasCaught = true;
} finally {
o.toString = goodToString;
}
assertTrue(hasCaught);
assertEquals(49, result);
assertEquals(10, numCalls);
}
testBadToString_charCodeAt();
var badIndex = {
toString: badToString,
valueOf: badToString
};
function testBadIndex_charAt() {
var index = 1;
var hasCaught = false;
var numCalls = 0;
var result;
try {
for (var i = 0; i < 20; i++) {
if (i == 10) index = badIndex;
result = o.charAt(index);
numCalls++;
}
} catch (e) {
hasCaught = true;
}
assertTrue(hasCaught);
assertEquals("1", result);
assertEquals(10, numCalls);
}
testBadIndex_charAt();
function testBadIndex_charCodeAt() {
var index = 1;
var hasCaught = false;
var numCalls = 0;
var result;
try {
for (var i = 0; i < 20; i++) {
if (i == 10) index = badIndex;
result = o.charCodeAt(index);
numCalls++;
}
} catch (e) {
hasCaught = true;
}
assertTrue(hasCaught);
assertEquals(49, result);
assertEquals(10, numCalls);
}
testBadIndex_charCodeAt();
function testPrototypeChange_charAt() {
var result, oldResult;
for (var i = 0; i < 20; i++) {
if (i == 10) {
oldResult = result;
String.prototype.charAt = function() { return "%"; };
}
result = s.charAt(1);
}
assertEquals("%", result);
assertEquals("e", oldResult);
delete String.prototype.charAt; // Restore the default.
}
testPrototypeChange_charAt();
function testPrototypeChange_charCodeAt() {
var result, oldResult;
for (var i = 0; i < 20; i++) {
if (i == 10) {
oldResult = result;
String.prototype.charCodeAt = function() { return 42; };
}
result = s.charCodeAt(1);
}
assertEquals(42, result);
assertEquals(101, oldResult);
delete String.prototype.charCodeAt; // Restore the default.
}
testPrototypeChange_charCodeAt();

24
deps/v8/test/mjsunit/string-index.js

@ -207,6 +207,28 @@ for (var i = 0; i < 100; ++i) {
assertEquals(expected, actual);
}
// Test negative zero case.
var keys = [0, -0.0];
var str = 'ab', arr = ['a', 'a'];
for (var i = 0; i < 100; ++i) {
var index = Math.floor(i / 50);
var key = keys[index];
var expected = arr[index];
var actual = str[key];
assertEquals(expected, actual);
}
// Test "not-an-array-index" case.
var keys = [0, 0.5];
var str = 'ab', arr = ['a', undefined];
for (var i = 0; i < 100; ++i) {
var index = Math.floor(i / 50);
var key = keys[index];
var expected = arr[index];
var actual = str[key];
assertEquals(expected, actual);
}
// Test out of range case.
var keys = [0, -1];
var str = 'ab', arr = ['a', undefined];
@ -234,4 +256,4 @@ for (var i = 0; i < 50; ++i) {
var expected = arr[0];
var actual = str[0];
assertEquals(expected, actual);
}
}
