Upgrade V8 to 3.8.0

v0.7.4-release
Ryan Dahl, 13 years ago
parent
commit b3a7de15b7
  1. deps/v8/ChangeLog (12)
  2. deps/v8/build/common.gypi (1)
  3. deps/v8/include/v8-profiler.h (3)
  4. deps/v8/src/api.cc (57)
  5. deps/v8/src/arm/builtins-arm.cc (7)
  6. deps/v8/src/arm/code-stubs-arm.cc (287)
  7. deps/v8/src/arm/full-codegen-arm.cc (8)
  8. deps/v8/src/arm/ic-arm.cc (3)
  9. deps/v8/src/arm/lithium-arm.cc (15)
  10. deps/v8/src/arm/lithium-arm.h (2)
  11. deps/v8/src/arm/lithium-codegen-arm.cc (227)
  12. deps/v8/src/arm/macro-assembler-arm.cc (15)
  13. deps/v8/src/arm/macro-assembler-arm.h (2)
  14. deps/v8/src/arm/stub-cache-arm.cc (20)
  15. deps/v8/src/assembler.cc (14)
  16. deps/v8/src/ast.cc (2)
  17. deps/v8/src/ast.h (8)
  18. deps/v8/src/bootstrapper.cc (2)
  19. deps/v8/src/builtins.cc (72)
  20. deps/v8/src/code-stubs.cc (61)
  21. deps/v8/src/code-stubs.h (28)
  22. deps/v8/src/compiler.cc (2)
  23. deps/v8/src/debug-agent.cc (2)
  24. deps/v8/src/debug-agent.h (1)
  25. deps/v8/src/debug.cc (34)
  26. deps/v8/src/elements.cc (38)
  27. deps/v8/src/elements.h (17)
  28. deps/v8/src/factory.cc (28)
  29. deps/v8/src/factory.h (13)
  30. deps/v8/src/frames.cc (17)
  31. deps/v8/src/heap-inl.h (3)
  32. deps/v8/src/heap.cc (115)
  33. deps/v8/src/heap.h (1)
  34. deps/v8/src/hydrogen-instructions.cc (5)
  35. deps/v8/src/hydrogen-instructions.h (54)
  36. deps/v8/src/hydrogen.cc (194)
  37. deps/v8/src/ia32/assembler-ia32.cc (138)
  38. deps/v8/src/ia32/assembler-ia32.h (9)
  39. deps/v8/src/ia32/builtins-ia32.cc (50)
  40. deps/v8/src/ia32/code-stubs-ia32.cc (623)
  41. deps/v8/src/ia32/debug-ia32.cc (4)
  42. deps/v8/src/ia32/deoptimizer-ia32.cc (8)
  43. deps/v8/src/ia32/disasm-ia32.cc (47)
  44. deps/v8/src/ia32/full-codegen-ia32.cc (6)
  45. deps/v8/src/ia32/ic-ia32.cc (3)
  46. deps/v8/src/ia32/lithium-codegen-ia32.cc (276)
  47. deps/v8/src/ia32/lithium-codegen-ia32.h (8)
  48. deps/v8/src/ia32/lithium-ia32.cc (24)
  49. deps/v8/src/ia32/lithium-ia32.h (19)
  50. deps/v8/src/ia32/macro-assembler-ia32.cc (32)
  51. deps/v8/src/ia32/macro-assembler-ia32.h (7)
  52. deps/v8/src/ia32/stub-cache-ia32.cc (26)
  53. deps/v8/src/ic-inl.h (4)
  54. deps/v8/src/ic.cc (89)
  55. deps/v8/src/ic.h (27)
  56. deps/v8/src/mark-compact.cc (149)
  57. deps/v8/src/mark-compact.h (12)
  58. deps/v8/src/messages.js (1)
  59. deps/v8/src/mips/code-stubs-mips.cc (595)
  60. deps/v8/src/mips/codegen-mips.cc (92)
  61. deps/v8/src/mips/codegen-mips.h (15)
  62. deps/v8/src/mips/full-codegen-mips.cc (101)
  63. deps/v8/src/mips/ic-mips.cc (3)
  64. deps/v8/src/mips/lithium-codegen-mips.cc (400)
  65. deps/v8/src/mips/lithium-codegen-mips.h (7)
  66. deps/v8/src/mips/lithium-mips.cc (29)
  67. deps/v8/src/mips/lithium-mips.h (18)
  68. deps/v8/src/mips/macro-assembler-mips.cc (15)
  69. deps/v8/src/mips/macro-assembler-mips.h (1)
  70. deps/v8/src/mips/stub-cache-mips.cc (20)
  71. deps/v8/src/objects-inl.h (161)
  72. deps/v8/src/objects.cc (138)
  73. deps/v8/src/objects.h (85)
  74. deps/v8/src/parser.cc (49)
  75. deps/v8/src/parser.h (5)
  76. deps/v8/src/platform-posix.cc (5)
  77. deps/v8/src/platform-win32.cc (5)
  78. deps/v8/src/platform.h (4)
  79. deps/v8/src/preparser.cc (1)
  80. deps/v8/src/profile-generator-inl.h (20)
  81. deps/v8/src/profile-generator.cc (242)
  82. deps/v8/src/profile-generator.h (31)
  83. deps/v8/src/runtime.cc (43)
  84. deps/v8/src/scopes.cc (43)
  85. deps/v8/src/scopes.h (5)
  86. deps/v8/src/spaces.cc (6)
  87. deps/v8/src/store-buffer.cc (55)
  88. deps/v8/src/store-buffer.h (4)
  89. deps/v8/src/stub-cache.cc (4)
  90. deps/v8/src/stub-cache.h (10)
  91. deps/v8/src/type-info.cc (15)
  92. deps/v8/src/type-info.h (1)
  93. deps/v8/src/v8natives.js (106)
  94. deps/v8/src/v8threads.h (2)
  95. deps/v8/src/version.cc (4)
  96. deps/v8/src/x64/assembler-x64.cc (166)
  97. deps/v8/src/x64/assembler-x64.h (7)
  98. deps/v8/src/x64/builtins-x64.cc (3)
  99. deps/v8/src/x64/code-stubs-x64.cc (414)
  100. deps/v8/src/x64/debug-x64.cc (4)

deps/v8/ChangeLog (12)

@@ -1,3 +1,15 @@
2011-12-13: Version 3.8.0
Fixed handling of arrays in DefineOwnProperty. (issue 1756)
Sync parser and preparser on do-while and return statements.
(issue 1856)
Fixed another corner case for DefineOwnProperty on arrays (issue 1756).
Stability and performance improvements on all platforms.
2011-12-01: Version 3.7.12
Increase tick interval for the android platform.
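
As a rough JavaScript illustration of the DefineOwnProperty fixes above (a hypothetical sketch of the ES5.1 15.4.5.1 array behavior involved, not the actual regression tests from issue 1756):

  // Object.defineProperty on arrays must route through the array-specific
  // [[DefineOwnProperty]], which couples index properties with 'length'.
  var a = [1, 2, 3];
  Object.defineProperty(a, "1", { value: 42 });      // redefine an index
  Object.defineProperty(a, "length", { value: 2 });  // shrink via length
  // a is now [1, 42]; defining an index >= length must also grow it:
  Object.defineProperty(a, "5", { value: 7 });
  // a.length === 6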

deps/v8/build/common.gypi (1)

@@ -303,6 +303,7 @@
}],
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},

deps/v8/include/v8-profiler.h (3)

@@ -219,8 +219,9 @@ class V8EXPORT HeapGraphEdge {
// (e.g. parts of a ConsString).
kHidden = 4, // A link that is needed for proper sizes
// calculation, but may be hidden from user.
kShortcut = 5 // A link that must not be followed during
kShortcut = 5, // A link that must not be followed during
// sizes calculation.
kWeak = 6 // A weak reference (ignored by the GC).
};
/** Returns edge type (see HeapGraphEdge::Type). */

deps/v8/src/api.cc (57)

@@ -1462,31 +1462,35 @@ Local<Script> Script::New(v8::Handle<String> source,
ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
LOG_API(isolate, "Script::New");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
int line_offset = 0;
int column_offset = 0;
if (origin != NULL) {
if (!origin->ResourceName().IsEmpty()) {
name_obj = Utils::OpenHandle(*origin->ResourceName());
}
if (!origin->ResourceLineOffset().IsEmpty()) {
line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
i::SharedFunctionInfo* raw_result = NULL;
{ i::HandleScope scope(isolate);
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
int line_offset = 0;
int column_offset = 0;
if (origin != NULL) {
if (!origin->ResourceName().IsEmpty()) {
name_obj = Utils::OpenHandle(*origin->ResourceName());
}
if (!origin->ResourceLineOffset().IsEmpty()) {
line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
}
if (!origin->ResourceColumnOffset().IsEmpty()) {
column_offset =
static_cast<int>(origin->ResourceColumnOffset()->Value());
}
}
if (!origin->ResourceColumnOffset().IsEmpty()) {
column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
EXCEPTION_PREAMBLE(isolate);
i::ScriptDataImpl* pre_data_impl =
static_cast<i::ScriptDataImpl*>(pre_data);
// We assert that the pre-data is sane, even though we can actually
// handle it if it turns out not to be in release mode.
ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
// If the pre-data isn't sane we simply ignore it
if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
pre_data_impl = NULL;
}
}
EXCEPTION_PREAMBLE(isolate);
i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
// We assert that the pre-data is sane, even though we can actually
// handle it if it turns out not to be in release mode.
ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
// If the pre-data isn't sane we simply ignore it
if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
pre_data_impl = NULL;
}
i::Handle<i::SharedFunctionInfo> result =
i::Handle<i::SharedFunctionInfo> result =
i::Compiler::Compile(str,
name_obj,
line_offset,
@@ -1495,8 +1499,11 @@ Local<Script> Script::New(v8::Handle<String> source,
pre_data_impl,
Utils::OpenHandle(*script_data),
i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
raw_result = *result;
}
i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
return Local<Script>(ToApi<Script>(result));
}

deps/v8/src/arm/builtins-arm.cc (7)

@@ -394,13 +394,18 @@ static void ArrayNativeCode(MacroAssembler* masm,
// r5: elements_array_end (untagged)
// sp[0]: last argument
Label loop, entry;
__ mov(r7, sp);
__ jmp(&entry);
__ bind(&loop);
__ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
__ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(r2, call_generic_code);
}
__ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
__ bind(&entry);
__ cmp(r4, r5);
__ b(lt, &loop);
__ mov(sp, r7);
// Remove caller arguments and receiver from the stack, setup return value and
// return.

deps/v8/src/arm/code-stubs-arm.cc (287)

@@ -3455,110 +3455,202 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime;
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label base_not_smi;
Label exponent_not_smi;
Label convert_exponent;
const Register base = r0;
const Register exponent = r1;
const Register heapnumbermap = r5;
const Register heapnumber = r6;
const DoubleRegister double_base = d0;
const DoubleRegister double_exponent = d1;
const DoubleRegister double_result = d2;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
const Register scratch2 = r7;
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
CpuFeatures::Scope vfp3_scope(VFP3);
const Register base = r1;
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
const DoubleRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
const Register scratch2 = r7;
Label call_runtime, done, exponent_not_smi, int_exponent;
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack to double registers.
__ ldr(base, MemOperand(sp, 1 * kPointerSize));
__ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
// Convert base to double value and store it in d0.
__ JumpIfNotSmi(base, &base_not_smi);
// Base is a Smi. Untag and convert it.
__ SmiUntag(base);
__ vmov(single_scratch, base);
__ vcvt_f64_s32(double_base, single_scratch);
__ b(&convert_exponent);
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
__ bind(&base_not_smi);
__ JumpIfSmi(base, &base_is_smi);
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
// Base is a heapnumber. Load it into double register.
__ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent);
__ bind(&base_is_smi);
__ SmiUntag(base);
__ vmov(single_scratch, base);
__ vcvt_f64_s32(double_base, single_scratch);
__ bind(&unpack_exponent);
__ bind(&convert_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
// The base is in a double register and the exponent is
// an untagged smi. Allocate a heap number and call a
// C function for integer exponents. The register containing
// the heap number is callee-saved.
__ AllocateHeapNumber(heapnumber,
scratch,
scratch2,
heapnumbermap,
&call_runtime);
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::power_double_int_function(masm->isolate()),
1, 1);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
__ Ret(2 * kPointerSize);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
// Exponent is a heapnumber. Load it into double register.
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
// Base is already in double_base.
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
if (exponent_type_ != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ vcvt_u32_f64(single_scratch, double_exponent);
// We do not check for NaN or Infinity here because comparing numbers on
// ARM correctly distinguishes NaNs. We end up calling the built-in.
__ vcvt_f64_u32(double_scratch, single_scratch);
__ VFPCompareAndSetFlags(double_scratch, double_exponent);
__ b(eq, &int_exponent_convert);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
Label not_plus_half;
// Test for 0.5.
__ vmov(double_scratch, 0.5);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &not_plus_half);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
__ vmov(double_scratch, -V8_INFINITY);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vneg(double_result, double_scratch, eq);
__ b(eq, &done);
// Add +0 to convert -0 to +0.
__ vadd(double_scratch, double_base, kDoubleRegZero);
__ vsqrt(double_result, double_scratch);
__ jmp(&done);
__ bind(&not_plus_half);
__ vmov(double_scratch, -0.5);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &call_runtime);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
__ vmov(double_scratch, -V8_INFINITY);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vmov(double_result, kDoubleRegZero, eq);
__ b(eq, &done);
// Add +0 to convert -0 to +0.
__ vadd(double_scratch, double_base, kDoubleRegZero);
__ vmov(double_result, 1);
__ vsqrt(double_scratch, double_scratch);
__ vdiv(double_result, double_result, double_scratch);
__ jmp(&done);
}
// The base and the exponent are in double registers.
// Allocate a heap number and call a C function for
// double exponents. The register containing
// the heap number is callee-saved.
__ AllocateHeapNumber(heapnumber,
scratch,
scratch2,
heapnumbermap,
&call_runtime);
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
__ vcvt_u32_f64(single_scratch, double_exponent);
__ vmov(exponent, single_scratch);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
__ mov(scratch, exponent); // Back up exponent.
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, 1.0);
// Get absolute value of exponent.
__ cmp(scratch, Operand(0));
__ mov(scratch2, Operand(0), LeaveCC, mi);
__ sub(scratch, scratch2, scratch, LeaveCC, mi);
Label while_true;
__ bind(&while_true);
__ mov(scratch, Operand(scratch, ASR, 1), SetCC);
__ vmul(double_result, double_result, double_scratch, cs);
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
__ cmp(exponent, Operand(0));
__ b(ge, &done);
__ vmov(double_scratch, 1.0);
__ vdiv(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ VFPCompareAndSetFlags(double_result, 0.0);
__ b(ne, &done);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it to the exponent value before bailing out.
__ vmov(single_scratch, exponent);
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
__ Ret(2 * kPointerSize);
}
ASSERT(heapnumber.is(r0));
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret(2);
} else {
__ push(lr);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
}
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -6628,26 +6720,47 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r0);
__ push(lr);
void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r2, Operand(known_map_));
__ b(ne, &miss);
__ cmp(r3, Operand(known_map_));
__ b(ne, &miss);
__ sub(r0, r0, Operand(r1));
__ Ret();
__ bind(&miss);
GenerateMiss(masm);
}
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ push(lr);
__ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
__ pop(r0);
__ pop(r1);
}
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
__ pop(r0);
__ pop(r1);
__ Jump(r2);
}
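
The rewritten MathPowStub implements the ECMA-262 15.8.2.13 corner cases its comments cite. In observable JavaScript terms (illustrative expectations only, not code from this commit):

  Math.pow(-Infinity, 0.5);   // Infinity, while Math.sqrt(-Infinity) is NaN
  Math.pow(-Infinity, -0.5);  // 0
  Math.pow(-0, 0.5);          // 0: +0 is added first to convert -0 to +0
  Math.pow(1, Infinity);      // NaN per the spec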

deps/v8/src/arm/full-codegen-arm.cc (8)

@@ -2938,8 +2938,12 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
MathPowStub stub;
__ CallStub(&stub);
if (CpuFeatures::IsSupported(VFP3)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
context()->Plug(r0);
}

deps/v8/src/arm/ic-arm.cc (3)

@@ -1587,6 +1587,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
if (state == KNOWN_OBJECTS) {
stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
}
rewritten = stub.GetCode();
}
set_target(*rewritten);

deps/v8/src/arm/lithium-arm.cc (15)

@@ -1153,6 +1153,11 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
} else if (op == kMathPowHalf) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
return DefineFixedDouble(result, d2);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@@ -1166,8 +1171,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@@ -1402,7 +1405,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), d2) :
UseFixed(instr->right(), r0);
UseFixed(instr->right(), r2);
LPower* result = new LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d3),
instr,
@@ -1795,7 +1798,8 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1809,7 +1813,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
LInstruction* result = new LStoreContextSlot(context, value);
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}

deps/v8/src/arm/lithium-arm.h (2)

@@ -1793,6 +1793,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
LOperand* value() { return InputAt(0); }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};

deps/v8/src/arm/lithium-codegen-arm.cc (227)

@@ -321,7 +321,22 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
if (op->IsRegister()) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
__ mov(scratch, ToOperand(op));
LConstantOperand* const_op = LConstantOperand::cast(op);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
Abort("EmitLoadRegister: Unsupported double immediate.");
} else {
ASSERT(r.IsTagged());
if (literal->IsSmi()) {
__ mov(scratch, Operand(literal));
} else {
__ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
}
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
__ ldr(scratch, ToMemOperand(op));
@@ -1337,8 +1352,13 @@
void LCodeGen::DoConstantT(LConstantT* instr) {
ASSERT(instr->result()->IsRegister());
__ mov(ToRegister(instr->result()), Operand(instr->value()));
Handle<Object> value = instr->value();
if (value->IsSmi()) {
__ mov(ToRegister(instr->result()), Operand(value));
} else {
__ LoadHeapObject(ToRegister(instr->result()),
Handle<HeapObject>::cast(value));
}
}
@@ -2164,7 +2184,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// offset to the location of the map check.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(r4));
__ mov(InstanceofStub::right(), Operand(instr->function()));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 4;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@@ -2263,21 +2283,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
// Cells are always in the remembered set.
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteField(scratch,
JSGlobalPropertyCell::kValueOffset,
value,
scratch2,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET,
check_needed);
}
// Cells are always rescanned, so no write barrier here.
}
@@ -2297,6 +2303,11 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr->environment());
}
}
@@ -2304,6 +2315,13 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
MemOperand target = ContextOperand(context, instr->slot_index());
if (instr->hydrogen()->RequiresHoleCheck()) {
Register scratch = scratch0();
__ ldr(scratch, target);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
DeoptimizeIf(eq, instr->environment());
}
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
@@ -2355,7 +2373,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
LoadHeapObject(result, Handle<HeapObject>::cast(function));
__ LoadHeapObject(result, function);
}
}
@@ -2800,7 +2818,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
LoadHeapObject(result, instr->hydrogen()->closure());
__ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2868,7 +2886,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r1, Operand(instr->function()));
__ LoadHeapObject(r1, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -3053,11 +3071,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ and_(scratch, result, Operand(HeapNumber::kSignMask));
__ Vmov(double_scratch0(), 0.5);
__ vadd(input, input, double_scratch0());
__ vadd(double_scratch0(), input, double_scratch0());
// Check sign of the result: if the sign changed, the input
// value was in ]0.5, 0[ and the result should be -0.
__ vmov(result, input.high());
__ vmov(result, double_scratch0().high());
__ eor(result, result, Operand(scratch), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(mi, instr->environment());
@@ -3068,7 +3086,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ EmitVFPTruncate(kRoundToMinusInf,
double_scratch0().low(),
input,
double_scratch0(),
result,
scratch);
DeoptimizeIf(ne, instr->environment());
@@ -3097,68 +3115,53 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
// Math.sqrt(-Infinity) == NaN
Label done;
__ vmov(temp, -V8_INFINITY);
__ VFPCompareAndSetFlags(input, temp);
__ vneg(result, temp, eq);
__ b(&done, eq);
// Add +0 to convert -0 to +0.
__ vadd(result, input, kDoubleRegZero);
__ vsqrt(result, result);
__ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
Register scratch = scratch0();
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
// Prepare arguments and call C function.
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(ToDoubleRegister(left),
ToDoubleRegister(right));
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()), 0, 2);
} else if (exponent_type.IsInteger32()) {
ASSERT(ToRegister(right).is(r0));
// Prepare arguments and call C function.
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
__ CallCFunction(
ExternalReference::power_double_int_function(isolate()), 1, 1);
} else {
ASSERT(exponent_type.IsTagged());
ASSERT(instr->hydrogen()->left()->representation().IsDouble());
Register right_reg = ToRegister(right);
// Check for smi on the right hand side.
Label non_smi, call;
__ JumpIfNotSmi(right_reg, &non_smi);
// Untag smi and convert it to a double.
__ SmiUntag(right_reg);
SwVfpRegister single_scratch = double_scratch0().low();
__ vmov(single_scratch, right_reg);
__ vcvt_f64_s32(result_reg, single_scratch);
__ jmp(&call);
// Heap number map check.
__ bind(&non_smi);
__ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
ToDoubleRegister(instr->InputAt(1)).is(d2));
ASSERT(!instr->InputAt(1)->IsRegister() ||
ToRegister(instr->InputAt(1)).is(r2));
ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
ASSERT(ToDoubleRegister(instr->result()).is(d3));
if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(r2, &no_deopt);
__ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
__ cmp(r7, Operand(ip));
DeoptimizeIf(ne, instr->environment());
int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
__ add(scratch, right_reg, Operand(value_offset));
__ vldr(result_reg, scratch, 0);
// Prepare arguments and call C function.
__ bind(&call);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()), 0, 2);
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
MathPowStub stub(MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
MathPowStub stub(MathPowStub::DOUBLE);
__ CallStub(&stub);
}
// Store the result in the result register.
__ GetCFunctionDoubleResult(result_reg);
}
@@ -3294,7 +3297,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r1, Operand(instr->target()));
__ LoadHeapObject(r1, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -4118,9 +4121,18 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(instr->hydrogen()->target()));
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(target);
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
__ cmp(reg, ip);
} else {
__ cmp(reg, Operand(target));
}
DeoptimizeIf(ne, instr->environment());
}
@@ -4189,19 +4201,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
void LCodeGen::LoadHeapObject(Register result,
Handle<HeapObject> object) {
if (heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(object);
__ mov(result, Operand(cell));
__ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
__ mov(result, Operand(object));
}
}
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
@@ -4210,7 +4209,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
LoadHeapObject(temp1, current_prototype);
__ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -4220,7 +4219,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
LoadHeapObject(temp1, current_prototype);
__ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
@@ -4231,15 +4230,31 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
Heap* heap = isolate()->heap();
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
// Load the map's "bit field 2".
__ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
__ cmp(r2, Operand(boilerplate_elements_kind));
DeoptimizeIf(ne, instr->environment());
}
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(r1, Operand(constant_elements));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
__ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@@ -4256,9 +4271,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4297,10 +4312,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
LoadHeapObject(source, value_object);
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
} else {
__ mov(r2, Operand(value));
@@ -4326,7 +4341,7 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
__ bind(&allocated);
int offset = 0;
LoadHeapObject(r1, instr->hydrogen()->boilerplate());
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
ASSERT_EQ(size, offset);
}

deps/v8/src/arm/macro-assembler-arm.cc (15)

@@ -407,6 +407,19 @@ void MacroAssembler::StoreRoot(Register source,
}
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
mov(result, Operand(cell));
ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
mov(result, Operand(object));
}
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -1111,7 +1124,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
mov(r1, Operand(function));
LoadHeapObject(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());

deps/v8/src/arm/macro-assembler-arm.h (2)

@@ -166,6 +166,8 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
void LoadHeapObject(Register dst, Handle<HeapObject> object);
// ---------------------------------------------------------------------------
// GC Support

deps/v8/src/arm/stub-cache-arm.cc (20)

@@ -575,7 +575,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ mov(r5, Operand(function));
__ LoadHeapObject(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
@@ -1099,7 +1099,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1110,7 +1110,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ mov(r0, Operand(value));
__ LoadHeapObject(r0, value);
__ Ret();
}
@@ -2587,15 +2587,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ mov(r1, r0);
__ RecordWriteField(r4,
JSGlobalPropertyCell::kValueOffset,
r1,
r2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET);
// Cells are always rescanned, so no write barrier here.
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
@@ -2690,7 +2682,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -2830,7 +2822,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Object> value) {
Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key

deps/v8/src/assembler.cc (14)

@@ -1113,17 +1113,9 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
}
if (!isinf(x)) {
if (y == 0.5) return sqrt(x + 0.0); // -0 must be converted to +0.
if (y == -0.5) return 1.0 / sqrt(x + 0.0);
}
if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
return OS::nan_value();
}
// The checks for special cases can be dropped in ia32 because they have
// already been done in generated code before bailing out here.
if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
return pow(x, y);
}
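
Two numeric details behind the special cases removed here (on ia32 they now run in generated code before this bailout): sqrt(x + 0.0) works because IEEE-754 addition turns -0 into +0, and the isnan(y) check stays because C's pow(1.0, NAN) returns 1.0 while JavaScript requires NaN. Illustrative JS expectations:

  Math.pow(-0, 0.5);   // +0, not -0
  Math.pow(-0, -0.5);  // Infinity, i.e. 1 / sqrt(+0)
  Math.pow(1, NaN);    // NaN, although C's pow(1.0, NAN) yields 1.0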

deps/v8/src/ast.cc (2)

@@ -70,6 +70,7 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
is_lvalue_(false),
position_(RelocInfo::kNoPosition) {
BindTo(var);
}
@@ -84,6 +85,7 @@ VariableProxy::VariableProxy(Isolate* isolate,
var_(NULL),
is_this_(is_this),
is_trivial_(false),
is_lvalue_(false),
position_(position) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());

deps/v8/src/ast.h (8)

@@ -1159,12 +1159,17 @@ class VariableProxy: public Expression {
bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
bool IsLValue() {
return is_lvalue_;
}
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
int position() const { return position_; }
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1174,6 +1179,9 @@
Variable* var_; // resolved variable, or NULL
bool is_this_;
bool is_trivial_;
// True if this variable proxy is being used in an assignment
// or with an increment/decrement operator.
bool is_lvalue_;
int position_;
};

deps/v8/src/bootstrapper.cc (2)

@@ -299,7 +299,7 @@ class Genesis BASE_EMBEDDED {
void Bootstrapper::Iterate(ObjectVisitor* v) {
extensions_cache_.Iterate(v);
v->Synchronize("Extensions");
v->Synchronize(VisitorSynchronization::kExtensions);
}

deps/v8/src/builtins.cc (72)

@@ -233,30 +233,57 @@ BUILTIN(ArrayCodeGeneric) {
return array->Initialize(JSArray::kPreallocatedArrayElements);
}
// Take the arguments as elements.
int number_of_elements = args.length() - 1;
Smi* len = Smi::FromInt(number_of_elements);
Object* obj;
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
// Set length and elements on the array.
int number_of_elements = args.length() - 1;
MaybeObject* maybe_object =
array->EnsureCanContainElements(FixedArray::cast(obj));
array->EnsureCanContainElements(&args, 1, number_of_elements,
ALLOW_CONVERTED_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj);
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
// Fill in the content
for (int index = 0; index < number_of_elements; index++) {
elms->set(index, args[index+1], mode);
// Allocate an appropriately typed elements array.
MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
number_of_elements);
} else {
maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
}
FixedArrayBase* elms;
if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
array->set_elements(FixedArray::cast(obj));
array->set_length(len);
// Fill in the content
switch (array->GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS: {
FixedArray* smi_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, args[index+1], SKIP_WRITE_BARRIER);
}
break;
}
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
FixedArray* object_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
object_elms->set(index, args[index+1], mode);
}
break;
}
case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
double_elms->set(index, args[index+1]->Number());
}
break;
}
default:
UNREACHABLE();
break;
}
array->set_elements(elms);
array->set_length(Smi::FromInt(number_of_elements));
return array;
}
@@ -424,7 +451,8 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
args_length - first_added_arg);
args_length - first_added_arg,
DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
}
@@ -627,7 +655,8 @@ BUILTIN(ArrayUnshift) {
ASSERT(to_add <= (Smi::kMaxValue - len));
MaybeObject* maybe_object =
array->EnsureCanContainElements(&args, 1, to_add);
array->EnsureCanContainElements(&args, 1, to_add,
DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
if (new_length > elms->length()) {
@@ -758,7 +787,8 @@ BUILTIN(ArraySlice) {
FixedArray* result_elms = FixedArray::cast(result);
MaybeObject* maybe_object =
result_array->EnsureCanContainElements(result_elms);
result_array->EnsureCanContainElements(result_elms,
DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
@@ -1022,7 +1052,7 @@ BUILTIN(ArrayConcat) {
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
if (!array->HasFastSmiOnlyElements()) {
result_array->EnsureCanContainNonSmiElements();
result_array->EnsureCanContainHeapObjectElements();
break;
}
}
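
The Array builtin now allocates a backing store matching the array's ElementsKind (smi-only, double, or object elements) instead of always using a FixedArray. The choice is internal and not observable from JavaScript; a hedged sketch of which path each call would likely take:

  new Array(1, 2, 3);   // smi arguments: FAST_SMI_ONLY_ELEMENTS store
  new Array(1.5, 2.5);  // may use an unboxed FAST_DOUBLE_ELEMENTS store
  new Array("a", {});   // heap-object elements need FAST_ELEMENTS
  // All three behave identically as ordinary arrays.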

deps/v8/src/code-stubs.cc (61)

@@ -101,7 +101,14 @@ Handle<Code> CodeStub::GetCode() {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
if (!FindCodeInCache(&code)) {
if (UseSpecialCache()
? FindCodeInSpecialCache(&code)
: FindCodeInCache(&code)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
return Handle<Code>(code);
}
{
HandleScope scope(isolate);
// Generate the new code.
@@ -121,19 +128,21 @@
RecordCodeGeneration(*new_object, &masm);
FinishCode(new_object);
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
factory->DictionaryAtNumberPut(
Handle<NumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
heap->public_set_code_stubs(*dict);
if (UseSpecialCache()) {
AddToSpecialCache(new_object);
} else {
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
factory->DictionaryAtNumberPut(
Handle<NumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
heap->public_set_code_stubs(*dict);
}
code = *new_object;
Activate(code);
} else {
CHECK(IsPregenerated() == code->is_pregenerated());
}
Activate(code);
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
return Handle<Code>(code, isolate);
}
@@ -159,6 +168,32 @@ void CodeStub::PrintName(StringStream* stream) {
}
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
Factory* factory = isolate->factory();
return Map::UpdateCodeCache(known_map_,
factory->compare_ic_symbol(),
new_object);
}
bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
Isolate* isolate = known_map_->GetIsolate();
Factory* factory = isolate->factory();
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
UNINITIALIZED);
Handle<Object> probe(
known_map_->FindInCodeCache(*factory->compare_ic_symbol(), flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
return true;
}
return false;
}
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@@ -184,6 +219,10 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
case CompareIC::KNOWN_OBJECTS:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
default:
UNREACHABLE();
}

deps/v8/src/code-stubs.h (28)

@@ -194,6 +194,17 @@ class CodeStub BASE_EMBEDDED {
return UNINITIALIZED;
}
// Add the code to a specialized cache, specific to an individual
// stub type. Please note, this method must add the code object to a
// roots object, otherwise we will remove the code during GC.
virtual void AddToSpecialCache(Handle<Code> new_object) { }
// Find code in a specialized cache, work is delegated to the specific stub.
virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
@@ -442,12 +453,17 @@ class InstanceofStub: public CodeStub {
class MathPowStub: public CodeStub {
public:
MathPowStub() {}
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
explicit MathPowStub(ExponentType exponent_type)
: exponent_type_(exponent_type) { }
virtual void Generate(MacroAssembler* masm);
private:
virtual CodeStub::Major MajorKey() { return MathPow; }
virtual int MinorKey() { return 0; }
virtual int MinorKey() { return exponent_type_; }
ExponentType exponent_type_;
};
@@ -460,6 +476,8 @@ class ICCompareStub: public CodeStub {
virtual void Generate(MacroAssembler* masm);
void set_known_map(Handle<Map> map) { known_map_ = map; }
private:
class OpField: public BitField<int, 0, 3> { };
class StateField: public BitField<int, 3, 5> { };
@@ -479,12 +497,18 @@ class ICCompareStub: public CodeStub {
void GenerateStrings(MacroAssembler* masm);
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
void GenerateKnownObjects(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
virtual void AddToSpecialCache(Handle<Code> new_object);
virtual bool FindCodeInSpecialCache(Code** code_out);
virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
Token::Value op_;
CompareIC::State state_;
Handle<Map> known_map_;
};

deps/v8/src/compiler.cc (2)

@@ -398,7 +398,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
isolate->StackOverflow();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}

deps/v8/src/debug-agent.cc (2)

@@ -229,8 +229,6 @@ void DebuggerAgentSession::Shutdown() {
const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
const int DebuggerAgentUtil::kContentLengthSize =
StrLength(kContentLength);
SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {

deps/v8/src/debug-agent.h (1)

@@ -115,7 +115,6 @@ class DebuggerAgentSession: public Thread {
class DebuggerAgentUtil {
public:
static const char* const kContentLength;
static const int kContentLengthSize;
static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
static bool SendConnectMessage(const Socket* conn,

deps/v8/src/debug.cc (34)

@@ -1796,8 +1796,9 @@ void Debug::PrepareForBreakPoints() {
}
} else if (frame->function()->IsJSFunction()) {
JSFunction* function = JSFunction::cast(frame->function());
if (function->code()->kind() == Code::FUNCTION &&
!function->code()->has_debug_break_slots()) {
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
if (!frame->LookupCode()->has_debug_break_slots() ||
!function->shared()->code()->has_debug_break_slots()) {
active_functions.Add(Handle<JSFunction>(function));
}
}
@@ -1853,20 +1854,16 @@ void Debug::PrepareForBreakPoints() {
if (function->code() == *lazy_compile) {
function->set_code(shared->code());
}
Handle<Code> current_code(function->code());
if (shared->code()->has_debug_break_slots()) {
// if the code is already recompiled to have break slots skip
// recompilation.
ASSERT(!function->code()->has_debug_break_slots());
} else {
if (!shared->code()->has_debug_break_slots()) {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
ASSERT(shared->code() == *current_code);
Handle<Code> current_code(function->shared()->code());
ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
ASSERT(current_code->kind() == Code::FUNCTION);
CompileFullCodeForDebugging(shared, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
@@ -1883,10 +1880,13 @@ void Debug::PrepareForBreakPoints() {
// If the current frame is for this function in its
// non-optimized form rewrite the return address to continue
// in the newly compiled full code with debug break slots.
if (frame->function()->IsJSFunction() &&
frame->function() == *function &&
frame->LookupCode()->kind() == Code::FUNCTION) {
intptr_t delta = frame->pc() - current_code->instruction_start();
if (!frame->is_optimized() &&
frame->function()->IsJSFunction() &&
frame->function() == *function) {
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
Handle<Code> frame_code(frame->LookupCode());
if (frame_code->has_debug_break_slots()) continue;
intptr_t delta = frame->pc() - frame_code->instruction_start();
int debug_break_slot_count = 0;
int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
@@ -1915,11 +1915,11 @@ void Debug::PrepareForBreakPoints() {
"for debugging, "
"changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
reinterpret_cast<intptr_t>(
current_code->instruction_start()),
frame_code->instruction_start()),
reinterpret_cast<intptr_t>(
current_code->instruction_start()) +
current_code->instruction_size(),
current_code->instruction_size(),
frame_code->instruction_start()) +
frame_code->instruction_size(),
frame_code->instruction_size(),
reinterpret_cast<intptr_t>(new_code->instruction_start()),
reinterpret_cast<intptr_t>(new_code->instruction_start()) +
new_code->instruction_size(),

deps/v8/src/elements.cc (38)

@@ -134,6 +134,22 @@ class ElementsAccessorBase : public ElementsAccessor {
JSObject* obj,
Object* length);
virtual MaybeObject* SetCapacityAndLength(JSArray* array,
int capacity,
int length) {
return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
array,
capacity,
length);
}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
int capacity,
int length) {
UNIMPLEMENTED();
return obj;
}
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
@@ -376,11 +392,6 @@ class FastObjectElementsAccessor
return heap->true_value();
}
protected:
friend class FastElementsAccessor<FastObjectElementsAccessor,
FixedArray,
kPointerSize>;
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
@@ -393,6 +404,11 @@ class FastObjectElementsAccessor
set_capacity_mode);
}
protected:
friend class FastElementsAccessor<FastObjectElementsAccessor,
FixedArray,
kPointerSize>;
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -405,6 +421,12 @@ class FastDoubleElementsAccessor
: public FastElementsAccessor<FastDoubleElementsAccessor,
FixedDoubleArray,
kDoubleSize> {
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
}
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
FixedDoubleArray>;
@@ -412,12 +434,6 @@
FixedDoubleArray,
kDoubleSize>;
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
}
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {

deps/v8/src/elements.h (17)

@@ -44,11 +44,24 @@ class ElementsAccessor {
JSObject* holder,
Object* receiver) = 0;
// Modifies the length data property as specified for JSArrays and resizes
// the underlying backing store accordingly.
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. arrays that
// have non-deletable elements can only be shrunk to the index of the highest
// non-deletable element plus one.
virtual MaybeObject* SetLength(JSObject* holder,
Object* new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
// backing store as necessary. This method does NOT honor the semantics of
// EcmaScript 5.1 15.4.5.2: arrays can be shrunk beyond non-deletable
// elements. This method should only be called for array expansion OR by
// runtime JavaScript code that uses InternalArrays and doesn't care about
// EcmaScript 5.1 semantics.
virtual MaybeObject* SetCapacityAndLength(JSArray* array,
int capacity,
int length) = 0;
virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
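To make the contrast concrete, here is a minimal self-contained C++ model of the two resizing paths described in the comments above. ModelArray and its members are invented for illustration; they are not V8 types.

#include <algorithm>
#include <cstdio>
#include <vector>

struct ModelArray {
  std::vector<int> elements;
  size_t highest_non_deletable;  // index of the highest non-deletable element

  // Honors ES 5.1 15.4.5.2: shrinking stops at the highest non-deletable
  // element, so the requested length may be clamped.
  void SetLength(size_t new_length) {
    elements.resize(std::max(new_length, highest_non_deletable + 1));
  }

  // Ignores deletability entirely: meant for array expansion or for
  // internal arrays whose elements the engine fully controls.
  void SetCapacityAndLength(size_t capacity, size_t length) {
    elements.reserve(capacity);
    elements.resize(length);
  }
};

int main() {
  ModelArray a{std::vector<int>(10, 0), /*highest_non_deletable=*/4};
  a.SetLength(2);                            // clamped to 5: element 4 survives
  std::printf("%zu\n", a.elements.size());   // prints 5
  a.SetCapacityAndLength(16, 2);             // internal path: no clamping
  std::printf("%zu\n", a.elements.size());   // prints 2
  return 0;
}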

28
deps/v8/src/factory.cc

@ -926,28 +926,48 @@ Handle<JSArray> Factory::NewJSArray(int capacity,
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
PretenureFlag pretenure) {
Handle<JSArray> result =
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
pretenure));
result->set_length(Smi::FromInt(0));
SetContent(result, elements);
return result;
}
void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
int capacity,
int length) {
ElementsAccessor* accessor = array->GetElementsAccessor();
CALL_HEAP_FUNCTION_VOID(
isolate(),
accessor->SetCapacityAndLength(*array, capacity, length));
}
void Factory::SetContent(Handle<JSArray> array,
Handle<FixedArray> elements) {
Handle<FixedArrayBase> elements) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->SetContent(*elements));
}
void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->EnsureCanContainHeapObjectElements());
}
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
EnsureElementsMode mode) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->EnsureCanContainNonSmiElements());
array->EnsureCanContainElements(*elements, mode));
}

13
deps/v8/src/factory.h

@ -259,12 +259,19 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArray> elements,
Handle<FixedArrayBase> elements,
PretenureFlag pretenure = NOT_TENURED);
void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
void SetElementsCapacityAndLength(Handle<JSArray> array,
int capacity,
int length);
void EnsureCanContainNonSmiElements(Handle<JSArray> array);
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);

17
deps/v8/src/frames.cc

@ -723,12 +723,17 @@ void JavaScriptFrame::PrintTop(FILE* file,
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
// function name
Object* fun = frame->function();
if (fun->IsJSFunction()) {
SharedFunctionInfo* shared = JSFunction::cast(fun)->shared();
shared->DebugName()->ShortPrint(file);
Object* maybe_fun = frame->function();
if (maybe_fun->IsJSFunction()) {
JSFunction* fun = JSFunction::cast(maybe_fun);
fun->PrintName();
Code* js_code = frame->unchecked_code();
Address pc = frame->pc();
int code_offset =
static_cast<int>(pc - js_code->instruction_start());
PrintF("+%d", code_offset);
SharedFunctionInfo* shared = fun->shared();
if (print_line_number) {
Address pc = frame->pc();
Code* code = Code::cast(
v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
int source_pos = code->SourcePosition(pc);
@ -751,7 +756,7 @@ void JavaScriptFrame::PrintTop(FILE* file,
}
}
} else {
fun->ShortPrint(file);
PrintF("<unknown>");
}
if (print_args) {

3
deps/v8/src/heap-inl.h

@ -125,7 +125,8 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<HeapObject*>(result)->set_map(map);
// String maps are all immortal immovable objects.
reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
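The rename from set_map_unsafe to set_map_no_write_barrier, applied throughout heap.cc below, makes the contract explicit: the caller guarantees the stored map can never move, so the generational write barrier can be skipped. A rough sketch under a simple remembered-set model; HeapObj, store_buffer, and both helpers are illustrative stand-ins, not V8's implementation.

#include <unordered_set>

struct HeapObj { const HeapObj* map; bool in_new_space; };

std::unordered_set<HeapObj*> store_buffer;  // remembered old-to-new pointers

// Full barrier: record the host so the scavenger can update this pointer
// if the stored map is moved out of new space.
void set_map(HeapObj* host, const HeapObj* map) {
  host->map = map;
  if (map->in_new_space) store_buffer.insert(host);
}

// Barrier-free store: the caller guarantees the map is immortal and
// immovable (string maps, filler maps, ...), so no entry is needed.
void set_map_no_write_barrier(HeapObj* host, const HeapObj* map) {
  host->map = map;
}

int main() {
  HeapObj string_map{nullptr, /*in_new_space=*/false};
  HeapObj young_map{nullptr, /*in_new_space=*/true};
  HeapObj s{nullptr, true};
  set_map(&s, &young_map);                    // recorded in the store buffer
  set_map_no_write_barrier(&s, &string_map);  // safe: map never moves
  return store_buffer.size() == 1 ? 0 : 1;
}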

115
deps/v8/src/heap.cc

@ -80,7 +80,7 @@ Heap::Heap()
#endif
reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
max_executable_size_(128l * LUMP_OF_MEMORY),
@ -1012,7 +1012,7 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
store_buffer_->HandleFullness();
store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
@ -1813,7 +1813,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
}
Map* map = reinterpret_cast<Map*>(result);
map->set_map_unsafe(meta_map());
map->set_map_no_write_barrier(meta_map());
map->set_instance_type(instance_type);
map->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
@ -2173,7 +2173,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_unsafe(heap_number_map());
HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@ -2191,7 +2191,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_unsafe(heap_number_map());
HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@ -2202,7 +2202,8 @@ MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
{ MaybeObject* maybe_result = AllocateRawCell();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_unsafe(global_property_cell_map());
HeapObject::cast(result)->set_map_no_write_barrier(
global_property_cell_map());
JSGlobalPropertyCell::cast(result)->set_value(value);
return result;
}
@ -2416,6 +2417,7 @@ bool Heap::CreateInitialObjects() {
}
set_code_stubs(NumberDictionary::cast(obj));
// Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
// is set to avoid expanding the dictionary during bootstrapping.
{ MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
@ -2543,7 +2545,7 @@ void StringSplitCache::Enter(Heap* heap,
}
}
}
array->set_map(heap->fixed_cow_array_map());
array->set_map_no_write_barrier(heap->fixed_cow_array_map());
}
@ -3139,7 +3141,8 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@ -3157,7 +3160,8 @@ MaybeObject* Heap::AllocateByteArray(int length) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@ -3167,11 +3171,11 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map_unsafe(one_pointer_filler_map());
filler->set_map_no_write_barrier(one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
filler->set_map_unsafe(two_pointer_filler_map());
filler->set_map_no_write_barrier(two_pointer_filler_map());
} else {
filler->set_map_unsafe(free_space_map());
filler->set_map_no_write_barrier(free_space_map());
FreeSpace::cast(filler)->set_size(size);
}
}
@ -3189,7 +3193,7 @@ MaybeObject* Heap::AllocateExternalArray(int length,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<ExternalArray*>(result)->set_map_unsafe(
reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
MapForExternalArrayType(array_type));
reinterpret_cast<ExternalArray*>(result)->set_length(length);
reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@ -3226,7 +3230,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (!maybe_result->ToObject(&result)) return maybe_result;
// Initialize the object
HeapObject::cast(result)->set_map_unsafe(code_map());
HeapObject::cast(result)->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
@ -3355,7 +3359,7 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// No need for write barrier since object is white and map is in old space.
HeapObject::cast(result)->set_map_unsafe(map);
HeapObject::cast(result)->set_map_no_write_barrier(map);
return result;
}
@ -4084,7 +4088,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<HeapObject*>(result)->set_map_unsafe(map);
reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(chars);
@ -4128,7 +4132,7 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
}
// Partially initialize the object.
HeapObject::cast(result)->set_map_unsafe(ascii_string_map());
HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@ -4163,7 +4167,7 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
}
// Partially initialize the object.
HeapObject::cast(result)->set_map_unsafe(string_map());
HeapObject::cast(result)->set_map_no_write_barrier(string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@ -4179,7 +4183,8 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
reinterpret_cast<FixedArray*>(result)->set_map_unsafe(fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@ -4208,13 +4213,13 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
}
if (InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
dst->set_map_unsafe(map);
dst->set_map_no_write_barrier(map);
CopyBlock(dst->address() + kPointerSize,
src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
HeapObject::cast(obj)->set_map_unsafe(map);
HeapObject::cast(obj)->set_map_no_write_barrier(map);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@ -4234,7 +4239,7 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HeapObject* dst = HeapObject::cast(obj);
dst->set_map_unsafe(map);
dst->set_map_no_write_barrier(map);
CopyBlock(
dst->address() + FixedDoubleArray::kLengthOffset,
src->address() + FixedDoubleArray::kLengthOffset,
@ -4252,7 +4257,7 @@ MaybeObject* Heap::AllocateFixedArray(int length) {
}
// Initialize header.
FixedArray* array = reinterpret_cast<FixedArray*>(result);
array->set_map_unsafe(fixed_array_map());
array->set_map_no_write_barrier(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!InNewSpace(undefined_value()));
@ -4300,7 +4305,7 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_unsafe(heap->fixed_array_map());
HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@ -4333,7 +4338,8 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
reinterpret_cast<FixedArray*>(obj)->set_map_unsafe(fixed_array_map());
reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
fixed_array_map());
FixedArray::cast(obj)->set_length(length);
return obj;
}
@ -4347,7 +4353,7 @@ MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
reinterpret_cast<FixedDoubleArray*>(result)->set_map_unsafe(
reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
fixed_double_array_map());
reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
return result;
@ -4364,7 +4370,7 @@ MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
reinterpret_cast<FixedDoubleArray*>(obj)->set_map_unsafe(
reinterpret_cast<FixedDoubleArray*>(obj)->set_map_no_write_barrier(
fixed_double_array_map());
FixedDoubleArray::cast(obj)->set_length(length);
return obj;
@ -4401,7 +4407,8 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
{ MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<HeapObject*>(result)->set_map_unsafe(hash_table_map());
reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@ -4414,7 +4421,7 @@ MaybeObject* Heap::AllocateGlobalContext() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_unsafe(global_context_map());
context->set_map_no_write_barrier(global_context_map());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;
@ -4428,7 +4435,7 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_unsafe(function_context_map());
context->set_map_no_write_barrier(function_context_map());
context->set_closure(function);
context->set_previous(function->context());
context->set_extension(NULL);
@ -4448,7 +4455,7 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_unsafe(catch_context_map());
context->set_map_no_write_barrier(catch_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(name);
@ -4466,7 +4473,7 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_unsafe(with_context_map());
context->set_map_no_write_barrier(with_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
@ -4484,7 +4491,7 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_unsafe(block_context_map());
context->set_map_no_write_barrier(block_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(scope_info);
@ -4497,7 +4504,7 @@ MaybeObject* Heap::AllocateScopeInfo(int length) {
FixedArray* scope_info;
MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
scope_info->set_map_unsafe(scope_info_map());
scope_info->set_map_no_write_barrier(scope_info_map());
return scope_info;
}
@ -4541,8 +4548,10 @@ void Heap::EnsureHeapIsIterable() {
bool Heap::IdleNotification(int hint) {
if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
return hint < 1000 ? true : IdleGlobalGC();
if (hint >= 1000) return IdleGlobalGC();
if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
FLAG_expose_gc || Serializer::enabled()) {
return true;
}
// By doing small chunks of GC work in each IdleNotification,
@ -5150,29 +5159,29 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
v->Synchronize(VisitorSynchronization::kSymbolTable);
if (mode != VISIT_ALL_IN_SCAVENGE &&
mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.Iterate(v);
}
v->Synchronize("external_string_table");
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize("strong_root_list");
v->Synchronize(VisitorSynchronization::kStrongRootList);
v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
v->Synchronize("symbol");
v->Synchronize(VisitorSynchronization::kSymbol);
isolate_->bootstrapper()->Iterate(v);
v->Synchronize("bootstrapper");
v->Synchronize(VisitorSynchronization::kBootstrapper);
isolate_->Iterate(v);
v->Synchronize("top");
v->Synchronize(VisitorSynchronization::kTop);
Relocatable::Iterate(v);
v->Synchronize("relocatable");
v->Synchronize(VisitorSynchronization::kRelocatable);
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->Iterate(v);
@ -5180,13 +5189,13 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
isolate_->deoptimizer_data()->Iterate(v);
}
#endif
v->Synchronize("debug");
v->Synchronize(VisitorSynchronization::kDebug);
isolate_->compilation_cache()->Iterate(v);
v->Synchronize("compilationcache");
v->Synchronize(VisitorSynchronization::kCompilationCache);
// Iterate over local handles in handle scopes.
isolate_->handle_scope_implementer()->Iterate(v);
v->Synchronize("handlescope");
v->Synchronize(VisitorSynchronization::kHandleScope);
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
@ -5194,7 +5203,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
if (mode != VISIT_ALL_IN_SCAVENGE) {
isolate_->builtins()->IterateBuiltins(v);
}
v->Synchronize("builtins");
v->Synchronize(VisitorSynchronization::kBuiltins);
// Iterate over global handles.
switch (mode) {
@ -5209,11 +5218,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
isolate_->global_handles()->IterateAllRoots(v);
break;
}
v->Synchronize("globalhandles");
v->Synchronize(VisitorSynchronization::kGlobalHandles);
// Iterate over pointers being held by inactive threads.
isolate_->thread_manager()->Iterate(v);
v->Synchronize("threadmanager");
v->Synchronize(VisitorSynchronization::kThreadManager);
// Iterate over the pointers the Serialization/Deserialization code is
// holding.
@ -5413,7 +5422,7 @@ class HeapDebugUtils {
Address map_addr = map_p->address();
obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
MarkObjectRecursively(&map);
@ -5460,7 +5469,7 @@ class HeapDebugUtils {
HeapObject* map_p = HeapObject::FromAddress(map_addr);
obj->set_map(reinterpret_cast<Map*>(map_p));
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
@ -6172,7 +6181,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
Address map_addr = map_p->address();
obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
// Scan the object body.
if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
@ -6214,7 +6223,7 @@ void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
HeapObject* map_p = HeapObject::FromAddress(map_addr);
obj->set_map(reinterpret_cast<Map*>(map_p));
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

1
deps/v8/src/heap.h

@ -245,6 +245,7 @@ inline Heap* _inline_get_heap_();
V(use_strict, "use strict") \
V(dot_symbol, ".") \
V(anonymous_function_symbol, "(anonymous function)") \
V(compare_ic_symbol, ".compare_ic") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity")

5
deps/v8/src/hydrogen-instructions.cc

@ -1227,10 +1227,7 @@ void HConstant::PrintDataTo(StringStream* stream) {
bool HArrayLiteral::IsCopyOnWrite() const {
Handle<FixedArray> constant_elements = this->constant_elements();
FixedArrayBase* constant_elements_values =
FixedArrayBase::cast(constant_elements->get(1));
return constant_elements_values->map() == HEAP->fixed_cow_array_map();
return boilerplate_object_->elements()->map() == HEAP->fixed_cow_array_map();
}

54
deps/v8/src/hydrogen-instructions.h

@ -3447,8 +3447,21 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
class HLoadContextSlot: public HUnaryOperation {
public:
HLoadContextSlot(HValue* context , int slot_index)
: HUnaryOperation(context), slot_index_(slot_index) {
enum Mode {
// Perform a normal load of the context slot without checking its value.
kLoad,
// Load and check the value of the context slot. Deoptimize if it's the
// hole value. This is used to detect loads of uninitialized harmony
// bindings: we deoptimize into full-codegen generated code, which then
// throws a reference error.
kLoadCheck
};
HLoadContextSlot(HValue* context, Variable* var)
: HUnaryOperation(context), slot_index_(var->index()) {
ASSERT(var->IsContextSlot());
mode_ = (var->mode() == LET || var->mode() == CONST_HARMONY)
? kLoadCheck : kLoad;
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnContextSlots);
@ -3456,6 +3469,10 @@ class HLoadContextSlot: public HUnaryOperation {
int slot_index() const { return slot_index_; }
bool RequiresHoleCheck() {
return mode_ == kLoadCheck;
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@ -3472,13 +3489,25 @@ class HLoadContextSlot: public HUnaryOperation {
private:
int slot_index_;
Mode mode_;
};
class HStoreContextSlot: public HTemplateInstruction<2> {
public:
HStoreContextSlot(HValue* context, int slot_index, HValue* value)
: slot_index_(slot_index) {
enum Mode {
// Perform a normal store to the context slot without checking its previous
// value.
kAssign,
// Check the previous value of the context slot and deoptimize if it's the
// hole value. This is used to detect assignments to uninitialized harmony
// bindings: we deoptimize into full-codegen generated code, which then
// throws a reference error.
kAssignCheck
};
HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
: slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
SetFlag(kChangesContextSlots);
@ -3487,11 +3516,16 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
int slot_index() const { return slot_index_; }
Mode mode() const { return mode_; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
bool RequiresHoleCheck() {
return mode_ == kAssignCheck;
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@ -3502,6 +3536,7 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
private:
int slot_index_;
Mode mode_;
};
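A sketch of the hole-check behavior the new kLoadCheck/kAssignCheck modes encode, assuming uninitialized let and const-harmony bindings hold a distinguished hole value; Slot, kTheHole, and LoadContextSlot are invented names, not V8 API.

#include <cstdio>
#include <stdexcept>

static void* const kTheHole = nullptr;  // stand-in for V8's hole sentinel

struct Slot { void* value = kTheHole; };

void* LoadContextSlot(const Slot& slot, bool check_hole) {
  if (check_hole && slot.value == kTheHole) {
    // kLoadCheck path: optimized code deoptimizes here, and the
    // full-codegen code it falls back to throws the ReferenceError.
    throw std::runtime_error("deopt: uninitialized harmony binding");
  }
  return slot.value;  // kLoad path: plain load, no check
}

int main() {
  Slot let_binding;  // declared but not yet initialized: holds the hole
  try {
    LoadContextSlot(let_binding, /*check_hole=*/true);
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}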
@ -4167,18 +4202,21 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
class HArrayLiteral: public HMaterializedLiteral<1> {
public:
HArrayLiteral(HValue* context,
Handle<FixedArray> constant_elements,
Handle<JSObject> boilerplate_object,
int length,
int literal_index,
int depth)
: HMaterializedLiteral<1>(literal_index, depth),
length_(length),
constant_elements_(constant_elements) {
boilerplate_object_(boilerplate_object) {
SetOperandAt(0, context);
}
HValue* context() { return OperandAt(0); }
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ElementsKind boilerplate_elements_kind() const {
return boilerplate_object_->GetElementsKind();
}
Handle<JSObject> boilerplate_object() const { return boilerplate_object_; }
int length() const { return length_; }
bool IsCopyOnWrite() const;
@ -4192,7 +4230,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
private:
int length_;
Handle<FixedArray> constant_elements_;
Handle<JSObject> boilerplate_object_;
};

194
deps/v8/src/hydrogen.cc

@ -2756,10 +2756,13 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// 2. Build all the tests, with dangling true branches
int default_id = AstNode::kNoNumber;
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) continue;
if (clause->is_default()) {
default_id = clause->EntryId();
continue;
}
if (switch_type == SMI_SWITCH) {
clause->RecordTypeFeedback(oracle());
}
@ -2806,7 +2809,10 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HBasicBlock* last_block = current_block();
if (not_string_block != NULL) {
last_block = CreateJoin(last_block, not_string_block, stmt->ExitId());
int join_id = (default_id != AstNode::kNoNumber)
? default_id
: stmt->ExitId();
last_block = CreateJoin(last_block, not_string_block, join_id);
}
// 3. Loop over the clauses and the linked list of tests in lockstep,
@ -3222,11 +3228,11 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->var();
if (variable->mode() == LET) {
return Bailout("reference to let variable");
}
switch (variable->location()) {
case Variable::UNALLOCATED: {
if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
return Bailout("reference to global harmony declared variable");
}
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
@ -3269,9 +3275,11 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
if (variable->mode() == CONST &&
value == graph()->GetConstantHole()) {
return Bailout("reference to uninitialized const variable");
if (value == graph()->GetConstantHole()) {
ASSERT(variable->mode() == CONST ||
variable->mode() == CONST_HARMONY ||
variable->mode() == LET);
return Bailout("reference to uninitialized variable");
}
return ast_context()->ReturnValue(value);
}
@ -3281,8 +3289,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return Bailout("reference to const context slot");
}
HValue* context = BuildContextChainWalk(variable);
HLoadContextSlot* instr =
new(zone()) HLoadContextSlot(context, variable->index());
HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@ -3325,13 +3332,13 @@ static bool IsFastObjectLiteral(Handle<JSObject> boilerplate,
int* total_size) {
if (max_depth <= 0) return false;
FixedArrayBase* elements = boilerplate->elements();
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != HEAP->fixed_cow_array_map()) {
return false;
}
FixedArray* properties = boilerplate->properties();
Handle<FixedArray> properties(boilerplate->properties());
if (properties->length() > 0) {
return false;
} else {
@ -3457,11 +3464,25 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
int length = subexprs->length();
HValue* context = environment()->LookupContext();
HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
expr->constant_elements(),
length,
expr->literal_index(),
expr->depth());
Handle<FixedArray> literals(environment()->closure()->literals());
Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
// For now, a missing boilerplate causes a deopt.
if (raw_boilerplate->IsUndefined()) {
AddInstruction(new(zone()) HSoftDeoptimize);
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
Handle<JSObject> boilerplate(Handle<JSObject>::cast(raw_boilerplate));
ElementsKind boilerplate_elements_kind = boilerplate->GetElementsKind();
HArrayLiteral* literal = new(zone()) HArrayLiteral(
context,
boilerplate,
length,
expr->literal_index(),
expr->depth());
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
@ -3484,42 +3505,25 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
HInstruction* elements_kind =
AddInstruction(new(zone()) HElementsKind(literal));
HBasicBlock* store_fast = graph()->CreateBasicBlock();
// Two empty blocks to satisfy edge split form.
HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
HBasicBlock* store_generic = graph()->CreateBasicBlock();
HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
smicheck->SetSuccessorAt(1, check_smi_only_elements);
current_block()->Finish(smicheck);
store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
set_current_block(check_smi_only_elements);
HCompareConstantEqAndBranch* smi_elements_check =
new(zone()) HCompareConstantEqAndBranch(elements_kind,
FAST_ELEMENTS,
Token::EQ_STRICT);
smi_elements_check->SetSuccessorAt(0, store_fast_edgesplit2);
smi_elements_check->SetSuccessorAt(1, store_generic);
current_block()->Finish(smi_elements_check);
store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
set_current_block(store_fast);
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
store_fast->Goto(join);
set_current_block(store_generic);
AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
store_generic->Goto(join);
join->SetJoinId(expr->id());
set_current_block(join);
switch (boilerplate_elements_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastElement(
elements,
key,
value,
boilerplate_elements_kind));
break;
case FAST_DOUBLE_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
key,
value));
break;
default:
UNREACHABLE();
break;
}
AddSimulate(expr->GetIdForElement(i));
}
@ -3838,8 +3842,11 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
}
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot::Mode mode =
(var->mode() == LET || var->mode() == CONST_HARMONY)
? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), Top());
new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@ -3959,8 +3966,10 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
AddInstruction(new HUseConst(old_value));
} else if (var->mode() == LET) {
return Bailout("unsupported assignment to let");
} else if (var->mode() == CONST_HARMONY) {
if (expr->op() != Token::INIT_CONST_HARMONY) {
return Bailout("non-initializer assignment to const");
}
}
if (proxy->IsArguments()) return Bailout("assignment to arguments");
@ -3977,6 +3986,14 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
// Perform an initialization check for let declared variables
// or parameters.
if (var->mode() == LET && expr->op() == Token::ASSIGN) {
HValue* env_value = environment()->Lookup(var);
if (env_value == graph()->GetConstantHole()) {
return Bailout("assignment to let variable before initialization");
}
}
// We do not allow the arguments object to occur in a context where it
// may escape, but assignments to stack-allocated locals are
// permitted.
@ -4004,8 +4021,18 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), Top());
HStoreContextSlot::Mode mode;
if (expr->op() == Token::ASSIGN) {
mode = (var->mode() == LET || var->mode() == CONST_HARMONY)
? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
} else {
ASSERT(expr->op() == Token::INIT_VAR ||
expr->op() == Token::INIT_LET ||
expr->op() == Token::INIT_CONST_HARMONY);
mode = HStoreContextSlot::kAssign;
}
HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@ -5614,8 +5641,11 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot::Mode mode =
(var->mode() == LET || var->mode() == CONST_HARMONY)
? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), after);
new(zone()) HStoreContextSlot(context, var->index(), mode, after);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@ -6116,14 +6146,27 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
// Can we get away with a map check instead of an instance type check?
Handle<Map> map = oracle()->GetCompareMap(expr);
if (!map.is_null()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(new(zone()) HCheckMap(left, map));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(new(zone()) HCheckMap(right, map));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
}
default:
return Bailout("Unsupported non-primitive compare");
@ -6188,28 +6231,27 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function) {
if (mode == LET || mode == CONST_HARMONY) {
return Bailout("unsupported harmony declaration");
}
Variable* var = proxy->var();
bool binding_needs_init =
(mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (var->location()) {
case Variable::UNALLOCATED:
return Bailout("unsupported global declaration");
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT:
if (mode == CONST || function != NULL) {
if (binding_needs_init || function != NULL) {
HValue* value = NULL;
if (mode == CONST) {
value = graph()->GetConstantHole();
} else {
if (function != NULL) {
VisitForValue(function);
value = Pop();
} else {
value = graph()->GetConstantHole();
}
if (var->IsContextSlot()) {
HValue* context = environment()->LookupContext();
HStoreContextSlot* store =
new HStoreContextSlot(context, var->index(), value);
HStoreContextSlot* store = new HStoreContextSlot(
context, var->index(), HStoreContextSlot::kAssign, value);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
} else {

138
deps/v8/src/ia32/assembler-ia32.cc

@ -388,8 +388,91 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
int mask = m - 1;
int addr = pc_offset();
Nop((m - (addr & mask)) & mask);
}
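The new Align body computes the full padding count up front instead of looping on one-byte nops. A quick host-side check of the arithmetic; PaddingFor is an invented helper.

#include <cassert>

int PaddingFor(int addr, int m) {  // m must be a power of two
  int mask = m - 1;
  return (m - (addr & mask)) & mask;
}

int main() {
  assert(PaddingFor(13, 8) == 3);   // 13 -> 16
  assert(PaddingFor(16, 8) == 0);   // already aligned: emit nothing
  assert(PaddingFor(1, 16) == 15);
  return 0;
}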
bool Assembler::IsNop(Address addr) {
Address a = addr;
while (*a == 0x66) a++;
if (*a == 0x90) return true;
if (a[0] == 0xf && a[1] == 0x1f) return true;
return false;
}
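IsNop now accepts any run of 0x66 prefixes followed by either 0x90 or the 0x0F 0x1F multi-byte NOP opcode, matching every pattern the new Nop() below can emit. A host-side sketch with byte patterns copied from that table; LooksLikeNop is an invented stand-in.

#include <cassert>
#include <cstdint>

static bool LooksLikeNop(const uint8_t* a) {
  while (*a == 0x66) a++;               // skip operand-size prefixes
  if (*a == 0x90) return true;          // one-byte NOP
  return a[0] == 0x0f && a[1] == 0x1f;  // multi-byte NOP opcode
}

int main() {
  const uint8_t nop5[] = {0x0f, 0x1f, 0x44, 0x00, 0x00};  // Nop(5)
  const uint8_t nop2[] = {0x66, 0x90};                    // Nop(2)
  assert(LooksLikeNop(nop5) && LooksLikeNop(nop2));
  return 0;
}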
void Assembler::Nop(int bytes) {
EnsureSpace ensure_space(this);
if (!CpuFeatures::IsSupported(SSE2)) {
// Older CPUs that do not support SSE2 may not support multibyte NOP
// instructions.
for (; bytes > 0; bytes--) {
EMIT(0x90);
}
return;
}
// Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
while (bytes > 0) {
switch (bytes) {
case 2:
EMIT(0x66);
case 1:
EMIT(0x90);
return;
case 3:
EMIT(0xf);
EMIT(0x1f);
EMIT(0);
return;
case 4:
EMIT(0xf);
EMIT(0x1f);
EMIT(0x40);
EMIT(0);
return;
case 6:
EMIT(0x66);
case 5:
EMIT(0xf);
EMIT(0x1f);
EMIT(0x44);
EMIT(0);
EMIT(0);
return;
case 7:
EMIT(0xf);
EMIT(0x1f);
EMIT(0x80);
EMIT(0);
EMIT(0);
EMIT(0);
EMIT(0);
return;
default:
case 11:
EMIT(0x66);
bytes--;
case 10:
EMIT(0x66);
bytes--;
case 9:
EMIT(0x66);
bytes--;
case 8:
EMIT(0xf);
EMIT(0x1f);
EMIT(0x84);
EMIT(0);
EMIT(0);
EMIT(0);
EMIT(0);
EMIT(0);
bytes -= 8;
}
}
}
@ -463,13 +546,6 @@ void Assembler::push(const Operand& src) {
}
void Assembler::push(Handle<Object> handle) {
EnsureSpace ensure_space(this);
EMIT(0x68);
emit(handle);
}
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@ -1640,6 +1716,27 @@ void Assembler::fyl2x() {
}
void Assembler::f2xm1() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF0);
}
void Assembler::fscale() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFD);
}
void Assembler::fninit() {
EnsureSpace ensure_space(this);
EMIT(0xDB);
EMIT(0xE3);
}
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@ -1953,6 +2050,16 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
}
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0x2E);
emit_sse_operand(dst, src);
}
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@ -2158,6 +2265,19 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
}
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(CpuFeatures::IsSupported(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x17);
emit_sse_operand(dst, src);
EMIT(imm8);
}
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);

9
deps/v8/src/ia32/assembler-ia32.h

@ -659,6 +659,7 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@ -673,7 +674,6 @@ class Assembler : public AssemblerBase {
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@ -926,6 +926,9 @@ class Assembler : public AssemblerBase {
void fsin();
void fptan();
void fyl2x();
void f2xm1();
void fscale();
void fninit();
void fadd(int i);
void fsub(int i);
@ -983,6 +986,7 @@ class Assembler : public AssemblerBase {
void andpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
enum RoundingMode {
kRoundToNearest = 0x0,
@ -1017,6 +1021,7 @@ class Assembler : public AssemblerBase {
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src);
void extractps(Register dst, XMMRegister src, byte imm8);
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
@ -1080,7 +1085,7 @@ class Assembler : public AssemblerBase {
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
static bool IsNop(Address addr) { return *addr == 0x90; }
static bool IsNop(Address addr);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }

50
deps/v8/src/ia32/builtins-ia32.cc

@ -1238,37 +1238,42 @@ static void ArrayNativeCode(MacroAssembler* masm,
false,
&prepare_generic_code_call);
__ IncrementCounter(counters->array_function_native(), 1);
__ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
__ pop(edi);
}
__ push(eax);
// eax: JSArray
__ push(ebx);
__ mov(ebx, Operand(esp, kPointerSize));
// ebx: argc
// edx: elements_array_end (untagged)
// esp[0]: JSArray
// esp[4]: return address
// esp[8]: last argument
// esp[4]: argc
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
// Location of the last argument
__ lea(edi, Operand(esp, 2 * kPointerSize));
int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
__ lea(edi, Operand(esp, last_arg_offset));
// Location of the first array element (Parameter fill_with_holes to
// AllocateJSArrayis false, so the FixedArray is returned in ecx).
// AllocateJSArray is false, so the FixedArray is returned in ecx).
__ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
Label has_non_smi_element;
// ebx: argc
// edx: location of the first array element
// edi: location of the last argument
// esp[0]: JSArray
// esp[4]: return address
// esp[8]: last argument
// esp[4]: argc
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
Label loop, entry;
__ mov(ecx, ebx);
__ jmp(&entry);
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(eax, &has_non_smi_element);
}
__ mov(Operand(edx, 0), eax);
__ add(edx, Immediate(kPointerSize));
__ bind(&entry);
@ -1278,13 +1283,20 @@ static void ArrayNativeCode(MacroAssembler* masm,
// Remove caller arguments from the stack and return.
// ebx: argc
// esp[0]: JSArray
// esp[4]: return address
// esp[8]: last argument
// esp[4]: argc
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
__ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
__ pop(eax);
__ pop(ebx);
__ lea(esp, Operand(esp, ebx, times_pointer_size,
last_arg_offset - kPointerSize));
__ jmp(ecx);
__ bind(&has_non_smi_element);
// Throw away the array that's only been partially constructed.
__ pop(eax);
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
__ push(ecx);
__ ret(0);
// Restore argc and constructor before running the generic code.
__ bind(&prepare_generic_code_call);

623
deps/v8/src/ia32/code-stubs-ia32.cc

@ -2938,157 +2938,263 @@ void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
// Registers are used as follows:
// edx = base
// eax = exponent
// ecx = temporary, result
CpuFeatures::Scope use_sse2(SSE2);
Label allocate_return, call_runtime;
// Load input parameters.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(eax, Operand(esp, 1 * kPointerSize));
// Save 1 in xmm3 - we need this several times later on.
__ mov(ecx, Immediate(1));
__ cvtsi2sd(xmm3, ecx);
Label exponent_nonsmi;
Label base_nonsmi;
// If the exponent is a heap number go to that specific case.
__ JumpIfNotSmi(eax, &exponent_nonsmi);
__ JumpIfNotSmi(edx, &base_nonsmi);
// Optimized version when both exponent and base are smis.
Label powi;
__ SmiUntag(edx);
__ cvtsi2sd(xmm0, edx);
__ jmp(&powi);
// exponent is smi and base is a heapnumber.
__ bind(&base_nonsmi);
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
const Register exponent = eax;
const Register base = edx;
const Register scratch = ecx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
const XMMRegister double_exponent = xmm1;
const XMMRegister double_scratch = xmm4;
Label call_runtime, done, exponent_not_smi, int_exponent;
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
__ cvtsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
__ mov(base, Operand(esp, 2 * kPointerSize));
__ mov(exponent, Operand(esp, 1 * kPointerSize));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ cmp(FieldOperand(base, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ SmiUntag(base);
__ cvtsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ movdbl(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
}
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
if (exponent_type_ != INTEGER) {
Label fast_power;
// Detect integer exponents stored as double.
__ cvttsd2si(exponent, Operand(double_exponent));
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmp(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
__ cvtsi2sd(double_scratch, exponent);
// Already ruled out NaNs for exponent.
__ ucomisd(double_exponent, double_scratch);
__ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
Label continue_sqrt, continue_rsqrt, not_plus_half;
// Test for 0.5.
// Load double_scratch with 0.5.
__ mov(scratch, Immediate(0x3F000000u));
__ movd(double_scratch, scratch);
__ cvtss2sd(double_scratch, double_scratch);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
__ j(not_equal, &not_plus_half, Label::kNear);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
// According to IEEE-754, single-precision -Infinity has the highest
// 9 bits set and the lowest 23 bits cleared.
__ mov(scratch, 0xFF800000u);
__ movd(double_scratch, scratch);
__ cvtss2sd(double_scratch, double_scratch);
__ ucomisd(double_base, double_scratch);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_sqrt, Label::kNear);
__ j(carry, &continue_sqrt, Label::kNear);
// Set result to Infinity in the special case.
__ xorps(double_result, double_result);
__ subsd(double_result, double_scratch);
__ jmp(&done);
__ bind(&continue_sqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(double_scratch, double_scratch);
__ addsd(double_scratch, double_base); // Convert -0 to +0.
__ sqrtsd(double_result, double_scratch);
__ jmp(&done);
// Test for -0.5.
__ bind(&not_plus_half);
// Load double_scratch with -0.5 by subtracting 1.
__ subsd(double_scratch, double_result);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
__ j(not_equal, &fast_power, Label::kNear);
// Calculates reciprocal of square root of base. Check for the special
// case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
// According to IEEE-754, single-precision -Infinity has the highest
// 9 bits set and the lowest 23 bits cleared.
__ mov(scratch, 0xFF800000u);
__ movd(double_scratch, scratch);
__ cvtss2sd(double_scratch, double_scratch);
__ ucomisd(double_base, double_scratch);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_rsqrt, Label::kNear);
__ j(carry, &continue_rsqrt, Label::kNear);
// Set result to 0 in the special case.
__ xorps(double_result, double_result);
__ jmp(&done);
__ bind(&continue_rsqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(double_exponent, double_exponent);
__ addsd(double_exponent, double_base); // Convert -0 to +0.
__ sqrtsd(double_exponent, double_exponent);
__ divsd(double_result, double_exponent);
__ jmp(&done);
}
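Both special cases exist because a plain square root does not match EcmaScript 15.8.2.13. A host-side demonstration of the two corner cases, assuming standard IEEE-754 behavior from std::pow and std::sqrt:

#include <cmath>
#include <cstdio>

int main() {
  double ninf = -INFINITY;
  // pow(-Inf, 0.5) must be +Inf, but sqrt(-Inf) is NaN, so the stub tests
  // for -Infinity before taking the sqrtsd shortcut.
  std::printf("%g %g\n", std::pow(ninf, 0.5), std::sqrt(ninf));
  // sqrt(-0) is -0 while the spec requires +0; adding +0 first fixes the
  // sign, which is what the xorps/addsd pair above does.
  std::printf("%g %g\n", std::sqrt(-0.0), std::sqrt(-0.0 + 0.0));
  return 0;
}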
// Optimized version of pow if exponent is a smi.
// xmm0 contains the base.
__ bind(&powi);
__ SmiUntag(eax);
// Using FPU instructions to calculate power.
Label fast_power_failed;
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), double_exponent);
__ fld_d(Operand(esp, 0)); // E
__ movdbl(Operand(esp, 0), double_base);
__ fld_d(Operand(esp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
// B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
// FYL2X calculates st(1) * log2(st(0))
__ fyl2x(); // X
__ fld(0); // X, X
__ frndint(); // rnd(X), X
__ fsub(1); // rnd(X), X-rnd(X)
__ fxch(1); // X - rnd(X), rnd(X)
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
__ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
__ fstp(1);
// Bail out to runtime in case of exceptions in the status word.
__ fnstsw_ax();
__ test_b(eax, 0x5F); // We check for all but the precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
__ movdbl(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done);
// Save exponent in base as we need to check if exponent is negative later.
// We know that base and exponent are in different registers.
__ mov(edx, eax);
__ bind(&fast_power_failed);
__ fninit();
__ add(esp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
}
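A host-side model of the x87 sequence above, assuming a positive base so log2 is defined; PowViaLog2 is an invented name, with <cmath> standing in for fyl2x, frndint, f2xm1, and fscale.

#include <cmath>
#include <cstdio>

double PowViaLog2(double base, double exponent) {
  double x = exponent * std::log2(base);  // fyl2x: X = E * log2(B)
  double r = std::nearbyint(x);           // frndint: round to nearest
  double frac = x - r;                    // fsub: keeps |frac| <= 0.5
  return std::exp2(frac) * std::exp2(r);  // f2xm1 (+1) then fscale
}

int main() {
  std::printf("%g\n", PowViaLog2(2.0, 10.0));  // 1024
  std::printf("%g\n", PowViaLog2(3.0, 0.5));   // ~1.73205
  return 0;
}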
// Calculate power with integer exponent.
__ bind(&int_exponent);
const XMMRegister double_scratch2 = double_exponent;
__ mov(scratch, exponent); // Back up exponent.
__ movsd(double_scratch, double_base); // Back up base.
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
Label no_neg;
__ cmp(eax, 0);
__ j(greater_equal, &no_neg, Label::kNear);
__ neg(eax);
Label no_neg, while_true, no_multiply;
__ test(scratch, scratch);
__ j(positive, &no_neg, Label::kNear);
__ neg(scratch);
__ bind(&no_neg);
// Load xmm1 with 1.
__ movsd(xmm1, xmm3);
Label while_true;
Label no_multiply;
__ bind(&while_true);
__ shr(eax, 1);
__ shr(scratch, 1);
__ j(not_carry, &no_multiply, Label::kNear);
__ mulsd(xmm1, xmm0);
__ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
__ mulsd(xmm0, xmm0);
__ j(not_zero, &while_true);
// base has the original value of the exponent - if the exponent is
// negative return 1/result.
__ test(edx, edx);
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ mov(ecx, Immediate(0x7FB00000));
__ movd(xmm0, ecx);
__ cvtss2sd(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
__ divsd(xmm3, xmm1);
__ movsd(xmm1, xmm3);
__ jmp(&allocate_return);
// exponent (or both) is a heapnumber - no matter what we should now work
// on doubles.
__ bind(&exponent_nonsmi);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
// Test if exponent is nan.
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
__ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
Label base_not_smi;
Label handle_special_cases;
__ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
__ SmiUntag(edx);
__ cvtsi2sd(xmm0, edx);
__ jmp(&handle_special_cases, Label::kNear);
__ bind(&base_not_smi);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ and_(ecx, HeapNumber::kExponentMask);
__ cmp(ecx, Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
// scratch has the original value of the exponent - if the exponent is
// negative, return 1/result.
__ test(exponent, exponent);
__ j(positive, &done);
__ divsd(double_scratch2, double_result);
__ movsd(double_result, double_scratch2);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ xorps(double_scratch2, double_scratch2);
__ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
// double_exponent aliased as double_scratch2 has already been overwritten
// and may not have contained the exponent value in the first place when the
// exponent is a smi. We reset it with the exponent value before bailing out.
__ j(not_equal, &done);
__ cvtsi2sd(double_exponent, exponent);
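The loop above is binary exponentiation: shift the exponent right once per iteration, multiply the result in whenever a set bit falls out, and square the base each round. A scalar sketch including the final reciprocal for negative exponents; IntPow is an invented name.

#include <cstdio>

double IntPow(double base, int exponent) {
  unsigned bits = exponent < 0 ? -static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  double result = 1.0;                            // double_result starts at 1
  for (double b = base; bits != 0; bits >>= 1) {  // shr scratch, 1
    if (bits & 1) result *= b;                    // carry set: mulsd result, b
    b *= b;                                       // mulsd double_scratch, itself
  }
  if (exponent < 0) result = 1.0 / result;        // divsd for negative exponent
  return result;
}

int main() {
  std::printf("%g %g\n", IntPow(2.0, 10), IntPow(2.0, -3));  // 1024 0.125
  return 0;
}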
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
Label not_minus_half;
// Test for -0.5.
// Load xmm2 with -0.5.
__ mov(ecx, Immediate(0xBF000000));
__ movd(xmm2, ecx);
__ cvtss2sd(xmm2, xmm2);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &not_minus_half, Label::kNear);
// Calculates reciprocal of square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ divsd(xmm3, xmm1);
__ movsd(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
__ bind(&not_minus_half);
// Load xmm2 with 0.5.
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &call_runtime);
// Calculates square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ bind(&allocate_return);
__ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
__ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
__ mov(eax, ecx);
__ ret(2 * kPointerSize);
// The stub is called from non-optimized code, which expects the result
// as a heap number in eax.
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
__ bind(&call_runtime);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(4, scratch);
__ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
__ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
// Return value is in st(0) on ia32.
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
__ movdbl(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(0);
}
}
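The two exponent fast paths above (-0.5 and 0.5) implement ECMA-mandated corner cases that a bare sqrtsd would get wrong, and the xorps/addsd pair normalizes a -0 base to +0 first, since -0 + +0 is +0 under round-to-nearest. A minimal C++ sketch of the intended semantics, for orientation only (this is not V8 code):

#include <cmath>
#include <limits>

// ECMA-262 15.8.2.13 requires Math.pow(-Infinity, 0.5) == Infinity,
// Math.pow(-Infinity, -0.5) == +0, and Math.pow(-0, 0.5) == +0, while
// sqrt(-Infinity) is NaN and sqrt(-0) is -0, so sqrt alone is not enough.
double PowHalfSketch(double base, double exponent) {
  const double kInf = std::numeric_limits<double>::infinity();
  if (exponent == 0.5) {
    if (base == -kInf) return kInf;
    return std::sqrt(base + 0.0);      // -0 + +0 == +0, so sqrt sees +0
  }
  if (exponent == -0.5) {
    if (base == -kInf) return 0.0;
    return 1.0 / std::sqrt(base + 0.0);
  }
  return std::pow(base, exponent);     // all other exponents
}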
@ -4540,7 +4646,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// megamorphic.
__ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
__ j(equal, &initialize, Label::kNear);
// MegamorphicSentinel is a root so no write-barrier is needed.
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(MegamorphicSentinel(isolate)));
__ jmp(&call, Label::kNear);
@ -4548,14 +4655,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
__ mov(ecx, edi);
__ RecordWriteField(ebx,
JSGlobalPropertyCell::kValueOffset,
ecx,
edx,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET, // Cells are rescanned.
OMIT_SMI_CHECK);
// No need for a write barrier here - cells are rescanned.
__ bind(&call);
}
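Both comments above elide the write barrier for invariant reasons rather than value-type reasons: the megamorphic sentinel (undefined) is immortal and immovable, and global property cells are rescanned wholesale by the collector. A hedged sketch of the general rule (the predicate names are illustrative, not V8 API):

// Sketch: a store needs a write barrier only when it may create a
// pointer the generational/incremental GC would otherwise not see.
// Immortal immovable values never move, and stores into containers the
// GC always rescans (e.g. global property cells) are picked up anyway,
// so both cases may skip RecordWriteField.
bool StoreNeedsWriteBarrierSketch(bool value_is_immortal_immovable,
                                  bool target_is_always_rescanned) {
  return !value_is_immortal_immovable && !target_is_always_rescanned;
}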
@ -4587,6 +4687,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// non-function case.
__ mov(ebx, Operand(esp, 0));
__ mov(ebx, Operand(ebx, 1));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write barrier is needed.
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(MegamorphicSentinel(isolate)));
}
@ -5991,20 +6093,23 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(edx, &runtime);
__ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
__ j(equal, &return_eax);
Label not_original_string;
__ j(not_equal, &not_original_string, Label::kNear);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&not_original_string);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked up in the symbol table.
__ SmiUntag(ecx); // Result length is no longer smi.
__ cmp(ecx, 2);
__ cmp(ecx, Immediate(Smi::FromInt(2)));
__ j(greater, &result_longer_than_two);
__ j(less, &runtime);
// Sub string of length 2 requested.
// eax: string
// ebx: instance type
// ecx: sub string length (value is 2)
// ecx: sub string length (smi, value is 2)
// edx: from index (smi)
__ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
@ -6019,6 +6124,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, ebx, ecx, eax, edx, edi,
&make_two_character_string, &make_two_character_string);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&make_two_character_string);
@ -6026,55 +6132,61 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ Set(ecx, Immediate(2));
__ Set(ecx, Immediate(Smi::FromInt(2)));
__ mov(edx, Operand(esp, 2 * kPointerSize)); // Load index.
__ bind(&result_longer_than_two);
// eax: string
// ebx: instance type
// ecx: sub string length (smi)
// edx: from index (smi)
// Deal with different string types: update the index if necessary
// and put the underlying string into edi.
Label underlying_unpacked, sliced_string, seq_or_external_string;
// If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ test(ebx, Immediate(kIsIndirectStringMask));
__ j(zero, &seq_or_external_string, Label::kNear);
Factory* factory = masm->isolate()->factory();
__ test(ebx, Immediate(kSlicedNotConsMask));
__ j(not_zero, &sliced_string, Label::kNear);
// Cons string. Check whether it is flat, then fetch first part.
// Flat cons strings have an empty second part.
__ cmp(FieldOperand(eax, ConsString::kSecondOffset),
factory->empty_string());
__ j(not_equal, &runtime);
__ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
// Update instance type.
__ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&sliced_string);
// Sliced string. Fetch parent and adjust start index by offset.
__ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
__ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
// Update instance type.
__ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&seq_or_external_string);
// Sequential or external string. Just move string to the expected register.
__ mov(edi, eax);
__ bind(&underlying_unpacked);
if (FLAG_string_slices) {
Label copy_routine;
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyway.
STATIC_ASSERT(2 < SlicedString::kMinLength);
__ jmp(&copy_routine);
__ bind(&result_longer_than_two);
// eax: string
// ebx: instance type
// ecx: sub string length
// edx: from index (smi)
Label allocate_slice, sliced_string, seq_or_external_string;
__ cmp(ecx, SlicedString::kMinLength);
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ test(ebx, Immediate(kIsIndirectStringMask));
__ j(zero, &seq_or_external_string, Label::kNear);
Factory* factory = masm->isolate()->factory();
__ test(ebx, Immediate(kSlicedNotConsMask));
__ j(not_zero, &sliced_string, Label::kNear);
// Cons string. Check whether it is flat, then fetch first part.
__ cmp(FieldOperand(eax, ConsString::kSecondOffset),
factory->empty_string());
__ j(not_equal, &runtime);
__ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
__ jmp(&allocate_slice, Label::kNear);
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
__ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
__ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
__ jmp(&allocate_slice, Label::kNear);
__ bind(&seq_or_external_string);
// Sequential or external string. Just move string to the correct register.
__ mov(edi, eax);
__ bind(&allocate_slice);
// edi: underlying subject string
// ebx: instance type of original subject string
// edx: offset
// ecx: length
// edx: adjusted start index (smi)
// ecx: length (smi)
__ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
// type including the string encoding because we simply rely on the info
// provided by the original string. It does not matter if the original
@ -6091,27 +6203,50 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
__ bind(&set_slice_header);
__ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
__ SmiTag(ecx);
__ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
__ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
__ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ jmp(&return_eax);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&copy_routine);
} else {
__ bind(&result_longer_than_two);
}
// eax: string
// ebx: instance type
// ecx: result string length
// Check for a flat ascii string.
Label non_ascii_flat;
__ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
// edi: underlying subject string
// ebx: instance type of original subject string
// edx: adjusted start index (smi)
// ecx: length (smi)
// The subject string can only be external or sequential string of either
// encoding at this point.
Label two_byte_sequential, runtime_drop_two, sequential_string;
STATIC_ASSERT(kExternalStringTag != 0);
STATIC_ASSERT(kSeqStringTag == 0);
__ test_b(ebx, kExternalStringTag);
__ j(zero, &sequential_string);
// Handle external string.
Label ascii_external, done;
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
__ test_b(ebx, kShortExternalStringMask);
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
// Stash away (adjusted) index and (underlying) string.
__ push(edx);
__ push(edi);
__ SmiUntag(ecx);
STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
__ test_b(ebx, kStringEncodingMask);
__ j(zero, &two_byte_sequential);
// Allocate the result.
__ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
// Sequential ascii string. Allocate the result.
__ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@ -6120,11 +6255,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edi, eax);
__ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
__ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
__ pop(esi);
__ pop(ebx);
__ SmiUntag(ebx);
__ add(esi, ebx);
__ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
// eax: result string
// ecx: result length
@ -6133,20 +6267,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
__ mov(esi, edx); // Restore esi.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&non_ascii_flat);
// eax: string
// ebx: instance type & kStringRepresentationMask | kStringEncodingMask
// ecx: result string length
// Check for flat two byte string
__ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
__ j(not_equal, &runtime);
// Allocate the result.
__ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
__ bind(&two_byte_sequential);
// Sequential two-byte string. Allocate the result.
__ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@ -6156,14 +6282,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
__ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
__ pop(esi);
__ pop(ebx);
// Since from is a smi it is already multiplied by two, which matches the
// size of a two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(esi, ebx);
__ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
// eax: result string
// ecx: result length
@ -6172,11 +6297,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
__ mov(esi, edx); // Restore esi.
__ bind(&return_eax);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
// Drop the values pushed on the stack before the tail call.
__ bind(&runtime_drop_two);
__ Drop(2);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
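Taken together, the rewritten SubStringStub first unwraps cons and sliced strings down to the underlying sequential or external storage, then either materializes a SlicedString header (parent, offset, length) or copies the characters when the result is short. A rough C++ sketch of that policy, assuming SlicedString::kMinLength is 13 as in this V8 version:

#include <cstddef>
#include <string>

// Illustration only: a slice records where the characters live instead
// of copying them; results shorter than kMinLength are copied because
// the extra indirection is not worth a three-field header.
struct SliceSketch {
  const std::string* parent;  // underlying (already unwrapped) string
  size_t offset;              // start index, adjusted for nested slices
  size_t length;
};

const size_t kMinSliceLength = 13;  // assumed SlicedString::kMinLength

bool MakeSubString(const std::string& parent, size_t from, size_t length,
                   SliceSketch* slice, std::string* copy) {
  if (length >= kMinSliceLength) {
    *slice = SliceSketch{&parent, from, length};  // no character copy
    return true;
  }
  *copy = parent.substr(from, length);  // short result: copy characters
  return false;
}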
@ -6568,33 +6695,45 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Save the registers.
__ pop(ecx);
__ push(edx);
__ push(eax);
__ push(ecx);
void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ cmp(ecx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ cmp(ebx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ sub(eax, edx);
__ ret(0);
__ bind(&miss);
GenerateMiss(masm);
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edx);
__ push(edx); // Preserve edx and eax.
__ push(eax);
__ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op_)));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
__ pop(eax);
__ pop(edx);
}
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
// Restore registers.
__ pop(ecx);
__ pop(eax);
__ pop(edx);
__ push(ecx);
// Do a tail call to the rewritten stub.
__ jmp(edi);
}
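GenerateKnownObjects above is the machine form of a narrow guard: ANDing the two tagged values leaves the low bit set only if both are heap objects (the smi tag is 0), both maps must equal the one map this stub was specialized for, and equality then reduces to pointer identity via sub eax, edx. A sketch under those assumptions:

#include <cstdint>

// Sketch of the KNOWN_OBJECTS fast path. known_map is recorded by
// CompareIC::UpdateCaches through stub.set_known_map(...); a zero
// result means "equal", mirroring the sub eax, edx convention.
bool KnownObjectsCompareSketch(uintptr_t lhs, uintptr_t rhs,
                               const void* lhs_map, const void* rhs_map,
                               const void* known_map, intptr_t* result) {
  const uintptr_t kHeapObjectTag = 1;  // low bit set on heap objects
  if (((lhs & rhs) & kHeapObjectTag) == 0) return false;   // a smi: miss
  if (lhs_map != known_map || rhs_map != known_map) return false;  // miss
  *result = static_cast<intptr_t>(lhs) - static_cast<intptr_t>(rhs);
  return true;  // on a miss the stub falls through to GenerateMiss
}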

4
deps/v8/src/ia32/debug-ia32.cc

@ -258,9 +258,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
__ nop();
}
__ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}

8
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -231,8 +231,8 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
*(call_target_address - 3) = 0x66; // 2 byte nop part 1
*(call_target_address - 2) = 0x90; // 2 byte nop part 2
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@ -250,8 +250,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
ASSERT(*(call_target_address - 3) == 0x90 && // nop
*(call_target_address - 2) == 0x90 && // nop
ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
*(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
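For orientation, the patch toggles the two bytes in front of the call: the jae that skips the stack-guard call becomes a single two-byte nop (0x66 0x90) so the call is always taken, and reverting restores the branch. Using one 66 90 instead of two 90s keeps the padding a single instruction, matching the nop forms the disassembler learns to decode elsewhere in this commit. A byte-level sketch (not the Deoptimizer API):

#include <cstdint>

// Layout at the patch site (see the ASSERTs above):
//   [-3] 0x73 jae         [-3] 0x66  \ two-byte nop
//   [-2] 0x07 offset <->  [-2] 0x90  /
//   [-1] 0xe8 call        [-1] 0xe8  call
void PatchStackCheckSketch(uint8_t* call_target_address) {
  call_target_address[-3] = 0x66;  // 2 byte nop part 1
  call_target_address[-2] = 0x90;  // 2 byte nop part 2
}

void RevertStackCheckSketch(uint8_t* call_target_address) {
  call_target_address[-3] = 0x73;  // jae
  call_target_address[-2] = 0x07;  // offset
}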

47
deps/v8/src/ia32/disasm-ia32.cc

@ -763,10 +763,13 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xEB: mnem = "fldpi"; break;
case 0xED: mnem = "fldln2"; break;
case 0xEE: mnem = "fldz"; break;
case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
case 0xFC: mnem = "frndint"; break;
case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
default: UnimplementedInstruction();
@ -788,6 +791,8 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
has_register = true;
} else if (modrm_byte == 0xE2) {
mnem = "fclex";
} else if (modrm_byte == 0xE3) {
mnem = "fninit";
} else {
UnimplementedInstruction();
}
@ -987,7 +992,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x0F:
{ byte f0byte = *(data+1);
{ byte f0byte = data[1];
const char* f0mnem = F0Mnem(f0byte);
if (f0byte == 0x18) {
int mod, regop, rm;
@ -995,6 +1000,25 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
const char* suffix[] = {"nta", "1", "2", "3"};
AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
data += PrintRightOperand(data);
} else if (f0byte == 0x1F && data[2] == 0) {
AppendToBuffer("nop"); // 3 byte nop.
data += 3;
} else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
AppendToBuffer("nop"); // 4 byte nop.
data += 4;
} else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
data[4] == 0) {
AppendToBuffer("nop"); // 5 byte nop.
data += 5;
} else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
data[4] == 0 && data[5] == 0 && data[6] == 0) {
AppendToBuffer("nop"); // 7 byte nop.
data += 7;
} else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
data[4] == 0 && data[5] == 0 && data[6] == 0 &&
data[7] == 0) {
AppendToBuffer("nop"); // 8 byte nop.
data += 8;
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
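The 0F 1F escapes added above are the canonical multi-byte NOP encodings (0F 1F with an ignored modrm/SIB memory operand), which is presumably why only these exact byte patterns are matched; the 6-byte form (with a 0x66 prefix) arrives through the prefix case in the next hunk. A compact sketch of the recognizer:

#include <cstdint>

// Returns the length of a canonical 0F 1F multi-byte nop at d, or 0.
// The patterns mirror the checks above:
//   0F 1F 00                 3-byte  nop [eax]
//   0F 1F 40 00              4-byte  nop [eax+0]
//   0F 1F 44 00 00           5-byte  nop [eax+eax*1+0]
//   0F 1F 80 00 00 00 00     7-byte  nop [eax+disp32]
//   0F 1F 84 00 00 00 00 00  8-byte  nop [eax+eax*1+disp32]
int MultiByteNopLength(const uint8_t* d) {
  if (d[0] != 0x0F || d[1] != 0x1F) return 0;
  if (d[2] == 0x00) return 3;
  if (d[2] == 0x40 && d[3] == 0) return 4;
  if (d[2] == 0x44 && d[3] == 0 && d[4] == 0) return 5;
  if (d[2] == 0x80 && d[3] == 0 && d[4] == 0 && d[5] == 0 && d[6] == 0)
    return 7;
  if (d[2] == 0x84 && d[3] == 0 && d[4] == 0 && d[5] == 0 && d[6] == 0 &&
      d[7] == 0)
    return 8;
  return 0;
}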
@ -1130,8 +1154,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x66: // prefix
data++;
if (*data == 0x8B) {
while (*data == 0x66) data++;
if (*data == 0xf && data[1] == 0x1f) {
AppendToBuffer("nop"); // 0x66 prefix
} else if (*data == 0x90) {
AppendToBuffer("nop"); // 0x66 prefix
} else if (*data == 0x8B) {
data++;
data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
} else if (*data == 0x89) {
@ -1185,6 +1213,16 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x17) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("extractps %s,%s,%d",
NameOfCPURegister(regop),
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
@ -1258,6 +1296,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x90) {
data++;
AppendToBuffer("nop"); // 2 byte nop.
} else if (*data == 0xF3) {
data++;
int mod, regop, rm;

6
deps/v8/src/ia32/full-codegen-ia32.cc

@ -2883,7 +2883,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(SSE2)) {
MathPowStub stub;
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
@ -3787,7 +3787,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->true_value());
} else {
__ push(isolate()->factory()->true_value());
__ Push(isolate()->factory()->true_value());
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
@ -3795,7 +3795,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->false_value());
} else {
__ push(isolate()->factory()->false_value());
__ Push(isolate()->factory()->false_value());
}
__ bind(&done);
}

3
deps/v8/src/ia32/ic-ia32.cc

@ -1625,6 +1625,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
if (state == KNOWN_OBJECTS) {
stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
}
rewritten = stub.GetCode();
}
set_target(*rewritten);

276
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -341,6 +341,13 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
return literal;
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
return value->Number();
@ -518,7 +525,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
} else if (context->IsConstantOperand()) {
Handle<Object> literal =
chunk_->LookupLiteral(LConstantOperand::cast(context));
LoadHeapObject(esi, Handle<Context>::cast(literal));
__ LoadHeapObject(esi, Handle<Context>::cast(literal));
} else {
UNREACHABLE();
}
@ -1219,7 +1226,7 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
if (handle->IsHeapObject()) {
LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
__ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
__ Set(reg, Immediate(handle));
}
@ -2030,7 +2037,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// the stub.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
__ mov(InstanceofStub::right(), Immediate(instr->function()));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
@ -2137,20 +2144,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ mov(FieldOperand(object, offset), value);
// Cells are always in the remembered set.
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteField(object,
offset,
value,
address,
kSaveFPRegs,
OMIT_REMEMBERED_SET,
check_needed);
}
// Cells are always rescanned, so no write barrier here.
}
@ -2171,13 +2165,22 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ mov(ContextOperand(context, instr->slot_index()), value);
Operand target = ContextOperand(context, instr->slot_index());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
__ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
@ -2229,7 +2232,24 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
LoadHeapObject(result, Handle<HeapObject>::cast(function));
__ LoadHeapObject(result, function);
}
}
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
if (object->IsSmi()) {
__ Push(Handle<Smi>::cast(object));
} else {
__ PushHeapObject(Handle<HeapObject>::cast(object));
}
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
__ push(ToOperand(operand));
}
}
@ -2639,17 +2659,13 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
__ push(ToImmediate(argument));
} else {
__ push(ToOperand(argument));
}
EmitPushTaggedOperand(argument);
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
LoadHeapObject(result, instr->hydrogen()->closure());
__ LoadHeapObject(result, instr->hydrogen()->closure());
}
@ -2719,7 +2735,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(edi, instr->function());
__ LoadHeapObject(edi, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@ -2893,12 +2909,12 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_half);
// input = input + 0.5
__ addsd(input_reg, xmm_scratch);
// xmm_scratch = input + 0.5
__ addsd(xmm_scratch, input_reg);
// Compute Math.floor(value + 0.5).
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg));
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
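The rewritten sequence adds 0.5 into xmm_scratch rather than into input_reg, so the operation no longer clobbers its input (which later uses of the value may still need). In C terms the >= 0.5 path computes roughly the following; the real code relies on cvttsd2si returning 0x80000000 on overflow, while a C++ cast is undefined out of range, so this sketch range-checks first:

#include <cstdint>

// Sketch of the Math.round fast path above for inputs >= 0.5 (the
// below_half path is handled separately).
bool RoundHalfUpSketch(double input, int32_t* result) {
  double biased = input + 0.5;               // xmm_scratch; input intact
  if (biased >= 2147483648.0) return false;  // overflow -> deoptimize
  *result = static_cast<int32_t>(biased);    // truncation, as cvttsd2si
  return true;
}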
@ -2934,72 +2950,67 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
// Math.sqrt(-Infinity) == NaN
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, single-precision
// -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
__ mov(scratch, 0xFF800000);
__ movd(xmm_scratch, scratch);
__ cvtss2sd(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &sqrt, Label::kNear);
__ j(carry, &sqrt, Label::kNear);
// If input is -Infinity, return Infinity.
__ xorps(input_reg, input_reg);
__ subsd(input_reg, xmm_scratch);
__ jmp(&done, Label::kNear);
// Square root.
__ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
__ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
// It is safe to use ebx directly since the instruction is marked
// as a call.
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
4);
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
ToDoubleRegister(instr->InputAt(1)).is(xmm1));
ASSERT(!instr->InputAt(1)->IsRegister() ||
ToRegister(instr->InputAt(1)).is(eax));
ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(eax, &no_deopt);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
// It is safe to use ebx directly since the instruction is marked
// as a call.
ASSERT(!ToRegister(right).is(ebx));
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
__ CallCFunction(ExternalReference::power_double_int_function(isolate()),
4);
MathPowStub stub(MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsTagged());
CpuFeatures::Scope scope(SSE2);
Register right_reg = ToRegister(right);
Label non_smi, call;
__ JumpIfNotSmi(right_reg, &non_smi);
__ SmiUntag(right_reg);
__ cvtsi2sd(result_reg, Operand(right_reg));
__ jmp(&call);
__ bind(&non_smi);
// It is safe to use ebx directly since the instruction is marked
// as a call.
ASSERT(!right_reg.is(ebx));
__ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
DeoptimizeIf(not_equal, instr->environment());
__ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
__ bind(&call);
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
4);
ASSERT(exponent_type.IsDouble());
MathPowStub stub(MathPowStub::DOUBLE);
__ CallStub(&stub);
}
// Return value is in st(0) on ia32.
// Store it into the (fixed) result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
__ movdbl(result_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
}
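DoPower thus shrinks to a guard plus a stub call: a tagged exponent must already be a smi or a heap number (anything else deoptimizes), and the MathPowStub variant is picked from the exponent's representation, with the registers pinned by the Lithium constraints as the ASSERTs state (base in xmm2, result in xmm3, exponent in xmm1 or eax). Schematically, under those assumptions:

// Sketch of the dispatch above; kTagged may deoptimize first.
enum class ExponentKind { kTagged, kInteger, kDouble };

bool SelectPowStub(ExponentKind kind, bool exponent_is_smi,
                   bool exponent_is_heap_number, const char** stub) {
  if (kind == ExponentKind::kTagged &&
      !exponent_is_smi && !exponent_is_heap_number) {
    return false;  // DeoptimizeIf(not_equal, ...)
  }
  *stub = kind == ExponentKind::kTagged  ? "MathPowStub(TAGGED)"
        : kind == ExponentKind::kInteger ? "MathPowStub(INTEGER)"
                                         : "MathPowStub(DOUBLE)";
  return true;
}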
@ -3072,9 +3083,6 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSqrt:
DoMathSqrt(instr);
break;
case kMathPowHalf:
DoMathPowHalf(instr);
break;
case kMathCos:
DoMathCos(instr);
break;
@ -3159,7 +3167,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(edi, instr->target());
__ LoadHeapObject(edi, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@ -3524,16 +3532,8 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
if (instr->left()->IsConstantOperand()) {
__ push(ToImmediate(instr->left()));
} else {
__ push(ToOperand(instr->left()));
}
if (instr->right()->IsConstantOperand()) {
__ push(ToImmediate(instr->right()));
} else {
__ push(ToOperand(instr->right()));
}
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@ -4032,7 +4032,7 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
__ cmp(operand, instr->hydrogen()->target());
__ cmp(operand, target);
}
DeoptimizeIf(not_equal, instr->environment());
}
@ -4096,17 +4096,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
__ mov(result, Operand::Cell(cell));
} else {
__ mov(result, object);
}
}
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
@ -4114,7 +4103,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
LoadHeapObject(reg, current_prototype);
__ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@ -4124,7 +4113,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
LoadHeapObject(reg, current_prototype);
__ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
@ -4136,17 +4125,32 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
Heap* heap = isolate()->heap();
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
// Deopt if the array literal boilerplate's ElementsKind differs from the
// expected one. The check is unnecessary if the boilerplate has already
// been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
// but the following masking takes care of that anyway.
__ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(ebx, Map::kElementsKindMask);
__ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
DeoptimizeIf(not_equal, instr->environment());
}
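Note that the guard above reads the kind out of bit_field2 without shifting it down: it masks the byte and compares against the expected kind shifted up, saving an instruction. The equivalent check in C, with names standing in for the Map constants:

#include <cstdint>

// Sketch of the deopt guard above. kMask and kShift stand in for
// Map::kElementsKindMask and Map::kElementsKindShift.
bool BoilerplateKindMatches(uint32_t bit_field2, uint32_t expected_kind,
                            uint32_t kMask, uint32_t kShift) {
  return (bit_field2 & kMask) == (expected_kind << kShift);
}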
// Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_elements));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
__ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@ -4162,9 +4166,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@ -4179,7 +4183,7 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
ASSERT(!result.is(ecx));
if (FLAG_debug_code) {
LoadHeapObject(ecx, object);
__ LoadHeapObject(ecx, object);
__ cmp(source, ecx);
__ Assert(equal, "Unexpected object literal boilerplate");
}
@ -4209,10 +4213,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
LoadHeapObject(source, value_object);
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
} else {
__ mov(FieldOperand(result, total_offset), Immediate(value));
@ -4237,7 +4241,7 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
__ bind(&allocated);
int offset = 0;
LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
__ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
ASSERT_EQ(size, offset);
}
@ -4359,11 +4363,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->InputAt(1);
if (input->IsConstantOperand()) {
__ push(ToImmediate(input));
} else {
__ push(ToOperand(input));
}
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@ -4487,9 +4487,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
while (padding_size-- > 0) {
__ nop();
}
__ Nop(padding_size);
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
@ -4513,11 +4511,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
__ push(ToOperand(obj));
if (key->IsConstantOperand()) {
__ push(ToImmediate(key));
} else {
__ push(ToOperand(key));
}
EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
@ -4614,16 +4608,8 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoIn(LIn* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
__ push(ToImmediate(key));
} else {
__ push(ToOperand(key));
}
if (obj->IsConstantOperand()) {
__ push(ToImmediate(obj));
} else {
__ push(ToOperand(obj));
}
EmitPushTaggedOperand(key);
EmitPushTaggedOperand(obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());

8
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -207,8 +207,6 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
CallKind call_kind);
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@ -227,6 +225,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
@ -239,7 +238,6 @@ class LCodeGen BASE_EMBEDDED {
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
@ -306,6 +304,10 @@ class LCodeGen BASE_EMBEDDED {
void EnsureSpaceForLazyDeopt();
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;

24
deps/v8/src/ia32/lithium-ia32.cc

@ -298,6 +298,12 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
void LMathPowHalf::PrintDataTo(StringStream* stream) {
stream->Add("/pow_half ");
InputAt(0)->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add("[%d]", slot_index());
@ -1184,6 +1190,11 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
if (op == kMathPowHalf) {
LOperand* temp = TempRegister();
LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
return DefineSameAsFirst(result);
}
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
switch (op) {
@ -1195,8 +1206,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathPowHalf:
return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
@ -1437,9 +1446,9 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
// We need to use fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), xmm1);
LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), xmm2) :
UseFixedDouble(instr->right(), xmm1) :
UseFixed(instr->right(), eax);
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
@ -1866,7 +1875,9 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LLoadContextSlot(context));
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@ -1881,7 +1892,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
value = UseRegister(instr->value());
temp = NULL;
}
return new(zone()) LStoreContextSlot(context, value, temp);
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}

19
deps/v8/src/ia32/lithium-ia32.h

@ -123,6 +123,7 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MathPowHalf) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@ -582,6 +583,24 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
};
class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
public:
LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
virtual void PrintDataTo(StringStream* stream);
};
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {

32
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -755,7 +755,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
// Push the state and the code object.
push(Immediate(state));
push(CodeObject());
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@ -2022,7 +2022,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and set up the context.
mov(edi, Immediate(function));
LoadHeapObject(edi, function);
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@ -2151,6 +2151,29 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
mov(result, Operand::Cell(cell));
} else {
mov(result, object);
}
}
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
push(Operand::Cell(cell));
} else {
Push(object);
}
}
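LoadHeapObject and PushHeapObject exist because a handle's object may still be in new space, where every scavenge moves it, so a raw pointer embedded in generated code would go stale. Instead the code embeds a JSGlobalPropertyCell, which the GC keeps current, and loads through it; old-space objects are embedded directly since relocation rewrites those pointers. A minimal sketch of the indirection:

#include <cstdint>

struct CellSketch { uintptr_t value; };  // stands in for the property cell

// Mirrors the branch above: indirect through a cell for new-space
// objects, embed the pointer directly otherwise.
uintptr_t ResolveEmbeddedHandle(bool in_new_space, uintptr_t direct,
                                const CellSketch* cell) {
  return in_new_space ? cell->value : direct;
}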
void MacroAssembler::Ret() {
ret(0);
}
@ -2182,11 +2205,6 @@ void MacroAssembler::Move(Register dst, Register src) {
}
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, value);
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));

7
deps/v8/src/ia32/macro-assembler-ia32.h

@ -237,6 +237,9 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void LoadHeapObject(Register result, Handle<HeapObject> object);
void PushHeapObject(Handle<HeapObject> object);
// ---------------------------------------------------------------------------
// JavaScript invokes
@ -718,10 +721,8 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
void Move(Register target, Handle<Object> value);
// Push a handle value.
void Push(Handle<Object> handle) { push(handle); }
void Push(Handle<Object> handle) { push(Immediate(handle)); }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());

26
deps/v8/src/ia32/stub-cache-ia32.cc

@ -429,7 +429,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ mov(edi, Immediate(function));
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments.
@ -1025,7 +1025,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@ -1036,7 +1036,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ mov(eax, value);
__ LoadHeapObject(eax, value);
__ ret(0);
}
@ -2522,23 +2522,9 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ mov(cell_operand, eax);
Label done;
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done);
__ mov(ecx, eax);
__ lea(edx, cell_operand);
// Cells are always in the remembered set.
__ RecordWrite(ebx, // Object.
edx, // Address.
ecx, // Value.
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// No write barrier here, because cells are always rescanned.
// Return the value (register eax).
__ bind(&done);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
@ -2729,7 +2715,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
@ -2891,7 +2877,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Object> value) {
Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver

4
deps/v8/src/ic-inl.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -36,7 +36,7 @@ namespace v8 {
namespace internal {
Address IC::address() {
Address IC::address() const {
// Get the address of the call.
Address result = pc() - Assembler::kCallTargetAddressOffset;

89
deps/v8/src/ic.cc

@ -40,13 +40,13 @@ namespace v8 {
namespace internal {
#ifdef DEBUG
static char TransitionMarkFromState(IC::State state) {
char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
case UNINITIALIZED: return '0';
case PREMONOMORPHIC: return 'P';
case MONOMORPHIC: return '1';
case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
case MEGAMORPHIC: return 'N';
case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
// We never see the debugger states here, because the state is
// computed from the original code - not the patched code. Let
@ -80,19 +80,7 @@ void IC::TraceIC(const char* type,
raw_frame = it.frame();
}
}
if (raw_frame->is_java_script()) {
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
Code* js_code = frame->unchecked_code();
// Find the function on the stack and both the active code for the
// function and the original code.
JSFunction* function = JSFunction::cast(frame->function());
function->PrintName();
int code_offset =
static_cast<int>(address() - js_code->instruction_start());
PrintF("+%d", code_offset);
} else {
PrintF("<unknown>");
}
JavaScriptFrame::PrintTop(stdout, false, true);
PrintF(" (%c->%c)",
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state));
@ -100,13 +88,23 @@ void IC::TraceIC(const char* type,
PrintF("]\n");
}
}
#endif // DEBUG
#define TRACE_GENERIC_IC(type, reason) \
do { \
if (FLAG_trace_ic) { \
PrintF("[%s patching generic stub in ", type); \
JavaScriptFrame::PrintTop(stdout, false, true); \
PrintF(" (%s)]\n", reason); \
} \
} while (false)
#else
#define TRACE_GENERIC_IC(type, reason)
#endif // DEBUG
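The do { ... } while (false) wrapper makes the multi-statement macro expand to exactly one statement, so TRACE_GENERIC_IC composes safely with unbraced if/else and still demands its trailing semicolon. The shape in isolation (tracing_enabled and Log are placeholders, not V8 names):

extern bool tracing_enabled;   // placeholder
void Log(const char* msg);     // placeholder

#define TRACE_EXAMPLE(msg) \
  do {                     \
    if (tracing_enabled) { \
      Log(msg);            \
    }                      \
  } while (false)

// if (rare_case) TRACE_EXAMPLE("hit"); else Fallback();
// parses as intended; with a bare if inside the macro, the trailing
// semicolon would end the statement and orphan the else.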
#define TRACE_IC(type, name, old_state, new_target) \
ASSERT((TraceIC(type, name, old_state, new_target), true))
IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
ASSERT(isolate == Isolate::Current());
// To improve the performance of the (much used) IC code, we unfold
@ -137,7 +135,7 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Address IC::OriginalCodeAddress() {
Address IC::OriginalCodeAddress() const {
HandleScope scope;
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
@ -914,7 +912,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
name, receiver, holder, lookup->GetFieldIndex());
break;
case CONSTANT_FUNCTION: {
Handle<Object> constant(lookup->GetConstantFunction());
Handle<JSFunction> constant(lookup->GetConstantFunction());
code = isolate()->stub_cache()->ComputeLoadConstant(
name, receiver, holder, constant);
break;
@ -1123,6 +1121,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
}
}
} else {
TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
}
if (!stub.is_null()) set_target(*stub);
}
@ -1163,7 +1163,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
name, receiver, holder, lookup->GetFieldIndex());
break;
case CONSTANT_FUNCTION: {
Handle<Object> constant(lookup->GetConstantFunction());
Handle<JSFunction> constant(lookup->GetConstantFunction());
code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
name, receiver, holder, constant);
break;
@ -1473,6 +1473,7 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != NORMAL) {
TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
return generic_stub;
}
@ -1494,12 +1495,14 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
TRACE_GENERIC_IC("KeyedIC", "same map added twice");
return generic_stub;
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
return generic_stub;
}
@ -1685,6 +1688,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
}
stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
}
} else {
TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
}
}
if (!stub.is_null()) set_target(*stub);
@ -2315,6 +2320,7 @@ const char* CompareIC::GetStateName(State state) {
case SMIS: return "SMIS";
case HEAP_NUMBERS: return "HEAP_NUMBERS";
case OBJECTS: return "OBJECTS";
case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
case SYMBOLS: return "SYMBOLS";
case STRINGS: return "STRINGS";
case GENERIC: return "GENERIC";
@ -2329,19 +2335,38 @@ CompareIC::State CompareIC::TargetState(State state,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
return GENERIC;
switch (state) {
case UNINITIALIZED:
if (x->IsSmi() && y->IsSmi()) return SMIS;
if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
if (!Token::IsEqualityOp(op_)) return GENERIC;
if (x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
if (x->IsString() && y->IsString()) return STRINGS;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
Handle<JSObject>::cast(y)->map() &&
Token::IsEqualityOp(op_)) {
return KNOWN_OBJECTS;
} else {
return OBJECTS;
}
}
return GENERIC;
case SMIS:
return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
? HEAP_NUMBERS
: GENERIC;
case SYMBOLS:
ASSERT(Token::IsEqualityOp(op_));
return x->IsString() && y->IsString() ? STRINGS : GENERIC;
case HEAP_NUMBERS:
case STRINGS:
case OBJECTS:
case KNOWN_OBJECTS:
case GENERIC:
return GENERIC;
}
if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
if (state == UNINITIALIZED &&
x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
if ((state == UNINITIALIZED || state == SYMBOLS) &&
x->IsString() && y->IsString()) return STRINGS;
if (state == UNINITIALIZED &&
x->IsJSObject() && y->IsJSObject()) return OBJECTS;
UNREACHABLE();
return GENERIC;
}
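Rewriting TargetState as an exhaustive switch makes the transition lattice explicit and monotone: a specialized state can only widen toward GENERIC, never move sideways. Reading the cases above off directly:

UNINITIALIZED -> SMIS | HEAP_NUMBERS | SYMBOLS | STRINGS |
                 KNOWN_OBJECTS | OBJECTS | GENERIC
SMIS          -> HEAP_NUMBERS (only with inlined smi code) | GENERIC
SYMBOLS       -> STRINGS | GENERIC
HEAP_NUMBERS, STRINGS, OBJECTS, KNOWN_OBJECTS, GENERIC -> GENERIC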

27
deps/v8/src/ic.h

@ -91,10 +91,13 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
IC(FrameDepth depth, Isolate* isolate);
virtual ~IC() {}
// Get the call-site target; used for determining the state.
Code* target() { return GetTargetAtAddress(address()); }
inline Address address();
Code* target() const { return GetTargetAtAddress(address()); }
inline Address address() const;
virtual bool IsGeneric() const { return false; }
// Compute the current IC state based on the target stub, receiver and name.
static State StateFrom(Code* target, Object* receiver, Object* name);
@ -139,13 +142,15 @@ class IC {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Computes the address in the original code when the code running is
// containing break points (calls to DebugBreakXXX builtins).
Address OriginalCodeAddress();
Address OriginalCodeAddress() const;
#endif
// Set the call-site target.
void set_target(Code* code) { SetTargetAtAddress(address(), code); }
#ifdef DEBUG
char TransitionMarkFromState(IC::State state);
void TraceIC(const char* type,
Handle<Object> name,
State old_state,
@ -452,6 +457,10 @@ class KeyedLoadIC: public KeyedIC {
bool is_js_array,
ElementsKind elements_kind);
virtual bool IsGeneric() const {
return target() == *generic_stub();
}
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
@ -477,7 +486,7 @@ class KeyedLoadIC: public KeyedIC {
Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
Handle<Code> generic_stub() {
Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
Handle<Code> pre_monomorphic_stub() {
@ -595,6 +604,11 @@ class KeyedStoreIC: public KeyedIC {
bool is_js_array,
ElementsKind elements_kind);
virtual bool IsGeneric() const {
return target() == *generic_stub() ||
target() == *generic_stub_strict();
}
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
@ -632,10 +646,10 @@ class KeyedStoreIC: public KeyedIC {
Handle<Code> megamorphic_stub_strict() {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
}
Handle<Code> generic_stub() {
Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
Handle<Code> generic_stub_strict() {
Handle<Code> generic_stub_strict() const {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
}
Handle<Code> non_strict_arguments_stub() {
@ -710,6 +724,7 @@ class CompareIC: public IC {
SYMBOLS,
STRINGS,
OBJECTS,
KNOWN_OBJECTS,
GENERIC
};

149
deps/v8/src/mark-compact.cc

@ -619,8 +619,7 @@ class CodeFlusher {
}
void AddCandidate(JSFunction* function) {
ASSERT(function->unchecked_code() ==
function->unchecked_shared()->unchecked_code());
ASSERT(function->code() == function->shared()->code());
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
@ -640,15 +639,15 @@ class CodeFlusher {
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
SharedFunctionInfo* shared = candidate->unchecked_shared();
SharedFunctionInfo* shared = candidate->shared();
Code* code = shared->unchecked_code();
Code* code = shared->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
candidate->set_code(shared->unchecked_code());
candidate->set_code(shared->code());
}
// We are in the middle of a GC cycle so the write barrier in the code
@ -674,7 +673,7 @@ class CodeFlusher {
next_candidate = GetNextCandidate(candidate);
SetNextCandidate(candidate, NULL);
Code* code = candidate->unchecked_code();
Code* code = candidate->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
candidate->set_code(lazy_compile);
@ -702,7 +701,7 @@ class CodeFlusher {
static SharedFunctionInfo** GetNextCandidateField(
SharedFunctionInfo* candidate) {
Code* code = candidate->unchecked_code();
Code* code = candidate->code();
return reinterpret_cast<SharedFunctionInfo**>(
code->address() + Code::kNextCodeFlushingCandidateOffset);
}
@ -884,8 +883,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
// Please note targets for cleared inline caches do not have to be
// marked since they are contained in HEAP->non_monomorphic_cache().
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
} else {
if (FLAG_cleanup_code_caches_at_gc &&
@ -894,9 +891,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
target->has_function_cache()) {
CallFunctionStub::Clear(heap, rinfo->pc());
}
MarkBit code_mark = Marking::MarkBitFrom(target);
heap->mark_compact_collector()->MarkObject(target, code_mark);
}
MarkBit code_mark = Marking::MarkBitFrom(target);
heap->mark_compact_collector()->MarkObject(target, code_mark);
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
}
@ -1037,12 +1035,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
inline static bool IsCompiled(JSFunction* function) {
return function->unchecked_code() !=
return function->code() !=
function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
inline static bool IsCompiled(SharedFunctionInfo* function) {
return function->unchecked_code() !=
return function->code() !=
function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
@ -1051,8 +1049,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Code is either on the stack, in the compilation cache, or referenced
// by an optimized version of the function.
MarkBit code_mark =
Marking::MarkBitFrom(function->unchecked_code());
MarkBit code_mark = Marking::MarkBitFrom(function->code());
if (code_mark.Get()) {
if (!Marking::MarkBitFrom(shared_info).Get()) {
shared_info->set_code_age(0);
@ -1061,7 +1058,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
// We do not flush code for optimized functions.
if (function->code() != shared_info->unchecked_code()) {
if (function->code() != shared_info->code()) {
return false;
}
@ -1072,7 +1069,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Code is either on the stack, in the compilation cache, or referenced
// by an optimized version of the function.
MarkBit code_mark =
Marking::MarkBitFrom(shared_info->unchecked_code());
Marking::MarkBitFrom(shared_info->code());
if (code_mark.Get()) {
return false;
}
@ -1085,16 +1082,24 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// We never flush code for Api functions.
Object* function_data = shared_info->function_data();
if (function_data->IsFunctionTemplateInfo()) return false;
if (function_data->IsFunctionTemplateInfo()) {
return false;
}
// Only flush code for functions.
if (shared_info->code()->kind() != Code::FUNCTION) return false;
if (shared_info->code()->kind() != Code::FUNCTION) {
return false;
}
// The function must be lazily compilable.
if (!shared_info->allows_lazy_compilation()) return false;
if (!shared_info->allows_lazy_compilation()) {
return false;
}
// If this is a full script wrapped in a function we do not flush the code.
if (shared_info->is_toplevel()) return false;
if (shared_info->is_toplevel()) {
return false;
}
// Age this shared function info.
if (shared_info->code_age() < kCodeAgeThreshold) {
@ -1267,30 +1272,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
if (!flush_code_candidate) {
Code* code = jsfunction->unchecked_shared()->unchecked_code();
Code* code = jsfunction->shared()->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
heap->mark_compact_collector()->MarkObject(code, code_mark);
if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
// For optimized functions we should retain both the non-optimized version
// of its code and the non-optimized versions of all inlined functions.
// This is required to support bailing out from inlined code.
DeoptimizationInputData* data =
reinterpret_cast<DeoptimizationInputData*>(
jsfunction->unchecked_code()->unchecked_deoptimization_data());
FixedArray* literals = data->UncheckedLiteralArray();
for (int i = 0, count = data->InlinedFunctionCount()->value();
i < count;
i++) {
JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
MarkBit inlined_code_mark =
Marking::MarkBitFrom(inlined_code);
heap->mark_compact_collector()->MarkObject(
inlined_code, inlined_code_mark);
}
collector->MarkObject(code, code_mark);
if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
collector->MarkInlinedFunctionsCode(jsfunction->code());
}
}
@ -1415,11 +1402,7 @@ class CodeMarkingVisitor : public ThreadVisitor {
: collector_(collector) {}
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
Code* code = it.frame()->unchecked_code();
MarkBit code_bit = Marking::MarkBitFrom(code);
collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
}
collector_->PrepareThreadForCodeFlushing(isolate, top);
}
private:
@ -1441,8 +1424,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
MarkBit shared_mark = Marking::MarkBitFrom(shared);
MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code());
collector_->MarkObject(shared->unchecked_code(), code_mark);
MarkBit code_mark = Marking::MarkBitFrom(shared->code());
collector_->MarkObject(shared->code(), code_mark);
collector_->MarkObject(shared, shared_mark);
}
}
@ -1452,6 +1435,44 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
};
void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
// For optimized functions we should retain both the non-optimized version
// of its code and the non-optimized versions of all inlined functions.
// This is required to support bailing out from inlined code.
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
FixedArray* literals = data->LiteralArray();
for (int i = 0, count = data->InlinedFunctionCount()->value();
i < count;
i++) {
JSFunction* inlined = JSFunction::cast(literals->get(i));
Code* inlined_code = inlined->shared()->code();
MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
MarkObject(inlined_code, inlined_code_mark);
}
}
void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
// Note: for a frame that has a pending lazy deoptimization,
// StackFrame::unchecked_code will return a non-optimized code object for
// the outermost function, and StackFrame::LookupCode will return the
// actual optimized code object.
StackFrame* frame = it.frame();
Code* code = frame->unchecked_code();
MarkBit code_mark = Marking::MarkBitFrom(code);
MarkObject(code, code_mark);
if (frame->is_optimized()) {
MarkInlinedFunctionsCode(frame->LookupCode());
}
}
}
void MarkCompactCollector::PrepareForCodeFlushing() {
ASSERT(heap() == Isolate::Current()->heap());
@ -1479,11 +1500,8 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
// Make sure we are not referencing the code from the stack.
ASSERT(this == heap()->mark_compact_collector());
for (StackFrameIterator it; !it.done(); it.Advance()) {
Code* code = it.frame()->unchecked_code();
MarkBit code_mark = Marking::MarkBitFrom(code);
MarkObject(code, code_mark);
}
PrepareThreadForCodeFlushing(heap()->isolate(),
heap()->isolate()->thread_local_top());
// Iterate the archived stacks in all threads to check if
// the code is referenced.
@ -2081,6 +2099,24 @@ void MarkCompactCollector::MarkLiveObjects() {
PrepareForCodeFlushing();
if (was_marked_incrementally_) {
// There is no write barrier on cells so we have to scan them now at the end
// of the incremental marking.
{
HeapObjectIterator cell_iterator(heap()->cell_space());
HeapObject* cell;
while ((cell = cell_iterator.Next()) != NULL) {
ASSERT(cell->IsJSGlobalPropertyCell());
if (IsMarked(cell)) {
int offset = JSGlobalPropertyCell::kValueOffset;
StaticMarkingVisitor::VisitPointer(
heap(),
reinterpret_cast<Object**>(cell->address() + offset));
}
}
}
}
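// A minimal model (assumed types) of why this rescan is needed: stores to
// a cell's value slot are not recorded by any write barrier, so a value
// written after incremental marking visited the cell may still be unmarked.
#include <vector>

struct HeapObj { bool marked = false; };
struct PropertyCell { HeapObj* value = nullptr; bool marked = false; };

void RescanCellValues(const std::vector<PropertyCell*>& cell_space) {
  for (PropertyCell* cell : cell_space) {
    if (cell->marked && cell->value != nullptr) {
      cell->value->marked = true;  // re-visit the slot at kValueOffset
    }
  }
}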
RootMarkingVisitor root_visitor(heap());
MarkRoots(&root_visitor);
@ -3673,6 +3709,7 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
// Noncompacting collections simply sweep the spaces to clear the mark
// bits and free the nonlive blocks (for old and map spaces). We sweep

12
deps/v8/src/mark-compact.h

@ -383,6 +383,10 @@ class SlotsBuffer {
};
// Defined in isolate.h.
class ThreadLocalTop;
// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
@ -603,6 +607,14 @@ class MarkCompactCollector {
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
// Mark the non-optimized code of functions inlined into the given optimized
// code. This prevents it from being flushed.
void MarkInlinedFunctionsCode(Code* code);
// Mark code objects that are active on the stack to prevent them
// from being flushed.
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.

1
deps/v8/src/messages.js

@ -246,6 +246,7 @@ function FormatMessage(message) {
"unprotected_const", ["Illegal const declaration in unprotected statement context."],
"cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
"redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
"harmony_const_assign", ["Assignment to constant variable."],
];
var messages = { __proto__ : null };
for (var i = 0; i < messagesDictionary.length; i += 2) {

595
deps/v8/src/mips/code-stubs-mips.cc

@ -255,21 +255,61 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
// [sp]: constant elements.
// [sp + kPointerSize]: literal index.
// [sp + (2 * kPointerSize)]: literals array.
static void GenerateFastCloneShallowArrayCommon(
MacroAssembler* masm,
int length,
FastCloneShallowArrayStub::Mode mode,
Label* fail) {
// Registers on entry:
// a3: boilerplate literal array.
ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
// All sizes here are multiples of kPointerSize.
int elements_size = 0;
if (length_ > 0) {
elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
? FixedDoubleArray::SizeFor(length_)
: FixedArray::SizeFor(length_);
if (length > 0) {
elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
int size = JSArray::kSize + elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
__ AllocateInNewSpace(size,
v0,
a1,
a2,
fail,
TAG_OBJECT);
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
__ lw(a1, FieldMemOperand(a3, i));
__ sw(a1, FieldMemOperand(v0, i));
}
}
if (length > 0) {
// Get hold of the elements array of the boilerplate and set up the
// elements pointer in the resulting object.
__ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
__ Addu(a2, v0, Operand(JSArray::kSize));
__ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
// Copy the elements array.
ASSERT((elements_size % kPointerSize) == 0);
__ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
}
}
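// Worked example of the single-allocation layout above, assuming this
// port's 32-bit constants: kPointerSize = 4, JSArray::kSize = 16 (map,
// properties, elements, length) and FixedArray::kHeaderSize = 8 (map,
// length). For a three-element CLONE_ELEMENTS literal:
//   elements_size = FixedArray::SizeFor(3) = 8 + 3 * 4 = 20
//   size          = JSArray::kSize + elements_size = 36
// One AllocateInNewSpace call of 36 bytes replaces two separate
// allocations (and two new-space limit checks); the elements pointer is
// then patched to point at v0 + JSArray::kSize.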
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
// [sp]: constant elements.
// [sp + kPointerSize]: literal index.
// [sp + (2 * kPointerSize)]: literals array.
// Load boilerplate object into r3 and check if we need to create a
// boilerplate.
Label slow_case;
@ -282,17 +322,42 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ Branch(&slow_case, eq, a3, Operand(t1));
FastCloneShallowArrayStub::Mode mode = mode_;
if (mode == CLONE_ANY_ELEMENTS) {
Label double_elements, check_fast_elements;
__ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
__ Branch(&check_fast_elements, ne, v0, Operand(t1));
GenerateFastCloneShallowArrayCommon(masm, 0,
COPY_ON_WRITE_ELEMENTS, &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
__ bind(&check_fast_elements);
__ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
__ Branch(&double_elements, ne, v0, Operand(t1));
GenerateFastCloneShallowArrayCommon(masm, length_,
CLONE_ELEMENTS, &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
__ bind(&double_elements);
mode = CLONE_DOUBLE_ELEMENTS;
// Fall through to generate the code to handle double elements.
}
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
if (mode_ == CLONE_ELEMENTS) {
if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
} else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
} else if (mode == CLONE_DOUBLE_ELEMENTS) {
message = "Expected (writable) fixed double array";
expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
@ -304,42 +369,59 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(a3);
}
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
// Return new object in v0.
__ AllocateInNewSpace(size,
v0,
a1,
a2,
&slow_case,
TAG_OBJECT);
GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
__ lw(a1, FieldMemOperand(a3, i));
__ sw(a1, FieldMemOperand(v0, i));
}
}
// Return and remove the on-stack parameters.
__ Addu(sp, sp, Operand(3 * kPointerSize));
__ Ret();
if (length_ > 0) {
// Get hold of the elements array of the boilerplate and set up the
// elements pointer in the resulting object.
__ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
__ Addu(a2, v0, Operand(JSArray::kSize));
__ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
// Copy the elements array.
ASSERT((elements_size % kPointerSize) == 0);
__ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
// [sp]: object literal flags.
// [sp + kPointerSize]: constant properties.
// [sp + (2 * kPointerSize)]: literal index.
// [sp + (3 * kPointerSize)]: literals array.
// Load boilerplate object into a3 and check if we need to create a
// boilerplate.
Label slow_case;
__ lw(a3, MemOperand(sp, 3 * kPointerSize));
__ lw(a0, MemOperand(sp, 2 * kPointerSize));
__ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, t0, a3);
__ lw(a3, MemOperand(a3));
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ Branch(&slow_case, eq, a3, Operand(t0));
// Check that the boilerplate contains only fast properties and we can
// statically determine the instance size.
int size = JSObject::kHeaderSize + length_ * kPointerSize;
__ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
__ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
// Allocate the JS object and copy header together with all in-object
// properties from the boilerplate.
__ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
for (int i = 0; i < size; i += kPointerSize) {
__ lw(a1, FieldMemOperand(a3, i));
__ sw(a1, FieldMemOperand(a0, i));
}
// Return and remove the on-stack parameters.
__ Addu(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ Drop(4);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
__ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
@ -3510,113 +3592,218 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime;
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
Label base_not_smi;
Label exponent_not_smi;
Label convert_exponent;
const Register base = a0;
const Register exponent = a2;
const Register heapnumbermap = t1;
const Register heapnumber = s0; // Callee-saved register.
const Register scratch = t2;
const Register scratch2 = t3;
// Allocate FP values in the ABI-parameter-passing regs.
const DoubleRegister double_base = f12;
const DoubleRegister double_exponent = f14;
const DoubleRegister double_result = f0;
const DoubleRegister double_scratch = f2;
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
CpuFeatures::Scope fpu_scope(FPU);
const Register base = a1;
const Register exponent = a2;
const Register heapnumbermap = t1;
const Register heapnumber = v0;
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
const DoubleRegister double_scratch = f6;
const FPURegister single_scratch = f8;
const Register scratch = t5;
const Register scratch2 = t3;
Label call_runtime, done, exponent_not_smi, int_exponent;
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack to double registers.
__ lw(base, MemOperand(sp, 1 * kPointerSize));
__ lw(exponent, MemOperand(sp, 0 * kPointerSize));
// Convert base to double value and store it in f0.
__ JumpIfNotSmi(base, &base_not_smi);
// Base is a Smi. Untag and convert it.
__ SmiUntag(base);
__ mtc1(base, double_scratch);
__ cvt_d_w(double_base, double_scratch);
__ Branch(&convert_exponent);
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
__ bind(&base_not_smi);
__ JumpIfSmi(base, &base_is_smi);
__ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
// Base is a heapnumber. Load it into double register.
__ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent);
__ bind(&base_is_smi);
__ SmiUntag(base);
__ mtc1(base, single_scratch);
__ cvt_d_w(double_base, single_scratch);
__ bind(&unpack_exponent);
__ bind(&convert_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
// The base is in a double register and the exponent is
// an untagged smi. Allocate a heap number and call a
// C function for integer exponents. The register containing
// the heap number is callee-saved.
__ AllocateHeapNumber(heapnumber,
scratch,
scratch2,
heapnumbermap,
&call_runtime);
__ push(ra);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
__ pop(ra);
__ GetCFunctionDoubleResult(double_result);
}
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
__ DropAndRet(2 * kPointerSize);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
// Exponent is a heapnumber. Load it into double register.
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
// Base is already in double_base.
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
if (exponent_type_ != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
single_scratch,
double_exponent,
scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
Label not_plus_half;
// Test for 0.5.
__ Move(double_scratch, 0.5);
__ BranchF(USE_DELAY_SLOT,
&not_plus_half,
NULL,
ne,
double_exponent,
double_scratch);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
__ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
__ neg_d(double_result, double_scratch);
// Add +0 to convert -0 to +0.
__ add_d(double_scratch, double_base, kDoubleRegZero);
__ sqrt_d(double_result, double_scratch);
__ jmp(&done);
__ bind(&not_plus_half);
__ Move(double_scratch, -0.5);
__ BranchF(USE_DELAY_SLOT,
&call_runtime,
NULL,
ne,
double_exponent,
double_scratch);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
__ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
__ Move(double_result, kDoubleRegZero);
// Add +0 to convert -0 to +0.
__ add_d(double_scratch, double_base, kDoubleRegZero);
__ Move(double_result, 1);
__ sqrt_d(double_scratch, double_scratch);
__ div_d(double_result, double_result, double_scratch);
__ jmp(&done);
}
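// A quick check of the IEEE/ECMA corner cases that motivate the add_d of
// +0 before sqrt_d and the explicit -V8_INFINITY comparisons above:
#include <cmath>
#include <cstdio>

int main() {
  std::printf("%g\n", std::sqrt(-0.0));        // -0: sqrt preserves the sign
  std::printf("%g\n", std::sqrt(-0.0 + 0.0));  // 0: adding +0 normalizes it
  // sqrt(-Infinity) is NaN, but ECMA-262 15.8.2.13 requires
  // Math.pow(-Infinity, 0.5) == Infinity and
  // Math.pow(-Infinity, -0.5) == 0, hence the pre-checks.
}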
// The base and the exponent are in double registers.
// Allocate a heap number and call a C function for
// double exponents. The register containing
// the heap number is callee-saved.
__ AllocateHeapNumber(heapnumber,
scratch,
scratch2,
heapnumbermap,
&call_runtime);
__ push(ra);
__ PrepareCallCFunction(0, 2, scratch);
// ABI (o32) for func(double a, double b): a in f12, b in f14.
ASSERT(double_base.is(f12));
ASSERT(double_exponent.is(f14));
__ SetCallCDoubleArguments(double_base, double_exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0,
2);
__ pop(ra);
__ GetCFunctionDoubleResult(double_result);
0, 2);
}
__ pop(ra);
__ GetCFunctionDoubleResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
__ mfc1(exponent, single_scratch);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
__ mov(scratch, exponent); // Back up exponent.
__ mov_d(double_scratch, double_base); // Back up base.
__ Move(double_result, 1.0);
// Get absolute value of exponent.
Label positive_exponent;
__ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
__ Subu(scratch, zero_reg, scratch);
__ bind(&positive_exponent);
Label while_true, no_carry, loop_end;
__ bind(&while_true);
__ And(scratch2, scratch, 1);
__ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
__ mul_d(double_result, double_result, double_scratch);
__ bind(&no_carry);
__ sra(scratch, scratch, 1);
__ Branch(&loop_end, eq, scratch, Operand(zero_reg));
__ mul_d(double_scratch, double_scratch, double_scratch);
__ Branch(&while_true);
__ bind(&loop_end);
__ Branch(&done, ge, exponent, Operand(zero_reg));
__ Move(double_scratch, 1.0);
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it to the exponent value before bailing out.
__ mtc1(exponent, single_scratch);
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
__ DropAndRet(2 * kPointerSize);
}
ASSERT(heapnumber.is(v0));
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ DropAndRet(2);
} else {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
}
__ pop(ra);
__ GetCFunctionDoubleResult(double_result);
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
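// The int_exponent loop above is binary exponentiation (square-and-
// multiply). The same register dance in plain C++, as a sketch:
#include <cstdio>

double PowInt(double base, int exponent) {
  double result = 1.0;    // double_result
  double running = base;  // double_scratch, squared on each round
  int bits = exponent < 0 ? -exponent : exponent;  // scratch = |exponent|
  while (bits != 0) {
    if (bits & 1) result *= running;  // low bit set: fold in this power
    bits >>= 1;
    if (bits != 0) running *= running;
  }
  // Negative exponents invert at the end; the stub additionally bails out
  // to the C routine when this underflows to 0 (subnormal results), since
  // x^-y == (1/x)^y does not hold for them.
  return exponent < 0 ? 1.0 / result : result;
}

int main() {
  std::printf("%g %g\n", PowInt(2.0, 10), PowInt(2.0, -2));  // 1024 0.25
}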
@ -4759,8 +4946,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
// First check for flat string. None of the following string type tests will
// succeed if kIsNotStringTag is set.
__ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
// succeed if subject is not a string or a short external string.
__ And(a1,
a0,
Operand(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ Branch(&seq_string, eq, a1, Operand(zero_reg));
@ -4774,16 +4965,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
Label cons_string, check_encoding;
Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
__ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
__ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
// Catch non-string subject (should already have been guarded against).
STATIC_ASSERT(kNotStringTag != 0);
__ And(at, a1, Operand(kIsNotStringMask));
// Catch non-string subject or short external string.
STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
__ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ Branch(&runtime, ne, at, Operand(zero_reg));
// String is sliced.
@ -4804,7 +4996,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
__ Branch(&runtime, ne, at, Operand(zero_reg));
__ Branch(&external_string, ne, at, Operand(zero_reg));
__ bind(&seq_string);
// subject: Subject string
@ -5030,6 +5222,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
// External string. Short external strings have already been ruled out.
// a0: scratch
__ bind(&external_string);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ And(at, a0, Operand(kIsIndirectStringMask));
__ Assert(eq,
"external string expected, but not found",
at,
Operand(zero_reg));
}
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ jmp(&seq_string);
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@ -5288,77 +5503,14 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
__ Branch(index_out_of_range_, ls, t0, Operand(index_));
// We need special handling for non-flat strings.
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t0, result_, Operand(kStringRepresentationMask));
__ Branch(&flat_string, eq, t0, Operand(zero_reg));
// Handle non-flat strings.
__ And(result_, result_, Operand(kStringRepresentationMask));
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
__ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
__ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
Label assure_seq_string;
__ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
__ LoadRoot(t0, Heap::kEmptyStringRootIndex);
__ Branch(&call_runtime_, ne, result_, Operand(t0));
// Get the first of the two parts.
__ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
__ Addu(index_, index_, result_);
__ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
__ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
// Note that if the original string is a cons or slice with an external
// string as underlying string, we pass that unpacked underlying string with
// the adjusted index to the runtime function.
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t0, result_, Operand(kStringRepresentationMask));
__ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, result_, Operand(kStringEncodingMask));
__ Branch(&ascii_string, ne, t0, Operand(zero_reg));
__ sra(index_, index_, kSmiTagSize);
// 2-byte string.
// Load the 2-byte character code into the result register. We can
// add without shifting since the smi tag size is the log2 of the
// number of bytes in a two-byte character.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
__ Addu(index_, object_, Operand(index_));
__ lhu(result_, FieldMemOperand(index_, SeqTwoByteString::kHeaderSize));
__ Branch(&got_char_code);
StringCharLoadGenerator::Generate(masm,
object_,
index_,
result_,
&call_runtime_);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
__ srl(t0, index_, kSmiTagSize);
__ Addu(index_, object_, t0);
__ lbu(result_, FieldMemOperand(index_, SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ sll(result_, result_, kSmiTagSize);
__ bind(&exit_);
}
@ -5407,6 +5559,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
@ -6821,26 +6974,39 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a0);
__ push(ra);
void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
__ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&miss, ne, a2, Operand(known_map_));
__ Branch(&miss, ne, a3, Operand(known_map_));
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
__ bind(&miss);
GenerateMiss(masm);
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
__ push(ra);
__ Push(a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
__ push(t0);
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ Pop(a1, a0, ra);
}
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(ra);
__ pop(a0);
__ pop(a1);
__ Jump(a2);
}
@ -7463,7 +7629,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Update the write barrier for the array store.
__ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
@ -7472,14 +7639,16 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t2, t1, t2);
__ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
&slow_elements);
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
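// The Ret(USE_DELAY_SLOT) / mov(v0, a0) pairs above lean on the MIPS
// branch delay slot: the instruction issued right after a jump executes
// before control transfers, so
//   __ Ret(USE_DELAY_SLOT);  // jr ra ...
//   __ mov(v0, a0);          // ... runs in the delay slot
// still returns with the result in v0, one instruction shorter than a
// mov followed by a plain Ret (which would emit a nop in the slot).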

92
deps/v8/src/mips/codegen-mips.cc

@ -310,6 +310,98 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ pop(ra);
}
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
__ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ And(at, result, Operand(kIsIndirectStringMask));
__ Branch(&check_sequential, eq, at, Operand(zero_reg));
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ And(at, result, Operand(kSlicedNotConsMask));
__ Branch(&cons_string, eq, at, Operand(zero_reg));
// Handle slices.
Label indirect_string_loaded;
__ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ sra(at, result, kSmiTagSize);
__ Addu(index, index, at);
__ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ bind(&cons_string);
__ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ LoadRoot(at, Heap::kEmptyStringRootIndex);
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
__ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
__ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, result, Operand(kStringRepresentationMask));
__ Branch(&external_string, ne, at, Operand(zero_reg));
// Prepare sequential strings.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ Addu(string,
string,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ jmp(&check_encoding);
// Handle external strings.
__ bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ And(at, result, Operand(kIsIndirectStringMask));
__ Assert(eq, "external string expected, but not found",
at, Operand(zero_reg));
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
__ And(at, result, Operand(kShortExternalStringMask));
__ Branch(call_runtime, ne, at, Operand(zero_reg));
__ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label ascii, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ And(at, result, Operand(kStringEncodingMask));
__ Branch(&ascii, ne, at, Operand(zero_reg));
// Two-byte string.
__ sll(at, index, 1);
__ Addu(at, string, at);
__ lhu(result, MemOperand(at));
__ jmp(&done);
__ bind(&ascii);
// ASCII string.
__ Addu(at, string, index);
__ lbu(result, MemOperand(at));
__ bind(&done);
}
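// The generator above, restated over a simplified string model (tags and
// fields assumed): slices add their offset and continue with the parent,
// flat cons strings (empty second part) continue with their first part,
// and short external strings, which carry no cached data pointer, punt to
// the runtime just like the |call_runtime| branches do. The call sites
// added in this commit (StringCharCodeAtGenerator::GenerateFast and
// LCodeGen::DoStringCharCodeAt) follow the same contract.
#include <cstdint>

struct Str {
  bool indirect, sliced, external, short_external, two_byte;
  int offset;        // slice offset into the parent
  Str* parent;       // slice parent, or cons first part
  const void* data;  // sequential or external payload
};

// Returns the char code, or -1 where the stub branches to |call_runtime|.
int CharAt(Str* s, int index) {
  while (s->indirect) {
    if (s->sliced) {
      index += s->offset;
      s = s->parent;
    } else {
      s = s->parent;  // cons: only flat ones reach here, see above
    }
  }
  if (s->external && s->short_external) return -1;
  return s->two_byte ? static_cast<const uint16_t*>(s->data)[index]
                     : static_cast<const uint8_t*>(s->data)[index];
}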
#undef __
} } // namespace v8::internal

15
deps/v8/src/mips/codegen-mips.h

@ -75,6 +75,21 @@ class CodeGenerator: public AstVisitor {
};
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_

101
deps/v8/src/mips/full-codegen-mips.cc

@ -1424,10 +1424,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(expr->constant_properties()));
__ li(a1, Operand(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@ -1436,10 +1437,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
}
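// The new three-way dispatch above, written out as a predicate. The
// property limit is the stub's kMaximumClonedProperties; its value (6) is
// assumed from this V8 version's code-stubs.h, and the constants here are
// illustrative.
enum Path { kRuntimeDeep, kRuntimeShallow, kFastCloneStub };

Path ChooseObjectLiteralPath(int depth, bool fast_elements,
                             int properties_count) {
  const int kMaximumClonedProperties = 6;  // assumed stub limit
  if (depth > 1) return kRuntimeDeep;      // nested literals: full runtime
  if (!fast_elements || properties_count > kMaximumClonedProperties) {
    return kRuntimeShallow;                // generic shallow-clone runtime
  }
  return kFastCloneStub;                   // inline FastCloneShallowObject
}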
// If result_saved is true the result is on top of the stack. If
@ -1540,6 +1546,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@ -1549,7 +1556,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
__ Push(a3, a2, a1);
if (constant_elements_values->map() ==
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@ -1564,10 +1571,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
@ -1589,65 +1595,30 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(v0);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
__ lw(t6, MemOperand(sp)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset));
__ lw(a2, FieldMemOperand(t6, JSObject::kMapOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
Label element_done;
Label double_elements;
Label smi_element;
Label slow_elements;
Label fast_elements;
__ CheckFastElements(a2, a3, &double_elements);
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(result_register(), &smi_element);
__ CheckFastSmiOnlyElements(a2, a3, &fast_elements);
// Store into the array literal requires a elements transition. Call into
// the runtime.
__ bind(&slow_elements);
__ push(t6); // Copy of array literal.
__ li(a1, Operand(Smi::FromInt(i)));
__ li(a2, Operand(Smi::FromInt(NONE))); // PropertyAttributes
StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
? kNonStrictMode : kStrictMode;
__ li(a3, Operand(Smi::FromInt(strict_mode_flag))); // Strict mode.
__ Push(a1, result_register(), a2, a3);
__ CallRuntime(Runtime::kSetProperty, 5);
__ Branch(&element_done);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ li(a3, Operand(Smi::FromInt(i)));
__ StoreNumberToDoubleElements(result_register(), a3, t6, a1, t0, t1, t5,
t3, &slow_elements);
__ Branch(&element_done);
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ sw(result_register(), FieldMemOperand(a1, offset));
// Update the write barrier for the array store.
__ RecordWriteField(
a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Branch(&element_done);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ sw(result_register(), FieldMemOperand(a1, offset));
// Fall through
__ bind(&element_done);
if (constant_elements_kind == FAST_ELEMENTS) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ lw(t2, MemOperand(sp)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
__ sw(result_register(), FieldMemOperand(a1, offset));
// Update the write barrier for the array store.
__ RecordWriteField(a1, offset, result_register(), a2,
kRAHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
__ lw(a1, MemOperand(sp)); // Copy of array literal.
__ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
__ li(a3, Operand(Smi::FromInt(i)));
__ li(t0, Operand(Smi::FromInt(expr->literal_index())));
__ mov(a0, result_register());
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
}
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
if (result_saved) {
context()->PlugTOS();
} else {
@ -2987,8 +2958,12 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
MathPowStub stub;
__ CallStub(&stub);
if (CpuFeatures::IsSupported(FPU)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
context()->Plug(v0);
}

3
deps/v8/src/mips/ic-mips.cc

@ -1587,6 +1587,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
if (state == KNOWN_OBJECTS) {
stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
}
rewritten = stub.GetCode();
}
set_target(*rewritten);

400
deps/v8/src/mips/lithium-codegen-mips.cc

@ -291,7 +291,22 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
if (op->IsRegister()) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
__ li(scratch, ToOperand(op));
LConstantOperand* const_op = LConstantOperand::cast(op);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
Abort("EmitLoadRegister: Unsupported double immediate.");
} else {
ASSERT(r.IsTagged());
if (literal->IsSmi()) {
__ li(scratch, Operand(literal));
} else {
__ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
}
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
__ lw(scratch, ToMemOperand(op));
@ -1162,8 +1177,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
ASSERT(instr->result()->IsRegister());
__ li(ToRegister(instr->result()), Operand(instr->value()));
Handle<Object> value = instr->value();
if (value->IsSmi()) {
__ li(ToRegister(instr->result()), Operand(value));
} else {
__ LoadHeapObject(ToRegister(instr->result()),
Handle<HeapObject>::cast(value));
}
}
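// Why DoConstantT now splits on IsSmi(): a smi is a tagged immediate the
// GC never relocates, so li can embed it in the instruction stream, while
// a heap object in new space may move and is loaded indirectly instead
// (LoadHeapObject goes through a JSGlobalPropertyCell; see DoCheckFunction
// further down). Sketch of the 32-bit tagging scheme assumed here:
#include <cstdint>

inline int32_t SmiTag(int32_t value) { return value << 1; }      // low bit 0
inline bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }  // 1 = heap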
@ -2039,7 +2059,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// offset to the location of the map check.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(t0));
__ li(InstanceofStub::right(), Operand(instr->function()));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 7;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@ -2141,21 +2161,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
// Cells are always in the remembered set.
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteField(scratch,
JSGlobalPropertyCell::kValueOffset,
value,
scratch2,
kRAHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET,
check_needed);
}
// Cells are always rescanned, so no write barrier here.
}
@ -2175,6 +2181,10 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ lw(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), result, Operand(at));
}
}
@ -2182,6 +2192,12 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
MemOperand target = ContextOperand(context, instr->slot_index());
if (instr->hydrogen()->RequiresHoleCheck()) {
Register scratch = scratch0();
__ lw(scratch, target);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
}
__ sw(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
@ -2233,7 +2249,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
LoadHeapObject(result, Handle<HeapObject>::cast(function));
__ LoadHeapObject(result, function);
}
}
@ -2687,7 +2703,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
LoadHeapObject(result, instr->hydrogen()->closure());
__ LoadHeapObject(result, instr->hydrogen()->closure());
}
@ -2757,7 +2773,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ mov(a0, v0);
__ li(a1, Operand(instr->function()));
__ LoadHeapObject(a1, instr->function());
CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
}
@ -2942,11 +2958,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ And(scratch, result, Operand(HeapNumber::kSignMask));
__ Move(double_scratch0(), 0.5);
__ add_d(input, input, double_scratch0());
__ add_d(double_scratch0(), input, double_scratch0());
// Check sign of the result: if the sign changed, the input
// value was in ]-0.5, 0[ and the result should be -0.
__ mfc1(result, input.high());
__ mfc1(result, double_scratch0().high());
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
@ -2966,7 +2982,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ EmitFPUTruncate(kRoundToMinusInf,
double_scratch0().low(),
input,
double_scratch0(),
result,
except_flag);
@ -2996,69 +3012,54 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister double_scratch = double_scratch0();
DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
ASSERT(!input.is(result));
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
// Math.sqrt(-Infinity) == NaN
Label done;
__ Move(temp, -V8_INFINITY);
__ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
// Set up Infinity in the delay slot.
// result is overwritten if the branch is not taken.
__ neg_d(result, temp);
// Add +0 to convert -0 to +0.
__ mtc1(zero_reg, double_scratch.low());
__ mtc1(zero_reg, double_scratch.high());
__ add_d(result, input, double_scratch);
__ add_d(result, input, kDoubleRegZero);
__ sqrt_d(result, result);
__ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
Register scratch = scratch0();
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
// Prepare arguments and call C function.
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(ToDoubleRegister(left),
ToDoubleRegister(right));
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()), 0, 2);
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
ToDoubleRegister(instr->InputAt(1)).is(f4));
ASSERT(!instr->InputAt(1)->IsRegister() ||
ToRegister(instr->InputAt(1)).is(a2));
ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
ASSERT(ToDoubleRegister(instr->result()).is(f0));
if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(a2, &no_deopt);
__ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
ASSERT(ToRegister(right).is(a0));
// Prepare arguments and call C function.
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
__ CallCFunction(
ExternalReference::power_double_int_function(isolate()), 1, 1);
MathPowStub stub(MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsTagged());
ASSERT(instr->hydrogen()->left()->representation().IsDouble());
Register right_reg = ToRegister(right);
// Check for smi on the right hand side.
Label non_smi, call;
__ JumpIfNotSmi(right_reg, &non_smi);
// Untag smi and convert it to a double.
__ SmiUntag(right_reg);
FPURegister single_scratch = double_scratch0();
__ mtc1(right_reg, single_scratch);
__ cvt_d_w(result_reg, single_scratch);
__ Branch(&call);
// Heap number map check.
__ bind(&non_smi);
__ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
__ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
// Prepare arguments and call C function.
__ bind(&call);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()), 0, 2);
ASSERT(exponent_type.IsDouble());
MathPowStub stub(MathPowStub::DOUBLE);
__ CallStub(&stub);
}
// Store the result in the result register.
__ GetCFunctionDoubleResult(result_reg);
}
@ -3194,7 +3195,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a1, Operand(instr->target()));
__ LoadHeapObject(a1, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@ -3520,89 +3521,13 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
LStringCharCodeAt* instr_;
};
Register temp = scratch1();
Register string = ToRegister(instr->string());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
// Fetch the instance type of the receiver into result register.
__ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ And(temp, result, kIsIndirectStringMask);
__ Branch(&check_sequential, eq, temp, Operand(zero_reg));
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ And(temp, result, kSlicedNotConsMask);
__ Branch(&cons_string, eq, temp, Operand(zero_reg));
// Handle slices.
Label indirect_string_loaded;
__ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ sra(temp, result, kSmiTagSize);
__ addu(index, index, temp);
__ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded);
// Handle conses.
// Check whether the right-hand side is the empty string (i.e. whether
// this is really a flat string in a cons string). If that is not the
// case, we would rather go to the runtime system now to flatten the
// string.
__ bind(&cons_string);
__ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ LoadRoot(temp, Heap::kEmptyStringRootIndex);
__ Branch(deferred->entry(), ne, result, Operand(temp));
// Get the first of the two strings and load its instance type.
__ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
__ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Check whether the string is sequential. The only non-sequential
// shapes we support have just been unwrapped above.
// Note that if the original string is a cons or slice with an external
// string as its underlying string, we pass that unpacked underlying
// string with the adjusted index to the runtime function.
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ And(temp, result, Operand(kStringRepresentationMask));
__ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
// Dispatch on the encoding: ASCII or two-byte.
Label ascii_string;
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(temp, result, Operand(kStringEncodingMask));
__ Branch(&ascii_string, ne, temp, Operand(zero_reg));
// Two-byte string.
// Load the two-byte character code into the result register.
Label done;
__ Addu(result,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ sll(temp, index, 1);
__ Addu(result, result, temp);
__ lhu(result, MemOperand(result, 0));
__ Branch(&done);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
__ Addu(result,
string,
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ Addu(result, result, index);
__ lbu(result, MemOperand(result, 0));
__ bind(&done);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
ToRegister(instr->index()),
ToRegister(instr->result()),
deferred->entry());
__ bind(deferred->exit());
}
@ -4098,10 +4023,20 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
Register reg = ToRegister(instr->InputAt(0));
DeoptimizeIf(ne, instr->environment(), reg,
Operand(instr->hydrogen()->target()));
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(target);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
Operand(at));
} else {
DeoptimizeIf(ne, instr->environment(), reg,
Operand(target));
}
}
@ -4170,19 +4105,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
void LCodeGen::LoadHeapObject(Register result,
Handle<HeapObject> object) {
if (heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(object);
__ li(result, Operand(cell));
__ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
__ li(result, Operand(object));
}
}
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
@ -4191,7 +4113,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
LoadHeapObject(temp1, current_prototype);
__ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@ -4203,7 +4125,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
LoadHeapObject(temp1, current_prototype);
__ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
@ -4216,15 +4138,32 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
Heap* heap = isolate()->heap();
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// from the expected one. The check isn't necessary if the boilerplate has
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
// Load the map's "bit field 2".
__ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
DeoptimizeIf(ne,
instr->environment(),
a2,
Operand(boilerplate_elements_kind));
}
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ li(a1, Operand(constant_elements));
// Boilerplate already exists; constant elements are never accessed.
// Pass an empty fixed array.
__ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
__ Push(a3, a2, a1);
// Pick the right runtime function or stub to call.
@ -4241,29 +4180,108 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
constant_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
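The deopt check above reads the elements kind straight out of the map: it loads bit field 2 and uses the MIPS Ext instruction to pull a bit range out in one step. A standalone sketch of that extraction, with illustrative shift/width values (the real constants live on Map):

    #include <cstdint>

    const int kElementsKindShift = 3;     // assumed position of the field
    const int kElementsKindBitCount = 5;  // assumed width of the field

    // Equivalent of Ext(a2, a2, shift, count): shift the field down and
    // mask off everything above it.
    uint32_t ExtractElementsKind(uint32_t bit_field2) {
      return (bit_field2 >> kElementsKindShift) &
             ((1u << kElementsKindBitCount) - 1);
    }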
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset) {
ASSERT(!source.is(a2));
ASSERT(!result.is(a2));
// Increase the offset so that subsequent objects end up right after
// this one.
int current_offset = *offset;
int size = object->map()->instance_size();
*offset += size;
// Copy object header.
ASSERT(object->properties()->length() == 0);
ASSERT(object->elements()->length() == 0 ||
object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
int inobject_properties = object->map()->inobject_properties();
int header_size = size - inobject_properties * kPointerSize;
for (int i = 0; i < header_size; i += kPointerSize) {
__ lw(a2, FieldMemOperand(source, i));
__ sw(a2, FieldMemOperand(result, current_offset + i));
}
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ Addu(a2, result, Operand(*offset));
__ sw(a2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
__ sw(a2, FieldMemOperand(result, total_offset));
} else {
__ li(a2, Operand(value));
__ sw(a2, FieldMemOperand(result, total_offset));
}
}
}
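EmitDeepCopy flattens a statically known object graph into one contiguous block: each object reserves its bytes by bumping the running offset before its children are visited, so a nested object always lands immediately after everything allocated so far, and the parent can store that position as a forward pointer. A toy model of the offset bookkeeping, using invented types:

    #include <vector>

    struct ToyObject {
      int size;                              // bytes for header + fields
      std::vector<const ToyObject*> children;
    };

    // Records where each object in the graph would be placed inside one
    // flat allocation, mirroring EmitDeepCopy's *offset accounting.
    void PlaceDepthFirst(const ToyObject* obj, int* offset,
                         std::vector<int>* placements) {
      placements->push_back(*offset);
      *offset += obj->size;                  // reserve this object first
      for (const ToyObject* child : obj->children) {
        // At this point *offset is exactly where the child will start,
        // which is the value the parent writes into its field.
        PlaceDepthFirst(child, offset, placements);
      }
    }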
void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
int size = instr->hydrogen()->total_size();
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
Label allocated, runtime_allocate;
__ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ push(a0);
CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ bind(&allocated);
int offset = 0;
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
ASSERT_EQ(size, offset);
}
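DoObjectLiteralFast sizes the whole literal graph up front (total_size covers the boilerplate and every nested object), so a single new-space bump allocation, and hence a single limit check, covers all of them; only if that fails does it fall back to the runtime. A toy version of the pattern, with an invented arena standing in for new space:

    #include <cstddef>
    #include <new>

    static char arena[1 << 16];
    static std::size_t arena_top = 0;

    // Fast path: one pointer bump plus one limit check.
    void* TryBumpAllocate(std::size_t size) {
      if (arena_top + size > sizeof(arena)) return 0;  // limit check fails
      void* result = arena + arena_top;
      arena_top += size;
      return result;
    }

    // One call covers the entire literal, so the limit check runs once
    // instead of once per nested object; the slow path stands in for
    // Runtime::kAllocateInNewSpace.
    void* AllocateLiteral(std::size_t total_size) {
      void* fast = TryBumpAllocate(total_size);
      return fast != 0 ? fast : ::operator new(total_size);
    }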
void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
__ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
__ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ li(a2, Operand(instr->hydrogen()->constant_properties()));
__ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
__ li(a2, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
__ li(a1, Operand(Smi::FromInt(flags)));
__ Push(t0, a3, a2, a1);
// Pick the right runtime function to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else {
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
FastCloneShallowObjectStub stub(properties_count);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}

7
deps/v8/src/mips/lithium-codegen-mips.h

@ -316,6 +316,13 @@ class LCodeGen BASE_EMBEDDED {
Handle<Map> type,
Handle<String> name);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset);
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),

29
deps/v8/src/mips/lithium-mips.cc

@ -1152,6 +1152,13 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
} else if (op == kMathPowHalf) {
// Input cannot be the same as the result.
// See lithium-codegen-mips.cc::DoMathPowHalf.
LOperand* input = UseFixedDouble(instr->value(), f8);
LOperand* temp = FixedTemp(f6);
LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
return DefineFixedDouble(result, f4);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@ -1165,8 +1172,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@ -1401,9 +1406,9 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LOperand* left = UseFixedDouble(instr->left(), f2);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), f4) :
UseFixed(instr->right(), a0);
UseFixed(instr->right(), a2);
LPower* result = new LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, f6),
return MarkAsCall(DefineFixedDouble(result, f0),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
@ -1796,7 +1801,8 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@ -1810,7 +1816,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
LInstruction* result = new LStoreContextSlot(context, value);
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@ -2071,8 +2078,14 @@ LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
}
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
return MarkAsCall(DefineFixed(new LObjectLiteralFast, v0), instr);
}
LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
HObjectLiteralGeneric* instr) {
return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, v0), instr);
}

18
deps/v8/src/mips/lithium-mips.h

@ -134,7 +134,8 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(ObjectLiteralFast) \
V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@ -1792,6 +1793,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
LOperand* value() { return InputAt(0); }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
@ -1899,10 +1902,17 @@ class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
};
class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
};
class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};

15
deps/v8/src/mips/macro-assembler-mips.cc

@ -81,6 +81,19 @@ void MacroAssembler::StoreRoot(Register source,
}
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
li(result, Operand(cell));
lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
li(result, Operand(object));
}
}
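The new-space branch exists because a new-space object can move on the next scavenge, so its address must not be baked into generated code. Instead, a global property cell (which lives outside new space and stays put) is allocated to hold the object, the cell's address is embedded, and the value is loaded through it at run time; the GC updates the cell when the object moves. A sketch of the indirection with invented types:

    // Stand-in for JSGlobalPropertyCell: a stable slot the GC keeps current.
    struct Cell {
      void* value;  // rewritten by the GC when the target object moves
    };

    // What the emitted li + lw pair does: load the cell's current value
    // instead of a hard-coded object address.
    void* LoadViaCell(const Cell* cell) {
      return cell->value;
    }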
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@ -3555,7 +3568,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
li(a1, Operand(function));
LoadHeapObject(a1, function);
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());

1
deps/v8/src/mips/macro-assembler-mips.h

@ -262,6 +262,7 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
void LoadHeapObject(Register dst, Handle<HeapObject> object);
// ---------------------------------------------------------------------------
// GC Support

20
deps/v8/src/mips/stub-cache-mips.cc

@ -574,7 +574,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ li(t1, Operand(function));
__ LoadHeapObject(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
@ -1115,7 +1115,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@ -1127,7 +1127,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ li(v0, Operand(value));
__ LoadHeapObject(v0, value);
__ Ret();
}
@ -2605,15 +2605,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
__ mov(v0, a0); // Stored value must be returned in v0.
// This trashes a0 but the value is returned in v0 anyway.
__ RecordWriteField(t0,
JSGlobalPropertyCell::kValueOffset,
a0,
a2,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET);
// Cells are always rescanned, so no write barrier here.
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
@ -2709,7 +2701,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
@ -2847,7 +2839,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Object> value) {
Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key

161
deps/v8/src/objects-inl.h

@ -1115,7 +1115,7 @@ void HeapObject::set_map(Map* value) {
// Unsafe accessor omitting write barrier.
void HeapObject::set_map_unsafe(Map* value) {
void HeapObject::set_map_no_write_barrier(Map* value) {
set_map_word(MapWord::FromMap(value));
}
@ -1183,6 +1183,22 @@ int HeapNumber::get_sign() {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
Object** FixedArray::GetFirstElementAddress() {
return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
}
bool FixedArray::ContainsOnlySmisOrHoles() {
Object* the_hole = GetHeap()->the_hole_value();
Object** current = GetFirstElementAddress();
for (int i = 0; i < length(); ++i) {
Object* candidate = *current++;
if (!candidate->IsSmi() && candidate != the_hole) return false;
}
return true;
}
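The smi test in the loop is a single bit check: V8 stores small integers with a tag of 0 in the low bit, so anything with that bit clear is a smi and anything with it set is a heap object pointer. As a standalone predicate:

    #include <cstdint>

    // Low tag bit: 0 for smis, 1 for heap object pointers.
    bool IsSmiWord(uintptr_t tagged) {
      return (tagged & 1) == 0;
    }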
FixedArrayBase* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
@ -1211,38 +1227,66 @@ void JSObject::ValidateSmiOnlyElements() {
}
MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
#if DEBUG
ValidateSmiOnlyElements();
#endif
if ((map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS)) {
Object* obj;
MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
set_map(Map::cast(obj));
if ((map()->elements_kind() != FAST_ELEMENTS)) {
return TransitionElementsKind(FAST_ELEMENTS);
}
return this;
}
MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
uint32_t count) {
if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
return EnsureCanContainNonSmiElements();
uint32_t count,
EnsureElementsMode mode) {
ElementsKind current_kind = map()->elements_kind();
ElementsKind target_kind = current_kind;
ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
if (current_kind == FAST_ELEMENTS) return this;
Heap* heap = GetHeap();
Object* the_hole = heap->the_hole_value();
Object* heap_number_map = heap->heap_number_map();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (!current->IsSmi() && current != the_hole) {
if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
HeapObject::cast(current)->map() == heap_number_map) {
target_kind = FAST_DOUBLE_ELEMENTS;
} else {
target_kind = FAST_ELEMENTS;
break;
}
}
}
if (target_kind != current_kind) {
return TransitionElementsKind(target_kind);
}
return this;
}
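The loop settles on the widest kind the incoming values require: any non-number heap object forces FAST_ELEMENTS immediately, a heap number allows FAST_DOUBLE_ELEMENTS when the mode permits conversion, and pure smis/holes leave the kind alone. Restated as a standalone decision function over illustrative enum values:

    enum Kind { SMI_ONLY, DOUBLE, FAST };

    Kind TargetKind(bool saw_heap_number, bool saw_other_heap_object,
                    bool allow_converted_doubles) {
      if (saw_other_heap_object) return FAST;  // must hold arbitrary objects
      if (saw_heap_number) {
        // Numbers can be stored unboxed in a double array only when the
        // caller allows the conversion; otherwise they force tagged elements.
        return allow_converted_doubles ? DOUBLE : FAST;
      }
      return SMI_ONLY;                         // nothing to widen for
    }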
MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
Object** objects = reinterpret_cast<Object**>(
FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
return EnsureCanContainElements(objects, elements->length());
MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
EnsureElementsMode mode) {
if (elements->map() != GetHeap()->fixed_double_array_map()) {
ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
elements->map() == GetHeap()->fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
return EnsureCanContainElements(objects, elements->length(), mode);
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
}
return this;
}
@ -1311,8 +1355,6 @@ void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
ASSERT(!val->IsJSGlobalPropertyCell());
WRITE_FIELD(this, kValueOffset, val);
GetHeap()->incremental_marking()->RecordWrite(
this, HeapObject::RawField(this, kValueOffset), val);
}
@ -1703,6 +1745,20 @@ void FixedArray::set(int index,
}
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
Heap* heap = array->GetHeap();
if (heap->InNewSpace(value)) {
heap->RecordWrite(array->address(), offset);
}
}
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
@ -1797,12 +1853,12 @@ void DescriptorArray::set_bit_field3_storage(int value) {
}
void DescriptorArray::NoWriteBarrierSwap(FixedArray* array,
int first,
int second) {
void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
int first,
int second) {
Object* tmp = array->get(first);
NoWriteBarrierSet(array, first, array->get(second));
NoWriteBarrierSet(array, second, tmp);
NoIncrementalWriteBarrierSet(array, first, array->get(second));
NoIncrementalWriteBarrierSet(array, second, tmp);
}
@ -1914,20 +1970,16 @@ void DescriptorArray::Set(int descriptor_number,
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
// Make sure none of the elements in desc are in new space.
ASSERT(!HEAP->InNewSpace(desc->GetKey()));
ASSERT(!HEAP->InNewSpace(desc->GetValue()));
NoWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
FixedArray* content_array = GetContentArray();
NoWriteBarrierSet(content_array,
ToValueIndex(descriptor_number),
desc->GetValue());
NoWriteBarrierSet(content_array,
ToDetailsIndex(descriptor_number),
desc->GetDetails().AsSmi());
NoIncrementalWriteBarrierSet(content_array,
ToValueIndex(descriptor_number),
desc->GetValue());
NoIncrementalWriteBarrierSet(content_array,
ToDetailsIndex(descriptor_number),
desc->GetDetails().AsSmi());
}
@ -1941,15 +1993,16 @@ void DescriptorArray::CopyFrom(int index,
}
void DescriptorArray::NoWriteBarrierSwapDescriptors(int first, int second) {
NoWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
int first, int second) {
NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
FixedArray* content_array = GetContentArray();
NoWriteBarrierSwap(content_array,
ToValueIndex(first),
ToValueIndex(second));
NoWriteBarrierSwap(content_array,
ToDetailsIndex(first),
ToDetailsIndex(second));
NoIncrementalWriteBarrierSwap(content_array,
ToValueIndex(first),
ToValueIndex(second));
NoIncrementalWriteBarrierSwap(content_array,
ToDetailsIndex(first),
ToDetailsIndex(second));
}
@ -4111,7 +4164,8 @@ ElementsKind JSObject::GetElementsKind() {
(map == GetHeap()->fixed_array_map() ||
map == GetHeap()->fixed_cow_array_map())) ||
(kind == FAST_DOUBLE_ELEMENTS &&
fixed_array->IsFixedDoubleArray()) ||
(fixed_array->IsFixedDoubleArray() ||
fixed_array == GetHeap()->empty_fixed_array())) ||
(kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
@ -4570,11 +4624,18 @@ void JSArray::set_length(Smi* length) {
}
MaybeObject* JSArray::SetContent(FixedArray* storage) {
MaybeObject* maybe_object = EnsureCanContainElements(storage);
if (maybe_object->IsFailure()) return maybe_object;
set_length(Smi::FromInt(storage->length()));
MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
MaybeObject* maybe_result = EnsureCanContainElements(
storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
if (maybe_result->IsFailure()) return maybe_result;
ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
((storage->map() != GetHeap()->fixed_double_array_map()) &&
((GetElementsKind() == FAST_ELEMENTS) ||
(GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
set_elements(storage);
set_length(Smi::FromInt(storage->length()));
return this;
}

138
deps/v8/src/objects.cc

@ -961,14 +961,14 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
if (size >= ExternalString::kSize) {
this->set_map(
this->set_map_no_write_barrier(
is_symbol
? (is_ascii ? heap->external_symbol_with_ascii_data_map()
: heap->external_symbol_map())
: (is_ascii ? heap->external_string_with_ascii_data_map()
: heap->external_string_map()));
} else {
this->set_map(
this->set_map_no_write_barrier(
is_symbol
? (is_ascii ? heap->short_external_symbol_with_ascii_data_map()
: heap->short_external_symbol_map())
@ -1011,11 +1011,13 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// Morph the object to an external string by adjusting the map and
// reinitializing the fields. Use short version if space is limited.
if (size >= ExternalString::kSize) {
this->set_map(is_symbol ? heap->external_ascii_symbol_map()
: heap->external_ascii_string_map());
this->set_map_no_write_barrier(
is_symbol ? heap->external_ascii_symbol_map()
: heap->external_ascii_string_map());
} else {
this->set_map(is_symbol ? heap->short_external_ascii_symbol_map()
: heap->short_external_ascii_string_map());
this->set_map_no_write_barrier(
is_symbol ? heap->short_external_ascii_symbol_map()
: heap->short_external_ascii_string_map());
}
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_resource(resource);
@ -1640,8 +1642,6 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
String* name,
JSFunction* function,
PropertyAttributes attributes) {
ASSERT(!GetHeap()->InNewSpace(function));
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
Object* new_descriptors;
@ -1756,7 +1756,7 @@ MaybeObject* JSObject::AddProperty(String* name,
// Ensure the descriptor array does not get too big.
if (map_of_this->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
if (value->IsJSFunction() && !heap->InNewSpace(value)) {
if (value->IsJSFunction()) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
@ -2995,7 +2995,6 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
JSFunction* function =
JSFunction::cast(target_descriptors->GetValue(number));
ASSERT(!HEAP->InNewSpace(function));
if (value == function) {
set_map(target_map);
return value;
@ -4855,7 +4854,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// of the next map and recording the index in the transition array in
// the map field of the array.
Map* next = Map::cast(contents->get(i));
next->set_map_unsafe(current);
next->set_map_no_write_barrier(current);
*map_or_index_field = Smi::FromInt(i + 2);
current = next;
map_done = false;
@ -4880,7 +4879,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
Object* perhaps_map = prototype_transitions->get(i);
if (perhaps_map->IsMap()) {
Map* next = Map::cast(perhaps_map);
next->set_map_unsafe(current);
next->set_map_no_write_barrier(current);
*proto_map_or_index_field =
Smi::FromInt(i + kProtoTransitionElementsPerEntry);
current = next;
@ -4896,7 +4895,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// the map field, which is being used to track the traversal and put the
// correct map (the meta_map) in place while we do the callback.
Map* prev = current->map();
current->set_map_unsafe(meta_map);
current->set_map_no_write_barrier(meta_map);
callback(current, data);
current = prev;
}
@ -5395,7 +5394,9 @@ MaybeObject* FixedArray::CopySize(int new_length) {
AssertNoAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
result->set_map(map());
// We are taking the map from the old fixed array so the map is sure to
// be an immortal immutable object.
result->set_map_no_write_barrier(map());
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) {
result->set(i, get(i), mode);
@ -5635,7 +5636,7 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
}
}
if (child_hash <= parent_hash) break;
NoWriteBarrierSwapDescriptors(parent_index, child_index);
NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
// Now element at child_index could be < its children.
parent_index = child_index; // parent_hash remains correct.
}
@ -5644,7 +5645,7 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
// Extract elements and create sorted array.
for (int i = len - 1; i > 0; --i) {
// Put max element at the back of the array.
NoWriteBarrierSwapDescriptors(0, i);
NoIncrementalWriteBarrierSwapDescriptors(0, i);
// Shift down the new top element.
int parent_index = 0;
const uint32_t parent_hash = GetKey(parent_index)->Hash();
@ -5660,7 +5661,7 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
}
}
if (child_hash <= parent_hash) break;
NoWriteBarrierSwapDescriptors(parent_index, child_index);
NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
parent_index = child_index;
}
}
@ -7639,6 +7640,22 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
}
#define DECLARE_TAG(ignore1, name, ignore2) name,
const char* const VisitorSynchronization::kTags[
VisitorSynchronization::kNumberOfSyncTags] = {
VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
};
#undef DECLARE_TAG
#define DECLARE_TAG(ignore1, ignore2, name) name,
const char* const VisitorSynchronization::kTagNames[
VisitorSynchronization::kNumberOfSyncTags] = {
VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
};
#undef DECLARE_TAG
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@ -8115,9 +8132,20 @@ void Code::Disassemble(const char* name, FILE* out) {
static void CopyFastElementsToFast(FixedArray* source,
FixedArray* destination,
WriteBarrierMode mode) {
uint32_t count = static_cast<uint32_t>(source->length());
for (uint32_t i = 0; i < count; ++i) {
destination->set(i, source->get(i), mode);
int count = source->length();
int copy_size = Min(count, destination->length());
if (mode == SKIP_WRITE_BARRIER ||
!Page::FromAddress(destination->address())->IsFlagSet(
MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)) {
Address to = destination->address() + FixedArray::kHeaderSize;
Address from = source->address() + FixedArray::kHeaderSize;
memcpy(reinterpret_cast<void*>(to),
reinterpret_cast<void*>(from),
kPointerSize * copy_size);
} else {
for (int i = 0; i < copy_size; ++i) {
destination->set(i, source->get(i), mode);
}
}
}
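The rewritten copy takes one of two routes: when the write barrier can be skipped, either because the caller passed SKIP_WRITE_BARRIER or because the destination's page is flagged as holding no interesting outgoing pointers, the whole payload moves with one memcpy; otherwise it falls back to per-slot stores that keep the barrier honest. A sketch of that shape with a stubbed barrier:

    #include <cstring>

    void CopySlots(void** dst, void* const* src, int count,
                   bool needs_barrier) {
      if (!needs_barrier) {
        // Bulk move; no per-slot bookkeeping required.
        std::memcpy(dst, src, count * sizeof(void*));
        return;
      }
      for (int i = 0; i < count; ++i) {
        dst[i] = src[i];
        // A real GC would record the write here, e.g. RecordWrite(dst + i).
      }
    }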
@ -8125,11 +8153,14 @@ static void CopyFastElementsToFast(FixedArray* source,
static void CopySlowElementsToFast(NumberDictionary* source,
FixedArray* destination,
WriteBarrierMode mode) {
int destination_length = destination->length();
for (int i = 0; i < source->Capacity(); ++i) {
Object* key = source->KeyAt(i);
if (key->IsNumber()) {
uint32_t entry = static_cast<uint32_t>(key->Number());
destination->set(entry, source->ValueAt(i), mode);
if (entry < static_cast<uint32_t>(destination_length)) {
destination->set(entry, source->ValueAt(i), mode);
}
}
}
}
@ -8340,14 +8371,8 @@ MaybeObject* JSArray::Initialize(int capacity) {
void JSArray::Expand(int required_size) {
Handle<JSArray> self(this);
Handle<FixedArray> old_backing(FixedArray::cast(elements()));
int old_size = old_backing->length();
int new_size = required_size > old_size ? required_size : old_size;
Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
// Can't use this any more now because we may have had a GC!
for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
GetIsolate()->factory()->SetContent(self, new_backing);
GetIsolate()->factory()->SetElementsCapacityAndLength(
Handle<JSArray>(this), required_size, required_size);
}
@ -8501,13 +8526,14 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
uint32_t first_arg,
uint32_t arg_count) {
uint32_t arg_count,
EnsureElementsMode mode) {
// Elements in |Arguments| are ordered backwards (because they're on the
// stack), but the method that's called here iterates over them in the
// forward direction.
return EnsureCanContainElements(
args->arguments() - first_arg - (arg_count - 1),
arg_count);
arg_count, mode);
}
@ -9459,31 +9485,45 @@ MUST_USE_RESULT MaybeObject* JSObject::TransitionElementsKind(
FixedArrayBase* elms = FixedArrayBase::cast(elements());
uint32_t capacity = static_cast<uint32_t>(elms->length());
uint32_t length = capacity;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
Object* raw_length = JSArray::cast(this)->length();
if (raw_length->IsUndefined()) {
// If length is undefined, the JSArray is still being initialized and has
// no elements; assume a length of zero.
length = 0;
} else {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
}
}
if (from_kind == FAST_SMI_ONLY_ELEMENTS) {
if (to_kind == FAST_DOUBLE_ELEMENTS) {
MaybeObject* maybe_result =
SetFastDoubleElementsCapacityAndLength(capacity, length);
if (maybe_result->IsFailure()) return maybe_result;
return this;
} else if (to_kind == FAST_ELEMENTS) {
MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
Map* new_map;
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
if (FLAG_trace_elements_transitions) {
PrintElementsTransition(stdout, from_kind, elms, FAST_ELEMENTS, elms);
}
set_map(new_map);
return this;
if ((from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) ||
(length == 0)) {
MaybeObject* maybe_new_map = GetElementsTransitionMap(to_kind);
Map* new_map;
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
if (FLAG_trace_elements_transitions) {
PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
}
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
set_map(new_map);
return this;
}
if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
MaybeObject* maybe_result =
SetFastDoubleElementsCapacityAndLength(capacity, length);
if (maybe_result->IsFailure()) return maybe_result;
return this;
}
if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
capacity, length, kDontAllowSmiOnlyElements);
if (maybe_result->IsFailure()) return maybe_result;
return this;
}
// This method should never be called for any case other than the ones
// handled above.
UNREACHABLE();
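After the rework, TransitionElementsKind supports exactly three shapes of transition, plus the shortcut that an empty array (length 0) only needs its map swapped no matter the target kind. As a table, with illustrative enum values:

    enum EKind { SMI_ONLY_E, DOUBLE_E, FAST_E };

    bool TransitionSupported(EKind from, EKind to, int length) {
      if (from == SMI_ONLY_E && to == FAST_E) return true;  // map change only
      if (length == 0) return true;                // also map change only
      if (from == SMI_ONLY_E && to == DOUBLE_E)
        return true;                               // copy smis as raw doubles
      if (from == DOUBLE_E && to == FAST_E)
        return true;                               // re-box back to tagged
      return false;  // anything else hits UNREACHABLE above
    }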
@ -10598,7 +10638,7 @@ class SymbolKey : public HashTableKey {
// Transform string to symbol if possible.
Map* map = heap->SymbolMapForString(string_);
if (map != NULL) {
string_->set_map(map);
string_->set_map_no_write_barrier(map);
ASSERT(string_->IsSymbol());
return string_;
}

85
deps/v8/src/objects.h

@ -1131,7 +1131,10 @@ class HeapObject: public Object {
// information.
inline Map* map();
inline void set_map(Map* value);
inline void set_map_unsafe(Map* value);
// The no-write-barrier version. This is OK if the object is white and in
// new space, or if the value is an immortal immutable object, such as the
// maps of primitive (non-JS) objects like strings and heap numbers.
inline void set_map_no_write_barrier(Map* value);
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
@ -1319,6 +1322,13 @@ class HeapNumber: public HeapObject {
};
enum EnsureElementsMode {
DONT_ALLOW_DOUBLE_ELEMENTS,
ALLOW_COPIED_DOUBLE_ELEMENTS,
ALLOW_CONVERTED_DOUBLE_ELEMENTS
};
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
@ -1612,16 +1622,19 @@ class JSObject: public JSReceiver {
inline void ValidateSmiOnlyElements();
// Makes sure that this object can contain non-smi Object as elements.
inline MaybeObject* EnsureCanContainNonSmiElements();
// Makes sure that this object can contain HeapObject as elements.
inline MaybeObject* EnsureCanContainHeapObjectElements();
// Makes sure that this object can contain the specified elements.
inline MaybeObject* EnsureCanContainElements(Object** elements,
uint32_t count);
inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
uint32_t count,
EnsureElementsMode mode);
inline MaybeObject* EnsureCanContainElements(FixedArrayBase* elements,
EnsureElementsMode mode);
MaybeObject* EnsureCanContainElements(Arguments* arguments,
uint32_t first_arg,
uint32_t arg_count);
uint32_t arg_count,
EnsureElementsMode mode);
// Do we want to keep the elements in fast case when increasing the
// capacity?
@ -2121,6 +2134,9 @@ class FixedArray: public FixedArrayBase {
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
inline Object** GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@ -2187,6 +2203,13 @@ class FixedArray: public FixedArrayBase {
int index,
Object* value);
// Set operation on FixedArray without incremental write barrier. Can
// only be used if the object is guaranteed to be white (whiteness witness
// is present).
static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@ -2465,12 +2488,12 @@ class DescriptorArray: public FixedArray {
NULL_DESCRIPTOR;
}
// Swap operation on FixedArray without using write barriers.
static inline void NoWriteBarrierSwap(FixedArray* array,
int first,
int second);
static inline void NoIncrementalWriteBarrierSwap(
FixedArray* array, int first, int second);
// Swap descriptor first and second.
inline void NoWriteBarrierSwapDescriptors(int first, int second);
inline void NoIncrementalWriteBarrierSwapDescriptors(
int first, int second);
FixedArray* GetContentArray() {
return FixedArray::cast(get(kContentArrayIndex));
@ -3738,11 +3761,6 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
// Unchecked accessor to be used during GC.
FixedArray* UncheckedLiteralArray() {
return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
}
#undef DEFINE_ELEMENT_ACCESSORS
// Accessors for elements of the ith deoptimization entry.
@ -7381,7 +7399,7 @@ class JSArray: public JSObject {
MUST_USE_RESULT MaybeObject* Initialize(int capacity);
// Set the content of the array to the content of storage.
inline MaybeObject* SetContent(FixedArray* storage);
inline MaybeObject* SetContent(FixedArrayBase* storage);
// Casting.
static inline JSArray* cast(Object* obj);
@ -7862,6 +7880,34 @@ class BreakPointInfo: public Struct {
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_ACCESSORS
#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
V(kSymbolTable, "symbol_table", "(Symbols)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
V(kSymbol, "symbol", "(Symbol)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
V(kRelocatable, "relocatable", "(Relocatable)") \
V(kDebug, "debug", "(Debugger)") \
V(kCompilationCache, "compilationcache", "(Compilation cache)") \
V(kHandleScope, "handlescope", "(Handle scope)") \
V(kBuiltins, "builtins", "(Builtins)") \
V(kGlobalHandles, "globalhandles", "(Global handles)") \
V(kThreadManager, "threadmanager", "(Thread manager)") \
V(kExtensions, "Extensions", "(Extensions)")
class VisitorSynchronization : public AllStatic {
public:
#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
enum SyncTag {
VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
kNumberOfSyncTags
};
#undef DECLARE_ENUM
static const char* const kTags[kNumberOfSyncTags];
static const char* const kTagNames[kNumberOfSyncTags];
};
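VISITOR_SYNCHRONIZATION_TAGS_LIST is an X-macro: each consumer defines a one-column DECLARE_* macro, applies the list, and gets the enum, the kTags strings, and the kTagNames strings from a single source of truth, so the three can never drift apart. A self-contained miniature of the pattern with an invented two-entry list:

    #include <cstdio>

    #define COLOR_LIST(V) V(kRed, "red") V(kGreen, "green")

    #define DECLARE_ENUM(enum_item, ignore) enum_item,
    enum Color { COLOR_LIST(DECLARE_ENUM) kNumberOfColors };
    #undef DECLARE_ENUM

    #define DECLARE_NAME(ignore, name) name,
    const char* const kColorNames[kNumberOfColors] = {
      COLOR_LIST(DECLARE_NAME)
    };
    #undef DECLARE_NAME

    int main() {
      std::printf("%s\n", kColorNames[kGreen]);  // prints "green"
      return 0;
    }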
// Abstract base class for visiting, and optionally modifying, the
// pointers contained in Objects. Used in GC and serialization/deserialization.
@ -7917,13 +7963,10 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a handle that has an embedder-assigned class ID.
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
#ifdef DEBUG
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
virtual void Synchronize(const char* tag) {}
#else
inline void Synchronize(const char* tag) {}
#endif
// Also used for marking up GC roots in heap snapshots.
virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
};

49
deps/v8/src/parser.cc

@ -2158,6 +2158,20 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
Token::Value tok = peek();
Statement* result;
if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
ExpectSemicolon(CHECK_OK);
result = new(zone()) ReturnStatement(GetLiteralUndefined());
} else {
Expression* expr = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
result = new(zone()) ReturnStatement(expr);
}
// An ECMAScript program is considered syntactically incorrect if it
// contains a return statement that is not within the body of a
// function. See ECMA-262, section 12.9, page 67.
@ -2170,19 +2184,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
return new(zone()) ExpressionStatement(throw_error);
}
Token::Value tok = peek();
if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
ExpectSemicolon(CHECK_OK);
return new(zone()) ReturnStatement(GetLiteralUndefined());
}
Expression* expr = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
return new(zone()) ReturnStatement(expr);
return result;
}
@ -2693,6 +2695,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// Assignment to eval or arguments is disallowed in strict mode.
CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
}
MarkAsLValue(expression);
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
@ -2926,6 +2929,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
// Prefix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
MarkAsLValue(expression);
int position = scanner().location().beg_pos;
return new(zone()) CountOperation(isolate(),
@ -2961,6 +2965,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// Postfix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
MarkAsLValue(expression);
Token::Value next = Next();
int position = scanner().location().beg_pos;
@ -3375,6 +3380,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
bool has_only_undefined_values = true;
// Fill in the literals.
bool is_simple = true;
@ -3398,6 +3404,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
// the tagged value, no matter what the ElementsKind is in case we
// ultimately end up in FAST_ELEMENTS.
has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
@ -3436,6 +3443,13 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
}
// Very small array literals without a concrete type hint from a constant
// value should default to the slow case, to avoid repeated elements-kind
// transitions on tiny objects.
if (has_only_undefined_values && values->length() <= 2) {
elements_kind = FAST_ELEMENTS;
}
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
@ -4479,6 +4493,15 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
}
void Parser::MarkAsLValue(Expression* expression) {
VariableProxy* proxy = expression != NULL
? expression->AsVariableProxy()
: NULL;
if (proxy != NULL) proxy->MarkAsLValue();
}
// Checks LHS expression for assignment and prefix/postfix increment/decrement
// in strict mode.
void Parser::CheckStrictModeLValue(Expression* expression,

5
deps/v8/src/parser.h

@ -661,6 +661,11 @@ class Parser {
bool* is_set,
bool* ok);
// Determine if the expression is a variable proxy and mark it as being used
// in an assignment or with an increment/decrement operator. This is
// currently used for statically checking assignments to harmony const
// bindings.
void MarkAsLValue(Expression* expression);
// Strict mode validation of LValue expressions
void CheckStrictModeLValue(Expression* expression,
const char* error,

5
deps/v8/src/platform-posix.cc

@ -70,6 +70,11 @@ intptr_t OS::MaxVirtualMemory() {
}
intptr_t OS::CommitPageSize() {
return 4096;
}
#ifndef __CYGWIN__
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {

5
deps/v8/src/platform-win32.cc

@ -889,6 +889,11 @@ void OS::Free(void* address, const size_t size) {
}
intptr_t OS::CommitPageSize() {
return 4096;
}
void OS::ProtectCode(void* address, const size_t size) {
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);

4
deps/v8/src/platform.h

@ -172,6 +172,10 @@ class OS {
bool is_executable);
static void Free(void* address, const size_t size);
// This is the granularity at which the ProtectCode(...) call can set page
// permissions.
static intptr_t CommitPageSize();
// Mark code segments non-writable.
static void ProtectCode(void* address, const size_t size);
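CommitPageSize is exposed precisely because ProtectCode can only flip permissions at page granularity, so callers must round code regions accordingly. A POSIX sketch of the intended W^X discipline (write the code while the mapping is read-write, then drop the write bit), assuming mmap/mprotect as on the platform-posix.cc side:

    #include <cstddef>
    #include <sys/mman.h>

    void* AllocWritableCode(std::size_t size) {  // size: page-size multiple
      return mmap(NULL, size, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    void MakeExecutable(void* address, std::size_t size) {
      // After this, the region can run but can no longer be written.
      mprotect(address, size, PROT_READ | PROT_EXEC);
    }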

1
deps/v8/src/preparser.cc

@ -627,6 +627,7 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
Expect(i::Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, ok);
if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
return Statement::Default();
}

20
deps/v8/src/profile-generator-inl.h

@ -95,6 +95,26 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
uint64_t HeapObjectsMap::GetNthGcSubrootId(int delta) {
return kGcRootsFirstSubrootId + delta * kObjectIdStep;
}
HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
return reinterpret_cast<HeapObject*>(
reinterpret_cast<char*>(kFirstGcSubrootObject) +
delta * HeapObjectsMap::kObjectIdStep);
}
int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
return static_cast<int>(
(reinterpret_cast<char*>(subroot) -
reinterpret_cast<char*>(kFirstGcSubrootObject)) /
HeapObjectsMap::kObjectIdStep);
}
uint64_t HeapEntry::id() {
union {
Id stored_id;

242
deps/v8/src/profile-generator.cc

@ -938,7 +938,7 @@ void HeapGraphEdge::Init(
void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
ASSERT(type == kElement || type == kHidden);
ASSERT(type == kElement || type == kHidden || type == kWeak);
child_index_ = child_index;
type_ = type;
index_ = index;
@ -1053,8 +1053,11 @@ void HeapEntry::PaintAllReachable() {
}
void HeapEntry::Print(int max_depth, int indent) {
OS::Print("%6d %6d [%llu] ", self_size(), RetainedSize(false), id());
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
OS::Print("%6d %7d @%6llu %*c %s%s: ",
self_size(), RetainedSize(false), id(),
indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@ -1073,29 +1076,40 @@ void HeapEntry::Print(int max_depth, int indent) {
Vector<HeapGraphEdge> ch = children();
for (int i = 0; i < ch.length(); ++i) {
HeapGraphEdge& edge = ch[i];
const char* edge_prefix = "";
ScopedVector<char> index(64);
const char* edge_name = index.start();
switch (edge.type()) {
case HeapGraphEdge::kContextVariable:
OS::Print(" %*c #%s: ", indent, ' ', edge.name());
edge_prefix = "#";
edge_name = edge.name();
break;
case HeapGraphEdge::kElement:
OS::Print(" %*c %d: ", indent, ' ', edge.index());
OS::SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kInternal:
OS::Print(" %*c $%s: ", indent, ' ', edge.name());
edge_prefix = "$";
edge_name = edge.name();
break;
case HeapGraphEdge::kProperty:
OS::Print(" %*c %s: ", indent, ' ', edge.name());
edge_name = edge.name();
break;
case HeapGraphEdge::kHidden:
OS::Print(" %*c $%d: ", indent, ' ', edge.index());
edge_prefix = "$";
OS::SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kShortcut:
OS::Print(" %*c ^%s: ", indent, ' ', edge.name());
edge_prefix = "^";
edge_name = edge.name();
break;
case HeapGraphEdge::kWeak:
edge_prefix = "w";
OS::SNPrintF(index, "%d", edge.index());
break;
default:
OS::Print("!!! unknown edge type: %d ", edge.type());
OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
}
edge.to()->Print(max_depth, indent + 2);
edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
}
}
@ -1215,6 +1229,9 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
STATIC_ASSERT(
sizeof(HeapEntry) ==
SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize); // NOLINT
for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
gc_subroot_entries_[i] = NULL;
}
}
HeapSnapshot::~HeapSnapshot() {
@ -1270,6 +1287,21 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
}
HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
int children_count,
int retainers_count) {
ASSERT(gc_subroot_entries_[tag] == NULL);
ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
return (gc_subroot_entries_[tag] = AddEntry(
HeapEntry::kObject,
VisitorSynchronization::kTagNames[tag],
HeapObjectsMap::GetNthGcSubrootId(tag),
0,
children_count,
retainers_count));
}
HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
int retainers_count) {
ASSERT(natives_root_entry_ == NULL);
@ -1355,17 +1387,22 @@ List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
void HeapSnapshot::Print(int max_depth) {
root()->Print(max_depth, 0);
root()->Print("", "", max_depth, 0);
}
// We split IDs on evens for embedder objects (see
// HeapObjectsMap::GenerateId) and odds for native objects.
const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
// Increase kFirstAvailableObjectId if new 'special' objects appear.
const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
const uint64_t HeapObjectsMap::kGcRootsObjectId =
HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
const uint64_t HeapObjectsMap::kNativesRootObjectId =
HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
const uint64_t HeapObjectsMap::kGcRootsFirstSubrootId =
HeapObjectsMap::kNativesRootObjectId + HeapObjectsMap::kObjectIdStep;
const uint64_t HeapObjectsMap::kFirstAvailableObjectId =
HeapObjectsMap::kGcRootsFirstSubrootId +
VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
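Spelled out, with kObjectIdStep == 2 and the 14 entries of VISITOR_SYNCHRONIZATION_TAGS_LIST, the special IDs line up as below; deriving them instead of hard-coding 1/3/5/7 means adding a sync tag automatically pushes kFirstAvailableObjectId up:

    const int kStep = 2;       // kObjectIdStep
    const int kNumTags = 14;   // entries in VISITOR_SYNCHRONIZATION_TAGS_LIST
    const int kInternalRoot = 1;
    const int kGcRoots = kInternalRoot + kStep;        // 3
    const int kNativesRoot = kGcRoots + kStep;         // 5
    const int kFirstSubroot = kNativesRoot + kStep;    // 7
    const int kFirstAvailable =
        kFirstSubroot + kNumTags * kStep;              // 7 + 28 = 35

so GC subroot N (see GetNthGcSubrootId) occupies id 7 + 2 * N, and every special id stays odd, the parity reserved for native objects.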
HeapObjectsMap::HeapObjectsMap()
: initial_fill_mode_(true),
@ -1391,7 +1428,7 @@ uint64_t HeapObjectsMap::FindObject(Address addr) {
if (existing != 0) return existing;
}
uint64_t id = next_id_;
next_id_ += 2;
next_id_ += kObjectIdStep;
AddEntry(addr, id);
return id;
}
@ -1684,6 +1721,12 @@ HeapObject *const V8HeapExplorer::kInternalRootObject =
HeapObject *const V8HeapExplorer::kGcRootsObject =
reinterpret_cast<HeapObject*>(
static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
HeapObject *const V8HeapExplorer::kFirstGcSubrootObject =
reinterpret_cast<HeapObject*>(
static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
HeapObject *const V8HeapExplorer::kLastGcSubrootObject =
reinterpret_cast<HeapObject*>(
static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
V8HeapExplorer::V8HeapExplorer(
@ -1716,6 +1759,11 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
return snapshot_->AddRootEntry(children_count);
} else if (object == kGcRootsObject) {
return snapshot_->AddGcRootsEntry(children_count, retainers_count);
} else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
return snapshot_->AddGcSubrootEntry(
GetGcSubrootOrder(object),
children_count,
retainers_count);
} else if (object->IsJSGlobalObject()) {
const char* tag = objects_tags_.GetTag(object);
const char* name = collection_->names()->GetName(
@ -1779,6 +1827,18 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
: "",
children_count,
retainers_count);
} else if (object->IsGlobalContext()) {
return AddEntry(object,
HeapEntry::kHidden,
"system / GlobalContext",
children_count,
retainers_count);
} else if (object->IsContext()) {
return AddEntry(object,
HeapEntry::kHidden,
"system / Context",
children_count,
retainers_count);
} else if (object->IsFixedArray() ||
object->IsFixedDoubleArray() ||
object->IsByteArray() ||
@ -1818,9 +1878,38 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
}
class GcSubrootsEnumerator : public ObjectVisitor {
public:
GcSubrootsEnumerator(
SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
: filler_(filler),
explorer_(explorer),
previous_object_count_(0),
object_count_(0) {
}
void VisitPointers(Object** start, Object** end) {
object_count_ += end - start;
}
void Synchronize(VisitorSynchronization::SyncTag tag) {
// Skip empty subroots.
if (previous_object_count_ != object_count_) {
previous_object_count_ = object_count_;
filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
}
}
private:
SnapshotFillerInterface* filler_;
V8HeapExplorer* explorer_;
intptr_t previous_object_count_;
intptr_t object_count_;
};
void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
filler->AddEntry(kInternalRootObject, this);
filler->AddEntry(kGcRootsObject, this);
GcSubrootsEnumerator enumerator(filler, this);
heap_->IterateRoots(&enumerator, VISIT_ALL);
}
@ -1939,6 +2028,11 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"literals_or_bindings",
js_fun->literals_or_bindings(),
JSFunction::kLiteralsOffset);
for (int i = JSFunction::kNonWeakFieldsEndOffset;
i < JSFunction::kSize;
i += kPointerSize) {
SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
}
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@ -1965,8 +2059,14 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->map_cache(), "(context map cache)");
TagObject(context->data(), "(context data)");
for (int i = Context::FIRST_WEAK_SLOT;
i < Context::GLOBAL_CONTEXT_SLOTS;
++i) {
SetWeakReference(obj, entry,
i, context->get(i),
FixedArray::OffsetOfElementAt(i));
}
} else if (obj->IsMap()) {
Map* map = Map::cast(obj);
SetInternalReference(obj, entry,
@ -2009,6 +2109,9 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
SetWeakReference(obj, entry,
1, shared->initial_map(),
SharedFunctionInfo::kInitialMapOffset);
} else if (obj->IsScript()) {
Script* script = Script::cast(obj);
SetInternalReference(obj, entry,
@ -2235,15 +2338,66 @@ HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
class RootsReferencesExtractor : public ObjectVisitor {
private:
struct IndexTag {
IndexTag(int index, VisitorSynchronization::SyncTag tag)
: index(index), tag(tag) { }
int index;
VisitorSynchronization::SyncTag tag;
};
public:
explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
: explorer_(explorer) {
RootsReferencesExtractor()
: collecting_all_references_(false),
previous_reference_count_(0) {
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
if (collecting_all_references_) {
for (Object** p = start; p < end; p++) all_references_.Add(*p);
} else {
for (Object** p = start; p < end; p++) strong_references_.Add(*p);
}
}
void SetCollectingAllReferences() { collecting_all_references_ = true; }
void FillReferences(V8HeapExplorer* explorer) {
ASSERT(strong_references_.length() <= all_references_.length());
for (int i = 0; i < reference_tags_.length(); ++i) {
explorer->SetGcRootsReference(reference_tags_[i].tag);
}
int strong_index = 0, all_index = 0, tags_index = 0;
while (all_index < all_references_.length()) {
if (strong_index < strong_references_.length() &&
strong_references_[strong_index] == all_references_[all_index]) {
explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
false,
all_references_[all_index++]);
++strong_index;
} else {
explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
true,
all_references_[all_index++]);
}
if (reference_tags_[tags_index].index == all_index) ++tags_index;
}
}
void Synchronize(VisitorSynchronization::SyncTag tag) {
if (collecting_all_references_ &&
previous_reference_count_ != all_references_.length()) {
previous_reference_count_ = all_references_.length();
reference_tags_.Add(IndexTag(previous_reference_count_, tag));
}
}
private:
V8HeapExplorer* explorer_;
bool collecting_all_references_;
List<Object*> strong_references_;
List<Object*> all_references_;
int previous_reference_count_;
List<IndexTag> reference_tags_;
};
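The new extractor is driven in two passes (see the IterateAndExtractReferences hunk below): first VISIT_ONLY_STRONG, then, after SetCollectingAllReferences(), VISIT_ALL. FillReferences merges the two ordered lists, and any reference seen only in the full pass is reported as weak. A self-contained sketch of that merge, assuming (as the code does) that the strong list is an in-order subsequence of the full list; names here are hypothetical:

#include <vector>

// Classify each reference from the VISIT_ALL pass: weak unless it also
// appeared, in order, in the VISIT_ONLY_STRONG pass.
void Classify(const std::vector<void*>& strong,
              const std::vector<void*>& all,
              std::vector<bool>* is_weak) {
  size_t s = 0;
  for (size_t a = 0; a < all.size(); ++a) {
    bool is_strong = s < strong.size() && strong[s] == all[a];
    if (is_strong) ++s;
    is_weak->push_back(!is_strong);
  }
}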
@ -2268,8 +2422,11 @@ bool V8HeapExplorer::IterateAndExtractReferences(
return false;
}
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
RootsReferencesExtractor extractor;
heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
extractor.SetCollectingAllReferences();
heap_->IterateRoots(&extractor, VISIT_ALL);
extractor.FillReferences(this);
filler_ = NULL;
return progress_->ProgressReport(false);
}
@ -2359,6 +2516,24 @@ void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
}
void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
int index,
Object* child_obj,
int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetIndexedReference(HeapGraphEdge::kWeak,
parent_obj,
parent_entry,
index,
child_obj,
child_entry);
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
}
void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
String* reference_name,
@ -2421,12 +2596,21 @@ void V8HeapExplorer::SetRootShortcutReference(Object* child_obj) {
}
void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
kGcRootsObject, snapshot_->gc_roots(),
GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
}
void V8HeapExplorer::SetGcSubrootReference(
VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
kGcRootsObject, snapshot_->gc_roots(),
is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
child_obj, child_entry);
}
}
@ -3235,7 +3419,8 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
writer_->AddNumber(edge->type());
writer_->AddCharacter(',');
if (edge->type() == HeapGraphEdge::kElement
|| edge->type() == HeapGraphEdge::kHidden) {
|| edge->type() == HeapGraphEdge::kHidden
|| edge->type() == HeapGraphEdge::kWeak) {
writer_->AddNumber(edge->index());
} else {
writer_->AddNumber(GetStringId(edge->name()));
@ -3315,7 +3500,8 @@ void HeapSnapshotJSONSerializer::SerializeNodes() {
"," JSON_S("property")
"," JSON_S("internal")
"," JSON_S("hidden")
"," JSON_S("shortcut"))
"," JSON_S("shortcut")
"," JSON_S("weak"))
"," JSON_S("string_or_number")
"," JSON_S("node"))))));
#undef JSON_S

31
deps/v8/src/profile-generator.h

@ -455,7 +455,8 @@ class HeapGraphEdge BASE_EMBEDDED {
kProperty = v8::HeapGraphEdge::kProperty,
kInternal = v8::HeapGraphEdge::kInternal,
kHidden = v8::HeapGraphEdge::kHidden,
kShortcut = v8::HeapGraphEdge::kShortcut
kShortcut = v8::HeapGraphEdge::kShortcut,
kWeak = v8::HeapGraphEdge::kWeak
};
HeapGraphEdge() { }
@ -465,7 +466,7 @@ class HeapGraphEdge BASE_EMBEDDED {
Type type() { return static_cast<Type>(type_); }
int index() {
ASSERT(type_ == kElement || type_ == kHidden);
ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
return index_;
}
const char* name() {
@ -588,7 +589,8 @@ class HeapEntry BASE_EMBEDDED {
int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
int RetainedSize(bool exact);
void Print(int max_depth, int indent);
void Print(
const char* prefix, const char* edge_name, int max_depth, int indent);
Handle<HeapObject> GetHeapObject();
@ -661,6 +663,7 @@ class HeapSnapshot {
HeapEntry* root() { return root_entry_; }
HeapEntry* gc_roots() { return gc_roots_entry_; }
HeapEntry* natives_root() { return natives_root_entry_; }
HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
List<HeapEntry*>* entries() { return &entries_; }
int raw_entries_size() { return raw_entries_size_; }
@ -674,6 +677,9 @@ class HeapSnapshot {
int retainers_count);
HeapEntry* AddRootEntry(int children_count);
HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
HeapEntry* AddGcSubrootEntry(int tag,
int children_count,
int retainers_count);
HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
void ClearPaint();
HeapEntry* GetEntryById(uint64_t id);
@ -695,6 +701,7 @@ class HeapSnapshot {
HeapEntry* root_entry_;
HeapEntry* gc_roots_entry_;
HeapEntry* natives_root_entry_;
HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
char* raw_entries_;
List<HeapEntry*> entries_;
bool entries_sorted_;
@ -716,10 +723,13 @@ class HeapObjectsMap {
void MoveObject(Address from, Address to);
static uint64_t GenerateId(v8::RetainedObjectInfo* info);
static inline uint64_t GetNthGcSubrootId(int delta);
static const int kObjectIdStep = 2;
static const uint64_t kInternalRootObjectId;
static const uint64_t kGcRootsObjectId;
static const uint64_t kNativesRootObjectId;
static const uint64_t kGcRootsFirstSubrootId;
static const uint64_t kFirstAvailableObjectId;
private:
@ -969,6 +979,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* parent,
int index,
Object* child);
void SetWeakReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
int index,
Object* child_obj,
int field_offset);
void SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
@ -981,11 +996,16 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Object* child);
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
void SetGcRootsReference(Object* child);
void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
void SetGcSubrootReference(
VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
void TagObject(Object* obj, const char* tag);
HeapEntry* GetEntry(Object* obj);
static inline HeapObject* GetNthGcSubrootObject(int delta);
static inline int GetGcSubrootOrder(HeapObject* subroot);
Heap* heap_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
@ -994,8 +1014,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet objects_tags_;
static HeapObject* const kGcRootsObject;
static HeapObject* const kFirstGcSubrootObject;
static HeapObject* const kLastGcSubrootObject;
friend class IndexedReferencesExtractor;
friend class GcSubrootsEnumerator;
friend class RootsReferencesExtractor;
DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);

43
deps/v8/src/runtime.cc

@ -625,6 +625,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index), isolate);
if (*boilerplate == isolate->heap()->undefined_value()) {
ASSERT(*elements != isolate->heap()->empty_fixed_array());
boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
@ -4651,6 +4652,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
if (value->IsNumber()) {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
TransitionElementsKind(boilerplate_object, FAST_DOUBLE_ELEMENTS);
ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
FixedDoubleArray* double_array =
FixedDoubleArray::cast(object->elements());
@ -4660,6 +4662,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS);
TransitionElementsKind(object, FAST_ELEMENTS);
TransitionElementsKind(boilerplate_object, FAST_ELEMENTS);
FixedArray* object_array =
FixedArray::cast(object->elements());
object_array->set(store_index, *value);
@ -6293,7 +6296,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
if (maybe_result->IsFailure()) return maybe_result;
result->set_length(Smi::FromInt(part_count));
@ -6669,7 +6672,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
if (maybe_result->IsFailure()) return maybe_result;
int special_length = special->length();
@ -7395,7 +7398,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
// Slow version of Math.pow. Special cases with fast paths are checked first.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@ -7411,22 +7415,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
int y_int = static_cast<int>(y);
double result;
if (y == y_int) {
result = power_double_int(x, y_int); // Returns 1 if exponent is 0.
} else if (y == 0.5) {
result = (isinf(x)) ? V8_INFINITY : sqrt(x + 0.0); // Convert -0 to +0.
} else if (y == -0.5) {
result = (isinf(x)) ? 0 : 1.0 / sqrt(x + 0.0); // Convert -0 to +0.
} else {
result = power_double_double(x, y);
}
if (isnan(result)) return isolate->heap()->nan_value();
return isolate->heap()->AllocateHeapNumber(result);
}
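The same dispatch is easier to follow in portable C++. A sketch under stated assumptions: power_double_int_sketch stands in for runtime.cc's power_double_int (exponentiation by squaring, ignoring the subnormal corner cases the stub comments elsewhere worry about), and the int cast assumes the exponent fits, as the runtime code itself does:

#include <cmath>

// Square-and-multiply; returns 1.0 when the exponent is 0.
double power_double_int_sketch(double x, int y) {
  double m = (y < 0) ? 1.0 / x : x;
  unsigned n = (y < 0) ? -static_cast<unsigned>(y) : static_cast<unsigned>(y);
  double p = 1.0;
  for (; n != 0; n >>= 1, m *= m) {
    if (n & 1) p *= m;  // multiply in the bits of the exponent
  }
  return p;
}

double math_pow_sketch(double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) return power_double_int_sketch(x, y_int);
  // x + 0.0 converts -0 to +0 before taking the square root.
  if (y == 0.5) return std::isinf(x) ? INFINITY : std::sqrt(x + 0.0);
  if (y == -0.5) return std::isinf(x) ? 0.0 : 1.0 / std::sqrt(x + 0.0);
  return std::pow(x, y);
}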
// Fast version of Math.pow if we know that y is not an integer and
// y is not -0.5 or 0.5. Used as slowcase from codegen.
// Fast version of Math.pow if we know that y is not an integer and y is not
// -0.5 or 0.5. Used as slow case from fullcodegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (y == 0) {
return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
return isolate->heap()->nan_value();
} else {
return isolate->heap()->AllocateHeapNumber(pow(x, y));
double result = power_double_double(x, y);
if (isnan(result)) return isolate->heap()->nan_value();
return isolate->heap()->AllocateHeapNumber(result);
}
}
@ -7991,7 +8009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
AssertNoAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
array->set_map(isolate->heap()->fixed_array_map());
array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@ -8111,7 +8129,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
for (int j = 0; j < argc; j++, i++) {
new_bindings->set(i, *arguments[j + 1]);
}
new_bindings->set_map(isolate->heap()->fixed_cow_array_map());
new_bindings->set_map_no_write_barrier(
isolate->heap()->fixed_cow_array_map());
bound_function->set_function_bindings(*new_bindings);
// Update length.
@ -9299,7 +9318,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
CONVERT_ARG_CHECKED(JSArray, output, 1);
MaybeObject* maybe_result_array =
output->EnsureCanContainNonSmiElements();
output->EnsureCanContainHeapObjectElements();
if (maybe_result_array->IsFailure()) return maybe_result_array;
RUNTIME_ASSERT(output->HasFastElements());

43
deps/v8/src/scopes.cc

@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "compiler.h"
#include "messages.h"
#include "scopeinfo.h"
#include "allocation-inl.h"
@ -284,8 +285,25 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
if (FLAG_harmony_scoping) {
VariableProxy* proxy = scope->CheckAssignmentToConst();
if (proxy != NULL) {
// Found an assignment to const. Throw a syntax error.
MessageLocation location(info->script(),
proxy->position(),
proxy->position());
Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(0);
Handle<Object> result =
factory->NewSyntaxError("harmony_const_assign", array);
isolate->Throw(*result, &location);
return false;
}
}
info->SetScope(scope);
return true; // Can not fail.
return true;
}
@ -554,6 +572,29 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
VariableProxy* Scope::CheckAssignmentToConst() {
// Check this scope.
if (is_extended_mode()) {
for (int i = 0; i < unresolved_.length(); i++) {
ASSERT(unresolved_[i]->var() != NULL);
if (unresolved_[i]->var()->is_const_mode() &&
unresolved_[i]->IsLValue()) {
return unresolved_[i];
}
}
}
// Check inner scopes.
for (int i = 0; i < inner_scopes_.length(); i++) {
VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
if (proxy != NULL) return proxy;
}
// No assignments to const found.
return NULL;
}
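A compact model of the recursive scan above, with simplified stand-ins for VariableProxy and Scope (the real code additionally gates the local check on is_extended_mode()):

#include <vector>

struct Proxy { bool is_const_lvalue; };

struct Scope {
  std::vector<Proxy*> unresolved;
  std::vector<Scope*> inner_scopes;

  Proxy* CheckAssignmentToConst() {
    for (Proxy* p : unresolved) {        // check this scope
      if (p->is_const_lvalue) return p;
    }
    for (Scope* s : inner_scopes) {      // then inner scopes
      if (Proxy* found = s->CheckAssignmentToConst()) return found;
    }
    return nullptr;                      // no assignment to const found
  }
};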
void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ZoneList<Variable*>* context_locals) {
ASSERT(stack_locals != NULL);

5
deps/v8/src/scopes.h

@ -187,6 +187,11 @@ class Scope: public ZoneObject {
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
// For harmony block scoping mode: Check if the scope has variable proxies
// that are used as lvalues and point to const variables. Assumes that scopes
// have been analyzed and variables have been resolved.
VariableProxy* CheckAssignmentToConst();
// ---------------------------------------------------------------------------
// Scope-specific info.

6
deps/v8/src/spaces.cc

@ -1656,14 +1656,14 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
set_map_unsafe(heap->raw_unchecked_free_space_map());
set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
// Can't use FreeSpace::cast because it fails during deserialization.
FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
this_as_free_space->set_size(size_in_bytes);
} else if (size_in_bytes == kPointerSize) {
set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}

55
deps/v8/src/store-buffer.cc

@ -41,6 +41,7 @@ StoreBuffer::StoreBuffer(Heap* heap)
old_start_(NULL),
old_limit_(NULL),
old_top_(NULL),
old_reserved_limit_(NULL),
old_buffer_is_sorted_(false),
old_buffer_is_filtered_(false),
during_gc_(false),
@ -59,10 +60,25 @@ void StoreBuffer::Setup() {
reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_ =
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
old_top_ = old_start_ = new Address[kOldStoreBufferLength];
old_limit_ = old_start_ + kOldStoreBufferLength;
limit_ = start_ + (kStoreBufferSize / kPointerSize);
old_virtual_memory_ =
new VirtualMemory(kOldStoreBufferLength * kPointerSize);
old_top_ = old_start_ =
reinterpret_cast<Address*>(old_virtual_memory_->address());
// The exact alignment requirement of the OS is unknown, but it is certainly
// at least 4K, so the low 12 bits (mask 0xfff) of the address must be zero.
ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
ASSERT(initial_length > 0);
ASSERT(initial_length <= kOldStoreBufferLength);
old_limit_ = old_start_ + initial_length;
old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
CHECK(old_virtual_memory_->Commit(
reinterpret_cast<void*>(old_start_),
(old_limit_ - old_start_) * kPointerSize,
false));
ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@ -76,9 +92,9 @@ void StoreBuffer::Setup() {
ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
0);
virtual_memory_->Commit(reinterpret_cast<Address>(start_),
kStoreBufferSize,
false); // Not executable.
CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
kStoreBufferSize,
false)); // Not executable.
heap_->public_set_store_buffer_top(start_);
hash_map_1_ = new uintptr_t[kHashMapLength];
@ -90,10 +106,10 @@ void StoreBuffer::Setup() {
void StoreBuffer::TearDown() {
delete virtual_memory_;
delete old_virtual_memory_;
delete[] hash_map_1_;
delete[] hash_map_2_;
delete[] old_start_;
old_start_ = old_top_ = old_limit_ = NULL;
old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
start_ = limit_ = NULL;
heap_->public_set_store_buffer_top(start_);
}
@ -150,7 +166,18 @@ void StoreBuffer::Uniq() {
}
void StoreBuffer::HandleFullness() {
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
while (old_limit_ - old_top_ < space_needed &&
old_limit_ < old_reserved_limit_) {
size_t grow = old_limit_ - old_start_; // Double size.
CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
grow * kPointerSize,
false));
old_limit_ += grow;
}
if (old_limit_ - old_top_ >= space_needed) return;
if (old_buffer_is_filtered_) return;
ASSERT(may_move_store_buffer_entries_);
Compact();
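The growth policy: the old-pointer buffer is now reserved up front (old_reserved_limit_) and committed lazily, doubling the committed region until either the request fits or the reservation is exhausted, and only then falling back to filtering/compaction. A rough model in element counts, with the VirtualMemory::Commit call elided (function and variable names hypothetical):

#include <cstddef>

// Model of the doubling loop; the reservation sizes in the real code make
// the doubling land exactly on old_reserved_limit_.
size_t EnsureSpaceModel(size_t committed, size_t reserved,
                        size_t used, size_t needed) {
  while (committed - used < needed && committed < reserved) {
    size_t grow = committed;  // double the committed size
    // VirtualMemory::Commit(grow * kPointerSize) would be called here.
    committed += grow;
  }
  return committed;
}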
@ -645,9 +672,7 @@ void StoreBuffer::Compact() {
// the worst case (compaction doesn't eliminate any pointers).
ASSERT(top <= limit_);
heap_->public_set_store_buffer_top(start_);
if (top - start_ > old_limit_ - old_top_) {
HandleFullness();
}
EnsureSpace(top - start_);
ASSERT(may_move_store_buffer_entries_);
// Goes through the addresses in the store buffer attempting to remove
// duplicates. In the interest of speed this is a lossy operation. Some
@ -688,9 +713,7 @@ void StoreBuffer::Compact() {
void StoreBuffer::CheckForFullBuffer() {
if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
HandleFullness();
}
EnsureSpace(kStoreBufferSize * 2);
}
} } // namespace v8::internal

4
deps/v8/src/store-buffer.h

@ -109,7 +109,7 @@ class StoreBuffer {
// been promoted. Rebuilds the store buffer completely if it overflowed.
void SortUniq();
void HandleFullness();
void EnsureSpace(intptr_t space_needed);
void Verify();
bool PrepareForIteration();
@ -134,6 +134,8 @@ class StoreBuffer {
Address* old_start_;
Address* old_limit_;
Address* old_top_;
Address* old_reserved_limit_;
VirtualMemory* old_virtual_memory_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;

4
deps/v8/src/stub-cache.cc

@ -184,7 +184,7 @@ Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Object> value) {
Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
@ -266,7 +266,7 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Object> value) {
Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);

10
deps/v8/src/stub-cache.h

@ -92,7 +92,7 @@ class StubCache {
Handle<Code> ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Object> value);
Handle<JSFunction> value);
Handle<Code> ComputeLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
@ -121,7 +121,7 @@ class StubCache {
Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Object> value);
Handle<JSFunction> value);
Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
@ -518,7 +518,7 @@ class StubCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name,
Label* miss);
@ -568,7 +568,7 @@ class LoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Object> value,
Handle<JSFunction> value,
Handle<String> name);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
@ -603,7 +603,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadConstant(Handle<String> name,
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Object> value);
Handle<JSFunction> value);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,

15
deps/v8/src/type-info.cc

@ -259,6 +259,7 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
case CompareIC::STRINGS:
return TypeInfo::String();
case CompareIC::OBJECTS:
case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
@ -278,6 +279,19 @@ bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
}
Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
Handle<Object> object = GetInfo(expr->id());
if (!object->IsCode()) return Handle<Map>::null();
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return Handle<Map>::null();
CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
if (state != CompareIC::KNOWN_OBJECTS) {
return Handle<Map>::null();
}
return Handle<Map>(code->FindFirstMap());
}
TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
Handle<Object> object = GetInfo(expr->id());
TypeInfo unknown = TypeInfo::Unknown();
@ -367,6 +381,7 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
case CompareIC::HEAP_NUMBERS:
return TypeInfo::Number();
case CompareIC::OBJECTS:
case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:

1
deps/v8/src/type-info.h

@ -273,6 +273,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
TypeInfo BinaryType(BinaryOperation* expr);
TypeInfo CompareType(CompareOperation* expr);
bool IsSymbolCompare(CompareOperation* expr);
Handle<Map> GetCompareMap(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);

106
deps/v8/src/v8natives.js

@ -660,6 +660,21 @@ function GetOwnProperty(obj, v) {
}
// ES5 section 8.12.7.
function Delete(obj, p, should_throw) {
var desc = GetOwnProperty(obj, p);
if (IS_UNDEFINED(desc)) return true;
if (desc.isConfigurable()) {
%DeleteProperty(obj, p, 0);
return true;
} else if (should_throw) {
throw MakeTypeError("define_disallowed", [p]);
} else {
return;
}
}
// Harmony proxies.
function DefineProxyProperty(obj, p, attributes, should_throw) {
var handler = %GetHandler(obj);
@ -677,12 +692,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// ES5 8.12.9.
function DefineOwnProperty(obj, p, desc, should_throw) {
if (%IsJSProxy(obj)) {
var attributes = FromGenericPropertyDescriptor(desc);
return DefineProxyProperty(obj, p, attributes, should_throw);
}
function DefineObjectProperty(obj, p, desc, should_throw) {
var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
// A false value here means that access checks failed.
if (current_or_access === false) return void 0;
@ -846,6 +856,90 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
}
// ES5 section 15.4.5.1.
function DefineArrayProperty(obj, p, desc, should_throw) {
// Note that the length of an array is not actually stored as part of the
// property, hence we use generated code throughout this function instead of
// DefineObjectProperty() to modify its value.
// Step 3 - Special handling for length property.
if (p == "length") {
var length = obj.length;
if (!desc.hasValue()) {
return DefineObjectProperty(obj, "length", desc, should_throw);
}
var new_length = ToUint32(desc.getValue());
if (new_length != ToNumber(desc.getValue())) {
throw new $RangeError('defineProperty() array length out of range');
}
var length_desc = GetOwnProperty(obj, "length");
if (new_length != length && !length_desc.isWritable()) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
return false;
}
}
var threw = false;
while (new_length < length--) {
if (!Delete(obj, ToString(length), false)) {
new_length = length + 1;
threw = true;
break;
}
}
// Make sure the below call to DefineObjectProperty() doesn't overwrite
// any magic "length" property by removing the value.
obj.length = new_length;
desc.value_ = void 0;
desc.hasValue_ = false;
if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
return false;
}
}
return true;
}
// Step 4 - Special handling for array index.
var index = ToUint32(p);
if (index == ToNumber(p) && index != 4294967295) {
var length = obj.length;
var length_desc = GetOwnProperty(obj, "length");
if ((index >= length && !length_desc.isWritable()) ||
!DefineObjectProperty(obj, p, desc, true)) {
if (should_throw) {
throw MakeTypeError("define_disallowed", [p]);
} else {
return false;
}
}
if (index >= length) {
obj.length = index + 1;
}
return true;
}
// Step 5 - Fallback to default implementation.
return DefineObjectProperty(obj, p, desc, should_throw);
}
// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
function DefineOwnProperty(obj, p, desc, should_throw) {
if (%IsJSProxy(obj)) {
var attributes = FromGenericPropertyDescriptor(desc);
return DefineProxyProperty(obj, p, attributes, should_throw);
} else if (IS_ARRAY(obj)) {
return DefineArrayProperty(obj, p, desc, should_throw);
} else {
return DefineObjectProperty(obj, p, desc, should_throw);
}
}
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
if (!IS_SPEC_OBJECT(obj)) {

2
deps/v8/src/v8threads.h

@ -72,7 +72,7 @@ class ThreadState {
};
// Defined in top.h
// Defined in isolate.h.
class ThreadLocalTop;

4
deps/v8/src/version.cc

@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 7
#define BUILD_NUMBER 12
#define MINOR_VERSION 8
#define BUILD_NUMBER 0
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

166
deps/v8/src/x64/assembler-x64.cc

@ -426,13 +426,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
while (delta >= 9) {
nop(9);
delta -= 9;
}
if (delta > 0) {
nop(delta);
}
Nop(delta);
}
@ -441,6 +435,15 @@ void Assembler::CodeTargetAlign() {
}
bool Assembler::IsNop(Address addr) {
Address a = addr;
while (*a == 0x66) a++;
if (*a == 0x90) return true;
if (a[0] == 0xf && a[1] == 0x1f) return true;
return false;
}
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
@ -1763,7 +1766,7 @@ void Assembler::notl(Register dst) {
}
void Assembler::nop(int n) {
void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
//
@ -1778,73 +1781,64 @@ void Assembler::nop(int n) {
// 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
// 00000000H] 00H
ASSERT(1 <= n);
ASSERT(n <= 9);
EnsureSpace ensure_space(this);
switch (n) {
case 1:
emit(0x90);
return;
case 2:
emit(0x66);
emit(0x90);
return;
case 3:
emit(0x0f);
emit(0x1f);
emit(0x00);
return;
case 4:
emit(0x0f);
emit(0x1f);
emit(0x40);
emit(0x00);
return;
case 5:
emit(0x0f);
emit(0x1f);
emit(0x44);
emit(0x00);
emit(0x00);
return;
case 6:
emit(0x66);
emit(0x0f);
emit(0x1f);
emit(0x44);
emit(0x00);
emit(0x00);
return;
case 7:
emit(0x0f);
emit(0x1f);
emit(0x80);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
return;
case 8:
emit(0x0f);
emit(0x1f);
emit(0x84);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
return;
case 9:
emit(0x66);
emit(0x0f);
emit(0x1f);
emit(0x84);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
return;
while (n > 0) {
switch (n) {
case 2:
emit(0x66);
case 1:
emit(0x90);
return;
case 3:
emit(0x0f);
emit(0x1f);
emit(0x00);
return;
case 4:
emit(0x0f);
emit(0x1f);
emit(0x40);
emit(0x00);
return;
case 6:
emit(0x66);
case 5:
emit(0x0f);
emit(0x1f);
emit(0x44);
emit(0x00);
emit(0x00);
return;
case 7:
emit(0x0f);
emit(0x1f);
emit(0x80);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
return;
default:
case 11:
emit(0x66);
n--;
case 10:
emit(0x66);
n--;
case 9:
emit(0x66);
n--;
case 8:
emit(0x0f);
emit(0x1f);
emit(0x84);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
emit(0x00);
n -= 8;
}
}
}
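For reference, the base encodings the rewritten Nop emits are the Intel SDM sequences quoted above; lengths 8 through 11 reuse the 8-byte form with one to three 0x66 prefixes, and larger counts loop in 8-to-11-byte chunks. A printable sketch of the 1-8 byte sequences:

#include <cstdio>

// Byte sequences matching the switch in Assembler::Nop above.
int main() {
  const char* nops[] = {
      "90",                        // 1 byte
      "66 90",                     // 2 bytes
      "0F 1F 00",                  // 3 bytes
      "0F 1F 40 00",               // 4 bytes
      "0F 1F 44 00 00",            // 5 bytes
      "66 0F 1F 44 00 00",         // 6 bytes
      "0F 1F 80 00 00 00 00",      // 7 bytes
      "0F 1F 84 00 00 00 00 00",   // 8 bytes
  };
  for (int i = 0; i < 8; ++i) std::printf("%d: %s\n", i + 1, nops[i]);
}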
@ -2313,6 +2307,27 @@ void Assembler::fyl2x() {
}
void Assembler::f2xm1() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xF0);
}
void Assembler::fscale() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xFD);
}
void Assembler::fninit() {
EnsureSpace ensure_space(this);
emit(0xDB);
emit(0xE3);
}
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@ -2572,7 +2587,8 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(is_uint2(imm8));
ASSERT(CpuFeatures::IsSupported(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);

7
deps/v8/src/x64/assembler-x64.h

@ -636,6 +636,7 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
void Align(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@ -1154,7 +1155,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
void nop(int n);
void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@ -1277,6 +1277,9 @@ class Assembler : public AssemblerBase {
void fcos();
void fptan();
void fyl2x();
void f2xm1();
void fscale();
void fninit();
void frndint();
@ -1398,7 +1401,7 @@ class Assembler : public AssemblerBase {
return static_cast<int>(reloc_info_writer.pos() - pc_);
}
static bool IsNop(Address addr) { return *addr == 0x90; }
static bool IsNop(Address addr);
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;

3
deps/v8/src/x64/builtins-x64.cc

@ -1305,6 +1305,9 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(kScratchRegister, call_generic_code);
}
__ movq(Operand(rdx, 0), kScratchRegister);
__ addq(rdx, Immediate(kPointerSize));
__ bind(&entry);

414
deps/v8/src/x64/code-stubs-x64.cc

@ -1991,152 +1991,259 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
// Registers are used as follows:
// rdx = base
// rax = exponent
// rcx = temporary, result
Label allocate_return, call_runtime;
// Load input parameters.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
const Register exponent = rdx;
#else
const Register exponent = rdi;
#endif
const Register base = rax;
const Register scratch = rcx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
const XMMRegister double_exponent = xmm1;
const XMMRegister double_scratch = xmm4;
// Save 1 in xmm3 - we need this several times later on.
__ Set(rcx, 1);
__ cvtlsi2sd(xmm3, rcx);
Label call_runtime, done, exponent_not_smi, int_exponent;
Label exponent_nonsmi;
Label base_nonsmi;
// If the exponent is a heap number go to that specific case.
__ JumpIfNotSmi(rax, &exponent_nonsmi);
__ JumpIfNotSmi(rdx, &base_nonsmi);
// Save 1 in double_result - we need this several times later on.
__ movq(scratch, Immediate(1));
__ cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
__ movq(base, Operand(rsp, 2 * kPointerSize));
__ movq(exponent, Operand(rsp, 1 * kPointerSize));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ SmiToInteger32(base, base);
__ cvtlsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiToInteger32(exponent, exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiToInteger32(exponent, exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
}
// Optimized version when both exponent and base are smis.
Label powi;
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
__ jmp(&powi);
// Exponent is a smi and base is a heapnumber.
__ bind(&base_nonsmi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
if (exponent_type_ != INTEGER) {
Label fast_power;
// Detect integer exponents stored as double.
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
__ cvtlsi2sd(double_scratch, exponent);
// Already ruled out NaNs for exponent.
__ ucomisd(double_exponent, double_scratch);
__ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
Label continue_sqrt, continue_rsqrt, not_plus_half;
// Test for 0.5.
// Load double_scratch with 0.5.
__ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
__ movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
__ j(not_equal, &not_plus_half, Label::kNear);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_sqrt, Label::kNear);
__ j(carry, &continue_sqrt, Label::kNear);
// Set result to Infinity in the special case.
__ xorps(double_result, double_result);
__ subsd(double_result, double_scratch);
__ jmp(&done);
__ bind(&continue_sqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(double_scratch, double_scratch);
__ addsd(double_scratch, double_base); // Convert -0 to 0.
__ sqrtsd(double_result, double_scratch);
__ jmp(&done);
// Test for -0.5.
__ bind(&not_plus_half);
// Load double_scratch with -0.5 by subtracting 1.
__ subsd(double_scratch, double_result);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
__ j(not_equal, &fast_power, Label::kNear);
// Calculates reciprocal of square root of base. Check for the special
// case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_rsqrt, Label::kNear);
__ j(carry, &continue_rsqrt, Label::kNear);
// Set result to 0 in the special case.
__ xorps(double_result, double_result);
__ jmp(&done);
__ bind(&continue_rsqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(double_exponent, double_exponent);
__ addsd(double_exponent, double_base); // Convert -0 to +0.
__ sqrtsd(double_exponent, double_exponent);
__ divsd(double_result, double_exponent);
__ jmp(&done);
}
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Using FPU instructions to calculate power.
Label fast_power_failed;
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
__ movsd(Operand(rsp, 0), double_base);
__ fld_d(Operand(rsp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
// B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
// FYL2X calculates st(1) * log2(st(0))
__ fyl2x(); // X
__ fld(0); // X, X
__ frndint(); // rnd(X), X
__ fsub(1); // rnd(X), X-rnd(X)
__ fxch(1); // X - rnd(X), rnd(X)
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
__ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
__ fstp(1);
// Bail out to runtime in case of exceptions in the status word.
__ fnstsw_ax();
__ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
__ movsd(double_result, Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
__ jmp(&done);
// Optimized version of pow if exponent is a smi.
// xmm0 contains the base.
__ bind(&powi);
__ SmiToInteger32(rax, rax);
__ bind(&fast_power_failed);
__ fninit();
__ addq(rsp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
}
// Save exponent in base as we need to check if exponent is negative later.
// We know that base and exponent are in different registers.
__ movq(rdx, rax);
// Calculate power with integer exponent.
__ bind(&int_exponent);
const XMMRegister double_scratch2 = double_exponent;
// Back up exponent as we need to check if exponent is negative later.
__ movq(scratch, exponent); // Back up exponent.
__ movsd(double_scratch, double_base); // Back up base.
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
Label no_neg;
__ cmpl(rax, Immediate(0));
__ j(greater_equal, &no_neg, Label::kNear);
__ negl(rax);
Label no_neg, while_true, no_multiply;
__ testl(scratch, scratch);
__ j(positive, &no_neg, Label::kNear);
__ negl(scratch);
__ bind(&no_neg);
// Load xmm1 with 1.
__ movaps(xmm1, xmm3);
Label while_true;
Label no_multiply;
__ bind(&while_true);
__ shrl(rax, Immediate(1));
__ shrl(scratch, Immediate(1));
__ j(not_carry, &no_multiply, Label::kNear);
__ mulsd(xmm1, xmm0);
__ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
__ mulsd(xmm0, xmm0);
__ j(not_zero, &while_true);
// Base has the original value of the exponent - if the exponent is
// negative return 1/result.
__ testl(rdx, rdx);
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ divsd(xmm3, xmm1);
__ movaps(xmm1, xmm3);
__ xorps(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
__ jmp(&allocate_return);
// Exponent (or both) is a heapnumber - no matter what we should now work
// on doubles.
__ bind(&exponent_nonsmi);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
// Test if exponent is nan.
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
Label base_not_smi, handle_special_cases;
__ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
__ jmp(&handle_special_cases, Label::kNear);
__ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
__ bind(&base_not_smi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// If the exponent is negative, return 1/result.
__ testl(exponent, exponent);
__ j(greater, &done);
__ divsd(double_scratch2, double_result);
__ movsd(double_result, double_scratch2);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ xorps(double_scratch2, double_scratch2);
__ ucomisd(double_scratch2, double_result);
// double_exponent aliased as double_scratch2 has already been overwritten
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with the exponent value before bailing out.
__ j(not_equal, &done);
__ cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
Label not_minus_half;
// Test for -0.5.
// Load xmm2 with -0.5.
__ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
__ movq(xmm2, rcx);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &not_minus_half, Label::kNear);
// Calculates reciprocal of square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ divsd(xmm3, xmm1);
__ movaps(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
__ bind(&not_minus_half);
// Load xmm2 with 0.5.
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &call_runtime);
// Calculates square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0); // Convert -0 to 0.
__ sqrtsd(xmm1, xmm1);
__ bind(&allocate_return);
__ AllocateHeapNumber(rcx, rax, &call_runtime);
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
__ movq(rax, rcx);
__ ret(2 * kPointerSize);
// The stub is called from non-optimized code, which expects the result
// as heap number in rax.
__ bind(&done);
__ AllocateHeapNumber(rax, rcx, &call_runtime);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
__ bind(&call_runtime);
// Move base to the correct argument register. Exponent is already in xmm1.
__ movsd(xmm0, double_base);
ASSERT(double_exponent.is(xmm1));
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(2);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 2);
}
// Return value is in xmm0.
__ movsd(double_result, xmm0);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(0);
}
}
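The fast_power FPU sequence computes pow through base-2 exponentials. In LaTeX, the identity it implements is

  x^y = 2^{y \log_2 x}, \qquad X = y \log_2 x, \quad r = \mathrm{rnd}(X),

  2^X = 2^{X - r} \cdot 2^{r}, \qquad |X - r| \le \tfrac{1}{2} < 1,

so FYL2X produces X, F2XM1 evaluates 2^{X-r} - 1 (defined only for operands strictly inside (-1, 1), which the rounding split guarantees), FLD1/FADDP adds the 1 back, and FSCALE applies the 2^r factor.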
@ -5501,32 +5608,45 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Save the registers.
__ pop(rcx);
__ push(rdx);
__ push(rax);
__ push(rcx);
void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
__ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ Cmp(rcx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ Cmp(rbx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ subq(rax, rdx);
__ ret(0);
__ bind(&miss);
GenerateMiss(masm);
}
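GenerateKnownObjects is the new KNOWN_OBJECTS fast path: once both operands are non-smis whose maps match the recorded known_map_, equality reduces to pointer identity, so the stub can return the raw pointer difference directly. A one-line C++ analogue of that result convention (a sketch; zero-means-equal is inferred from the subq/ret pair above):

#include <cstdint>

// Zero iff both operands are the same object.
intptr_t CompareKnownObjects(const void* lhs, const void* rhs) {
  return reinterpret_cast<intptr_t>(lhs) - reinterpret_cast<intptr_t>(rhs);
}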
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(rdx);
__ push(rax);
__ push(rdx);
__ push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
}
// Compute the entry point of the rewritten stub.
__ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
// Restore registers.
__ pop(rcx);
__ pop(rax);
__ pop(rdx);
__ push(rcx);
// Compute the entry point of the rewritten stub.
__ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
__ pop(rax);
__ pop(rdx);
}
// Do a tail call to the rewritten stub.
__ jmp(rdi);

4
deps/v8/src/x64/debug-x64.cc

@ -264,9 +264,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
__ nop();
}
__ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}

Some files were not shown because too many files changed in this diff
