@@ -571,6 +571,9 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
+  // Block literal pool emission to ensure nop indicating no inlined smi code
+  // is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ Call(code, mode);
@@ -631,14 +634,15 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
         ++jsframe_count;
       }
     }
-    Translation translation(&translations_, frame_count, jsframe_count);
+    Translation translation(&translations_, frame_count, jsframe_count,
+                            zone());
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                           translation.index(),
                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
-    deoptimizations_.Add(environment);
+    deoptimizations_.Add(environment, zone());
   }
 }
@@ -670,7 +674,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   // jump entry if this is the case.
   if (deopt_jump_table_.is_empty() ||
       (deopt_jump_table_.last().address != entry)) {
-    deopt_jump_table_.Add(JumpTableEntry(entry));
+    deopt_jump_table_.Add(JumpTableEntry(entry), zone());
   }
   __ b(cc, &deopt_jump_table_.last().label);
 }
@@ -715,7 +719,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
   for (int i = 0; i < deoptimization_literals_.length(); ++i) {
     if (deoptimization_literals_[i].is_identical_to(literal)) return i;
   }
-  deoptimization_literals_.Add(literal);
+  deoptimization_literals_.Add(literal, zone());
   return result;
 }
@@ -761,14 +765,14 @@ void LCodeGen::RecordSafepoint(
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
+      safepoint.DefinePointerSlot(pointer->index(), zone());
     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
     }
   }
   if (kind & Safepoint::kWithRegisters) {
     // Register cp always contains a pointer to the context.
-    safepoint.DefinePointerRegister(cp);
+    safepoint.DefinePointerRegister(cp, zone());
   }
 }
@@ -780,7 +784,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,


 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
-  LPointerMap empty_pointers(RelocInfo::kNoPosition);
+  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
   RecordSafepoint(&empty_pointers, deopt_mode);
 }
@@ -1034,6 +1038,100 @@ void LCodeGen::DoModI(LModI* instr) {
 }


+void LCodeGen::EmitSignedIntegerDivisionByConstant(
+    Register result,
+    Register dividend,
+    int32_t divisor,
+    Register remainder,
+    Register scratch,
+    LEnvironment* environment) {
+  ASSERT(!AreAliased(dividend, scratch, ip));
+  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+
+  uint32_t divisor_abs = abs(divisor);
+
+  int32_t power_of_2_factor =
+      CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+
+  switch (divisor_abs) {
+    case 0:
+      DeoptimizeIf(al, environment);
+      return;
+
+    case 1:
+      if (divisor > 0) {
+        __ Move(result, dividend);
+      } else {
+        __ rsb(result, dividend, Operand(0), SetCC);
+        DeoptimizeIf(vs, environment);
+      }
+      // Compute the remainder.
+      __ mov(remainder, Operand(0));
+      return;
+
+    default:
+      if (IsPowerOf2(divisor_abs)) {
+        // Branch and condition free code for integer division by a power
+        // of two.
+        int32_t power = WhichPowerOf2(divisor_abs);
+        if (power > 1) {
+          __ mov(scratch, Operand(dividend, ASR, power - 1));
+        }
+        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
+        __ mov(result, Operand(scratch, ASR, power));
+        // Negate if necessary.
+        // We don't need to check for overflow because the case '-1' is
+        // handled separately.
+        if (divisor < 0) {
+          ASSERT(divisor != -1);
+          __ rsb(result, result, Operand(0));
+        }
+        // Compute the remainder.
+        if (divisor > 0) {
+          __ sub(remainder, dividend, Operand(result, LSL, power));
+        } else {
+          __ add(remainder, dividend, Operand(result, LSL, power));
+        }
+        return;
+      } else {
+        // Use magic numbers for a few specific divisors.
+        // Details and proofs can be found in:
+        // - Hacker's Delight, Henry S. Warren, Jr.
+        // - The PowerPC Compiler Writer's Guide
+        // and probably many others.
+        //
+        // We handle
+        //   <divisor with magic numbers> * <power of 2>
+        // but not
+        //   <divisor with magic numbers> * <other divisor with magic numbers>
+        DivMagicNumbers magic_numbers =
+            DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+        // Branch and condition free code for integer division by a power
+        // of two.
+        const int32_t M = magic_numbers.M;
+        const int32_t s = magic_numbers.s + power_of_2_factor;
+
+        __ mov(ip, Operand(M));
+        __ smull(ip, scratch, dividend, ip);
+        if (M < 0) {
+          __ add(scratch, scratch, Operand(dividend));
+        }
+        if (s > 0) {
+          __ mov(scratch, Operand(scratch, ASR, s));
+        }
+
+        __ add(result, scratch, Operand(dividend, LSR, 31));
+        if (divisor < 0) __ rsb(result, result, Operand(0));
+        // Compute the remainder.
+        __ mov(ip, Operand(divisor));
+        // This sequence could be replaced with 'mls' when
+        // it gets implemented.
+        __ mul(scratch, result, ip);
+        __ sub(remainder, dividend, scratch);
+      }
+  }
+}
+
+
 void LCodeGen::DoDivI(LDivI* instr) {
   class DeferredDivI: public LDeferredCode {
    public:
@@ -1096,7 +1194,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
   // Call the stub. The numbers in r0 and r1 have
   // to be tagged to Smis. If that is not possible, deoptimize.
-  DeferredDivI* deferred = new DeferredDivI(this, instr);
+  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);

   __ TrySmiTag(left, &deoptimize, scratch);
   __ TrySmiTag(right, &deoptimize, scratch);
@@ -1115,6 +1213,34 @@ void LCodeGen::DoDivI(LDivI* instr) {
 }


+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+  const Register result = ToRegister(instr->result());
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register remainder = ToRegister(instr->TempAt(0));
+  const Register scratch = scratch0();
+
+  // We only optimize this for division by constants, because the standard
+  // integer division routine is usually slower than transitioning to VFP.
+  // This could be optimized on processors with SDIV available.
+  ASSERT(instr->InputAt(1)->IsConstantOperand());
+  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+  if (divisor < 0) {
+    __ cmp(left, Operand(0));
+    DeoptimizeIf(eq, instr->environment());
+  }
+  EmitSignedIntegerDivisionByConstant(result,
+                                      left,
+                                      divisor,
+                                      remainder,
+                                      scratch,
+                                      instr->environment());
+  // We performed a truncating division. Correct the result if necessary.
+  __ cmp(remainder, Operand(0));
+  __ teq(remainder, Operand(divisor), ne);
+  __ sub(result, result, Operand(1), LeaveCC, mi);
+}
+
+
 template<int T>
 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
                                       Token::Value op) {
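
NOTE (reviewer annotation, not part of the patch): EmitSignedIntegerDivisionByConstant above implements the classic multiply-by-magic-number division from Hacker's Delight: multiply by a precomputed 32-bit constant M, keep the high word of the 64-bit product (the smull), apply a fix-up add when M is negative, arithmetic-shift by s, and add the sign bit to round toward zero. A minimal standalone C++ sketch of the same arithmetic, using the well-known magic pair for divisor 7 rather than V8's DivMagicNumberFor table:

    #include <cstdint>
    #include <cstdio>

    // Truncating division by 7 via a magic number (Hacker's Delight, ch. 10).
    // Not V8 code; the constants below are the standard pair for d = 7.
    int32_t DivideBy7(int32_t dividend) {
      const int32_t M = static_cast<int32_t>(0x92492493u);  // magic multiplier
      const int32_t s = 2;                                  // post-shift
      int64_t product = static_cast<int64_t>(M) * dividend;
      int32_t hi = static_cast<int32_t>(product >> 32);  // smull's high word
      hi += dividend;  // correction taken when M < 0, mirroring the patch's
                       // 'if (M < 0) __ add(scratch, scratch, dividend)'
      hi >>= s;        // arithmetic shift, like 'Operand(scratch, ASR, s)'
      hi += static_cast<uint32_t>(dividend) >> 31;  // +1 for negative inputs
      return hi;
    }

    int main() {
      for (int32_t x : {-22, -7, -1, 0, 6, 7, 100}) {
        std::printf("%d / 7 = %d (expect %d)\n", x, DivideBy7(x), x / 7);
      }
      return 0;
    }

The remainder is then recovered exactly as in the patch: remainder = dividend - result * divisor, the mul/sub pair that stands in for the not-yet-implemented mls.
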
@@ -1562,6 +1688,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));

   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  // Block literal pool emission to ensure nop indicating no inlined smi code
+  // is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
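
NOTE (reviewer annotation, not part of the patch): this hunk, like the CallCodeGeneric, InstanceofStub and EnsureSpaceForLazyDeopt hunks, wraps a position-sensitive instruction sequence in Assembler::BlockConstPoolScope so the ARM assembler cannot interleave a literal pool and shift the nop that the patcher looks for. The guard itself is plain RAII; a minimal sketch of the pattern with a hypothetical assembler (not the real V8 class):

    #include <cassert>

    // While block_depth_ > 0 the assembler must not emit a constant pool
    // between instructions, so instruction positions stay fixed.
    class Assembler {
     public:
      class BlockConstPoolScope {
       public:
        explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
          ++assem_->block_depth_;
        }
        ~BlockConstPoolScope() { --assem_->block_depth_; }
       private:
        Assembler* assem_;
      };

      bool const_pool_blocked() const { return block_depth_ > 0; }

     private:
      int block_depth_ = 0;
    };

    int main() {
      Assembler masm;
      assert(!masm.const_pool_blocked());
      {
        Assembler::BlockConstPoolScope guard(&masm);  // emission blocked
        assert(masm.const_pool_blocked());
      }
      assert(!masm.const_pool_blocked());  // unblocked when the scope closes
      return 0;
    }
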
@@ -2174,7 +2303,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   };

   DeferredInstanceOfKnownGlobal* deferred;
-  deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

   Label done, false_result;
   Register object = ToRegister(instr->InputAt(0));
@@ -2193,20 +2322,25 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Label cache_miss;
   Register map = temp;
   __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ bind(deferred->map_check());  // Label for calculating code patching.
-  // We use Factory::the_hole_value() on purpose instead of loading from the
-  // root array to force relocation to be able to later patch with
-  // the cached map.
-  Handle<JSGlobalPropertyCell> cell =
-      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
-  __ mov(ip, Operand(Handle<Object>(cell)));
-  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-  __ cmp(map, Operand(ip));
-  __ b(ne, &cache_miss);
-  // We use Factory::the_hole_value() on purpose instead of loading from the
-  // root array to force relocation to be able to later patch
-  // with true or false.
-  __ mov(result, Operand(factory()->the_hole_value()));
+  {
+    // Block constant pool emission to ensure the positions of instructions are
+    // as expected by the patcher. See InstanceofStub::Generate().
+    Assembler::BlockConstPoolScope block_const_pool(masm());
+    __ bind(deferred->map_check());  // Label for calculating code patching.
+    // We use Factory::the_hole_value() on purpose instead of loading from the
+    // root array to force relocation to be able to later patch with
+    // the cached map.
+    Handle<JSGlobalPropertyCell> cell =
+        factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+    __ mov(ip, Operand(Handle<Object>(cell)));
+    __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+    __ cmp(map, Operand(ip));
+    __ b(ne, &cache_miss);
+    // We use Factory::the_hole_value() on purpose instead of loading from the
+    // root array to force relocation to be able to later patch
+    // with true or false.
+    __ mov(result, Operand(factory()->the_hole_value()));
+  }
   __ b(&done);

   // The inlined call site cache did not match. Check null and string before
@@ -2267,8 +2401,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LEnvironment* env = instr->deoptimization_environment();
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value into the result register slot and
   // restore all registers.
@@ -2438,12 +2571,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                                Register object,
                                                Handle<Map> type,
-                                               Handle<String> name) {
+                                               Handle<String> name,
+                                               LEnvironment* env) {
   LookupResult lookup(isolate());
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsFound() &&
-         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
-  if (lookup.type() == FIELD) {
+  ASSERT(lookup.IsFound() || lookup.IsCacheable());
+  if (lookup.IsFound() && lookup.type() == FIELD) {
     int index = lookup.GetLocalFieldIndexFromMap(*type);
     int offset = index * kPointerSize;
     if (index < 0) {
@@ -2455,9 +2588,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
       __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
       __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
     }
-  } else {
+  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
     __ LoadHeapObject(result, function);
+  } else {
+    // Negative lookup.
+    // Check prototypes.
+    HeapObject* current = HeapObject::cast((*type)->prototype());
+    Heap* heap = type->GetHeap();
+    while (current != heap->null_value()) {
+      Handle<HeapObject> link(current);
+      __ LoadHeapObject(result, link);
+      __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
+      __ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map())));
+      DeoptimizeIf(ne, env);
+      current = HeapObject::cast(current->map()->prototype());
+    }
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   }
 }
@@ -2465,43 +2612,45 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register object = ToRegister(instr->object());
   Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
+  Register object_map = scratch0();
+
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ mov(r2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    Label check_passed;
+    __ CompareMap(
+        object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
+    if (last && !need_generic) {
+      DeoptimizeIf(ne, instr->environment());
+      __ bind(&check_passed);
+      EmitLoadFieldOrConstantFunction(
+          result, object, map, name, instr->environment());
+    } else {
       Label next;
-      __ cmp(scratch, Operand(map));
       __ b(ne, &next);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ bind(&check_passed);
+      EmitLoadFieldOrConstantFunction(
+          result, object, map, name, instr->environment());
       __ b(&done);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ cmp(scratch, Operand(map));
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ b(ne, &generic);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ b(&done);
-      __ bind(&generic);
-      __ mov(r2, Operand(name));
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(ne, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ mov(r2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
@@ -2579,8 +2728,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
     __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
     __ ubfx(scratch, scratch, Map::kElementsKindShift,
             Map::kElementsKindBitCount);
-    __ cmp(scratch, Operand(FAST_ELEMENTS));
-    __ b(eq, &done);
+    __ cmp(scratch, Operand(GetInitialFastElementsKind()));
+    __ b(lt, &fail);
+    __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
+    __ b(le, &done);
     __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
     __ b(lt, &fail);
     __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2627,13 +2778,20 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {

   // Load the result.
   __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
-  __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  uint32_t offset = FixedArray::kHeaderSize +
+                    (instr->additional_index() << kPointerSizeLog2);
+  __ ldr(result, FieldMemOperand(scratch, offset));

   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-    __ cmp(result, scratch);
-    DeoptimizeIf(eq, instr->environment());
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      __ tst(result, Operand(kSmiTagMask));
+      DeoptimizeIf(ne, instr->environment());
+    } else {
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      __ cmp(result, scratch);
+      DeoptimizeIf(eq, instr->environment());
+    }
   }
 }
@@ -2659,18 +2817,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
   }

   Operand operand = key_is_constant
-      ? Operand(constant_key * (1 << shift_size) +
+      ? Operand(((constant_key + instr->additional_index()) << shift_size) +
                 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
       : Operand(key, LSL, shift_size);
   __ add(elements, elements, operand);
   if (!key_is_constant) {
     __ add(elements, elements,
-           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+           Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+                   (instr->additional_index() << shift_size)));
   }

-  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-  __ cmp(scratch, Operand(kHoleNanUpper32));
-  DeoptimizeIf(eq, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+    __ cmp(scratch, Operand(kHoleNanUpper32));
+    DeoptimizeIf(eq, instr->environment());
+  }

   __ vldr(result, elements, 0);
 }
@@ -2692,26 +2853,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
+  int additional_offset = instr->additional_index() << shift_size;

   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister result = ToDoubleRegister(instr->result());
     Operand operand = key_is_constant
-        ? Operand(constant_key * (1 << shift_size))
+        ? Operand(constant_key << shift_size)
         : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ vldr(result.low(), scratch0(), 0);
+      __ vldr(result.low(), scratch0(), additional_offset);
       __ vcvt_f64_f32(result, result.low());
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vldr(result, scratch0(), 0);
+      __ vldr(result, scratch0(), additional_offset);
     }
   } else {
     Register result = ToRegister(instr->result());
+    if (instr->additional_index() != 0 && !key_is_constant) {
+      __ add(scratch0(), key, Operand(instr->additional_index()));
+    }
     MemOperand mem_operand(key_is_constant
-        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
-        : MemOperand(external_pointer, key, LSL, shift_size));
+        ? MemOperand(external_pointer,
+                     (constant_key << shift_size) + additional_offset)
+        : (instr->additional_index() == 0
+           ? MemOperand(external_pointer, key, LSL, shift_size)
+           : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
     switch (elements_kind) {
       case EXTERNAL_BYTE_ELEMENTS:
         __ ldrsb(result, mem_operand);
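
NOTE (reviewer annotation, not part of the patch): the keyed load/store hunks in this patch thread a new additional_index() through every address computation. For a constant key the extra index is folded into the immediate, ((constant_key + additional_index) << shift_size); for a register key with a nonzero extra index, key + additional_index is first materialized in scratch0(). A small self-contained check that the folded and split forms agree:

    #include <cassert>
    #include <cstdint>

    // Element address used by the hunks above:
    //   base + ((key + additional_index) << shift_size)
    uintptr_t ElementAddress(uintptr_t base, int32_t key,
                             int32_t additional_index, int shift_size) {
      return base +
             (static_cast<uintptr_t>(key + additional_index) << shift_size);
    }

    int main() {
      const int kDoubleShift = 3;  // shift size for 8-byte (double) elements
      uintptr_t folded = ElementAddress(0x1000, 5, 2, kDoubleShift);
      uintptr_t split = 0x1000 + (5 << kDoubleShift) + (2 << kDoubleShift);
      assert(folded == split);  // folding the index is equivalent
      assert(folded == 0x1000 + (7 << kDoubleShift));
      return 0;
    }

The same reasoning explains the constant_key * (1 << shift_size) to constant_key << shift_size cleanups scattered through these hunks: the two expressions are identical, and the shift form matches the ARM operand encoding more directly.
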
@@ -2739,9 +2907,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -2764,16 +2935,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());

-  // Check if the calling frame is an arguments adaptor frame.
-  Label done, adapted;
-  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
-  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  if (instr->hydrogen()->from_inlined()) {
+    __ sub(result, sp, Operand(2 * kPointerSize));
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

-  // Result is the frame pointer for the frame if not adapted and for the real
-  // frame below the adaptor frame if adapted.
-  __ mov(result, fp, LeaveCC, ne);
-  __ mov(result, scratch, LeaveCC, eq);
+    // Result is the frame pointer for the frame if not adapted and for the
+    // real frame below the adaptor frame if adapted.
+    __ mov(result, fp, LeaveCC, ne);
+    __ mov(result, scratch, LeaveCC, eq);
+  }
 }
@@ -2882,7 +3057,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   __ b(ne, &loop);

   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -2907,6 +3082,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }


+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2953,7 +3133,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr,
-                                 CallKind call_kind) {
+                                 CallKind call_kind,
+                                 R1State r1_state) {
   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
       function->shared()->formal_parameter_count() == arity;
@@ -2961,7 +3142,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   RecordPosition(pointers->position());

   if (can_invoke_directly) {
-    __ LoadHeapObject(r1, function);
+    if (r1_state == R1_UNINITIALIZED) {
+      __ LoadHeapObject(r1, function);
+    }
+
     // Change context if needed.
     bool change_context =
         (info()->closure()->context() != function->context()) ||
@@ -3000,7 +3184,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
-                    CALL_AS_METHOD);
+                    CALL_AS_METHOD,
+                    R1_UNINITIALIZED);
 }
@@ -3109,7 +3294,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
   } else {
     // Representation is tagged.
     DeferredMathAbsTaggedHeapNumber* deferred =
-        new DeferredMathAbsTaggedHeapNumber(this, instr);
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
     Register input = ToRegister(instr->InputAt(0));
     // Smi check.
     __ JumpIfNotSmi(input, deferred->entry());
@@ -3286,7 +3471,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
     LRandom* instr_;
   };

-  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);

   // Having marked this instruction as a call we can use any
   // registers.
@@ -3424,13 +3609,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount count(instr->arity());
-  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  if (instr->known_function().is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    CallKnownFunction(instr->known_function(),
+                      instr->arity(),
+                      instr,
+                      CALL_AS_METHOD,
+                      R1_CONTAINS_TARGET);
+  }
 }
@@ -3485,7 +3678,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {

 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+  CallKnownFunction(instr->target(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_FUNCTION,
+                    R1_UNINITIALIZED);
 }
@@ -3515,6 +3712,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   if (!instr->transition().is_null()) {
     __ mov(scratch, Operand(instr->transition()));
     __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->TempAt(0));
+      // Update the write barrier for the map field.
+      __ RecordWriteField(object,
+                          HeapObject::kMapOffset,
+                          scratch,
+                          temp,
+                          kLRHasBeenSaved,
+                          kSaveFPRegs,
+                          OMIT_REMEMBERED_SET,
+                          OMIT_SMI_CHECK);
+    }
   }

   // Do the store.
@@ -3583,10 +3792,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     int offset =
-        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+        (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+        + FixedArray::kHeaderSize;
     __ str(value, FieldMemOperand(elements, offset));
   } else {
     __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+    if (instr->additional_index() != 0) {
+      __ add(scratch,
+             scratch,
+             Operand(instr->additional_index() << kPointerSizeLog2));
+    }
     __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   }
@@ -3615,7 +3830,6 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   Register scratch = scratch0();
   bool key_is_constant = instr->key()->IsConstantOperand();
   int constant_key = 0;
-  Label not_nan;

   // Calculate the effective address of the slot in the array to store the
   // double value.
@@ -3629,7 +3843,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   }
   int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   Operand operand = key_is_constant
-      ? Operand(constant_key * (1 << shift_size) +
+      ? Operand((constant_key << shift_size) +
                 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
       : Operand(key, LSL, shift_size);
   __ add(scratch, elements, operand);
@@ -3638,14 +3852,16 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }

-  // Check for NaN. All NaNs must be canonicalized.
-  __ VFPCompareAndSetFlags(value, value);
-  // Only load canonical NaN if the comparison above set the overflow.
-  __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
+  if (instr->NeedsCanonicalization()) {
+    // Check for NaN. All NaNs must be canonicalized.
+    __ VFPCompareAndSetFlags(value, value);
+    // Only load canonical NaN if the comparison above set the overflow.
+    __ Vmov(value,
+            FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+            vs);
+  }

-  __ bind(&not_nan);
-  __ vstr(value, scratch, 0);
+  __ vstr(value, scratch, instr->additional_index() << shift_size);
 }
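
NOTE (reviewer annotation, not part of the patch): the new NeedsCanonicalization() guard relies on the IEEE-754 rule that a NaN never compares equal to itself; VFPCompareAndSetFlags on (value, value) sets the overflow flag exactly for unordered operands, and the vs-conditional Vmov then substitutes one canonical NaN bit pattern before the store. The same test in portable C++, as a sketch:

    #include <cassert>
    #include <cmath>
    #include <limits>

    // Mirror of the conditional Vmov above: replace any NaN payload with one
    // canonical quiet NaN before storing into a FixedDoubleArray.
    double CanonicalizeForStore(double value) {
      return value != value  // true only for NaN (unordered compare)
                 ? std::numeric_limits<double>::quiet_NaN()
                 : value;
    }

    int main() {
      assert(std::isnan(CanonicalizeForStore(std::nan(""))));
      assert(CanonicalizeForStore(1.5) == 1.5);
      return 0;
    }
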
@@ -3666,25 +3882,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
+  int additional_offset = instr->additional_index() << shift_size;

   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister value(ToDoubleRegister(instr->value()));
-    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
+    Operand operand(key_is_constant ? Operand(constant_key << shift_size)
                                     : Operand(key, LSL, shift_size));
     __ add(scratch0(), external_pointer, operand);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ vcvt_f32_f64(double_scratch0().low(), value);
-      __ vstr(double_scratch0().low(), scratch0(), 0);
+      __ vstr(double_scratch0().low(), scratch0(), additional_offset);
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vstr(value, scratch0(), 0);
+      __ vstr(value, scratch0(), additional_offset);
     }
   } else {
     Register value(ToRegister(instr->value()));
+    if (instr->additional_index() != 0 && !key_is_constant) {
+      __ add(scratch0(), key, Operand(instr->additional_index()));
+    }
     MemOperand mem_operand(key_is_constant
-        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
-        : MemOperand(external_pointer, key, LSL, shift_size));
+        ? MemOperand(external_pointer,
+                     ((constant_key + instr->additional_index())
+                      << shift_size))
+        : (instr->additional_index() == 0
+           ? MemOperand(external_pointer, key, LSL, shift_size)
+           : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
     switch (elements_kind) {
       case EXTERNAL_PIXEL_ELEMENTS:
       case EXTERNAL_BYTE_ELEMENTS:
@@ -3703,7 +3927,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3740,20 +3967,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   __ cmp(scratch, Operand(from_map));
   __ b(ne, &not_applicable);
   __ mov(new_map_reg, Operand(to_map));
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
     __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, kLRHasBeenSaved, kDontSaveFPRegs);
-  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-             to_kind == FAST_DOUBLE_ELEMENTS) {
+  } else if (IsFastSmiElementsKind(from_kind) &&
+             IsFastDoubleElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(r2));
     ASSERT(new_map_reg.is(r3));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
-  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+  } else if (IsFastDoubleElementsKind(from_kind) &&
+             IsFastObjectElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(r2));
     ASSERT(new_map_reg.is(r3));
@@ -3787,7 +4016,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   };

   DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(this, instr);
+      new(zone()) DeferredStringCharCodeAt(this, instr);

   StringCharLoadGenerator::Generate(masm(),
                                     ToRegister(instr->string()),
@@ -3842,7 +4071,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   };

   DeferredStringCharFromCode* deferred =
-      new DeferredStringCharFromCode(this, instr);
+      new(zone()) DeferredStringCharFromCode(this, instr);

   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
   Register char_code = ToRegister(instr->char_code());
@@ -3916,7 +4145,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   Register src = ToRegister(instr->InputAt(0));
   Register dst = ToRegister(instr->result());

-  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
   __ SmiTag(dst, src, SetCC);
   __ b(vs, deferred->entry());
   __ bind(deferred->exit());
@@ -3987,7 +4216,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   Register temp1 = ToRegister(instr->TempAt(0));
   Register temp2 = ToRegister(instr->TempAt(1));

-  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
@@ -4189,7 +4418,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   Register input_reg = ToRegister(input);

-  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

   // Optimistically untag the input.
   // If the input is a HeapObject, SmiUntag will set the carry flag.
@@ -4338,14 +4567,22 @@ void LCodeGen::DoCheckMapCommon(Register reg,
 }


-void LCodeGen::DoCheckMap(LCheckMap* instr) {
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   Register scratch = scratch0();
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
-                   instr->environment());
+
+  Label success;
+  SmallMapList* map_set = instr->hydrogen()->map_set();
+  for (int i = 0; i < map_set->length() - 1; i++) {
+    Handle<Map> map = map_set->at(i);
+    __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
+    __ b(eq, &success);
+  }
+  Handle<Map> map = map_set->last();
+  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+  __ bind(&success);
 }
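
NOTE (reviewer annotation, not part of the patch): DoCheckMap becomes DoCheckMaps here: instead of a single map it now accepts a set, comparing against every map but the last with a branch-on-equal to success, and letting the final DoCheckMapCommon deoptimize if nothing matched. The emitted control flow reduces to this shape (plain C++ sketch, with equality standing in for CompareMap):

    #include <vector>

    // true  -> at least one map matched (falls through to &success);
    // false -> the last check failed, i.e. the path that deoptimizes.
    template <typename Map>
    bool CheckMaps(const Map& actual, const std::vector<Map>& map_set) {
      for (size_t i = 0; i + 1 < map_set.size(); ++i) {
        if (actual == map_set[i]) return true;  // __ b(eq, &success)
      }
      return actual == map_set.back();
    }

    int main() {
      std::vector<int> maps = {3, 7, 9};
      return (CheckMaps(7, maps) && !CheckMaps(4, maps)) ? 0 : 1;
    }
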
@@ -4441,7 +4678,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
     LAllocateObject* instr_;
   };

-  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+  DeferredAllocateObject* deferred =
+      new(zone()) DeferredAllocateObject(this, instr);

   Register result = ToRegister(instr->result());
   Register scratch = ToRegister(instr->TempAt(0));
@@ -4464,6 +4702,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
                         deferred->entry(),
                         TAG_OBJECT);

+  __ bind(deferred->exit());
+  if (FLAG_debug_code) {
+    Label is_in_new_space;
+    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+    __ Abort("Allocated object is not in new-space");
+    __ bind(&is_in_new_space);
+  }
+
   // Load the initial map.
   Register map = scratch;
   __ LoadHeapObject(map, constructor);
@@ -4482,14 +4728,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
       __ str(scratch, FieldMemOperand(result, property_offset));
     }
   }
-
-  __ bind(deferred->exit());
 }


 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
   Register result = ToRegister(instr->result());
   Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map(constructor->initial_map());
+  int instance_size = initial_map->instance_size();

   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
@@ -4497,9 +4743,9 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
   __ mov(result, Operand(0));

   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ LoadHeapObject(r0, constructor);
+  __ mov(r0, Operand(Smi::FromInt(instance_size)));
   __ push(r0);
-  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
   __ StoreToSafepointRegisterSlot(r0, result);
 }
@@ -4511,8 +4757,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {

   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+  if (CanTransitionToMoreGeneralFastElementsKind(
+          boilerplate_elements_kind, true)) {
     __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
     // Load map into r2.
     __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -4633,9 +4880,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
       __ str(r2, FieldMemOperand(result, total_offset + 4));
     }
   } else if (elements->IsFixedArray()) {
+    Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
     for (int i = 0; i < elements_length; i++) {
       int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-      Handle<Object> value = JSObject::GetElement(object, i);
+      Handle<Object> value(fast_elements->get(i));
       if (value->IsJSObject()) {
         Handle<JSObject> value_object = Handle<JSObject>::cast(value);
         __ add(r2, result, Operand(*offset));
@@ -4659,6 +4907,24 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,

 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   int size = instr->hydrogen()->total_size();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate()->GetElementsKind();
+
+  // Deopt if the array literal boilerplate ElementsKind is of a type different
+  // than the expected one. The check isn't necessary if the boilerplate has
+  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+  if (CanTransitionToMoreGeneralFastElementsKind(
+          boilerplate_elements_kind, true)) {
+    __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+    // Load map into r2.
+    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+    __ cmp(r2, Operand(boilerplate_elements_kind));
+    DeoptimizeIf(ne, instr->environment());
+  }

   // Allocate all objects that are part of the literal in one big
   // allocation. This avoids multiple limit checks.
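
NOTE (reviewer annotation, not part of the patch): the new guard in DoFastLiteral reads the boilerplate's elements kind out of the map's "bit field 2" with ubfx (unsigned bit-field extract) and deoptimizes on a mismatch. ubfx is just a shift-and-mask; equivalent C++, with made-up shift/width constants standing in for Map::kElementsKindShift and Map::kElementsKindBitCount:

    #include <cassert>
    #include <cstdint>

    // What '__ ubfx(r2, r2, shift, bit_count)' computes.
    uint32_t ExtractBitField(uint32_t word, int shift, int bit_count) {
      return (word >> shift) & ((1u << bit_count) - 1);
    }

    int main() {
      const int kShift = 3, kBitCount = 5;         // hypothetical layout
      uint32_t bit_field2 = (9u << kShift) | 0x5;  // kind 9 plus low bits
      assert(ExtractBitField(bit_field2, kShift, kBitCount) == 9u);
      return 0;
    }
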
@@ -4923,6 +5189,8 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
   int current_pc = masm()->pc_offset();
   int patch_size = Deoptimizer::patch_size();
   if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    // Block literal pool emission for duration of padding.
+    Assembler::BlockConstPoolScope block_const_pool(masm());
     int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
     ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
     while (padding_size > 0) {
@@ -4954,7 +5222,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   Register strict = scratch0();
   __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
   __ Push(object, key, strict);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -4967,7 +5235,7 @@ void LCodeGen::DoIn(LIn* instr) {
   Register obj = ToRegister(instr->object());
   Register key = ToRegister(instr->key());
   __ Push(key, obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
@@ -5017,7 +5285,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
     ASSERT(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
-        new DeferredStackCheck(this, instr);
+        new(zone()) DeferredStackCheck(this, instr);
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
     __ cmp(sp, Operand(ip));
     __ b(lo, deferred_stack_check->entry());