@@ -748,37 +748,43 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  bool known_smi = frame_->KnownSmiAt(0);
  Register tos = frame_->PopToRegister();

  // Fast case checks

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(tos, ip);
  false_target->Branch(eq);

  if (!known_smi) {
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(tos, ip);
  true_target->Branch(eq);

    // Check if the value is 'true'.
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(tos, ip);
    true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(tos, ip);
  false_target->Branch(eq);

    // Check if the value is 'undefined'.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);
  }

  // Check if the value is a smi.
  __ cmp(tos, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(tos, Operand(kSmiTagMask));
  true_target->Branch(eq);

  // Slow case: call the runtime.
  frame_->EmitPush(tos);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);

  if (!known_smi) {
    false_target->Branch(eq);
    __ tst(tos, Operand(kSmiTagMask));
    true_target->Branch(eq);

    // Slow case: call the runtime.
    frame_->EmitPush(tos);
    frame_->CallRuntime(Runtime::kToBool, 1);
    // Convert the result (r0) to a condition code.
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(r0, ip);
  }

  cc_reg_ = ne;
}
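Note on the hunk above: ToBoolean now consults the virtual frame's static smi knowledge (KnownSmiAt(0)) and skips the false/true/undefined root comparisons and the runtime fall-back when the top of stack is already known to be a smi. A rough C++ model of the fast path being inlined (a sketch, not V8 code; the Root enum and tag layout below are simplifying assumptions):

    #include <cstdint>

    enum class Root { kFalse, kTrue, kUndefined, kOther };

    struct Tagged {
      bool is_smi;          // in the real engine: low tag bit clear
      intptr_t smi_value;   // meaningful only when is_smi
      Root root;            // meaningful only when !is_smi
    };

    // slow_case stands in for the Runtime::kToBool call.
    bool ToBooleanModel(const Tagged& v, bool known_smi,
                        bool (*slow_case)(const Tagged&)) {
      if (!known_smi && !v.is_smi) {
        if (v.root == Root::kFalse || v.root == Root::kUndefined) return false;
        if (v.root == Root::kTrue) return true;
      }
      if (v.is_smi) return v.smi_value != 0;   // smi zero is the only falsy smi
      return slow_case(v);                     // heap numbers, strings, objects
    }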
@@ -1745,11 +1751,15 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    WriteBarrierCharacter wb_info =
        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
    // Set initial value.
    Reference target(this, node->proxy());
    Load(val);
    target.SetValue(NOT_CONST_INIT);
    target.SetValue(NOT_CONST_INIT, wb_info);

    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
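Note: the extra SetValue argument introduced here is a static hint for the write barrier of the eventual store. A sketch of how the hint is chosen in this and the following hunks (enum values as they appear in the patch; the rationale for literals is an assumption, not stated in the diff):

    enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };

    WriteBarrierCharacter ChooseWriteBarrierHint(bool likely_smi, bool is_literal) {
      // A literal value is presumably never a freshly allocated new-space
      // object, so storing it can never create an old->new pointer.
      if (is_literal) return NEVER_NEWSPACE;
      return likely_smi ? LIKELY_SMI : UNLIKELY_SMI;
    }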
@@ -2485,13 +2495,13 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
        if (each.size() > 0) {
          __ ldr(r0, frame_->ElementAt(each.size()));
          frame_->EmitPush(r0);
          each.SetValue(NOT_CONST_INIT);
          each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
          frame_->Drop(2);
        } else {
          // If the reference was to a slot we rely on the convenient property
          // that it doesn't matter whether a value (eg, r3 pushed above) is
          // right on top of or right underneath a zero-sized reference.
          each.SetValue(NOT_CONST_INIT);
          each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
          frame_->Drop();
        }
      }
@@ -3646,6 +3656,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
    // Evaluate the receiver subexpression.
    Load(prop->obj());

    WriteBarrierCharacter wb_info;

    // Change to slow case in the beginning of an initialization block to
    // avoid the quadratic behavior of repeatedly adding fast properties.
    if (node->starts_initialization_block()) {
@@ -3667,7 +3679,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
    // [tos]   : key
    // [tos+1] : receiver
    // [tos+2] : receiver if at the end of an initialization block
    //
    // Evaluate the right-hand side.
    if (node->is_compound()) {
      // For a compound assignment the right-hand side is a binary operation
@@ -3699,9 +3711,13 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
                               overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                               inline_smi);
      }
      wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
    } else {
      // For non-compound assignment just load the right-hand side.
      Load(node->value());
      wb_info = node->value()->AsLiteral() != NULL ?
          NEVER_NEWSPACE :
          (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
    }

    // Stack layout:
@@ -3713,7 +3729,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
    // Perform the assignment.  It is safe to ignore constants here.
    ASSERT(node->op() != Token::INIT_CONST);
    CodeForSourcePosition(node->position());
    EmitKeyedStore(prop->key()->type());
    EmitKeyedStore(prop->key()->type(), wb_info);
    frame_->EmitPush(r0);

    // Stack layout:
@@ -4291,7 +4307,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
  } else {
    CpuFeatures::Scope scope(VFP3);
    JumpTarget runtime, done;
    Label not_minus_half, allocate_return;
    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;

    Register scratch1 = VirtualFrame::scratch0();
    Register scratch2 = VirtualFrame::scratch1();
@@ -4299,18 +4315,74 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
    // Get base and exponent to registers.
    Register exponent = frame_->PopToRegister();
    Register base = frame_->PopToRegister(exponent);
    Register heap_number_map = no_reg;

    // Set the frame for the runtime jump target. The code below jumps to the
    // jump target label so the frame needs to be established before that.
    ASSERT(runtime.entry_frame() == NULL);
    runtime.set_entry_frame(frame_);

    __ BranchOnSmi(exponent, runtime.entry_label());
    __ BranchOnNotSmi(exponent, &exponent_nonsmi);
    __ BranchOnNotSmi(base, &base_nonsmi);

    heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

    // Exponent is a smi and base is a smi. Get the smi value into vfp register
    // d1.
    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
    __ b(&powi);

    __ bind(&base_nonsmi);
    // Exponent is smi and base is non smi. Get the double value from the base
    // into vfp register d1.
    __ ObjectToDoubleVFPRegister(base, d1,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label());

    __ bind(&powi);

    // Load 1.0 into d0.
    __ mov(scratch2, Operand(0x3ff00000));
    __ mov(scratch1, Operand(0));
    __ vmov(d0, scratch1, scratch2);

    // Get the absolute untagged value of the exponent and use that for the
    // calculation.
    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
    __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi);  // Negate if negative.
    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.

    // Run through all the bits in the exponent. The result is calculated in d0
    // and d1 holds base^(bit^2).
    Label more_bits;
    __ bind(&more_bits);
    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
    __ vmul(d0, d0, d1, cs);  // Multiply with base^(bit^2) if bit is set.
    __ vmul(d1, d1, d1, ne);  // Don't bother calculating next d1 if done.
    __ b(ne, &more_bits);

    // If exponent is positive we are done.
    __ cmp(exponent, Operand(0));
    __ b(ge, &allocate_return);

    // If exponent is negative result is 1/result (d2 already holds 1.0 in that
    // case). However if d0 has reached infinity this will not provide the
    // correct result, so call runtime if that is the case.
    __ mov(scratch2, Operand(0x7FF00000));
    __ mov(scratch1, Operand(0));
    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
    __ vcmp(d0, d1);
    __ vmrs(pc);
    runtime.Branch(eq);  // d0 reached infinity.
    __ vdiv(d0, d2, d0);
    __ b(&allocate_return);

    __ bind(&exponent_nonsmi);
    // Special handling of raising to the power of -0.5 and 0.5. First check
    // that the value is a heap number and that the lower bits (which for both
    // values are zero).
    Register heap_number_map = r6;
    heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
    __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
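Note: the more_bits loop above is exponentiation by squaring over the bits of the absolute, untagged smi exponent, with d0 accumulating the result and d1 holding base raised to successive powers of two. A scalar C++ model of the same control flow (a sketch, not the stub itself):

    #include <cstdlib>

    double PowiModel(double base, int exponent) {
      unsigned bits = static_cast<unsigned>(std::abs(exponent));
      double result = 1.0;   // d0
      double power = base;   // d1
      while (bits != 0) {
        if (bits & 1) result *= power;   // vmul(d0, d0, d1, cs)
        bits >>= 1;
        if (bits != 0) power *= power;   // vmul(d1, d1, d1, ne)
      }
      // A negative exponent takes the reciprocal at the end; the stub instead
      // bails out to the runtime when the positive power has already
      // overflowed to infinity, since 1/inf can wrongly give 0.
      return exponent < 0 ? 1.0 / result : result;
    }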
@@ -4319,7 +4391,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
    __ tst(scratch2, scratch2);
    runtime.Branch(ne);

    // Load the e
    // Load the higher bits (which contains the floating point exponent).
    __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));

    // Compare exponent with -0.5.
@@ -4356,8 +4428,10 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
    __ vsqrt(d0, d0);

    __ bind(&allocate_return);
    __ AllocateHeapNumberWithValue(
        base, d0, scratch1, scratch2, heap_number_map, runtime.entry_label());
    Register scratch3 = r5;
    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
                                   heap_number_map, runtime.entry_label());
    __ mov(base, scratch3);
    done.Jump();

    runtime.Bind();
@@ -5349,9 +5423,13 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
    frame_->EmitPush(r0);  // r0 has result

  } else {
    bool overwrite =
    bool can_overwrite =
        (node->expression()->AsBinaryOperation() != NULL &&
         node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
    UnaryOverwriteMode overwrite =
        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;

    bool no_negative_zero = node->expression()->no_negative_zero();
    Load(node->expression());
    switch (op) {
      case Token::NOT:
@@ -5362,7 +5440,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {

      case Token::SUB: {
        frame_->PopToR0();
        GenericUnaryOpStub stub(Token::SUB, overwrite);
        GenericUnaryOpStub stub(
            Token::SUB,
            overwrite,
            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
        frame_->CallStub(&stub, 0);
        frame_->EmitPush(r0);  // r0 has result
        break;
@@ -5451,7 +5532,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
      __ sub(value, value, Operand(Smi::FromInt(1)));
    }
    frame_->EmitPush(value);
    target.SetValue(NOT_CONST_INIT);
    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
    if (is_postfix) frame_->Pop();
    ASSERT_EQ(original_height + 1, frame_->height());
    return;
@@ -5550,7 +5631,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
    // Set the target with the result, leaving the result on
    // top of the stack.  Removes the target from the stack if
    // it has a non-zero size.
    if (!is_const) target.SetValue(NOT_CONST_INIT);
    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
  }

  // Postfix: Discard the new value and use the old.
@@ -6283,7 +6364,8 @@ void CodeGenerator::EmitKeyedLoad() {
}


void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
void CodeGenerator::EmitKeyedStore(StaticType* key_type,
                                   WriteBarrierCharacter wb_info) {
  // Generate inlined version of the keyed store if the code is in a loop
  // and the key is likely to be a smi.
  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
@@ -6299,25 +6381,45 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
    __ IncrementCounter(&Counters::keyed_store_inline, 1,
                        scratch1, scratch2);

    // Load the value, key and receiver from the stack.
    bool value_is_harmless = frame_->KnownSmiAt(0);
    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
    bool key_is_smi = frame_->KnownSmiAt(1);
    Register value = frame_->PopToRegister();
    Register key = frame_->PopToRegister(value);
    VirtualFrame::SpilledScope spilled(frame_);
    Register receiver = r2;
    frame_->EmitPop(receiver);

#ifdef DEBUG
    bool we_remembered_the_write_barrier = value_is_harmless;
#endif

    // The deferred code expects value, key and receiver in registers.
    DeferredReferenceSetKeyedValue* deferred =
        new DeferredReferenceSetKeyedValue(value, key, receiver);

    // Check that the value is a smi. As this inlined code does not set the
    // write barrier it is only possible to store smi values.
    __ tst(value, Operand(kSmiTagMask));
    deferred->Branch(ne);
    if (!value_is_harmless) {
      // If the value is not likely to be a Smi then let's test the fixed array
      // for new space instead.  See below.
      if (wb_info == LIKELY_SMI) {
        __ tst(value, Operand(kSmiTagMask));
        deferred->Branch(ne);
#ifdef DEBUG
        we_remembered_the_write_barrier = true;
#endif
      }
    }

    // Check that the key is a smi.
    __ tst(key, Operand(kSmiTagMask));
    deferred->Branch(ne);
    if (!key_is_smi) {
      // Check that the key is a smi.
      __ tst(key, Operand(kSmiTagMask));
      deferred->Branch(ne);
    }

    // Check that the receiver is a heap object.
    __ tst(receiver, Operand(kSmiTagMask));
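Note: the rewritten inline keyed store no longer insists on a smi value in every case; it only emits a guard when the write barrier could actually be needed, and the DEBUG-only we_remembered_the_write_barrier flag asserts that one of those guards was emitted. A minimal model of the decision (plain C++ sketch; the new-space page test from the next hunk is reduced to a boolean here):

    // Returns true when the store must go through the deferred code, which
    // performs the full store IC including the real write barrier.
    bool StoreNeedsWriteBarrier(bool value_is_smi, bool elements_in_new_space) {
      if (value_is_smi) return false;            // smis are immediates, not pointers
      if (elements_in_new_space) return false;   // new-space pages are scanned anyway
      return true;                               // old-space array, heap-object value
    }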
@@ -6333,24 +6435,35 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
    __ cmp(scratch1, key);
    deferred->Branch(ls);  // Unsigned less equal.

    // Get the elements array from the receiver.
    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
    if (!value_is_harmless && wb_info != LIKELY_SMI) {
      Label ok;
      __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
      __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
      __ tst(value, Operand(kSmiTagMask), ne);
      deferred->Branch(ne);
#ifdef DEBUG
      we_remembered_the_write_barrier = true;
#endif
    }
    // Check that the elements array is not a dictionary.
    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
    // The following instructions are the part of the inlined store keyed
    // property code which can be patched. Therefore the exact number of
    // instructions generated need to be fixed, so the constant pool is blocked
    // while generating this code.
    { Assembler::BlockConstPoolScope block_const_pool(masm_);
      // Get the elements array from the receiver and check that it
      // is not a dictionary.
      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
#ifdef DEBUG
      Label check_inlined_codesize;
      masm_->bind(&check_inlined_codesize);
#endif

      // Read the fixed array map from the constant pool (not from the root
      // array) so that the value can be patched.  When debugging, we patch this
      // comparison to always fail so that we will hit the IC call in the
      // deferred code which will allow the debugger to break for fast case
      // stores.
#ifdef DEBUG
      Label check_inlined_codesize;
      masm_->bind(&check_inlined_codesize);
#endif
      __ mov(scratch3, Operand(Factory::fixed_array_map()));
      __ cmp(scratch2, scratch3);
      deferred->Branch(ne);
@@ -6367,6 +6480,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
              masm_->InstructionsGeneratedSince(&check_inlined_codesize));
    }

    ASSERT(we_remembered_the_write_barrier);

    deferred->BindExit();
  } else {
    frame()->CallKeyedStoreIC();
@@ -6464,7 +6579,7 @@ void Reference::GetValue() {
}


void Reference::SetValue(InitState init_state) {
void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
  ASSERT(!is_illegal());
  ASSERT(!cgen_->has_cc());
  MacroAssembler* masm = cgen_->masm();
@@ -6496,7 +6611,7 @@ void Reference::SetValue(InitState init_state) {
      Property* property = expression_->AsProperty();
      ASSERT(property != NULL);
      cgen_->CodeForSourcePosition(property->position());
      cgen_->EmitKeyedStore(property->key()->type());
      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
      frame->EmitPush(r0);
      set_unloaded();
      break;
@@ -7170,22 +7285,42 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,


// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
                                         Label* possible_strings,
                                         Label* not_both_strings) {
  // r2 is object type of r0.
  // Ensure that no non-strings have the symbol bit set.
  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
  Label object_test;
  ASSERT(kSymbolTag != 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(eq, slow);
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
  __ b(eq, possible_strings);
  __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsSymbolMask));
  __ b(eq, slow);
  __ b(eq, possible_strings);

  // Both are symbols.  We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(1));   // Non-zero indicates not equal.
  __ mov(pc, Operand(lr));  // Return.

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ mov(pc, Operand(lr));  // Return.
}
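Note: the widened helper above now also answers equality for two JS objects. The last few instructions encode the answer arithmetically, using the stub's convention that r0 == 0 means "equal". A small C++ model of that bit trick (a sketch; the bit position is a placeholder, the real value is Map::kIsUndetectable):

    // bitfield_lhs / bitfield_rhs are the Map::kBitField bytes of the objects.
    int ObjectEqualityAnswer(unsigned bitfield_lhs, unsigned bitfield_rhs) {
      const unsigned kIsUndetectableBit = 1u << 4;  // placeholder bit position
      unsigned both = bitfield_lhs & bitfield_rhs & kIsUndetectableBit;
      // Both undetectable: both == bit, XOR gives 0      (compare equal).
      // Otherwise:         both == 0,   XOR gives the bit (compare not equal).
      return static_cast<int>(both ^ kIsUndetectableBit);
    }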
@@ -7301,7 +7436,8 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {


void RecordWriteStub::Generate(MacroAssembler* masm) {
  __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
  __ add(offset_, object_, Operand(offset_));
  __ RecordWriteHelper(object_, offset_, scratch_);
  __ Ret();
}
@@ -7398,9 +7534,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // symbols.
  if (cc_ == eq && !strict_) {
    // Either jumps to slow or returns the answer.  Assumes that r2 is the type
    // of r0 on entry.
    EmitCheckForSymbols(masm, &flat_string_check);
    // Returns an answer for two symbols or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of r0 on entry.
    EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
@@ -7511,189 +7648,197 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
    __ Swap(r0, r1, ip);
  }

  if (ShouldGenerateFPCode()) {
    Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
  // The type transition also calculates the answer.
  bool generate_code_to_calculate_answer = true;

  if (ShouldGenerateFPCode()) {
    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
      switch (op_) {
        case Token::ADD:
        case Token::SUB:
        case Token::MUL:
        case Token::DIV:
          GenerateTypeTransition(masm);
          GenerateTypeTransition(masm);  // Tail call.
          generate_code_to_calculate_answer = false;
          break;

        default:
          break;
      }
      // Restore heap number map register.
      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    }

    if (mode_ == NO_OVERWRITE) {
      // In the case where there is no chance of an overwritable float we may as
      // well do the allocation immediately while r0 and r1 are untouched.
      __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
    }

    // Move r0 to a double in r2-r3.
    __ tst(r0, Operand(kSmiTagMask));
    __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    __ cmp(r4, heap_number_map);
    __ b(ne, &slow);
    if (mode_ == OVERWRITE_RIGHT) {
      __ mov(r5, Operand(r0));  // Overwrite this heap number.
    }
    if (use_fp_registers) {
      CpuFeatures::Scope scope(VFP3);
      // Load the double from tagged HeapNumber r0 to d7.
      __ sub(r7, r0, Operand(kHeapObjectTag));
      __ vldr(d7, r7, HeapNumber::kValueOffset);
    } else {
      // Calling convention says that second double is in r2 and r3.
      __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
    }
    __ jmp(&finished_loading_r0);
    __ bind(&r0_is_smi);
    if (mode_ == OVERWRITE_RIGHT) {
      // We can't overwrite a Smi so get address of new heap number into r5.
      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
    }
    if (generate_code_to_calculate_answer) {
      Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
      if (mode_ == NO_OVERWRITE) {
        // In the case where there is no chance of an overwritable float we may
        // as well do the allocation immediately while r0 and r1 are untouched.
        __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
      }

      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        // Convert smi in r0 to double in d7.
        __ mov(r7, Operand(r0, ASR, kSmiTagSize));
        __ vmov(s15, r7);
        __ vcvt_f64_s32(d7, s15);
        if (!use_fp_registers) {
          __ vmov(r2, r3, d7);
      // Move r0 to a double in r2-r3.
      __ tst(r0, Operand(kSmiTagMask));
      __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
      __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
      __ cmp(r4, heap_number_map);
      __ b(ne, &slow);
      if (mode_ == OVERWRITE_RIGHT) {
        __ mov(r5, Operand(r0));  // Overwrite this heap number.
      }
      if (use_fp_registers) {
        CpuFeatures::Scope scope(VFP3);
        // Load the double from tagged HeapNumber r0 to d7.
        __ sub(r7, r0, Operand(kHeapObjectTag));
        __ vldr(d7, r7, HeapNumber::kValueOffset);
      } else {
        // Calling convention says that second double is in r2 and r3.
        __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
      }
      __ jmp(&finished_loading_r0);
      __ bind(&r0_is_smi);
      if (mode_ == OVERWRITE_RIGHT) {
        // We can't overwrite a Smi so get address of new heap number into r5.
        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
      }
      } else {
        // Write Smi from r0 to r3 and r2 in double format.
        __ mov(r7, Operand(r0));
        ConvertToDoubleStub stub3(r3, r2, r7, r4);
        __ push(lr);
        __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
        __ pop(lr);
      }

      // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
      // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
      Label r1_is_not_smi;
      if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
        __ tst(r1, Operand(kSmiTagMask));
        __ b(ne, &r1_is_not_smi);
        GenerateTypeTransition(masm);
        // Restore heap number map register.
        __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
        __ jmp(&r1_is_smi);
      }
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        // Convert smi in r0 to double in d7.
        __ mov(r7, Operand(r0, ASR, kSmiTagSize));
        __ vmov(s15, r7);
        __ vcvt_f64_s32(d7, s15);
        if (!use_fp_registers) {
          __ vmov(r2, r3, d7);
        }
      } else {
        // Write Smi from r0 to r3 and r2 in double format.
        __ mov(r7, Operand(r0));
        ConvertToDoubleStub stub3(r3, r2, r7, r4);
        __ push(lr);
        __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
        __ pop(lr);
      }

      __ bind(&finished_loading_r0);
      // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
      // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
      Label r1_is_not_smi;
      if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
        __ tst(r1, Operand(kSmiTagMask));
        __ b(ne, &r1_is_not_smi);
        GenerateTypeTransition(masm);  // Tail call.
      }

      // Move r1 to a double in r0-r1.
      __ tst(r1, Operand(kSmiTagMask));
      __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
      __ bind(&r1_is_not_smi);
      __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
      __ cmp(r4, heap_number_map);
      __ b(ne, &slow);
      if (mode_ == OVERWRITE_LEFT) {
        __ mov(r5, Operand(r1));  // Overwrite this heap number.
      }
      if (use_fp_registers) {
        CpuFeatures::Scope scope(VFP3);
        // Load the double from tagged HeapNumber r1 to d6.
        __ sub(r7, r1, Operand(kHeapObjectTag));
        __ vldr(d6, r7, HeapNumber::kValueOffset);
      } else {
        // Calling convention says that first double is in r0 and r1.
        __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
      }
      __ jmp(&finished_loading_r1);
      __ bind(&r1_is_smi);
      if (mode_ == OVERWRITE_LEFT) {
        // We can't overwrite a Smi so get address of new heap number into r5.
        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
      }
      __ bind(&finished_loading_r0);

      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        // Convert smi in r1 to double in d6.
        __ mov(r7, Operand(r1, ASR, kSmiTagSize));
        __ vmov(s13, r7);
        __ vcvt_f64_s32(d6, s13);
        if (!use_fp_registers) {
          __ vmov(r0, r1, d6);
      // Move r1 to a double in r0-r1.
      __ tst(r1, Operand(kSmiTagMask));
      __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
      __ bind(&r1_is_not_smi);
      __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
      __ cmp(r4, heap_number_map);
      __ b(ne, &slow);
      if (mode_ == OVERWRITE_LEFT) {
        __ mov(r5, Operand(r1));  // Overwrite this heap number.
      }
      if (use_fp_registers) {
        CpuFeatures::Scope scope(VFP3);
        // Load the double from tagged HeapNumber r1 to d6.
        __ sub(r7, r1, Operand(kHeapObjectTag));
        __ vldr(d6, r7, HeapNumber::kValueOffset);
      } else {
        // Calling convention says that first double is in r0 and r1.
        __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
      }
      __ jmp(&finished_loading_r1);
      __ bind(&r1_is_smi);
      if (mode_ == OVERWRITE_LEFT) {
        // We can't overwrite a Smi so get address of new heap number into r5.
        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
      }
      } else {
        // Write Smi from r1 to r1 and r0 in double format.
        __ mov(r7, Operand(r1));
        ConvertToDoubleStub stub4(r1, r0, r7, r9);
        __ push(lr);
        __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
        __ pop(lr);
      }

      __ bind(&finished_loading_r1);
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        // Convert smi in r1 to double in d6.
        __ mov(r7, Operand(r1, ASR, kSmiTagSize));
        __ vmov(s13, r7);
        __ vcvt_f64_s32(d6, s13);
        if (!use_fp_registers) {
          __ vmov(r0, r1, d6);
        }
      } else {
        // Write Smi from r1 to r1 and r0 in double format.
        __ mov(r7, Operand(r1));
        ConvertToDoubleStub stub4(r1, r0, r7, r9);
        __ push(lr);
        __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
        __ pop(lr);
      }

      __ bind(&do_the_call);
      // If we are inlining the operation using VFP3 instructions for
      // add, subtract, multiply, or divide, the arguments are in d6 and d7.
      if (use_fp_registers) {
        CpuFeatures::Scope scope(VFP3);
        // ARMv7 VFP3 instructions to implement
        // double precision, add, subtract, multiply, divide.

        if (Token::MUL == op_) {
          __ vmul(d5, d6, d7);
        } else if (Token::DIV == op_) {
          __ vdiv(d5, d6, d7);
        } else if (Token::ADD == op_) {
          __ vadd(d5, d6, d7);
        } else if (Token::SUB == op_) {
          __ vsub(d5, d6, d7);
      __ bind(&finished_loading_r1);
    }

    if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
      __ bind(&do_the_call);
      // If we are inlining the operation using VFP3 instructions for
      // add, subtract, multiply, or divide, the arguments are in d6 and d7.
      if (use_fp_registers) {
        CpuFeatures::Scope scope(VFP3);
        // ARMv7 VFP3 instructions to implement
        // double precision, add, subtract, multiply, divide.

        if (Token::MUL == op_) {
          __ vmul(d5, d6, d7);
        } else if (Token::DIV == op_) {
          __ vdiv(d5, d6, d7);
        } else if (Token::ADD == op_) {
          __ vadd(d5, d6, d7);
        } else if (Token::SUB == op_) {
          __ vsub(d5, d6, d7);
        } else {
          UNREACHABLE();
        }
        __ sub(r0, r5, Operand(kHeapObjectTag));
        __ vstr(d5, r0, HeapNumber::kValueOffset);
        __ add(r0, r0, Operand(kHeapObjectTag));
        __ mov(pc, lr);
      } else {
        UNREACHABLE();
        // If we did not inline the operation, then the arguments are in:
        // r0: Left value (least significant part of mantissa).
        // r1: Left value (sign, exponent, top of mantissa).
        // r2: Right value (least significant part of mantissa).
        // r3: Right value (sign, exponent, top of mantissa).
        // r5: Address of heap number for result.

        __ push(lr);  // For later.
        __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
        // Call C routine that may not cause GC or other trouble. r5 is callee
        // save.
        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
        // Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
        // Double returned in fp coprocessor register 0 and 1, encoded as
        // register cr8.  Offsets must be divisible by 4 for coprocessor so we
        // need to substract the tag from r5.
        __ sub(r4, r5, Operand(kHeapObjectTag));
        __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
#else
        // Double returned in registers 0 and 1.
        __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
#endif
        __ mov(r0, Operand(r5));
        // And we are done.
        __ pop(pc);
      }
      __ sub(r0, r5, Operand(kHeapObjectTag));
      __ vstr(d5, r0, HeapNumber::kValueOffset);
      __ add(r0, r0, Operand(kHeapObjectTag));
      __ mov(pc, lr);
    } else {
      // If we did not inline the operation, then the arguments are in:
      // r0: Left value (least significant part of mantissa).
      // r1: Left value (sign, exponent, top of mantissa).
      // r2: Right value (least significant part of mantissa).
      // r3: Right value (sign, exponent, top of mantissa).
      // r5: Address of heap number for result.

      __ push(lr);  // For later.
      __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
      // Call C routine that may not cause GC or other trouble. r5 is callee
      // save.
      __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
      // Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
      // Double returned in fp coprocessor register 0 and 1, encoded as register
      // cr8. Offsets must be divisible by 4 for coprocessor so we need to
      // substract the tag from r5.
      __ sub(r4, r5, Operand(kHeapObjectTag));
      __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
#else
      // Double returned in registers 0 and 1.
      __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
#endif
      __ mov(r0, Operand(r5));
      // And we are done.
      __ pop(pc);
    }
  }

  if (!generate_code_to_calculate_answer &&
      !slow_reverse.is_linked() &&
      !slow.is_linked()) {
    return;
  }

  if (lhs.is(r0)) {
    __ b(&slow);
    __ bind(&slow_reverse);
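Note: throughout the hunk above the result heap number lives in r5, and which cell r5 points at is decided by the overwrite mode. A compact model of that choice (a sketch that mirrors the AllocateHeapNumber / mov(r5, ...) paths; not V8 code):

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // 0 = allocate a fresh heap number, 1 = reuse the left operand's cell,
    // 2 = reuse the right operand's cell. A smi operand can never be reused.
    int ResultCell(OverwriteMode mode, bool left_is_heap_number,
                   bool right_is_heap_number) {
      if (mode == OVERWRITE_LEFT && left_is_heap_number) return 1;
      if (mode == OVERWRITE_RIGHT && right_is_heap_number) return 2;
      return 0;
    }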
@@ -7913,7 +8058,11 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
      // The code below for writing into heap numbers isn't capable of writing
      // the register as an unsigned int so we go to slow case if we hit this
      // case.
      __ b(mi, &slow);
      if (CpuFeatures::IsSupported(VFP3)) {
        __ b(mi, &result_not_a_smi);
      } else {
        __ b(mi, &slow);
      }
      break;
    case Token::SHL:
      // Use only the 5 least significant bits of the shift count.
@@ -7957,10 +8106,24 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
  // result.
  __ mov(r0, Operand(r5));

  // Tail call that writes the int32 in r2 to the heap number in r0, using
  // r3 as scratch.  r0 is preserved and returned.
  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, r2);
    if (op_ == Token::SHR) {
      __ vcvt_f64_u32(d0, s0);
    } else {
      __ vcvt_f64_s32(d0, s0);
    }
    __ sub(r3, r0, Operand(kHeapObjectTag));
    __ vstr(d0, r3, HeapNumber::kValueOffset);
    __ Ret();
  } else {
    // Tail call that writes the int32 in r2 to the heap number in r0, using
    // r3 as scratch. r0 is preserved and returned.
    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
    __ TailCallStub(&stub);
  }

  if (mode_ != NO_OVERWRITE) {
    __ bind(&have_to_allocate);
@@ -8597,29 +8760,15 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ Push(r1, r0);

  // Internal frame is necessary to handle exceptions properly.
  __ EnterInternalFrame();
  // Call the stub proper to get the result in r0.
  __ Call(&get_result);
  __ LeaveInternalFrame();

  __ push(r0);

  __ mov(r0, Operand(Smi::FromInt(MinorKey())));
  __ push(r0);
  __ mov(r0, Operand(Smi::FromInt(op_)));
  __ push(r0);
  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
  __ mov(r1, Operand(Smi::FromInt(op_)));
  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
  __ push(r0);
  __ Push(r2, r1, r0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
      6,
      5,
      1);

  // The entry point for the result calculation is assumed to be immediately
  // after this sequence.
  __ bind(&get_result);
}
@@ -8751,16 +8900,23 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
    // Go slow case if the value of the expression is zero
    // to make sure that we switch between 0 and -0.
    __ cmp(r0, Operand(0));
    __ b(eq, &slow);

    // The value of the expression is a smi that is not zero.  Try
    // optimistic subtraction '0 - value'.
    __ rsb(r1, r0, Operand(0), SetCC);
    __ b(vs, &slow);

    __ mov(r0, Operand(r1));  // Set r0 to result.
    __ b(&done);
    if (negative_zero_ == kStrictNegativeZero) {
      // If we have to check for zero, then we can check for the max negative
      // smi while we are at it.
      __ bic(ip, r0, Operand(0x80000000), SetCC);
      __ b(eq, &slow);
      __ rsb(r0, r0, Operand(0));
      __ StubReturn(1);
    } else {
      // The value of the expression is a smi and 0 is OK for -0.  Try
      // optimistic subtraction '0 - value'.
      __ rsb(r0, r0, Operand(0), SetCC);
      __ StubReturn(1, vc);
      // We don't have to reverse the optimistic neg since the only case
      // where we fall through is the minimum negative Smi, which is the case
      // where the neg leaves the register unchanged.
      __ jmp(&slow);  // Go slow on max negative Smi.
    }

    __ bind(&try_float);
    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
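Note: the kStrictNegativeZero / kIgnoreNegativeZero split exists because two smi inputs cannot be negated as smis: 0 (whose negation is -0, which needs a heap number) and the most negative smi (whose negation overflows the smi range). A sketch of that predicate (31-bit smi payload assumed, as on 32-bit ARM):

    #include <cstdint>

    bool CanNegateAsSmi(int32_t value, bool must_preserve_negative_zero) {
      if (value == 0) return !must_preserve_negative_zero;  // -0.0 is not a smi
      const int32_t kSmiMin = -(1 << 30);
      return value != kSmiMin;  // -kSmiMin falls outside the smi range
    }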
@@ -8768,7 +8924,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
    __ cmp(r1, heap_number_map);
    __ b(ne, &slow);
    // r0 is a heap number.  Get a new heap number in r1.
    if (overwrite_) {
    if (overwrite_ == UNARY_OVERWRITE) {
      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
      __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
@@ -8801,7 +8957,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
    __ b(&done);

    __ bind(&try_float);
    if (!overwrite_) {
    if (!overwrite_ == UNARY_OVERWRITE) {
      // Allocate a fresh heap number, but don't overwrite r0 until
      // we're sure we can do it without going through the slow case
      // that needs the value in r0.
@@ -8809,12 +8965,21 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
      __ mov(r0, Operand(r2));
    }

    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
    // have to set up a frame.
    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
    __ push(lr);
    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
    if (CpuFeatures::IsSupported(VFP3)) {
      // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
      CpuFeatures::Scope scope(VFP3);
      __ vmov(s0, r1);
      __ vcvt_f64_s32(d0, s0);
      __ sub(r2, r0, Operand(kHeapObjectTag));
      __ vstr(d0, r2, HeapNumber::kValueOffset);
    } else {
      // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
      // have to set up a frame.
      WriteInt32ToHeapNumberStub stub(r1, r0, r2);
      __ push(lr);
      __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
      __ pop(lr);
    }
  } else {
    UNIMPLEMENTED();
  }