@@ -85,6 +85,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
 
   // Pop the function info from the stack.
@@ -98,32 +100,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
                         &gc,
                         TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
+
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
-  // Compute the function map in the current global context and set that
+  // Compute the function map in the current native context and set that
   // as the map of the allocated object.
-  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
-  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
-  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
+  __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
+  __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
-  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
   __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
   __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ ldr(r1,
+           FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ tst(r1, r1);
+    __ b(ne, &check_optimized);
+  }
+  __ bind(&install_unoptimized);
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
   __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
   __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
@@ -131,6 +145,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Return result. The argument function info has been popped already.
   __ Ret();
 
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
+
+  // r2 holds native context, r1 points to fixed array of 3-element entries
+  // (native context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first elements.
+  Label install_optimized;
+  // Speculatively move code object into r4.
+  __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+  __ cmp(r2, r5);
+  __ b(eq, &install_optimized);
+
+  // Iterate through the rest of map backwards. r4 holds an index as a Smi.
+  Label loop;
+  __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double check first entry.
+
+  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ b(eq, &install_unoptimized);
+  __ sub(r4, r4, Operand(
+      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r5, MemOperand(r5));
+  __ cmp(r2, r5);
+  __ b(ne, &loop);
+  // Hit: fetch the optimized code.
+  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r5, r5, Operand(kPointerSize));
+  __ ldr(r4, MemOperand(r5));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+                      1, r6, r7);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+  // Now link a function into a list of optimized functions.
+  __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+  // No need for write barrier as JSFunction (eax) is in the new space.
+
+  __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Store JSFunction (eax) into edx before issuing write barrier as
+  // it clobbers all the registers passed.
+  __ mov(r4, r0);
+  __ RecordWriteContextSlot(
+      r2,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      r4,
+      r1,
+      kLRHasNotBeenSaved,
+      kDontSaveFPRegs);
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ LoadRoot(r4, Heap::kFalseValueRootIndex);
@@ -162,12 +242,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
 
   // Set up the fixed slots, copy the global object from the previous context.
-  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ mov(r1, Operand(Smi::FromInt(0)));
   __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
 
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -210,9 +290,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ mov(r2, Operand(Smi::FromInt(length)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
 
-  // If this block context is nested in the global context we get a smi
+  // If this block context is nested in the native context we get a smi
   // sentinel instead of a function. The block context should get the
-  // canonical empty function of the global context as its closure which
+  // canonical empty function of the native context as its closure which
   // we still have to look up.
   Label after_sentinel;
   __ JumpIfNotSmi(r3, &after_sentinel);
@@ -222,16 +302,16 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
     __ Assert(eq, message);
   }
   __ ldr(r3, GlobalObjectOperand());
-  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
   __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
   __ bind(&after_sentinel);
 
   // Set up the fixed slots, copy the global object from the previous context.
-  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
   __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
   __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
   __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
-  __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
+  __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
 
   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@ -519,8 +599,8 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
|
|
FloatingPointHelper::Destination destination, |
|
|
FloatingPointHelper::Destination destination, |
|
|
Register scratch1, |
|
|
Register scratch1, |
|
|
Register scratch2) { |
|
|
Register scratch2) { |
|
|
if (CpuFeatures::IsSupported(VFP3)) { |
|
|
if (CpuFeatures::IsSupported(VFP2)) { |
|
|
CpuFeatures::Scope scope(VFP3); |
|
|
CpuFeatures::Scope scope(VFP2); |
|
|
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
|
|
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
|
|
__ vmov(d7.high(), scratch1); |
|
|
__ vmov(d7.high(), scratch1); |
|
|
__ vcvt_f64_s32(d7, d7.high()); |
|
|
__ vcvt_f64_s32(d7, d7.high()); |
|
@@ -589,9 +669,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
-  if (CpuFeatures::IsSupported(VFP3) &&
+  if (CpuFeatures::IsSupported(VFP2) &&
       destination == kVFPRegisters) {
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     // Load the double from tagged HeapNumber to double register.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
     __ vldr(dst, scratch1, HeapNumber::kValueOffset);
@@ -604,8 +684,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
 
   // Handle loading a double from a smi.
   __ bind(&is_smi);
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     // Convert smi to double using VFP instructions.
     __ vmov(dst.high(), scratch1);
     __ vcvt_f64_s32(dst, dst.high());
@@ -682,8 +762,8 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
 
   Label done;
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(single_scratch, int_scratch);
     __ vcvt_f64_s32(double_dst, single_scratch);
     if (destination == kCoreRegisters) {
@@ -776,8 +856,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
 
   // Load the number.
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     // Load the double value.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
     __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
@@ -847,8 +927,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
 
   // Object is a heap number.
   // Convert the floating point value to a 32-bit integer.
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     SwVfpRegister single_scratch = double_scratch.low();
     // Load the double value.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -978,7 +1058,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   __ push(lr);
   __ PrepareCallCFunction(0, 2, scratch);
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
@@ -990,7 +1070,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   // Store answer in the overwritable heap number. Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     __ vstr(d0,
             FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
   } else {
@@ -1209,9 +1289,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
   }
 
   // Lhs is a smi, rhs is a number.
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP2)) {
     // Convert lhs to a double in d7.
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
     // Load the double from rhs, tagged HeapNumber r0, to d6.
     __ sub(r7, rhs, Operand(kHeapObjectTag));
@@ -1249,8 +1329,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
   }
 
   // Rhs is a smi, lhs is a heap number.
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     // Load the double from lhs, tagged HeapNumber r1, to d7.
     __ sub(r7, lhs, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
@@ -1362,7 +1442,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
   __ push(lr);
   __ PrepareCallCFunction(0, 2, r5);
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
@@ -1437,8 +1517,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
 
   // Both are heap numbers. Load them up then jump to the code we have
   // for that.
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     __ sub(r7, rhs, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
     __ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1527,8 +1607,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   Label load_result_from_cache;
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    if (CpuFeatures::IsSupported(VFP3)) {
-      CpuFeatures::Scope scope(VFP3);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
       __ CheckMap(object,
                   scratch1,
                   Heap::kHeapNumberMapRootIndex,
@@ -1659,9 +1739,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // The arguments have been converted to doubles and stored in d6 and d7, if
   // VFP3 is supported, or in r0, r1, r2, and r3.
   Isolate* isolate = masm->isolate();
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP2)) {
     __ bind(&lhs_not_nan);
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     Label no_nan;
     // ARMv7 VFP3 instructions to implement double precision comparison.
     __ VFPCompareAndSetFlags(d7, d6);
@@ -1779,11 +1859,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false. That means
   // we cannot call anything that could cause a GC from this stub.
-  // This stub uses VFP3 instructions.
-  CpuFeatures::Scope scope(VFP3);
-
   Label patch;
   const Register map = r9.is(tos_) ? r7 : r9;
+  const Register temp = map;
 
   // undefined -> false.
   CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1836,6 +1914,10 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
     Label not_heap_number;
     __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
     __ b(ne, &not_heap_number);
+
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+
     __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
     __ VFPCompareAndSetFlags(d1, 0.0);
     // "tos_" is a register, and contains a non zero value by default.
@@ -1843,6 +1925,45 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
     // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
+    } else {
+      Label done, not_nan, not_zero;
+      __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+      // -0 maps to false:
+      __ bic(
+          temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
+      __ b(ne, &not_zero);
+      // If exponent word is zero then the answer depends on the mantissa word.
+      __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+      __ jmp(&done);
+
+      // Check for NaN.
+      __ bind(&not_zero);
+      // We already zeroed the sign bit, now shift out the mantissa so we only
+      // have the exponent left.
+      __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
+      unsigned int shifted_exponent_mask =
+          HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
+      __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
+      __ b(ne, &not_nan);  // If exponent is not 0x7ff then it can't be a NaN.
+
+      // Reload exponent word.
+      __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+      __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
+      // If mantissa is not zero then we have a NaN, so return 0.
+      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+      __ b(ne, &done);
+
+      // Load mantissa word.
+      __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+      __ cmp(temp, Operand(0, RelocInfo::NONE));
+      // If mantissa is not zero then we have a NaN, so return 0.
+      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+      __ b(ne, &done);
+
+      __ bind(&not_nan);
+      __ mov(tos_, Operand(1, RelocInfo::NONE));
+      __ bind(&done);
+    }
     __ Ret();
     __ bind(&not_heap_number);
   }
@@ -1892,7 +2013,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   // restore them.
   __ stm(db_w, sp, kCallerSaved | lr.bit());
   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
@@ -1910,7 +2031,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
       ExternalReference::store_buffer_overflow_function(masm->isolate()),
       argument_count);
   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
       __ vldr(reg, MemOperand(sp, i * kDoubleSize));
@@ -2140,9 +2261,9 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
     __ mov(r0, r2);  // Move newly allocated heap number to r0.
   }
 
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP2)) {
     // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
-    CpuFeatures::Scope scope(VFP3);
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(s0, r1);
     __ vcvt_f64_s32(d0, s0);
     __ sub(r2, r0, Operand(kHeapObjectTag));
@@ -2442,7 +2563,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
       // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
       // depending on whether VFP3 is available or not.
       FloatingPointHelper::Destination destination =
-          CpuFeatures::IsSupported(VFP3) &&
+          CpuFeatures::IsSupported(VFP2) &&
           op_ != Token::MOD ?
           FloatingPointHelper::kVFPRegisters :
           FloatingPointHelper::kCoreRegisters;
@@ -2469,7 +2590,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
         // Using VFP registers:
         // d6: Left value
        // d7: Right value
-        CpuFeatures::Scope scope(VFP3);
+        CpuFeatures::Scope scope(VFP2);
         switch (op_) {
           case Token::ADD:
             __ vadd(d5, d6, d7);
@@ -2558,7 +2679,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
         // The code below for writing into heap numbers isn't capable of
         // writing the register as an unsigned int so we go to slow case if we
         // hit this case.
-        if (CpuFeatures::IsSupported(VFP3)) {
+        if (CpuFeatures::IsSupported(VFP2)) {
           __ b(mi, &result_not_a_smi);
         } else {
           __ b(mi, not_numbers);
@@ -2597,10 +2718,10 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
       // result.
       __ mov(r0, Operand(r5));
 
-      if (CpuFeatures::IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP2)) {
         // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
         // mentioned above SHR needs to always produce a positive result.
-        CpuFeatures::Scope scope(VFP3);
+        CpuFeatures::Scope scope(VFP2);
         __ vmov(s0, r2);
         if (op_ == Token::SHR) {
           __ vcvt_f64_u32(d0, s0);
@@ -2759,7 +2880,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       // Jump to type transition if they are not. The registers r0 and r1 (right
       // and left) are preserved for the runtime call.
       FloatingPointHelper::Destination destination =
-          (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
+          (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
               ? FloatingPointHelper::kVFPRegisters
               : FloatingPointHelper::kCoreRegisters;
 
@@ -2787,7 +2908,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                                  &transition);
 
       if (destination == FloatingPointHelper::kVFPRegisters) {
-        CpuFeatures::Scope scope(VFP3);
+        CpuFeatures::Scope scope(VFP2);
         Label return_heap_number;
         switch (op_) {
           case Token::ADD:
@@ -2954,9 +3075,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
         // We only get a negative result if the shift value (r2) is 0.
         // This result cannot be respresented as a signed 32-bit integer, try
         // to return a heap number if we can.
-        // The non vfp3 code does not support this special case, so jump to
+        // The non vfp2 code does not support this special case, so jump to
         // runtime if we don't support it.
-        if (CpuFeatures::IsSupported(VFP3)) {
+        if (CpuFeatures::IsSupported(VFP2)) {
           __ b(mi, (result_type_ <= BinaryOpIC::INT32)
                    ? &transition
                    : &return_heap_number);
@@ -2991,8 +3112,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                              scratch2,
                                              &call_runtime);
 
-      if (CpuFeatures::IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
+      if (CpuFeatures::IsSupported(VFP2)) {
+        CpuFeatures::Scope scope(VFP2);
         if (op_ != Token::SHR) {
           // Convert the result to a floating point value.
           __ vmov(double_scratch.low(), r2);
@@ -3221,8 +3342,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   const Register cache_entry = r0;
   const bool tagged = (argument_type_ == TAGGED);
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     if (tagged) {
       // Argument is a number and is on stack and in r0.
       // Load argument and check if it is a smi.
@@ -3323,23 +3444,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
         ExternalReference(RuntimeFunction(), masm->isolate());
     __ TailCallExternalReference(runtime_function, 1, 1);
   } else {
-    if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
-    CpuFeatures::Scope scope(VFP3);
+    ASSERT(CpuFeatures::IsSupported(VFP2));
+    CpuFeatures::Scope scope(VFP2);
 
     Label no_update;
     Label skip_cache;
 
     // Call C function to calculate the result and update the cache.
-    // Register r0 holds precalculated cache entry address; preserve
-    // it on the stack and pop it into register cache_entry after the
-    // call.
-    __ push(cache_entry);
+    // r0: precalculated cache entry address.
+    // r2 and r3: parts of the double value.
+    // Store r0, r2 and r3 on stack for later before calling C function.
+    __ Push(r3, r2, cache_entry);
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(d2);
 
     // Try to update the cache. If we cannot allocate a
     // heap number, we return the result without updating.
-    __ pop(cache_entry);
+    __ Pop(r3, r2, cache_entry);
     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
     __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
@@ -3385,6 +3506,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
 
 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
                                                     Register scratch) {
+  ASSERT(CpuFeatures::IsEnabled(VFP2));
   Isolate* isolate = masm->isolate();
 
   __ push(lr);
@@ -3445,7 +3567,7 @@ void InterruptStub::Generate(MacroAssembler* masm) {
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  CpuFeatures::Scope vfp3_scope(VFP3);
+  CpuFeatures::Scope vfp2_scope(VFP2);
   const Register base = r1;
   const Register exponent = r2;
   const Register heapnumbermap = r5;
@@ -3544,7 +3666,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
 
     // Add +0 to convert -0 to +0.
     __ vadd(double_scratch, double_base, kDoubleRegZero);
-    __ vmov(double_result, 1);
+    __ vmov(double_result, 1.0);
     __ vsqrt(double_scratch, double_scratch);
     __ vdiv(double_result, double_result, double_scratch);
     __ jmp(&done);
@@ -3901,8 +4023,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Save callee-saved registers (incl. cp and fp), sp, and lr
   __ stm(db_w, sp, kCalleeSaved | lr.bit());
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     // Save callee-saved vfp registers.
     __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
     // Set up the reserved register for 0.0.
@@ -3917,7 +4039,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 
   // Set up argv in r4.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP2)) {
     offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
   }
   __ ldr(r4, MemOperand(sp, offset_to_argv));
@@ -4055,8 +4177,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   }
 #endif
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
     // Restore callee-saved vfp registers.
     __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
   }
@@ -4385,14 +4507,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
 
   // r0 = address of new object(s) (tagged)
   // r2 = argument count (tagged)
-  // Get the arguments boilerplate from the current (global) context into r4.
+  // Get the arguments boilerplate from the current native context into r4.
   const int kNormalOffset =
       Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
   const int kAliasedOffset =
       Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
 
-  __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+  __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
   __ cmp(r1, Operand::Zero());
   __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
   __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@@ -4565,9 +4687,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
                         static_cast<AllocationFlags>(TAG_OBJECT |
                                                      SIZE_IN_WORDS));
 
-  // Get the arguments boilerplate from the current (global) context.
-  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+  // Get the arguments boilerplate from the current native context.
+  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
   __ ldr(r4, MemOperand(r4, Context::SlotOffset(
       Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
@@ -4696,7 +4818,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(r2, r2, Operand(2));  // r2 was a smi.
   // Check that the static offsets vector buffer is large enough.
-  __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
   __ b(hi, &runtime);
 
   // r2: Number of capture registers
@@ -5082,10 +5204,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   // Set empty properties FixedArray.
   // Set elements to point to FixedArray allocated right after the JSArray.
   // Interleave operations for better latency.
-  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
   __ add(r3, r0, Operand(JSRegExpResult::kSize));
   __ mov(r4, Operand(factory->empty_fixed_array()));
-  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
   __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
   __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -5191,7 +5313,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
     __ b(ne, &call);
     // Patch the receiver on the stack with the global receiver object.
-    __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r3,
+           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
     __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
     __ str(r3, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
@@ -6583,8 +6706,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or VFP3 is unsupported.
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
 
     // Load left and right operand
     __ sub(r2, r1, Operand(kHeapObjectTag));
@@ -7131,6 +7254,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
   // StoreArrayLiteralElementStub::Generate
   { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
+  // FastNewClosureStub::Generate
+  { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
@@ -7431,6 +7556,65 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ Ret();
 }
 
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+  if (entry_hook_ != NULL) {
+    ProfileEntryHookStub stub;
+    __ push(lr);
+    __ CallStub(&stub);
+    __ pop(lr);
+  }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+  // The entry hook is a "push lr" instruction, followed by a call.
+  const int32_t kReturnAddressDistanceFromFunctionStart =
+      Assembler::kCallTargetAddressOffset + Assembler::kInstrSize;
+
+  // Save live volatile registers.
+  __ Push(lr, r5, r1);
+  const int32_t kNumSavedRegs = 3;
+
+  // Compute the function's address for the first argument.
+  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
+
+  // The caller's return address is above the saved temporaries.
+  // Grab that for the second argument to the hook.
+  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+  // Align the stack if necessary.
+  int frame_alignment = masm->ActivationFrameAlignment();
+  if (frame_alignment > kPointerSize) {
+    __ mov(r5, sp);
+    ASSERT(IsPowerOf2(frame_alignment));
+    __ and_(sp, sp, Operand(-frame_alignment));
+  }
+
+#if defined(V8_HOST_ARCH_ARM)
+  __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+  __ ldr(ip, MemOperand(ip));
+#else
+  // Under the simulator we need to indirect the entry hook through a
+  // trampoline function at a known address.
+  Address trampoline_address = reinterpret_cast<Address>(
+      reinterpret_cast<intptr_t>(EntryHookTrampoline));
+  ApiFunction dispatcher(trampoline_address);
+  __ mov(ip, Operand(ExternalReference(&dispatcher,
+                                       ExternalReference::BUILTIN_CALL,
+                                       masm->isolate())));
+#endif
+  __ Call(ip);
+
+  // Restore the stack pointer if needed.
+  if (frame_alignment > kPointerSize) {
+    __ mov(sp, r5);
+  }
+
+  __ Pop(lr, r5, r1);
+  __ Ret();
+}
+
 #undef __
 
 } }  // namespace v8::internal
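Note (illustration, not part of the patch): the change above applies one pattern throughout the stubs — probe the CPU once with CpuFeatures::IsSupported(VFP2), open a CpuFeatures::Scope for the guarded block, and otherwise fall back to an integer-only path, as in the new non-VFP branch of ToBooleanStub. The standalone sketch below mirrors that structure in plain C++ so it can be compiled on its own; supports_vfp2(), to_boolean_fp() and to_boolean_bits() are hypothetical stand-ins, not V8 APIs, and only the IEEE-754 bit masks are taken as given.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for CpuFeatures::IsSupported(VFP2); a real ARM port
// would query the hardware (e.g. via HWCAPs). Hard-wired so the sketch runs.
static bool supports_vfp2() { return true; }

// "VFP" path: rely on hardware floating point directly.
// false for +0, -0 and NaN, true for everything else.
static bool to_boolean_fp(double v) {
  return v == v && v != 0.0;
}

// Integer-only fallback, mirroring the shape of the non-VFP branch added to
// ToBooleanStub: clear the sign bit, then inspect exponent and mantissa.
static bool to_boolean_bits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  const uint64_t kSignMask     = 1ULL << 63;
  const uint64_t kExponentMask = 0x7FFULL << 52;
  const uint64_t kMantissaMask = (1ULL << 52) - 1;
  bits &= ~kSignMask;            // -0 is treated like +0.
  if (bits == 0) return false;   // +0 / -0 -> false.
  bool is_nan = (bits & kExponentMask) == kExponentMask &&
                (bits & kMantissaMask) != 0;
  return !is_nan;                // NaN -> false, anything else -> true.
}

int main() {
  const double samples[] = {0.0, -0.0, 1.5, -2.0, std::nan("")};
  for (double v : samples) {
    // Dispatch the same way the stubs do: fast path if the feature is
    // present, integer fallback otherwise.
    bool result = supports_vfp2() ? to_boolean_fp(v) : to_boolean_bits(v);
    std::printf("%g -> %s\n", v, result ? "true" : "false");
  }
  return 0;
}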