|
|
@@ -262,63 +262,23 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
 class FloatingPointHelper : public AllStatic {
  public:
-  // Code pattern for loading a floating point value. Input value must
-  // be either a smi or a heap number object (fp value). Requirements:
-  // operand on TOS+1. Returns operand as floating point number on FPU
-  // stack.
-  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
-
-  // Code pattern for loading a floating point value. Input value must
-  // be either a smi or a heap number object (fp value). Requirements:
-  // operand in src register. Returns operand as floating point number
-  // in XMM register. May destroy src register.
-  static void LoadFloatOperand(MacroAssembler* masm,
-                               Register src,
-                               XMMRegister dst);
-
-  // Code pattern for loading a possible number into a XMM register.
-  // If the contents of src is not a number, control branches to
-  // the Label not_number. If contents of src is a smi or a heap number
-  // object (fp value), it is loaded into the XMM register as a double.
-  // The register src is not changed, and src may not be kScratchRegister.
-  static void LoadFloatOperand(MacroAssembler* masm,
-                               Register src,
-                               XMMRegister dst,
-                               Label *not_number);
-
-  // Code pattern for loading floating point values. Input values must
-  // be either smi or heap number objects (fp values). Requirements:
-  // operand_1 in rdx, operand_2 in rax; Returns operands as
-  // floating point numbers in XMM registers.
-  static void LoadFloatOperands(MacroAssembler* masm,
-                                XMMRegister dst1,
-                                XMMRegister dst2);
-
-  // Similar to LoadFloatOperands, assumes that the operands are smis.
-  static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
-                                        XMMRegister dst1,
-                                        XMMRegister dst2);
-
-  // Code pattern for loading floating point values onto the fp stack.
-  // Input values must be either smi or heap number objects (fp values).
-  // Requirements:
-  // Register version: operands in registers lhs and rhs.
-  // Stack version: operands on TOS+1 and TOS+2.
-  // Returns operands as floating point numbers on fp stack.
-  static void LoadFloatOperands(MacroAssembler* masm,
-                                Register lhs,
-                                Register rhs);
-
-  // Test if operands are smi or number objects (fp). Requirements:
-  // operand_1 in rax, operand_2 in rdx; falls through on float or smi
-  // operands, jumps to the non_float label otherwise.
-  static void CheckNumberOperands(MacroAssembler* masm,
-                                  Label* non_float);
+  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+  // If the operands are not both numbers, jump to not_numbers.
+  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+  // NumberOperands assumes both are smis or heap numbers.
+  static void LoadSSE2SmiOperands(MacroAssembler* masm);
+  static void LoadSSE2NumberOperands(MacroAssembler* masm);
+  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+                                      Label* not_numbers);
 
   // Takes the operands in rdx and rax and loads them as integers in rax
   // and rcx.
   static void LoadAsIntegers(MacroAssembler* masm,
-                             Label* operand_conversion_failure);
+                             Label* operand_conversion_failure,
+                             Register heap_number_map);
+  // As above, but we know the operands to be numbers. In that case,
+  // conversion can't fail.
+  static void LoadNumbersAsIntegers(MacroAssembler* masm);
 };
|
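Everything below leans on V8's x64 smi layout: the 32-bit payload lives in the upper half of the 64-bit word, so untagging (SmiToInteger32) is a single arithmetic shift, and plain 32-bit instructions (cmpl, addl, leal) can replace the smi macros. A minimal standalone sketch of that representation, assuming kSmiShift == 32 as on x64 of this era (illustration only, not V8 source):

#include <cassert>
#include <cstdint>

// Assumed x64 smi layout: payload in bits 63..32, tag bits 31..0 are zero.
constexpr int kSmiShift = 32;

uint64_t Integer32ToSmi(int32_t value) {
  // Tagging: shift the payload into the upper half of the word.
  return static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift;
}

int32_t SmiToInteger32(uint64_t smi) {
  // Untagging: one arithmetic shift right, as in the SmiToInteger32 macro.
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> kSmiShift);
}

int main() {
  assert(SmiToInteger32(Integer32ToSmi(-7)) == -7);
  assert(Integer32ToSmi(0) == 0);  // Smi zero is all-zero bits, so testl/testq both see zero.
  return 0;
}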
@@ -4429,7 +4389,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
     // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
     __ addsd(xmm2, xmm3);
     // xmm2 now has 0.5.
-    __ comisd(xmm2, xmm1);
+    __ ucomisd(xmm2, xmm1);
     call_runtime.Branch(not_equal);
 
     // Calculates square root.
|
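This is the first of several comisd to ucomisd substitutions in the patch. Both instructions set ZF/PF/CF the same way; ucomisd merely avoids signaling on quiet NaNs. What the surrounding code relies on is that an unordered result (either operand NaN) sets the parity flag, hence the parity_even bail-outs elsewhere. A tiny standalone illustration of that predicate in plain C++ (not V8 code):

#include <cmath>
#include <cstdio>

// Mirrors the j(parity_even, ...) guard: an unordered SSE2 compare
// (either operand NaN) must not be treated as equal or not-equal.
bool unordered(double a, double b) { return std::isunordered(a, b); }

int main() {
  double nan = std::nan("");
  std::printf("0.5 vs 0.5: unordered=%d\n", unordered(0.5, 0.5));  // prints 0
  std::printf("0.5 vs NaN: unordered=%d\n", unordered(0.5, nan));  // prints 1
  return 0;
}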
@@ -4769,8 +4729,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &first_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
@@ -4791,8 +4751,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &second_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
@@ -4814,50 +4774,50 @@ void DeferredSearchCache::Generate() {
   // cache miss this optimization would hardly matter much.
 
   // Check if we could add new entry to cache.
-  __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiCompare(rbx, r9);
+  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(r9,
+                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+  __ cmpl(rbx, r9);
   __ j(greater, &add_new_entry);
 
   // Check if we could evict entry after finger.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiToInteger32(rdx, rdx);
-  __ SmiToInteger32(rbx, rbx);
-  __ addq(rdx, kEntrySizeImm);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ addl(rdx, kEntrySizeImm);
   Label forward;
-  __ cmpq(rbx, rdx);
+  __ cmpl(rbx, rdx);
   __ j(greater, &forward);
   // Need to wrap over the cache.
   __ movl(rdx, kEntriesIndexImm);
   __ bind(&forward);
-  __ Integer32ToSmi(r9, rdx);
+  __ movl(r9, rdx);
   __ jmp(&update_cache);
 
   __ bind(&add_new_entry);
-  // r9 holds cache size as smi.
-  __ SmiToInteger32(rdx, r9);
-  __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+  // r9 holds cache size as int32.
+  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
 
   // Update the cache itself.
-  // rdx holds the index as int.
-  // r9 holds the index as smi.
+  // r9 holds the index as int32.
   __ bind(&update_cache);
   __ pop(rbx);  // restore the key
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
   // Store key.
-  __ movq(ArrayElement(rcx, rdx), rbx);
+  __ movq(ArrayElement(rcx, r9), rbx);
   __ RecordWrite(rcx, 0, rbx, r9);
 
   // Store value.
   __ pop(rcx);  // restore the cache.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
-  __ movq(r9, rdx);
-  __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ incl(rdx);
   // Backup rax, because the RecordWrite macro clobbers its arguments.
   __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
+  __ movq(ArrayElement(rcx, rdx), rax);
+  __ RecordWrite(rcx, 0, rbx, rdx);
 
   if (!dst_.is(rax)) {
     __ movq(dst_, rax);
|
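The rewritten cache update keeps the finger and size untagged in registers and re-tags only when writing back through Integer32ToSmiField. A rough C++ model of the append-or-evict slot choice as I read this hunk (constants and types invented for illustration; the real JSFunctionResultCache layout differs):

#include <cstdint>

// Assumed constants, mirroring the hunk in spirit: entries start after a
// small header, and each entry is a (key, value) pair of two slots.
constexpr int32_t kEntriesIndex = 2;
constexpr int32_t kEntrySize = 2;

struct Cache {
  int32_t length;  // capacity, in fixed-array slots
  int32_t size;    // slots in use, >= kEntriesIndex
  int32_t finger;  // index of the most recently used entry
};

// Pick the slot for a new (key, value) pair, as in the add_new_entry and
// eviction paths above.
int32_t SlotForNewEntry(Cache* c) {
  if (c->length > c->size) {          // cmpl(rbx, r9); j(greater, &add_new_entry)
    int32_t index = c->size;
    c->size += kEntrySize;            // leal(rbx, Operand(r9, kEntrySize))
    return index;
  }
  int32_t index = c->finger + kEntrySize;         // addl(rdx, kEntrySizeImm)
  if (index >= c->length) index = kEntriesIndex;  // wrap over the cache
  return index;
}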
@@ -6512,7 +6472,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
                         &not_numbers);
   LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
                         &not_numbers);
-  __ comisd(xmm0, xmm1);
+  __ ucomisd(xmm0, xmm1);
   // Bail out if a NaN is involved.
   not_numbers.Branch(parity_even, left_side, right_side);
@@ -8551,18 +8511,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 
   // rcx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
-  __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
-  __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+  __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
   __ j(not_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
   // Check that the number of captures fit in the static offsets vector buffer.
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
   // Check that the static offsets vector buffer is large enough.
-  __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+  __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
   __ j(above, &runtime);
 
   // rcx: RegExp data (FixedArray)
|
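The single leal replaces a smi multiply-by-two plus addq: the address form base + index*1 + 2 computes rdx + rdx + 2, which is exactly (number_of_captures + 1) * 2. For example, 3 captures gives 3 + 3 + 2 = 8 capture registers. A standalone check (illustration only):

#include <cassert>

// leal(rdx, Operand(rdx, rdx, times_1, 2)) computes rdx = rdx + rdx*1 + 2.
int capture_registers(int number_of_captures) {
  return number_of_captures + number_of_captures * 1 + 2;  // == (n + 1) * 2
}

int main() {
  for (int n = 0; n < 100; n++) assert(capture_registers(n) == (n + 1) * 2);
  return 0;
}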
@@ -8572,17 +8532,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ JumpIfSmi(rax, &runtime);
   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   __ j(NegateCondition(is_string), &runtime);
-  // Get the length of the string to rbx.
-  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
 
-  // rbx: Length of subject string as smi
-  // rcx: RegExp data (FixedArray)
-  // rdx: Number of capture registers
+  // rax: Subject string.
+  // rcx: RegExp data (FixedArray).
+  // rdx: Number of capture registers.
   // Check that the third argument is a positive smi less than the string
   // length. A negative value will be greater (unsigned comparison).
-  __ movq(rax, Operand(rsp, kPreviousIndexOffset));
-  __ JumpIfNotSmi(rax, &runtime);
-  __ SmiCompare(rax, rbx);
+  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(rbx, &runtime);
+  __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
   __ j(above_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
@@ -8600,8 +8558,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check that the last match info has space for the capture registers and the
   // additional information. Ensure no overflow in add.
   ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(rax, rax);
+  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   __ cmpl(rdx, rax);
   __ j(greater, &runtime);
@@ -8674,8 +8631,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // r12: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
-  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
-  __ SmiToInteger64(rbx, rbx);  // Previous index from smi.
+  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
 
   // rax: subject string
   // rbx: previous index
@@ -8787,10 +8743,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ bind(&success);
   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rax,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rax, rax, times_1, 2));
 
   // rdx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -8877,9 +8833,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
 
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
-  __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  // Divide smi tagged length by two.
-  __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+  __ SmiToInteger32(
+      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shrl(mask, Immediate(1));
   __ subq(mask, Immediate(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
|
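The untagged length is halved because each cache entry spans two fixed-array slots (number, string); subtracting one then yields a power-of-two mask. Worked example: a 64-slot backing store holds 32 entries, mask 31, so the entry index is hash & 31. A minimal sketch (not the stub itself):

#include <cassert>
#include <cstdint>

// shrl(mask, 1); subq(mask, 1): two fixed-array slots per cache entry,
// and the entry count is a power of two.
uint32_t EntryIndex(uint32_t hash, uint32_t fixed_array_length) {
  uint32_t mask = (fixed_array_length >> 1) - 1;
  return hash & mask;
}

int main() {
  assert(EntryIndex(0x12345678u, 64) == (0x12345678u & 31u));
  return 0;
}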
@@ -8909,15 +8865,14 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
     CpuFeatures::Scope fscope(SSE2);
     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
-    __ comisd(xmm0, xmm1);
+    __ ucomisd(xmm0, xmm1);
     __ j(parity_even, not_found);  // Bail out if NaN is involved.
     __ j(not_equal, not_found);  // The cache did not contain this value.
     __ jmp(&load_result_from_cache);
   }
 
   __ bind(&is_smi);
-  __ movq(scratch, object);
-  __ SmiToInteger32(scratch, scratch);
+  __ SmiToInteger32(scratch, object);
   GenerateConvertHashCodeToIndex(masm, scratch, mask);
 
   Register index = scratch;
@@ -9111,12 +9066,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
   if (include_number_compare_) {
     Label non_number_comparison;
    Label unordered;
-    FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
-                                          &non_number_comparison);
-    FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
-                                          &non_number_comparison);
-
-    __ comisd(xmm0, xmm1);
+    FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+    __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
     __ j(parity_even, &unordered);
@@ -9344,29 +9295,30 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ j(equal, &adaptor_frame);
 
   // Get the length from the frame.
-  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
   __ jmp(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
-  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ SmiToInteger32(rcx,
+                    Operand(rdx,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+  // Space on stack must already hold a smi.
+  __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
   // Do not clobber the length index for the indexing operation since
   // it is used compute the size for allocation later.
-  SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
-  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ testq(rcx, rcx);
+  __ testl(rcx, rcx);
   __ j(zero, &add_arguments_object);
-  index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
-  __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+  __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+  __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -9378,10 +9330,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(rdi, Operand(rdi, offset));
 
   // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ movq(kScratchRegister, FieldOperand(rdi, i));
-    __ movq(FieldOperand(rax, i), kScratchRegister);
-  }
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+  __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+  __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+  __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+  __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+  __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
 
   // Setup the callee in-object property.
   ASSERT(Heap::arguments_callee_index == 0);
@@ -9395,7 +9350,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ testq(rcx, rcx);
+  __ SmiTest(rcx);
   __ j(zero, &done);
 
   // Get the parameters pointer from the stack and untag the length.
@@ -9417,7 +9372,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
   __ addq(rdi, Immediate(kPointerSize));
   __ subq(rdx, Immediate(kPointerSize));
-  __ decq(rcx);
+  __ decl(rcx);
   __ j(not_zero, &loop);
 
   // Return and remove the on-stack parameters.
@@ -9968,86 +9923,73 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
 }
 
 
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register number) {
-  Label load_smi, done;
-
-  __ JumpIfSmi(number, &load_smi);
-  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(number, number);
-  __ push(number);
-  __ fild_s(Operand(rsp, 0));
-  __ pop(number);
-
-  __ bind(&done);
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
 }
 
 
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register src,
-                                           XMMRegister dst) {
-  Label load_smi, done;
-
-  __ JumpIfSmi(src, &load_smi);
-  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(src, src);
-  __ cvtlsi2sd(dst, src);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register src,
-                                           XMMRegister dst,
-                                           Label* not_number) {
-  Label load_smi, done;
-  ASSERT(!src.is(kScratchRegister));
-  __ JumpIfSmi(src, &load_smi);
-  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-  __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
-  __ j(not_equal, not_number);
-  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(kScratchRegister, src);
-  __ cvtlsi2sd(dst, kScratchRegister);
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+  // Load operand in rdx into xmm0.
+  __ JumpIfSmi(rdx, &load_smi_rdx);
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  // Load operand in rax into xmm1.
+  __ JumpIfSmi(rax, &load_smi_rax);
+  __ bind(&load_nonsmi_rax);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_rdx);
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+  __ bind(&load_smi_rax);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
 
   __ bind(&done);
 }
 
 
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
-                                            XMMRegister dst1,
-                                            XMMRegister dst2) {
-  __ movq(kScratchRegister, rdx);
-  LoadFloatOperand(masm, kScratchRegister, dst1);
-  __ movq(kScratchRegister, rax);
-  LoadFloatOperand(masm, kScratchRegister, dst2);
-}
-
-
-void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
-                                                    XMMRegister dst1,
-                                                    XMMRegister dst2) {
-  __ SmiToInteger32(kScratchRegister, rdx);
-  __ cvtlsi2sd(dst1, kScratchRegister);
-  __ SmiToInteger32(kScratchRegister, rax);
-  __ cvtlsi2sd(dst2, kScratchRegister);
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+                                                  Label* not_numbers) {
+  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+  // Load operand in rdx into xmm0, or branch to not_numbers.
+  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+  __ JumpIfSmi(rdx, &load_smi_rdx);
+  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  // Load operand in rax into xmm1, or branch to not_numbers.
+  __ JumpIfSmi(rax, &load_smi_rax);
+
+  __ bind(&load_nonsmi_rax);
+  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+  __ j(not_equal, not_numbers);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_rdx);
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+  __ bind(&load_smi_rax);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
+  __ bind(&done);
 }
 
 
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         Label* conversion_failure) {
+                                         Label* conversion_failure,
+                                         Register heap_number_map) {
   // Check float operands.
   Label arg1_is_object, check_undefined_arg1;
   Label arg2_is_object, check_undefined_arg2;
|
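LoadSSE2UnknownOperands folds the old per-register LoadFloatOperand helpers into one routine over the fixed rdx/rax inputs: a smi is untagged and converted with cvtlsi2sd, while a heap object is map-checked against the heap-number map before its double payload is loaded. A standalone C++ model of that dispatch over a mocked tagged value (all types here are invented for illustration; V8's real object layout is more involved):

#include <cstdint>
#include <stdexcept>

// Mock tagged value: smis carry their payload in the upper 32 bits (tag 0);
// heap numbers are represented directly for illustration.
struct Tagged {
  bool is_smi;
  int64_t smi_bits;     // valid when is_smi
  bool is_heap_number;  // stand-in for the map check
  double heap_value;    // valid when is_heap_number
};

// Mirrors the routine for one operand: returns the double, or "branches"
// to not_numbers by throwing.
double LoadOperand(const Tagged& v) {
  if (v.is_smi) {
    int32_t untagged = static_cast<int32_t>(v.smi_bits >> 32);  // SmiToInteger32
    return static_cast<double>(untagged);                       // cvtlsi2sd
  }
  if (!v.is_heap_number) throw std::runtime_error("not_numbers");  // j(not_equal, ...)
  return v.heap_value;  // movsd(xmm, FieldOperand(..., kValueOffset))
}

int main() {
  Tagged smi{true, int64_t{7} << 32, false, 0.0};
  Tagged num{false, 0, true, 2.5};
  return LoadOperand(smi) + LoadOperand(num) == 9.5 ? 0 : 1;
}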
@@ -10065,8 +10007,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
   __ jmp(&load_arg2);
 
   __ bind(&arg1_is_object);
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg1);
   // Get the untagged integer version of the edx heap number in rcx.
   IntegerConvert(masm, rdx, rdx);
@@ -10087,8 +10028,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
   __ jmp(&done);
 
   __ bind(&arg2_is_object);
-  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg2);
   // Get the untagged integer version of the eax heap number in ecx.
   IntegerConvert(masm, rcx, rax);
@@ -10097,51 +10037,35 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
 }
 
 
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
-                                            Register lhs,
-                                            Register rhs) {
-  Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
-  __ JumpIfSmi(lhs, &load_smi_lhs);
-  __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
-  __ bind(&done_load_lhs);
-
-  __ JumpIfSmi(rhs, &load_smi_rhs);
-  __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi_lhs);
-  __ SmiToInteger64(kScratchRegister, lhs);
-  __ push(kScratchRegister);
-  __ fild_d(Operand(rsp, 0));
-  __ pop(kScratchRegister);
-  __ jmp(&done_load_lhs);
-
-  __ bind(&load_smi_rhs);
-  __ SmiToInteger64(kScratchRegister, rhs);
-  __ push(kScratchRegister);
-  __ fild_d(Operand(rsp, 0));
-  __ pop(kScratchRegister);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
-                                              Label* non_float) {
-  Label test_other, done;
-  // Test if both operands are numbers (heap_numbers or smis).
-  // If not, jump to label non_float.
-  __ JumpIfSmi(rdx, &test_other);  // argument in rdx is OK
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
-  __ j(not_equal, non_float);  // The argument in rdx is not a number.
-
-  __ bind(&test_other);
-  __ JumpIfSmi(rax, &done);  // argument in rax is OK
-  __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
-  __ j(not_equal, non_float);  // The argument in rax is not a number.
-
-  // Fall-through: Both operands are numbers.
-  __ bind(&done);
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+  if (FLAG_debug_code) {
+    // Both arguments can not be smis. That case is handled by smi-only code.
+    Label ok;
+    __ JumpIfNotBothSmi(rax, rdx, &ok);
+    __ Abort("Both arguments smi but not handled by smi-code.");
+    __ bind(&ok);
+  }
+  // Check float operands.
+  Label done;
+  Label rax_is_object;
+  Label rdx_is_object;
+
+  __ JumpIfNotSmi(rdx, &rdx_is_object);
+  __ SmiToInteger32(rdx, rdx);
+
+  __ bind(&rax_is_object);
+  IntegerConvert(masm, rcx, rax);  // Uses rdi, rcx and rbx.
+  __ jmp(&done);
+
+  __ bind(&rdx_is_object);
+  IntegerConvert(masm, rdx, rdx);  // Uses rdi, rcx and rbx.
+  __ JumpIfNotSmi(rax, &rax_is_object);
+  __ SmiToInteger32(rcx, rax);
+
+  __ bind(&done);
+  __ movl(rax, rdx);
 }
@@ -10451,15 +10375,15 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   }
   // left is rdx, right is rax.
   __ AllocateHeapNumber(rbx, rcx, slow);
-  FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
+  FloatingPointHelper::LoadSSE2SmiOperands(masm);
   switch (op_) {
-    case Token::ADD: __ addsd(xmm4, xmm5); break;
-    case Token::SUB: __ subsd(xmm4, xmm5); break;
-    case Token::MUL: __ mulsd(xmm4, xmm5); break;
-    case Token::DIV: __ divsd(xmm4, xmm5); break;
+    case Token::ADD: __ addsd(xmm0, xmm1); break;
+    case Token::SUB: __ subsd(xmm0, xmm1); break;
+    case Token::MUL: __ mulsd(xmm0, xmm1); break;
+    case Token::DIV: __ divsd(xmm0, xmm1); break;
     default: UNREACHABLE();
   }
-  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
+  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
   __ movq(rax, rbx);
   GenerateReturn(masm);
 }
@@ -10522,22 +10446,23 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       Label not_floats;
       // rax: y
      // rdx: x
-      if (static_operands_type_.IsNumber() && FLAG_debug_code) {
-        // Assert at runtime that inputs are only numbers.
-        __ AbortIfNotNumber(rdx);
-        __ AbortIfNotNumber(rax);
+      ASSERT(!static_operands_type_.IsSmi());
+      if (static_operands_type_.IsNumber()) {
+        if (FLAG_debug_code) {
+          // Assert at runtime that inputs are only numbers.
+          __ AbortIfNotNumber(rdx);
+          __ AbortIfNotNumber(rax);
+        }
+        FloatingPointHelper::LoadSSE2NumberOperands(masm);
       } else {
-        FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
+        FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
       }
-      // Fast-case: Both operands are numbers.
-      // xmm4 and xmm5 are volatile XMM registers.
-      FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
-
       switch (op_) {
-        case Token::ADD: __ addsd(xmm4, xmm5); break;
-        case Token::SUB: __ subsd(xmm4, xmm5); break;
-        case Token::MUL: __ mulsd(xmm4, xmm5); break;
-        case Token::DIV: __ divsd(xmm4, xmm5); break;
+        case Token::ADD: __ addsd(xmm0, xmm1); break;
+        case Token::SUB: __ subsd(xmm0, xmm1); break;
+        case Token::MUL: __ mulsd(xmm0, xmm1); break;
+        case Token::DIV: __ divsd(xmm0, xmm1); break;
         default: UNREACHABLE();
       }
       // Allocate a heap number, if needed.
@@ -10572,7 +10497,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
           break;
         default: UNREACHABLE();
       }
-      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
       GenerateReturn(masm);
       __ bind(&not_floats);
       if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
@@ -10597,34 +10522,52 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     case Token::SAR:
     case Token::SHL:
     case Token::SHR: {
-      Label skip_allocation, non_smi_result;
-      FloatingPointHelper::LoadAsIntegers(masm, &call_runtime);
+      Label skip_allocation, non_smi_shr_result;
+      Register heap_number_map = r9;
+      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+      if (static_operands_type_.IsNumber()) {
+        if (FLAG_debug_code) {
+          // Assert at runtime that inputs are only numbers.
+          __ AbortIfNotNumber(rdx);
+          __ AbortIfNotNumber(rax);
+        }
+        FloatingPointHelper::LoadNumbersAsIntegers(masm);
+      } else {
+        FloatingPointHelper::LoadAsIntegers(masm,
+                                            &call_runtime,
+                                            heap_number_map);
+      }
       switch (op_) {
        case Token::BIT_OR:  __ orl(rax, rcx); break;
        case Token::BIT_AND: __ andl(rax, rcx); break;
        case Token::BIT_XOR: __ xorl(rax, rcx); break;
        case Token::SAR: __ sarl_cl(rax); break;
        case Token::SHL: __ shll_cl(rax); break;
-        case Token::SHR: __ shrl_cl(rax); break;
+        case Token::SHR: {
+          __ shrl_cl(rax);
+          // Check if result is negative. This can only happen for a shift
+          // by zero.
+          __ testl(rax, rax);
+          __ j(negative, &non_smi_shr_result);
+          break;
+        }
        default: UNREACHABLE();
       }
-      if (op_ == Token::SHR) {
-        // Check if result is negative. This can only happen for a shift
-        // by zero, which also doesn't update the sign flag.
-        __ testl(rax, rax);
-        __ j(negative, &non_smi_result);
-      }
-      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
-      // Tag smi result, if possible, and return.
+
+      STATIC_ASSERT(kSmiValueSize == 32);
+      // Tag smi result and return.
       __ Integer32ToSmi(rax, rax);
       GenerateReturn(masm);
 
-      // All ops except SHR return a signed int32 that we load in
-      // a HeapNumber.
-      if (op_ != Token::SHR && non_smi_result.is_linked()) {
-        __ bind(&non_smi_result);
+      // All bit-ops except SHR return a signed int32 that can be
+      // returned immediately as a smi.
+      // We might need to allocate a HeapNumber if we shift a negative
+      // number right by zero (i.e., convert to UInt32).
+      if (op_ == Token::SHR) {
+        ASSERT(non_smi_shr_result.is_linked());
+        __ bind(&non_smi_shr_result);
         // Allocate a heap number if needed.
-        __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
+        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
@@ -10635,22 +10578,33 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
             __ JumpIfNotSmi(rax, &skip_allocation);
             // Fall through!
           case NO_OVERWRITE:
-            __ AllocateHeapNumber(rax, rcx, &call_runtime);
+            // Allocate heap number in new space.
+            // Not using AllocateHeapNumber macro in order to reuse
+            // already loaded heap_number_map.
+            __ AllocateInNewSpace(HeapNumber::kSize,
+                                  rax,
+                                  rcx,
+                                  no_reg,
+                                  &call_runtime,
+                                  TAG_OBJECT);
+            // Set the map.
+            if (FLAG_debug_code) {
+              __ AbortIfNotRootValue(heap_number_map,
+                                     Heap::kHeapNumberMapRootIndex,
+                                     "HeapNumberMap register clobbered.");
+            }
+            __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+                    heap_number_map);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
         }
         // Store the result in the HeapNumber and return.
-        __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-        __ fild_s(Operand(rsp, 1 * kPointerSize));
-        __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+        __ cvtqsi2sd(xmm0, rbx);
+        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
         GenerateReturn(masm);
       }
-
-      // SHR should return uint32 - go to runtime for non-smi/negative result.
-      if (op_ == Token::SHR) {
-        __ bind(&non_smi_result);
-      }
       break;
     }
|
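The new SHR path stores the shift result with movl, which zero-extends into the full 64-bit register, so the signed 64-bit convert cvtqsi2sd yields the correct non-negative double even when the uint32 result has its top bit set; the old path went to the runtime instead. A standalone check of that conversion identity (illustration only):

#include <cassert>
#include <cstdint>

// movl zero-extends: the uint32 result becomes a non-negative int64,
// so a signed 64-bit to double convert (cvtqsi2sd) is exact and correct.
double Uint32ToDouble(uint32_t x) {
  int64_t widened = static_cast<int64_t>(x);  // always >= 0
  return static_cast<double>(widened);
}

int main() {
  assert(Uint32ToDouble(0x80000000u) == 2147483648.0);  // not -2147483648.0
  assert(Uint32ToDouble(0xFFFFFFFFu) == 4294967295.0);
  return 0;
}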
@@ -10683,7 +10637,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label not_strings, both_strings, not_string1, string1, string1_smi2;
 
   // If this stub has already generated FP-specific code then the arguments
-  // are already in rdx, rax
+  // are already in rdx and rax.
   if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
     GenerateLoadArguments(masm);
   }
@@ -10832,19 +10786,13 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ push(rax);
 
   // Push this stub's key.
-  __ movq(rax, Immediate(MinorKey()));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(MinorKey()));
 
   // Although the operation and the type info are encoded into the key,
   // the encoding is opaque, so push them too.
-  __ movq(rax, Immediate(op_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(op_));
 
-  __ movq(rax, Immediate(runtime_operands_type_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(runtime_operands_type_));
 
   __ push(rcx);
@@ -11212,16 +11160,17 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // If result is not supposed to be flat, allocate a cons string object. If
   // both strings are ascii the result is an ascii cons string.
   // rax: first string
-  // ebx: length of resulting flat string
+  // rbx: length of resulting flat string
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
-  Label non_ascii, allocated;
+  Label non_ascii, allocated, ascii_data;
   __ movl(rcx, r8);
   __ and_(rcx, r9);
   ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ testl(rcx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
+  __ bind(&ascii_data);
   // Allocate an acsii cons string.
   __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ bind(&allocated);
@@ -11235,6 +11184,18 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
+  // At least one of the strings is two-byte. Check whether it happens
+  // to contain only ascii characters.
+  // rcx: first instance type AND second instance type.
+  // r8: first instance type.
+  // r9: second instance type.
+  __ testb(rcx, Immediate(kAsciiDataHintMask));
+  __ j(not_zero, &ascii_data);
+  __ xor_(r8, r9);
+  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+  __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+  __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
|
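The added fallback still builds an ascii cons string when one input is nominally two-byte but its data is known ascii: the first test catches the case where both instance types carry the ascii-data hint, and the xor/andb/cmpb sequence catches the mixed case, where the compare hits only if the two instance types differ in both bits. A small model of the predicate (bit values invented for illustration; the real flags live in the instance-type encoding):

#include <cassert>

// Illustrative stand-ins for the instance-type bits used by this hunk.
constexpr int kAsciiStringTag = 0x4;
constexpr int kAsciiDataHintTag = 0x8;

bool CanMakeAsciiCons(int type1, int type2) {
  if ((type1 & type2) & kAsciiDataHintTag) return true;  // both hinted ascii
  // xor_/andb/cmpb: equal only if the types differ in *both* bits, i.e. one
  // is an ascii string and the other is two-byte data hinted as ascii.
  int mask = kAsciiStringTag | kAsciiDataHintTag;
  return ((type1 ^ type2) & mask) == mask;
}

int main() {
  assert(CanMakeAsciiCons(kAsciiStringTag, kAsciiDataHintTag));    // mixed case
  assert(CanMakeAsciiCons(kAsciiDataHintTag, kAsciiDataHintTag));  // both hinted
  assert(!CanMakeAsciiCons(kAsciiStringTag, 0));                   // two-byte, no hint
  return 0;
}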
@@ -11242,7 +11203,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // Handle creating a flat result. First check that both strings are not
   // external strings.
   // rax: first string
-  // ebx: length of resulting flat string as smi
+  // rbx: length of resulting flat string as smi
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of first string
@@ -11258,7 +11219,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ j(equal, &string_add_runtime);
   // Now check if both strings are ascii strings.
   // rax: first string
-  // ebx: length of resulting flat string
+  // rbx: length of resulting flat string
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string