@@ -262,63 +262,23 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
 
 class FloatingPointHelper : public AllStatic {
  public:
-  // Code pattern for loading a floating point value. Input value must
-  // be either a smi or a heap number object (fp value). Requirements:
-  // operand on TOS+1. Returns operand as floating point number on FPU
-  // stack.
-  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
-
-  // Code pattern for loading a floating point value. Input value must
-  // be either a smi or a heap number object (fp value). Requirements:
-  // operand in src register. Returns operand as floating point number
-  // in XMM register. May destroy src register.
-  static void LoadFloatOperand(MacroAssembler* masm,
-                               Register src,
-                               XMMRegister dst);
-
-  // Code pattern for loading a possible number into a XMM register.
-  // If the contents of src is not a number, control branches to
-  // the Label not_number. If contents of src is a smi or a heap number
-  // object (fp value), it is loaded into the XMM register as a double.
-  // The register src is not changed, and src may not be kScratchRegister.
-  static void LoadFloatOperand(MacroAssembler* masm,
-                               Register src,
-                               XMMRegister dst,
-                               Label *not_number);
-
-  // Code pattern for loading floating point values. Input values must
-  // be either smi or heap number objects (fp values). Requirements:
-  // operand_1 in rdx, operand_2 in rax; Returns operands as
-  // floating point numbers in XMM registers.
-  static void LoadFloatOperands(MacroAssembler* masm,
-                                XMMRegister dst1,
-                                XMMRegister dst2);
-
-  // Similar to LoadFloatOperands, assumes that the operands are smis.
-  static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
-                                        XMMRegister dst1,
-                                        XMMRegister dst2);
-
-  // Code pattern for loading floating point values onto the fp stack.
-  // Input values must be either smi or heap number objects (fp values).
-  // Requirements:
-  // Register version: operands in registers lhs and rhs.
-  // Stack version: operands on TOS+1 and TOS+2.
-  // Returns operands as floating point numbers on fp stack.
-  static void LoadFloatOperands(MacroAssembler* masm,
-                                Register lhs,
-                                Register rhs);
-
-  // Test if operands are smi or number objects (fp). Requirements:
-  // operand_1 in rax, operand_2 in rdx; falls through on float or smi
-  // operands, jumps to the non_float label otherwise.
-  static void CheckNumberOperands(MacroAssembler* masm,
-                                  Label* non_float);
+  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+  // If the operands are not both numbers, jump to not_numbers.
+  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+  // NumberOperands assumes both are smis or heap numbers.
+  static void LoadSSE2SmiOperands(MacroAssembler* masm);
+  static void LoadSSE2NumberOperands(MacroAssembler* masm);
+  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+                                      Label* not_numbers);
 
   // Takes the operands in rdx and rax and loads them as integers in rax
   // and rcx.
   static void LoadAsIntegers(MacroAssembler* masm,
-                             Label* operand_conversion_failure);
+                             Label* operand_conversion_failure,
+                             Register heap_number_map);
+  // As above, but we know the operands to be numbers. In that case,
+  // conversion can't fail.
+  static void LoadNumbersAsIntegers(MacroAssembler* masm);
 };
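The new interface leans on the x64 smi layout in which the 32-bit payload occupies the upper half of the tagged 64-bit word. As a point of reference for the SmiToInteger32 / Integer32ToSmiField pairs used throughout this patch, here is a minimal C++ model of that layout (illustrative only, not V8's actual Smi implementation):

    #include <cstdint>
    // Tagging: payload in the high 32 bits, low (tag) half zero.
    int64_t Int32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // what Integer32ToSmi emits
    }
    int32_t SmiToInt32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);    // what SmiToInteger32 emits
    }

Because the payload is a full 32 bits, any int32 value is representable as a smi, which is what lets several hunks below drop range checks before tagging.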
@@ -4429,7 +4389,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
     // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
     __ addsd(xmm2, xmm3);
     // xmm2 now has 0.5.
-    __ comisd(xmm2, xmm1);
+    __ ucomisd(xmm2, xmm1);
     call_runtime.Branch(not_equal);
 
     // Calculates square root.
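Context for the comisd-to-ucomisd swaps in this patch: both instructions set ZF/PF/CF identically, including PF on an unordered (NaN) result, but comisd raises an invalid-operation exception for quiet NaNs as well, while ucomisd only does so for signaling NaNs. Since every call site guards with a parity check anyway, ucomisd is the safer choice. The flag pattern, restated in portable C++ as a sketch:

    #include <cmath>
    // ucomisd a, b ; j(parity_even, &bail) ; use ZF for equality.
    bool DoublesKnownEqual(double a, double b, bool* unordered) {
      *unordered = std::isunordered(a, b);  // PF set <=> some operand is NaN
      return !*unordered && a == b;         // ZF set and PF clear
    }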
@@ -4769,8 +4729,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &first_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
@@ -4791,8 +4751,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &second_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
@@ -4814,50 +4774,50 @@ void DeferredSearchCache::Generate() {
   // cache miss this optimization would hardly matter much.
 
   // Check if we could add new entry to cache.
-  __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiCompare(rbx, r9);
+  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(r9,
+                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+  __ cmpl(rbx, r9);
   __ j(greater, &add_new_entry);
 
   // Check if we could evict entry after finger.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiToInteger32(rdx, rdx);
-  __ SmiToInteger32(rbx, rbx);
-  __ addl(rdx, kEntrySizeImm);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ addq(rdx, kEntrySizeImm);
   Label forward;
-  __ cmpq(rbx, rdx);
+  __ cmpl(rbx, rdx);
   __ j(greater, &forward);
   // Need to wrap over the cache.
   __ movl(rdx, kEntriesIndexImm);
   __ bind(&forward);
-  __ Integer32ToSmi(r9, rdx);
+  __ movl(r9, rdx);
   __ jmp(&update_cache);
 
   __ bind(&add_new_entry);
-  // r9 holds cache size as smi.
-  __ SmiToInteger32(rdx, r9);
-  __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+  // r9 holds cache size as int32.
+  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
 
   // Update the cache itself.
-  // rdx holds the index as int.
-  // r9 holds the index as smi.
+  // r9 holds the index as int32.
   __ bind(&update_cache);
   __ pop(rbx);  // restore the key
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
   // Store key.
-  __ movq(ArrayElement(rcx, rdx), rbx);
+  __ movq(ArrayElement(rcx, r9), rbx);
   __ RecordWrite(rcx, 0, rbx, r9);
 
   // Store value.
   __ pop(rcx);  // restore the cache.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
-  __ movq(r9, rdx);
-  __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ incl(rdx);
+  // Backup rax, because the RecordWrite macro clobbers its arguments.
   __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
+  __ movq(ArrayElement(rcx, rdx), rax);
+  __ RecordWrite(rcx, 0, rbx, rdx);
 
   if (!dst_.is(rax)) {
     __ movq(dst_, rax);
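For readers not steeped in JSFunctionResultCache: the structure is a FixedArray used as a ring buffer of (key, value) pairs, with a finger marking the most recently hit entry and a size field marking the used extent. A rough C++ model of the placement policy the assembly above implements (field and constant names mirror the offsets used in the code, but the layout details here are assumptions for illustration only):

    struct ResultCache {
      enum { kEntriesIndex = 2,  // first pair slot after the header fields
             kEntrySize = 2 };   // one key slot plus one value slot
      int finger;   // index of the last hit; stored in the heap as a smi
      int size;     // used slots; stored in the heap as a smi
      int length;   // capacity of the backing FixedArray
    };

    int SlotForNewEntry(const ResultCache& c) {
      if (c.length > c.size) return c.size;            // room left: append
      int index = c.finger + ResultCache::kEntrySize;  // else evict after finger
      if (index >= c.length) index = ResultCache::kEntriesIndex;  // wrap around
      return index;
    }

The hunk's real change is representational: finger and size stay smis in the heap object but are now manipulated as raw int32 values in registers, via SmiToInteger32 and Integer32ToSmiField.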
@@ -6512,7 +6472,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
                         &not_numbers);
   LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
                         &not_numbers);
-  __ comisd(xmm0, xmm1);
+  __ ucomisd(xmm0, xmm1);
   // Bail out if a NaN is involved.
   not_numbers.Branch(parity_even, left_side, right_side);
@@ -8551,18 +8511,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 
   // rcx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
-  __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
-  __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+  __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
   __ j(not_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
   // Check that the number of captures fit in the static offsets vector buffer.
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
   // Check that the static offsets vector buffer is large enough.
-  __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+  __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
   __ j(above, &runtime);
 
   // rcx: RegExp data (FixedArray)
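The lea folds the removed two-instruction sequence into one: for n captures the irregexp interface needs (n + 1) * 2 offsets, and Operand(rdx, rdx, times_1, 2) computes exactly rdx + rdx*1 + 2. In plain terms:

    // Offset-vector slots needed for a regexp with n captures: one
    // (start, end) pair for the whole match plus one pair per capture.
    int CaptureRegisterCount(int n) {
      return (n + 1) * 2;  // == n * 2 + 2 == lea rdx, [rdx + rdx*1 + 2]
    }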
@@ -8572,17 +8532,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ JumpIfSmi(rax, &runtime);
   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   __ j(NegateCondition(is_string), &runtime);
-  // Get the length of the string to rbx.
-  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
 
-  // rbx: Length of subject string as smi
-  // rcx: RegExp data (FixedArray)
-  // rdx: Number of capture registers
+  // rax: Subject string.
+  // rcx: RegExp data (FixedArray).
+  // rdx: Number of capture registers.
   // Check that the third argument is a positive smi less than the string
   // length. A negative value will be greater (unsigned comparison).
-  __ movq(rax, Operand(rsp, kPreviousIndexOffset));
-  __ JumpIfNotSmi(rax, &runtime);
-  __ SmiCompare(rax, rbx);
+  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(rbx, &runtime);
+  __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
   __ j(above_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
@@ -8600,8 +8558,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check that the last match info has space for the capture registers and the
   // additional information. Ensure no overflow in add.
   ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(rax, rax);
+  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   __ cmpl(rdx, rax);
   __ j(greater, &runtime);
@@ -8674,8 +8631,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // r12: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
-  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
-  __ SmiToInteger64(rbx, rbx);  // Previous index from smi.
+  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
 
   // rax: subject string
   // rbx: previous index
@@ -8787,10 +8743,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ bind(&success);
   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rax,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rax, rax, times_1, 2));
 
   // rdx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -8877,9 +8833,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
 
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
-  __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  // Divide smi tagged length by two.
-  __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+  __ SmiToInteger32(
+      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shrl(mask, Immediate(1));
   __ subq(mask, Immediate(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
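The mask computation is worth spelling out, since the smi untagging now happens in the load itself. The cache is a FixedArray of (number, string) pairs, so half its length is the entry count, and for a power-of-two entry count the probe mask is count - 1. A sketch:

    // mask = (length / 2) - 1, as computed by SmiToInteger32 + shrl + subq.
    int NumberStringCacheMask(int fixed_array_length) {
      int entries = fixed_array_length / 2;  // two slots per cache entry
      return entries - 1;                    // valid because entries is 2^k
    }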
@@ -8909,15 +8865,14 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
     CpuFeatures::Scope fscope(SSE2);
     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
-    __ comisd(xmm0, xmm1);
+    __ ucomisd(xmm0, xmm1);
     __ j(parity_even, not_found);  // Bail out if NaN is involved.
     __ j(not_equal, not_found);  // The cache did not contain this value.
     __ jmp(&load_result_from_cache);
   }
 
   __ bind(&is_smi);
-  __ movq(scratch, object);
-  __ SmiToInteger32(scratch, scratch);
+  __ SmiToInteger32(scratch, object);
   GenerateConvertHashCodeToIndex(masm, scratch, mask);
 
   Register index = scratch;
@@ -9111,12 +9066,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
   if (include_number_compare_) {
     Label non_number_comparison;
     Label unordered;
-    FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
-                                          &non_number_comparison);
-    FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
-                                          &non_number_comparison);
-
-    __ comisd(xmm0, xmm1);
+    FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+    __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
     __ j(parity_even, &unordered);
@@ -9344,29 +9295,30 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ j(equal, &adaptor_frame);
 
   // Get the length from the frame.
-  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
   __ jmp(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
-  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ SmiToInteger32(rcx,
+                    Operand(rdx,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+  // Space on stack must already hold a smi.
+  __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
   // Do not clobber the length index for the indexing operation since
   // it is used to compute the size for allocation later.
-  SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
-  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ testq(rcx, rcx);
+  __ testl(rcx, rcx);
   __ j(zero, &add_arguments_object);
-  index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
-  __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+  __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+  __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
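The new comment "Space on stack must already hold a smi" is the crux of Integer32ToSmiField: under the upper-half smi layout it stores only the 32-bit payload half of the slot, so the low (tag) half must already be zero. A model of that store, with the little-endian layout assumption made explicit (illustrative, not V8 code):

    #include <cstdint>
    // Writes the payload half of a smi slot; correct only when the slot
    // already holds a smi, i.e. its low 32 bits are zero.
    void Integer32ToSmiField(int64_t* slot, int32_t value) {
      reinterpret_cast<int32_t*>(slot)[1] = value;  // high half on x64
    }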
@@ -9378,10 +9330,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(rdi, Operand(rdi, offset));
 
   // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ movq(kScratchRegister, FieldOperand(rdi, i));
-    __ movq(FieldOperand(rax, i), kScratchRegister);
-  }
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+  __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+  __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+  __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+  __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+  __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
 
   // Setup the callee in-object property.
   ASSERT(Heap::arguments_callee_index == 0);
@@ -9395,7 +9350,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ testq(rcx, rcx);
+  __ SmiTest(rcx);
   __ j(zero, &done);
 
   // Get the parameters pointer from the stack and untag the length.
@@ -9417,7 +9372,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
   __ addq(rdi, Immediate(kPointerSize));
   __ subq(rdx, Immediate(kPointerSize));
-  __ decq(rcx);
+  __ decl(rcx);
   __ j(not_zero, &loop);
 
   // Return and remove the on-stack parameters.
@@ -9968,86 +9923,73 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
 }
 
 
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register number) {
-  Label load_smi, done;
-
-  __ JumpIfSmi(number, &load_smi);
-  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(number, number);
-  __ push(number);
-  __ fild_s(Operand(rsp, 0));
-  __ pop(number);
-
-  __ bind(&done);
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
 }
 
 
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register src,
-                                           XMMRegister dst) {
-  Label load_smi, done;
-
-  __ JumpIfSmi(src, &load_smi);
-  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(src, src);
-  __ cvtlsi2sd(dst, src);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register src,
-                                           XMMRegister dst,
-                                           Label* not_number) {
-  Label load_smi, done;
-  ASSERT(!src.is(kScratchRegister));
-  __ JumpIfSmi(src, &load_smi);
-  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-  __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
-  __ j(not_equal, not_number);
-  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(kScratchRegister, src);
-  __ cvtlsi2sd(dst, kScratchRegister);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
-                                            XMMRegister dst1,
-                                            XMMRegister dst2) {
-  __ movq(kScratchRegister, rdx);
-  LoadFloatOperand(masm, kScratchRegister, dst1);
-  __ movq(kScratchRegister, rax);
-  LoadFloatOperand(masm, kScratchRegister, dst2);
-}
-
-
-void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
-                                                    XMMRegister dst1,
-                                                    XMMRegister dst2) {
-  __ SmiToInteger32(kScratchRegister, rdx);
-  __ cvtlsi2sd(dst1, kScratchRegister);
-  __ SmiToInteger32(kScratchRegister, rax);
-  __ cvtlsi2sd(dst2, kScratchRegister);
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+  // Load operand in rdx into xmm0.
+  __ JumpIfSmi(rdx, &load_smi_rdx);
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  // Load operand in rax into xmm1.
+  __ JumpIfSmi(rax, &load_smi_rax);
+  __ bind(&load_nonsmi_rax);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_rdx);
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+  __ bind(&load_smi_rax);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+                                                  Label* not_numbers) {
+  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+  // Load operand in rdx into xmm0, or branch to not_numbers.
+  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+  __ JumpIfSmi(rdx, &load_smi_rdx);
+  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  // Load operand in rax into xmm1, or branch to not_numbers.
+  __ JumpIfSmi(rax, &load_smi_rax);
+  __ bind(&load_nonsmi_rax);
+  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+  __ j(not_equal, not_numbers);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_rdx);
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+  __ bind(&load_smi_rax);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
+
+  __ bind(&done);
 }
 
 
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         Label* conversion_failure) {
+                                         Label* conversion_failure,
+                                         Register heap_number_map) {
   // Check float operands.
   Label arg1_is_object, check_undefined_arg1;
   Label arg2_is_object, check_undefined_arg2;
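Taken together, the three new loaders trade the x87 fld_d/fild_s paths for SSE2 moves and conversions; the smi case in all of them reduces to untag-then-convert. The scalar equivalent of what LoadSSE2SmiOperands does per operand, as a sketch under the upper-half smi layout assumed above:

    #include <cstdint>
    // SmiToInteger32 followed by cvtlsi2sd, as a plain function.
    double SmiToDouble(int64_t smi) {
      int32_t untagged = static_cast<int32_t>(smi >> 32);  // SmiToInteger32
      return static_cast<double>(untagged);                // cvtlsi2sd
    }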
@@ -10065,8 +10007,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
   __ jmp(&load_arg2);
 
   __ bind(&arg1_is_object);
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg1);
   // Get the untagged integer version of the edx heap number in rcx.
   IntegerConvert(masm, rdx, rdx);
@@ -10087,8 +10028,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
   __ jmp(&done);
 
   __ bind(&arg2_is_object);
-  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg2);
   // Get the untagged integer version of the eax heap number in ecx.
   IntegerConvert(masm, rcx, rax);
@@ -10097,51 +10037,35 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
 }
 
 
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
-                                            Register lhs,
-                                            Register rhs) {
-  Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
-  __ JumpIfSmi(lhs, &load_smi_lhs);
-  __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
-  __ bind(&done_load_lhs);
-
-  __ JumpIfSmi(rhs, &load_smi_rhs);
-  __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi_lhs);
-  __ SmiToInteger64(kScratchRegister, lhs);
-  __ push(kScratchRegister);
-  __ fild_d(Operand(rsp, 0));
-  __ pop(kScratchRegister);
-  __ jmp(&done_load_lhs);
-
-  __ bind(&load_smi_rhs);
-  __ SmiToInteger64(kScratchRegister, rhs);
-  __ push(kScratchRegister);
-  __ fild_d(Operand(rsp, 0));
-  __ pop(kScratchRegister);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
-                                              Label* non_float) {
-  Label test_other, done;
-  // Test if both operands are numbers (heap_numbers or smis).
-  // If not, jump to label non_float.
-  __ JumpIfSmi(rdx, &test_other);  // argument in rdx is OK
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
-  __ j(not_equal, non_float);  // The argument in rdx is not a number.
-
-  __ bind(&test_other);
-  __ JumpIfSmi(rax, &done);  // argument in rax is OK
-  __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
-  __ j(not_equal, non_float);  // The argument in rax is not a number.
-
-  // Fall-through: Both operands are numbers.
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+  if (FLAG_debug_code) {
+    // Both arguments can not be smis. That case is handled by smi-only code.
+    Label ok;
+    __ JumpIfNotBothSmi(rax, rdx, &ok);
+    __ Abort("Both arguments smi but not handled by smi-code.");
+    __ bind(&ok);
+  }
+  // Check float operands.
+  Label done;
+  Label rax_is_object;
+  Label rdx_is_object;
+
+  __ JumpIfNotSmi(rdx, &rdx_is_object);
+  __ SmiToInteger32(rdx, rdx);
+
+  __ bind(&rax_is_object);
+  IntegerConvert(masm, rcx, rax);  // Uses rdi, rcx and rbx.
+  __ jmp(&done);
+
+  __ bind(&rdx_is_object);
+  IntegerConvert(masm, rdx, rdx);  // Uses rdi, rcx and rbx.
+  __ JumpIfNotSmi(rax, &rax_is_object);
+  __ SmiToInteger32(rcx, rax);
+
   __ bind(&done);
+  __ movl(rax, rdx);
 }
@@ -10451,15 +10375,15 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   }
   // left is rdx, right is rax.
   __ AllocateHeapNumber(rbx, rcx, slow);
-  FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
+  FloatingPointHelper::LoadSSE2SmiOperands(masm);
   switch (op_) {
-    case Token::ADD: __ addsd(xmm4, xmm5); break;
-    case Token::SUB: __ subsd(xmm4, xmm5); break;
-    case Token::MUL: __ mulsd(xmm4, xmm5); break;
-    case Token::DIV: __ divsd(xmm4, xmm5); break;
+    case Token::ADD: __ addsd(xmm0, xmm1); break;
+    case Token::SUB: __ subsd(xmm0, xmm1); break;
+    case Token::MUL: __ mulsd(xmm0, xmm1); break;
+    case Token::DIV: __ divsd(xmm0, xmm1); break;
     default: UNREACHABLE();
   }
-  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
+  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
   __ movq(rax, rbx);
   GenerateReturn(masm);
 }
@@ -10522,22 +10446,23 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       Label not_floats;
       // rax: y
      // rdx: x
-      if (static_operands_type_.IsNumber() && FLAG_debug_code) {
-        // Assert at runtime that inputs are only numbers.
-        __ AbortIfNotNumber(rdx);
-        __ AbortIfNotNumber(rax);
+      ASSERT(!static_operands_type_.IsSmi());
+      if (static_operands_type_.IsNumber()) {
+        if (FLAG_debug_code) {
+          // Assert at runtime that inputs are only numbers.
+          __ AbortIfNotNumber(rdx);
+          __ AbortIfNotNumber(rax);
+        }
+        FloatingPointHelper::LoadSSE2NumberOperands(masm);
       } else {
-        FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
+        FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
       }
-      // Fast-case: Both operands are numbers.
-      // xmm4 and xmm5 are volatile XMM registers.
-      FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
 
       switch (op_) {
-        case Token::ADD: __ addsd(xmm4, xmm5); break;
-        case Token::SUB: __ subsd(xmm4, xmm5); break;
-        case Token::MUL: __ mulsd(xmm4, xmm5); break;
-        case Token::DIV: __ divsd(xmm4, xmm5); break;
+        case Token::ADD: __ addsd(xmm0, xmm1); break;
+        case Token::SUB: __ subsd(xmm0, xmm1); break;
+        case Token::MUL: __ mulsd(xmm0, xmm1); break;
+        case Token::DIV: __ divsd(xmm0, xmm1); break;
         default: UNREACHABLE();
       }
       // Allocate a heap number, if needed.
@@ -10572,7 +10497,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
           break;
         default: UNREACHABLE();
       }
-      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
       GenerateReturn(masm);
       __ bind(&not_floats);
       if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
@@ -10597,34 +10522,52 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     case Token::SAR:
     case Token::SHL:
     case Token::SHR: {
-      Label skip_allocation, non_smi_result;
-      FloatingPointHelper::LoadAsIntegers(masm, &call_runtime);
+      Label skip_allocation, non_smi_shr_result;
+      Register heap_number_map = r9;
+      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+      if (static_operands_type_.IsNumber()) {
+        if (FLAG_debug_code) {
+          // Assert at runtime that inputs are only numbers.
+          __ AbortIfNotNumber(rdx);
+          __ AbortIfNotNumber(rax);
+        }
+        FloatingPointHelper::LoadNumbersAsIntegers(masm);
+      } else {
+        FloatingPointHelper::LoadAsIntegers(masm,
+                                            &call_runtime,
+                                            heap_number_map);
+      }
       switch (op_) {
         case Token::BIT_OR:  __ orl(rax, rcx); break;
         case Token::BIT_AND: __ andl(rax, rcx); break;
         case Token::BIT_XOR: __ xorl(rax, rcx); break;
         case Token::SAR: __ sarl_cl(rax); break;
         case Token::SHL: __ shll_cl(rax); break;
-        case Token::SHR: __ shrl_cl(rax); break;
-        default: UNREACHABLE();
-      }
-      if (op_ == Token::SHR) {
-        // Check if result is negative. This can only happen for a shift
-        // by zero, which also doesn't update the sign flag.
-        __ testl(rax, rax);
-        __ j(negative, &non_smi_result);
-      }
-      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
-      // Tag smi result, if possible, and return.
+        case Token::SHR: {
+          __ shrl_cl(rax);
+          // Check if result is negative. This can only happen for a shift
+          // by zero.
+          __ testl(rax, rax);
+          __ j(negative, &non_smi_shr_result);
+          break;
+        }
+        default: UNREACHABLE();
+      }
+
+      STATIC_ASSERT(kSmiValueSize == 32);
+      // Tag smi result and return.
       __ Integer32ToSmi(rax, rax);
       GenerateReturn(masm);
 
-      // All ops except SHR return a signed int32 that we load in
-      // a HeapNumber.
-      if (op_ != Token::SHR && non_smi_result.is_linked()) {
-        __ bind(&non_smi_result);
+      // All bit-ops except SHR return a signed int32 that can be
+      // returned immediately as a smi.
+      // We might need to allocate a HeapNumber if we shift a negative
+      // number right by zero (i.e., convert to UInt32).
+      if (op_ == Token::SHR) {
+        ASSERT(non_smi_shr_result.is_linked());
+        __ bind(&non_smi_shr_result);
         // Allocate a heap number if needed.
-        __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
+        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -10635,22 +10578,33 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
             __ JumpIfNotSmi(rax, &skip_allocation);
             // Fall through!
           case NO_OVERWRITE:
-            __ AllocateHeapNumber(rax, rcx, &call_runtime);
+            // Allocate heap number in new space.
+            // Not using AllocateHeapNumber macro in order to reuse
+            // already loaded heap_number_map.
+            __ AllocateInNewSpace(HeapNumber::kSize,
+                                  rax,
+                                  rcx,
+                                  no_reg,
+                                  &call_runtime,
+                                  TAG_OBJECT);
+            // Set the map.
+            if (FLAG_debug_code) {
+              __ AbortIfNotRootValue(heap_number_map,
+                                     Heap::kHeapNumberMapRootIndex,
+                                     "HeapNumberMap register clobbered.");
+            }
+            __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+                    heap_number_map);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
-        __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-        __ fild_s(Operand(rsp, 1 * kPointerSize));
-        __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+        __ cvtqsi2sd(xmm0, rbx);
+        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
        GenerateReturn(masm);
      }
 
-      // SHR should return uint32 - go to runtime for non-smi/negative result.
-      if (op_ == Token::SHR) {
-        __ bind(&non_smi_result);
-      }
       break;
     }
     default: UNREACHABLE(); break;
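The restructuring above narrows the heap-number slow path to SHR only, which follows from the 32-bit smi payload: every other bitwise result is a signed int32 and therefore always fits a smi (hence the STATIC_ASSERT), while >>> is defined to yield a uint32, whose top-bit-set values overflow the signed payload. What the testl / j(negative) pair checks, as a sketch:

    #include <cstdint>
    // -1 >>> 0 is 4294967295, which is not representable as an int32 smi.
    bool ShrResultFitsSmi(uint32_t result) {
      return static_cast<int32_t>(result) >= 0;  // sign bit clear
    }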
@@ -10683,7 +10637,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label not_strings, both_strings, not_string1, string1, string1_smi2;
 
   // If this stub has already generated FP-specific code then the arguments
-  // are already in rdx, rax
+  // are already in rdx and rax.
   if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
     GenerateLoadArguments(masm);
   }
@@ -10832,19 +10786,13 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ push(rax);
 
   // Push this stub's key.
-  __ movq(rax, Immediate(MinorKey()));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(MinorKey()));
 
   // Although the operation and the type info are encoded into the key,
   // the encoding is opaque, so push them too.
-  __ movq(rax, Immediate(op_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(op_));
 
-  __ movq(rax, Immediate(runtime_operands_type_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(runtime_operands_type_));
 
   __ push(rcx);
@@ -11212,16 +11160,17 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // If result is not supposed to be flat, allocate a cons string object. If
   // both strings are ascii the result is an ascii cons string.
   // rax: first string
-  // ebx: length of resulting flat string
+  // rbx: length of resulting flat string
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
-  Label non_ascii, allocated;
+  Label non_ascii, allocated, ascii_data;
   __ movl(rcx, r8);
   __ and_(rcx, r9);
   ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ testl(rcx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
+  __ bind(&ascii_data);
   // Allocate an ascii cons string.
   __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ bind(&allocated);
@@ -11235,6 +11184,18 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
+  // At least one of the strings is two-byte. Check whether it happens
+  // to contain only ascii characters.
+  // rcx: first instance type AND second instance type.
+  // r8: first instance type.
+  // r9: second instance type.
+  __ testb(rcx, Immediate(kAsciiDataHintMask));
+  __ j(not_zero, &ascii_data);
+  __ xor_(r8, r9);
+  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+  __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+  __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
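The added fast path allocates an ascii cons string even when one input is two-byte, provided the instance-type bits prove the payload is ascii. The two tests above, restated in C++ (the flag values are whatever the instance-type encoding defines; they are treated abstractly here):

    // Either both strings carry the ascii-data hint, or one is ascii-tagged
    // and the other hint-tagged (they differ in exactly those two bits).
    bool CanBuildAsciiCons(int type1, int type2,
                           int kAsciiStringTag, int kAsciiDataHintTag) {
      if ((type1 & type2 & kAsciiDataHintTag) != 0) return true;   // testb path
      int diff = (type1 ^ type2) & (kAsciiStringTag | kAsciiDataHintTag);
      return diff == (kAsciiStringTag | kAsciiDataHintTag);  // xor/andb/cmpb
    }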
@@ -11242,7 +11203,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // Handle creating a flat result. First check that both strings are not
   // external strings.
   // rax: first string
-  // ebx: length of resulting flat string as smi
+  // rbx: length of resulting flat string as smi
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
@@ -11258,7 +11219,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ j(equal, &string_add_runtime);
   // Now check if both strings are ascii strings.
   // rax: first string
-  // ebx: length of resulting flat string
+  // rbx: length of resulting flat string
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string