@@ -412,6 +412,729 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}


// -----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.


void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
#ifdef DEBUG
  cmpq(src, Immediate(0xC0000000u));
  Check(positive, "Smi conversion overflow");
#endif
  if (dst.is(src)) {
    addl(dst, src);
  } else {
    lea(dst, Operand(src, src, times_1, 0));
  }
}
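
// Note: with kSmiTag == 0 and kSmiTagSize == 1, tagging is just doubling.
// E.g. 5 tags to the bit pattern 10 (0b1010) and -3 tags to -6 (0xFFFFFFFA);
// the tag bit (bit 0) is clear in both. The lea form computes src + src into
// a separate register without touching the flags, while addl is the shorter
// encoding when dst aliases src.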


void MacroAssembler::Integer32ToSmi(Register dst,
                                    Register src,
                                    Label* on_overflow) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  addl(dst, src);
  j(overflow, on_overflow);
}


void MacroAssembler::Integer64AddToSmi(Register dst,
                                       Register src,
                                       int constant) {
#ifdef DEBUG
  movl(kScratchRegister, src);
  addl(kScratchRegister, Immediate(constant));
  Check(no_overflow, "Add-and-smi-convert overflow");
  Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
  Check(valid, "Add-and-smi-convert overflow");
#endif
  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  sarl(dst, Immediate(kSmiTagSize));
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
  movsxlq(dst, src);
  sar(dst, Immediate(kSmiTagSize));
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  movsxlq(dst, src);
  shl(dst, Immediate(power - 1));
}
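
// Note: the tagged value is already value << 1, so shifting the
// sign-extended smi left by power - 1 yields value * 2^power. E.g. for
// src holding smi 3 (bit pattern 6) and power == 4: 6 << 3 == 48 == 3 * 16.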


void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
  ASSERT_EQ(0, kSmiTag);
  testl(src, Immediate(kSmiTagMask));
  j(zero, on_smi);
}


void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
  Condition not_smi = CheckNotSmi(src);
  j(not_smi, on_not_smi);
}


void MacroAssembler::JumpIfNotPositiveSmi(Register src,
                                          Label* on_not_positive_smi) {
  Condition not_positive_smi = CheckNotPositiveSmi(src);
  j(not_positive_smi, on_not_positive_smi);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             int constant,
                                             Label* on_equals) {
  if (Smi::IsValid(constant)) {
    Condition are_equal = CheckSmiEqualsConstant(src, constant);
    j(are_equal, on_equals);
  }
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(ReverseCondition(is_valid), on_invalid);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi) {
  Condition not_both_smi = CheckNotBothSmi(src1, src2);
  j(not_both_smi, on_not_both_smi);
}


Condition MacroAssembler::CheckSmi(Register src) {
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNotSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testb(src, Immediate(kSmiTagMask));
  return not_zero;
}


Condition MacroAssembler::CheckPositiveSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
  return zero;
}
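
// Note: the mask 0x80000001 tests the sign bit and the smi tag bit in one
// instruction. Smi 5 (bit pattern 10) gives 10 & 0x80000001 == 0; smi -3
// (0xFFFFFFFA) fails on the sign bit, and a heap object pointer fails on
// the tag bit.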


Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
  return not_zero;
}


Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  movl(kScratchRegister, first);
  orl(kScratchRegister, second);
  return CheckSmi(kScratchRegister);
}


Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
  ASSERT_EQ(0, kSmiTag);
  if (first.is(second)) {
    return CheckNotSmi(first);
  }
  movl(kScratchRegister, first);
  or_(kScratchRegister, second);
  return CheckNotSmi(kScratchRegister);
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  cmpl(src, Immediate(0x40000000));
  return equal;
}


Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
  if (constant == 0) {
    testl(src, src);
    return zero;
  }
  if (Smi::IsValid(constant)) {
    cmpl(src, Immediate(Smi::FromInt(constant)));
    return zero;
  }
  // Can't be equal.
  UNREACHABLE();
  return no_condition;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  // A 32-bit integer value can be converted to a smi if it is in the
  // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
  // representation have bits 30 and 31 be equal.
  cmpl(src, Immediate(0xC0000000u));
  return positive;
}
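
// Note: the comparison computes src - 0xC0000000, i.e. src + 2^30 in 32-bit
// arithmetic, and the sign flag ends up clear exactly when bits 30 and 31 of
// src agree. E.g. src == 0x40000000 (2^30) yields 0x80000000 (rejected),
// while src == -1 yields 0x3FFFFFFF (accepted).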


void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_not_smi_result) {
  if (!dst.is(src)) {
    movl(dst, src);
  }
  negl(dst);
  testl(dst, Immediate(0x7fffffff));
  // If the result is zero or 0x80000000, negation failed to create a smi.
  j(equal, on_not_smi_result);
}
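
// Note: only two results pass the test above with all tested bits zero:
// 0 (negating smi zero must produce -0.0, which is not a smi) and
// 0x80000000 (the most negative smi negates to itself, since +2^30 is one
// past the largest smi). Both are sent to the slow case.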


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  addl(dst, src2);
  if (!dst.is(src1)) {
    j(overflow, on_not_smi_result);
  } else {
    Label smi_result;
    j(no_overflow, &smi_result);
    // Restore src1.
    subl(src1, src2);
    jmp(on_not_smi_result);
    bind(&smi_result);
  }
}
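
// Note: when dst aliases src1 the add is optimistic and is undone on
// overflow, so src1 reaches on_not_smi_result unmodified; in the
// three-register case a plain branch suffices because src1 was never
// written.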


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  subl(dst, src2);
  if (!dst.is(src1)) {
    j(overflow, on_not_smi_result);
  } else {
    Label smi_result;
    j(no_overflow, &smi_result);
    // Restore src1.
    addl(src1, src2);
    jmp(on_not_smi_result);
    bind(&smi_result);
  }
}


void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));

  // Save src1 unconditionally: the negative-zero check below reads
  // kScratchRegister, and dst (which may alias src1) is clobbered by the
  // multiply.
  movq(kScratchRegister, src1);
  SmiToInteger32(dst, src1);

  imull(dst, src2);
  j(overflow, on_not_smi_result);

  // Check for negative zero result. If product is zero, and one
  // argument is negative, go to slow case. The frame is unchanged
  // in this block, so local control flow can use a Label rather
  // than a JumpTarget.
  Label non_zero_result;
  testl(dst, dst);
  j(not_zero, &non_zero_result);

  // Test whether either operand is negative (the other must be zero).
  orl(kScratchRegister, src2);
  j(negative, on_not_smi_result);
  bind(&non_zero_result);
}
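
// Note: only one operand is untagged before the multiply, because the
// product of an untagged and a tagged value is already tagged:
// a * (2 * b) == 2 * (a * b). E.g. smis 3 and 4 (bit patterns 6 and 8):
// untag 6 to 3, then 3 * 8 == 24, the bit pattern of smi 12.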


void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       int32_t constant,
                                       Label* on_not_smi_result) {
  // Does not assume that src is a smi.
  ASSERT_EQ(1, kSmiTagMask);
  ASSERT_EQ(0, kSmiTag);
  ASSERT(Smi::IsValid(constant));

  Register tmp = (src.is(dst) ? kScratchRegister : dst);
  movl(tmp, src);
  addl(tmp, Immediate(Smi::FromInt(constant)));
  if (tmp.is(kScratchRegister)) {
    j(overflow, on_not_smi_result);
    testl(tmp, Immediate(kSmiTagMask));
    j(not_zero, on_not_smi_result);
    movl(dst, tmp);
  } else {
    movl(kScratchRegister, Immediate(kSmiTagMask));
    cmovl(overflow, dst, kScratchRegister);
    testl(dst, kScratchRegister);
    j(not_zero, on_not_smi_result);
  }
}


void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    int32_t constant,
                                    Label* on_not_smi_result) {
  ASSERT(Smi::IsValid(constant));
  if (on_not_smi_result == NULL) {
    if (dst.is(src)) {
      // Add the tagged constant in place; the caller guarantees the
      // result stays in smi range.
      addl(dst, Immediate(Smi::FromInt(constant)));
    } else {
      lea(dst, Operand(src, constant << kSmiTagSize));
    }
  } else {
    if (!dst.is(src)) {
      movl(dst, src);
    }
    addl(dst, Immediate(Smi::FromInt(constant)));
    if (!dst.is(src)) {
      j(overflow, on_not_smi_result);
    } else {
      Label result_ok;
      j(no_overflow, &result_ok);
      subl(dst, Immediate(Smi::FromInt(constant)));
      jmp(on_not_smi_result);
      bind(&result_ok);
    }
  }
}


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    int32_t constant,
                                    Label* on_not_smi_result) {
  ASSERT(Smi::IsValid(constant));
  Smi* smi_value = Smi::FromInt(constant);
  if (dst.is(src)) {
    // Optimistic subtract - may change value of dst register,
    // if it has garbage bits in the higher half, but will not change
    // the value as a tagged smi.
    subl(dst, Immediate(smi_value));
    if (on_not_smi_result != NULL) {
      Label add_success;
      j(no_overflow, &add_success);
      addl(dst, Immediate(smi_value));
      jmp(on_not_smi_result);
      bind(&add_success);
    }
  } else {
    UNIMPLEMENTED();  // Not used yet.
  }
}


void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  Label positive_divisor;
  testl(src2, src2);
  j(zero, on_not_smi_result);
  j(positive, &positive_divisor);
  // Check for negative zero result. If the dividend is zero, and the
  // divisor is negative, return a floating point negative zero.
  testl(src1, src1);
  j(zero, on_not_smi_result);
  bind(&positive_divisor);

  // Sign extend src1 into edx:eax.
  if (!src1.is(rax)) {
    movl(rax, src1);
  }
  cdq();

  idivl(src2);
  // Check for the corner case of dividing the most negative smi by
  // -1. We cannot use the overflow flag, since it is not set by
  // the idiv instruction.
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  cmpl(rax, Immediate(0x40000000));
  j(equal, on_not_smi_result);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  j(not_zero, on_not_smi_result);
  // Tag the result and store it in the destination register.
  Integer32ToSmi(dst, rax);
}
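
// Note: the tags cancel in the division, (2 * a) / (2 * b) == a / b, so the
// quotient in rax is untagged and must be re-tagged. In the corner case
// -2^30 / -1 the tagged operands are -2^31 / -2, which idivl computes as
// 0x40000000 without faulting; that is one past the largest smi, hence the
// explicit comparison.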


void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  testl(src2, src2);
  j(zero, on_not_smi_result);

  if (src1.is(rax)) {
    // Must remember the value to see if a zero result should
    // be a negative zero.
    movl(kScratchRegister, rax);
  } else {
    movl(rax, src1);
  }
  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, return a floating point negative zero.
  Label non_zero_result;
  testl(rdx, rdx);
  j(not_zero, &non_zero_result);
  if (src1.is(rax)) {
    testl(kScratchRegister, kScratchRegister);
  } else {
    testl(src1, src1);
  }
  j(negative, on_not_smi_result);
  bind(&non_zero_result);
  if (!dst.is(rdx)) {
    movl(dst, rdx);
  }
}


void MacroAssembler::SmiNot(Register dst, Register src) {
  if (dst.is(src)) {
    not_(dst);
    // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
    xor_(dst, Immediate(kSmiTagMask));
  } else {
    ASSERT_EQ(0, kSmiTag);
    lea(dst, Operand(src, kSmiTagMask));
    not_(dst);
  }
}
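
// Note: for smi a (bit pattern 2 * a), not_ yields ~(2 * a) == -2a - 1,
// which has the tag bit set; xor'ing the tag bit away gives -2a - 2, the
// bit pattern of smi -a - 1 == ~a. E.g. smi 5: ~10 == 0xFFFFFFF5, xor 1
// gives 0xFFFFFFF4 == -12, the bit pattern of smi -6.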


void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  and_(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
  ASSERT(Smi::IsValid(constant));
  if (!dst.is(src)) {
    movl(dst, src);
  }
  and_(dst, Immediate(Smi::FromInt(constant)));
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  or_(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
  ASSERT(Smi::IsValid(constant));
  if (!dst.is(src)) {
    movl(dst, src);
  }
  or_(dst, Immediate(Smi::FromInt(constant)));
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  xor_(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
  ASSERT(Smi::IsValid(constant));
  if (!dst.is(src)) {
    movl(dst, src);
  }
  xor_(dst, Immediate(Smi::FromInt(constant)));
}


void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  if (shift_value > 0) {
    if (dst.is(src)) {
      sarl(dst, Immediate(shift_value));
      and_(dst, Immediate(~kSmiTagMask));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
                                                  Register src,
                                                  int shift_value,
                                                  Label* on_not_smi_result) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movl(dst, src);
    // Untag the smi.
    sarl(dst, Immediate(kSmiTagSize));
    if (shift_value < 2) {
      // A negative smi shifted right by two is in the positive smi range,
      // but if shifted only by zero or one, it never is.
      j(negative, on_not_smi_result);
    }
    if (shift_value > 0) {
      // Do the right shift on the integer value.
      shrl(dst, Immediate(shift_value));
    }
    // Re-tag the result.
    addl(dst, dst);
  }
}


void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value,
                                          Label* on_not_smi_result) {
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movl(dst, src);
    if (shift_value > 0) {
      // Treat dst as an untagged integer value equal to two times the
      // smi value of src, i.e., already shifted left by one.
      if (shift_value > 1) {
        shll(dst, Immediate(shift_value - 1));
      }
      // Convert int result to smi, checking that it is in smi range.
      ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
      Integer32ToSmi(dst, dst, on_not_smi_result);
    }
  }
}


void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result) {
  ASSERT(!dst.is(rcx));
  Label result_ok;
  // Untag both operands.
  SmiToInteger32(dst, src1);
  SmiToInteger32(rcx, src2);
  shll(dst);
  // Check that the *signed* result fits in a smi.
  Condition is_valid = CheckInteger32ValidSmiValue(dst);
  j(is_valid, &result_ok);
  // Restore the relevant bits of the source registers
  // and call the slow version.
  if (dst.is(src1)) {
    shrl(dst);
    Integer32ToSmi(dst, dst);
  }
  Integer32ToSmi(rcx, rcx);
  jmp(on_not_smi_result);
  bind(&result_ok);
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result) {
  ASSERT(!dst.is(rcx));
  Label result_ok;
  // Untag both operands.
  SmiToInteger32(dst, src1);
  SmiToInteger32(rcx, src2);

  shrl(dst);
  // Check that the *unsigned* result fits in a smi.
  // I.e., that it is a valid positive smi value. The positive smi
  // values are 0..0x3fffffff, i.e., neither of the top-most two
  // bits can be set.
  //
  // These two cases can only happen with shifts by 0 or 1 when
  // handed a valid smi. If the answer cannot be represented by a
  // smi, restore the left and right arguments, and jump to the slow
  // case. The low bit of the left argument may be lost, but only
  // in a case where it is dropped anyway.
  testl(dst, Immediate(0xc0000000));
  j(zero, &result_ok);
  if (dst.is(src1)) {
    shll(dst);
    Integer32ToSmi(dst, dst);
  }
  Integer32ToSmi(rcx, rcx);
  jmp(on_not_smi_result);
  bind(&result_ok);
  // Smi-tag the result in answer.
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  ASSERT(!dst.is(rcx));
  // Untag both operands.
  SmiToInteger32(dst, src1);
  SmiToInteger32(rcx, src2);
  // Shift as integer.
  sarl(dst);
  // Retag result.
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis) {
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  Condition not_both_smis = CheckNotBothSmi(src1, src2);
  Check(not_both_smis, "Both registers were smis.");
#endif
  ASSERT_EQ(0, kSmiTag);
  ASSERT_EQ(0, Smi::FromInt(0));
  movq(kScratchRegister, Immediate(kSmiTagMask));
  and_(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  j(not_zero, on_not_smis);
  // One operand is a smi.

  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subq(kScratchRegister, Immediate(1));
  // If src1 is a smi, then kScratchRegister is all 1s, else it is all 0s.
  movq(dst, src1);
  xor_(dst, src2);
  and_(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xor_(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
}
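
// Note: kScratchRegister holds (src1 & 1) - 1, i.e. all 1s when src1 is a
// smi and all 0s otherwise. Masking src1 ^ src2 with it and xor'ing src1
// back in yields src1 ^ src2 ^ src1 == src2 in the first case and
// 0 ^ src1 == src1 in the second, always the operand that is not a smi.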


SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
  ASSERT(is_uint6(shift));
  if (shift == 0) {  // times_1.
    SmiToInteger32(dst, src);
    return SmiIndex(dst, times_1);
  }
  if (shift <= 4) {  // A 2 - 16 times multiplier is handled using ScaleFactor.
    // We expect that all smis are actually zero-padded. If this holds after
    // checking, this line can be omitted.
    movl(dst, src);  // Ensure that the smi is zero-padded.
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
  }
  // Shift by shift - kSmiTagSize.
  movl(dst, src);  // Ensure that the smi is zero-padded.
  shl(dst, Immediate(shift - kSmiTagSize));
  return SmiIndex(dst, times_1);
}
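
// Note: an index scaled by 2^shift can reuse the tag's factor of two, so
// for shift == 3 (index * 8) the tagged value is returned with times_4:
// (2 * index) * 4 == index * 8. Only shift == 0 requires untagging, and
// shifts above 4 fall back to an explicit shl by shift - kSmiTagSize.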


SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  // Register src holds a positive smi.
  ASSERT(is_uint6(shift));
  if (shift == 0) {  // times_1.
    SmiToInteger32(dst, src);
    neg(dst);
    return SmiIndex(dst, times_1);
  }
  if (shift <= 4) {  // A 2 - 16 times multiplier is handled using ScaleFactor.
    movl(dst, src);
    neg(dst);
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
  }
  // Shift by shift - kSmiTagSize.
  movl(dst, src);
  neg(dst);
  shl(dst, Immediate(shift - kSmiTagSize));
  return SmiIndex(dst, times_1);
}


bool MacroAssembler::IsUnsafeSmi(Smi* value) {
  return false;
}


@@ -520,7 +1243,7 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
#endif
  jmp(kScratchRegister);
#ifdef DEBUG
-  ASSERT_EQ(kPatchReturnSequenceLength,
+  ASSERT_EQ(kCallTargetAddressOffset,
            SizeOfCodeGeneratedSince(&target) + kPointerSize);
#endif
}


@@ -549,7 +1272,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
#endif
  call(kScratchRegister);
#ifdef DEBUG
-  ASSERT_EQ(kPatchReturnSequenceLength,
+  ASSERT_EQ(kCallTargetAddressOffset,
            SizeOfCodeGeneratedSince(&target) + kPointerSize);
#endif
}


@@ -599,7 +1322,7 @@ void MacroAssembler::Ret() {


void MacroAssembler::FCmp() {
-  fcompp();
+  fucompp();
  push(rax);
  fnstsw_ax();
  if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {


@@ -821,7 +1544,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
    Unresolved entry =
-        { pc_offset() - kPatchReturnSequenceLength, flags, name };
+        { pc_offset() - kCallTargetAddressOffset, flags, name };
    unresolved_.Add(entry);
  }
}


@@ -1406,4 +2129,23 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
}


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to
  // patch. The size is adjusted by kGap in order for the assembler to
  // generate size bytes of instructions without failing with buffer size
  // constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


} }  // namespace v8::internal