// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)
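// Note: throughout this file `__` expands to `masm->` (via the ACCESS_MASM
// macro), the usual V8 convention for emitting instructions through a
// MacroAssembler, so e.g. `__ Ret()` emits a return sequence.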

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           a0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments, adjust sp.
    __ Subu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sw(descriptor.GetRegisterParameter(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);

  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(scratch, scratch,
           kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
               | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
        MemOperand(input_reg, double_offset + Register::kExponentOffset));
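
  // The manual path below picks the IEEE 754 double apart by hand: the value
  // is 64 bits wide, with 1 sign bit, 11 exponent bits, and 52 mantissa bits.
  // kMantissaOffset/kExponentOffset select the low and high 32-bit words of
  // the double in memory, accounting for the target's endianness.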
  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc, Strength strength) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  __ GetObjectType(a0, t4, t4);
  if (cc == less || cc == greater) {
    // Call runtime on identical JSObjects.
    __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
    if (is_strong(strength)) {
      // Call the runtime on anything that is converted in the semantics, since
      // we need to throw a TypeError. Smis have already been ruled out.
      __ Branch(&return_equal, eq, t4, Operand(HEAP_NUMBER_TYPE));
      __ And(t4, t4, Operand(kIsNotStringMask));
      __ Branch(slow, ne, t4, Operand(zero_reg));
    }
  } else {
    __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
      if (is_strong(strength)) {
        // Call the runtime on anything that is converted in the semantics,
        // since we need to throw a TypeError. Smis and heap numbers have
        // already been ruled out.
        __ And(t4, t4, Operand(kIsNotStringMask));
        __ Branch(slow, ne, t4, Operand(zero_reg));
      }
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(t2));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);  // Things are <=, >=, ==, === themselves.
  }

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
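    // For example, the quiet NaN 0x7FF8000000000000 has all eleven exponent
    // bits set and a non-zero mantissa, while +Infinity (0x7FF0000000000000)
    // has the same exponent bits but an all-zero mantissa.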
    // Read top bits of double representation (second word of value).
    __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(t3, t2, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, t3, Operand(t2));
    // For equal we already have the right value in v0: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));  // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ sra(at, rhs, kSmiTagSize);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
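  // On 32-bit V8 a smi carries the integer in its upper 31 bits above a zero
  // tag bit, so an arithmetic shift right by kSmiTagSize (1) recovers the
  // value: e.g. the integer 5 is tagged as binary 1010, and 1010 >> 1 == 5.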
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If rhs was not a number and lhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ sra(at, lhs, kSmiTagSize);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(a2, a2, Operand(a3));
  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized strings. We already checked they weren't the same
  // pointer so they are not equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));  // Non-zero indicates not equal.

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret(USE_DELAY_SLOT);
  __ xori(v0, a0, 1 << Map::kIsUndetectable);
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 and a0 are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ sra(a1, a1, 1);
  __ sra(a0, a0, 1);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a1, a0);
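  // Fast path: both operands were smis, so untagging them (sra by 1) and
  // subtracting yields a negative, zero, or positive integer that directly
  // encodes the comparison result. Note the subu executes in the branch delay
  // slot of the Ret (USE_DELAY_SLOT), a MIPS idiom used throughout this file.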
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc, strength());

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
  __ And(t2, lhs, Operand(rhs));
  __ JumpIfNotSmi(t2, &not_smis, t0);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.
  Label nan;
  __ li(t0, Operand(LESS));
  __ li(t1, Operand(GREATER));
  __ li(t2, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (!IsMipsArchVariant(kMips32r6)) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, t0);
    // Use the previous check to conditionally store the opposite condition
    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected in the
    // next check.
    __ Movf(v0, t1);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, t2);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, t0);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, t2);  // Return EQUAL as result.

    __ mov(v0, t1);  // Return GREATER as result.
    __ bind(&skip);
  }

  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
                                                    t1);
  }
  // Never falls through to here.

  __ bind(&slow);
  // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
  // a0 (rhs) second.
  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  if (cc == eq && strict()) {
    __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
  } else {
    Builtins::JavaScript native;
    if (cc == eq) {
      native = Builtins::EQUALS;
    } else {
      native =
          is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
      int ncr;  // NaN compare result.
      if (cc == lt || cc == le) {
        ncr = GREATER;
      } else {
        DCHECK(cc == gt || cc == ge);  // Remaining cases.
        ncr = LESS;
      }
      __ li(a0, Operand(Smi::FromInt(ncr)));
      __ push(a0);
    }

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ InvokeBuiltin(native, JUMP_FUNCTION);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = a1;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const Register heapnumbermap = t1;
  const Register heapnumber = v0;
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t5;
  const Register scratch2 = t3;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ lw(base, MemOperand(sp, 1 * kPointerSize));
    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));

    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ mtc1(scratch, single_scratch);
    __ cvt_d_w(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    if (exponent_type() == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;
      // Test for 0.5.
      __ Move(double_scratch, 0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &not_plus_half,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ neg_d(double_result, double_scratch);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ sqrt_d(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ Move(double_scratch, -0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &call_runtime,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ Move(double_result, kDoubleRegZero);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ Move(double_result, 1.);
      __ sqrt_d(double_scratch, double_scratch);
      __ div_d(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Subu(scratch, zero_reg, scratch);
  __ bind(&positive_exponent);
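
  // The loop below is binary exponentiation (square-and-multiply): it scans
  // |exponent| bit by bit, multiplying the result by the current power of the
  // base whenever the low bit is set and squaring the base each iteration, so
  // base^n costs O(log n) multiplications. E.g. x^5 = x^4 * x (bits 101).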
  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ sra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ sdc1(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(v0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ DropAndRet(2);
  } else {
    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer (restored after C call)
  // sp: stack pointer (restored as callee's sp after C call)
  // cp: current context (C callee-saved)

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Compute the argv pointer in a callee-saved register.
  __ sll(s1, a0, kPointerSizeLog2);
  __ Addu(s1, sp, s1);
  __ Subu(s1, s1, kPointerSize);
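  // s1 now holds sp + argc * kPointerSize - kPointerSize, the last word of
  // the argument area; it is handed to the C function as argv below.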

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles());

  // s0: number of arguments including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);
  // a1 = argv (set in the delay slot after find_ra below).

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    // This branch-and-link sequence is needed to find the current PC on mips,
    // saved to the ra register.
    // Use masm-> here instead of the double-underscore macro since extra
    // coverage code can interfere with the proper calculation of ra.
    Label find_ra;
    masm->bal(&find_ra);  // bal exposes branch delay slot.
    masm->mov(a1, s1);
    masm->bind(&find_ra);

    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    const int kNumInstructionsToJump = 5;
    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    masm->jalr(t9);
    // Set up sp in the delay slot.
    masm->addiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
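    // kNumInstructionsToJump counts the five 4-byte instructions emitted
    // after find_ra (Addu, sw, mov, jalr, addiu), so the stored ra lands on
    // the instruction right after the jalr's delay slot; the DCHECK above
    // keeps the constant honest. On MIPS32 kPointerSize happens to equal the
    // instruction size, which is why it is used as the multiplier.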
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(t0, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, t0, Operand(v0));

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ li(a2, Operand(pending_exception_address));
    __ lw(a2, MemOperand(a2));
    __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ Branch(&okay, eq, t0, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  // s0: still holds argc (callee-saved).
  __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set v0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, a0);
    __ mov(a0, zero_reg);
    __ mov(a1, zero_reg);
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ li(cp, Operand(pending_handler_context_address));
  __ lw(cp, MemOperand(cp));
  __ li(sp, Operand(pending_handler_sp_address));
  __ lw(sp, MemOperand(sp));
  __ li(fp, Operand(pending_handler_fp_address));
  __ lw(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
  __ Branch(&zero, eq, cp, Operand(zero_reg));
  __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);

  // Compute the handler entry address and jump to it.
  __ li(a1, Operand(pending_handler_code_address));
  __ lw(a1, MemOperand(a1));
  __ li(a2, Operand(pending_handler_offset_address));
  __ lw(a2, MemOperand(a2));
  __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Addu(t9, a1, a2);
  __ Jump(t9);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  //
  // Stack:
  // 4 args slots
  // args

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);


  // Load argv in s0 register.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
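  // offset_to_argv skips what was just pushed: kNumCalleeSaved GP registers
  // plus ra (one word each) and kNumCalleeSavedFPU double registers, so the
  // load below reaches past the saved state to the caller's argv argument.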
|
|
|
|
|
|
|
|
__ InitializeRootRegister();
|
|
|
|
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
|
|
|
|
|
|
|
|
// We build an EntryFrame.
|
|
|
|
__ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
|
|
|
|
int marker = type();
|
|
|
|
__ li(t2, Operand(Smi::FromInt(marker)));
|
|
|
|
__ li(t1, Operand(Smi::FromInt(marker)));
|
|
|
|
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
|
|
|
|
isolate)));
|
|
|
|
__ lw(t0, MemOperand(t0));
|
|
|
|
__ Push(t3, t2, t1, t0);
|
|
|
|
// Set up frame pointer for the frame to be pushed.
|
|
|
|
__ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
|
|
|
|
|
|
|
|
// Registers:
|
|
|
|
// a0: entry_address
|
|
|
|
// a1: function
|
|
|
|
// a2: receiver_pointer
|
|
|
|
// a3: argc
|
|
|
|
// s0: argv
|
|
|
|
//
|
|
|
|
// Stack:
|
|
|
|
// caller fp |
|
|
|
|
// function slot | entry frame
|
|
|
|
// context slot |
|
|
|
|
// bad fp (0xff...f) |
|
|
|
|
// callee saved registers + ra
|
|
|
|
// 4 args slots
|
|
|
|
// args
|
|
|
|
|
|
|
|
// If this is the outermost JS call, set js_entry_sp value.
|
|
|
|
Label non_outermost_js;
|
|
|
|
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
|
|
|
|
__ li(t1, Operand(ExternalReference(js_entry_sp)));
|
|
|
|
__ lw(t2, MemOperand(t1));
|
|
|
|
__ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
|
|
|
|
__ sw(fp, MemOperand(t1));
|
|
|
|
__ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
|
|
|
|
Label cont;
|
|
|
|
__ b(&cont);
|
|
|
|
__ nop(); // Branch delay slot nop.
|
|
|
|
__ bind(&non_outermost_js);
|
|
|
|
__ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
|
|
|
|
__ bind(&cont);
|
|
|
|
__ push(t0);
|
|
|
|
|
|
|
|
// Jump to a faked try block that does the invoke, with a faked catch
|
|
|
|
// block that sets the pending exception.
|
|
|
|
__ jmp(&invoke);
|
|
|
|
__ bind(&handler_entry);
|
|
|
|
handler_offset_ = handler_entry.pos();
|
|
|
|
// Caught exception: Store result (exception) in the pending exception
|
|
|
|
// field in the JSEnv and return a failure sentinel. Coming in here the
|
|
|
|
// fp will be invalid because the PushStackHandler below sets it to 0 to
|
|
|
|
// signal the existence of the JSEntry frame.
|
|
|
|
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
|
|
|
isolate)));
|
|
|
|
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
|
|
|
|
__ LoadRoot(v0, Heap::kExceptionRootIndex);
|
|
|
|
__ b(&exit); // b exposes branch delay slot.
|
|
|
|
__ nop(); // Branch delay slot nop.
|
|
|
|
|
|
|
|
// Invoke: Link this frame into the handler chain.
|
|
|
|
__ bind(&invoke);
|
|
|
|
__ PushStackHandler();
|
|
|
|
// If an exception not caught by another handler occurs, this handler
|
|
|
|
// returns control to the code after the bal(&invoke) above, which
|
|
|
|
// restores all kCalleeSaved registers (including cp and fp) to their
|
|
|
|
// saved values before returning a failure to C.
|
|
|
|
|
|
|
|
// Clear any pending exceptions.
|
|
|
|
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
|
|
|
|
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
|
|
|
isolate)));
|
|
|
|
__ sw(t1, MemOperand(t0));
|
|
|
|
|
|
|
|
// Invoke the function by calling through JS entry trampoline builtin.
|
|
|
|
// Notice that we cannot store a reference to the trampoline code directly in
|
|
|
|
// this stub, because runtime stubs are not traversed when doing GC.
|
|
|
|
|
|
|
|
// Registers:
|
|
|
|
// a0: entry_address
|
|
|
|
// a1: function
|
|
|
|
// a2: receiver_pointer
|
|
|
|
// a3: argc
|
|
|
|
// s0: argv
|
|
|
|
//
|
|
|
|
// Stack:
|
|
|
|
// handler frame
|
|
|
|
// entry frame
|
|
|
|
// callee saved registers + ra
|
|
|
|
// 4 args slots
|
|
|
|
// args
|
|
|
|
|
|
|
|
if (type() == StackFrame::ENTRY_CONSTRUCT) {
|
|
|
|
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
|
|
|
|
isolate);
|
|
|
|
__ li(t0, Operand(construct_entry));
|
|
|
|
} else {
|
|
|
|
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
|
|
|
|
__ li(t0, Operand(entry));
|
|
|
|
}
|
|
|
|
__ lw(t9, MemOperand(t0)); // Deref address.
|
|
|
|
|
|
|
|
// Call JSEntryTrampoline.
|
|
|
|
__ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
|
|
|
|
__ Call(t9);
|
|
|
|
|
|
|
|
// Unlink this frame from the handler chain.
|
|
|
|
__ PopStackHandler();
|
|
|
|
|
|
|
|
__ bind(&exit); // v0 holds result
|
|
|
|
// Check if the current stack frame is marked as the outermost JS frame.
|
|
|
|
Label non_outermost_js_2;
|
|
|
|
__ pop(t1);
|
|
|
|
__ Branch(&non_outermost_js_2,
|
|
|
|
ne,
|
|
|
|
t1,
|
|
|
|
Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
|
|
|
|
__ li(t1, Operand(ExternalReference(js_entry_sp)));
|
|
|
|
__ sw(zero_reg, MemOperand(t1));
|
|
|
|
__ bind(&non_outermost_js_2);
|
|
|
|
|
|
|
|
// Restore the top frame descriptors from the stack.
|
|
|
|
__ pop(t1);
|
|
|
|
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
|
|
|
|
isolate)));
|
|
|
|
__ sw(t1, MemOperand(t0));
|
|
|
|
|
|
|
|
// Reset the stack to the callee saved registers.
|
|
|
|
__ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
|
|
|
|
|
|
|
|
// Restore callee-saved fpu registers.
|
|
|
|
__ MultiPopFPU(kCalleeSavedFPU);
|
|
|
|
|
|
|
|
// Restore callee saved registers from the stack.
|
|
|
|
__ MultiPop(kCalleeSaved | ra.bit());
|
|
|
|
// Return.
|
|
|
|
__ Jump(ra);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
|
|
|
|
// Return address is in ra.
|
|
|
|
Label miss;
|
|
|
|
|
|
|
|
Register receiver = LoadDescriptor::ReceiverRegister();
|
|
|
|
Register index = LoadDescriptor::NameRegister();
|
|
|
|
Register scratch = t1;
|
|
|
|
Register result = v0;
|
|
|
|
DCHECK(!scratch.is(receiver) && !scratch.is(index));
|
|
|
|
DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
|
|
|
|
|
|
|
|
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
|
|
|
|
&miss, // When not a string.
|
|
|
|
&miss, // When not a number.
|
|
|
|
&miss, // When index out of range.
|
|
|
|
STRING_INDEX_IS_ARRAY_INDEX,
|
|
|
|
RECEIVER_IS_STRING);
|
|
|
|
char_at_generator.GenerateFast(masm);
|
|
|
|
__ Ret();
|
|
|
|
|
|
|
|
StubRuntimeCallHelper call_helper;
|
|
|
|
char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
|
|
|
|
|
|
|
|
__ bind(&miss);
|
|
|
|
PropertyAccessCompiler::TailCallBuiltin(
|
|
|
|
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Uses registers a0 to t0.
|
|
|
|
// Expected input (depending on whether args are in registers or on the stack):
|
|
|
|
// * object: a0 or at sp + 1 * kPointerSize.
|
|
|
|
// * function: a1 or at sp.
|
|
|
|
//
|
|
|
|
// An inlined call site may have been generated before calling this stub.
|
|
|
|
// In this case the offset to the inline site to patch is passed on the stack,
|
|
|
|
// in the safepoint slot for register t0.
|
|
|
|
void InstanceofStub::Generate(MacroAssembler* masm) {
|
|
|
|
// Call site inlining and patching implies arguments in registers.
|
|
|
|
DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
|
|
|
|
|
|
|
|
// Fixed register usage throughout the stub:
|
|
|
|
const Register object = a0; // Object (lhs).
|
|
|
|
Register map = a3; // Map of the object.
|
|
|
|
const Register function = a1; // Function (rhs).
|
|
|
|
const Register prototype = t0; // Prototype of the function.
|
|
|
|
const Register inline_site = t5;
|
|
|
|
const Register scratch = a2;
|
|
|
|
|
|
|
|
const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
|
|
|
|
|
|
|
|
Label slow, loop, is_instance, is_not_instance, not_js_object;
|
|
|
|
|
|
|
|
if (!HasArgsInRegisters()) {
|
|
|
|
__ lw(object, MemOperand(sp, 1 * kPointerSize));
|
|
|
|
__ lw(function, MemOperand(sp, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that the left hand is a JS object and load map.
|
|
|
|
__ JumpIfSmi(object, ¬_js_object);
|
|
|
|
__ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
|
|
|
|
|
|
|
|
  // If there is a call site cache, don't look in the global cache, but do the
|
|
|
|
// real lookup and update the call site cache.
|
|
|
|
if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
|
|
|
|
Label miss;
|
|
|
|
__ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
|
|
|
|
__ Branch(&miss, ne, function, Operand(at));
|
|
|
|
__ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
|
|
|
|
__ Branch(&miss, ne, map, Operand(at));
|
|
|
|
__ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
|
|
|
|
|
|
|
|
__ bind(&miss);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the prototype of the function.
|
|
|
|
__ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
|
|
|
|
|
|
|
|
// Check that the function prototype is a JS object.
|
|
|
|
__ JumpIfSmi(prototype, &slow);
|
|
|
|
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
|
|
|
|
|
|
|
|
// Update the global instanceof or call site inlined cache with the current
|
|
|
|
// map and function. The cached answer will be set when it is known below.
|
|
|
|
if (!HasCallSiteInlineCheck()) {
|
|
|
|
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
|
|
|
|
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
|
|
|
|
} else {
|
|
|
|
DCHECK(HasArgsInRegisters());
|
|
|
|
// Patch the (relocated) inlined map check.
|
|
|
|
|
|
|
|
    // The offset was stored in the t0 safepoint slot.
|
|
|
|
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
|
|
|
|
__ LoadFromSafepointRegisterSlot(scratch, t0);
|
|
|
|
__ Subu(inline_site, ra, scratch);
|
|
|
|
// Get the map location in scratch and patch it.
|
|
|
|
__ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
|
|
|
|
__ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
|
|
|
|
|
|
|
|
__ mov(t4, map);
|
|
|
|
// |scratch| points at the beginning of the cell. Calculate the field
|
|
|
|
// containing the map.
|
|
|
|
__ Addu(function, scratch, Operand(Cell::kValueOffset - 1));
|
|
|
|
__ RecordWriteField(scratch, Cell::kValueOffset, t4, function,
|
|
|
|
kRAHasNotBeenSaved, kDontSaveFPRegs,
|
|
|
|
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register mapping: a3 is object map and t0 is function prototype.
|
|
|
|
// Get prototype of object into a2.
|
|
|
|
__ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
|
|
|
|
|
|
|
|
// We don't need map any more. Use it as a scratch register.
|
|
|
|
Register scratch2 = map;
|
|
|
|
map = no_reg;
|
|
|
|
|
|
|
|
// Loop through the prototype chain looking for the function prototype.
|
|
|
|
__ LoadRoot(scratch2, Heap::kNullValueRootIndex);
|
|
|
|
__ bind(&loop);
|
|
|
|
__ Branch(&is_instance, eq, scratch, Operand(prototype));
|
|
|
|
__ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
|
|
|
|
__ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
|
|
|
|
__ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
|
|
|
|
__ Branch(&loop);
|
|
|
|
|
|
|
|
__ bind(&is_instance);
|
|
|
|
DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
|
|
|
|
if (!HasCallSiteInlineCheck()) {
|
|
|
|
__ mov(v0, zero_reg);
|
|
|
|
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
|
|
|
|
if (ReturnTrueFalseObject()) {
|
|
|
|
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Patch the call site to return true.
|
|
|
|
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
|
|
|
|
__ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
|
|
|
// Get the boolean result location in scratch and patch it.
|
|
|
|
__ PatchRelocatedValue(inline_site, scratch, v0);
|
|
|
|
|
|
|
|
if (!ReturnTrueFalseObject()) {
|
|
|
|
__ mov(v0, zero_reg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
|
|
|
|
|
|
|
|
__ bind(&is_not_instance);
|
|
|
|
if (!HasCallSiteInlineCheck()) {
|
|
|
|
__ li(v0, Operand(Smi::FromInt(1)));
|
|
|
|
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
|
|
|
|
if (ReturnTrueFalseObject()) {
|
|
|
|
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Patch the call site to return false.
|
|
|
|
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
|
|
|
|
__ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
|
|
|
// Get the boolean result location in scratch and patch it.
|
|
|
|
__ PatchRelocatedValue(inline_site, scratch, v0);
|
|
|
|
|
|
|
|
if (!ReturnTrueFalseObject()) {
|
|
|
|
__ li(v0, Operand(Smi::FromInt(1)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
|
|
|
|
|
|
|
|
Label object_not_null, object_not_null_or_smi;
|
|
|
|
__ bind(¬_js_object);
|
|
|
|
// Before null, smi and string value checks, check that the rhs is a function
|
|
|
|
  // since an exception must be thrown for a non-function rhs.
|
|
|
|
__ JumpIfSmi(function, &slow);
|
|
|
|
__ GetObjectType(function, scratch2, scratch);
|
|
|
|
__ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
|
|
|
|
|
|
|
|
  // Null is not an instance of anything.
|
|
|
|
__ Branch(&object_not_null, ne, object,
|
|
|
|
Operand(isolate()->factory()->null_value()));
|
|
|
|
if (ReturnTrueFalseObject()) {
|
|
|
|
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
|
|
|
|
} else {
|
|
|
|
__ li(v0, Operand(Smi::FromInt(1)));
|
|
|
|
}
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
|
|
|
|
|
|
|
|
__ bind(&object_not_null);
|
|
|
|
// Smi values are not instances of anything.
|
|
|
|
__ JumpIfNotSmi(object, &object_not_null_or_smi);
|
|
|
|
if (ReturnTrueFalseObject()) {
|
|
|
|
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
|
|
|
|
} else {
|
|
|
|
__ li(v0, Operand(Smi::FromInt(1)));
|
|
|
|
}
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
|
|
|
|
|
|
|
|
__ bind(&object_not_null_or_smi);
|
|
|
|
// String values are not instances of anything.
|
|
|
|
__ IsObjectJSStringType(object, scratch, &slow);
|
|
|
|
if (ReturnTrueFalseObject()) {
|
|
|
|
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
|
|
|
|
} else {
|
|
|
|
__ li(v0, Operand(Smi::FromInt(1)));
|
|
|
|
}
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
|
|
|
|
|
|
|
|
// Slow-case. Tail call builtin.
|
|
|
|
__ bind(&slow);
|
|
|
|
if (!ReturnTrueFalseObject()) {
|
|
|
|
if (HasArgsInRegisters()) {
|
|
|
|
__ Push(a0, a1);
|
|
|
|
}
|
|
|
|
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
|
|
|
|
} else {
|
|
|
|
{
|
|
|
|
FrameScope scope(masm, StackFrame::INTERNAL);
|
|
|
|
__ Push(a0, a1);
|
|
|
|
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
|
|
|
|
}
|
|
|
|
__ mov(a0, v0);
|
|
|
|
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
|
|
|
|
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
|
|
|
|
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
|
|
|
|
Label miss;
|
|
|
|
Register receiver = LoadDescriptor::ReceiverRegister();
|
|
|
|
// Ensure that the vector and slot registers won't be clobbered before
|
|
|
|
// calling the miss handler.
|
|
|
|
DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::VectorRegister(),
|
|
|
|
LoadWithVectorDescriptor::SlotRegister()));
|
|
|
|
|
|
|
|
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
|
|
|
|
t1, &miss);
|
|
|
|
__ bind(&miss);
|
|
|
|
PropertyAccessCompiler::TailCallBuiltin(
|
|
|
|
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
|
|
|
// The displacement is the offset of the last parameter (if any)
|
|
|
|
// relative to the frame pointer.
|
|
|
|
const int kDisplacement =
|
|
|
|
StandardFrameConstants::kCallerSPOffset - kPointerSize;
|
|
|
|
DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
|
|
|
|
DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
|
|
|
|
|
|
|
|
  // Check that the key is a smi.
|
|
|
|
Label slow;
|
|
|
|
__ JumpIfNotSmi(a1, &slow);
|
|
|
|
|
|
|
|
// Check if the calling frame is an arguments adaptor frame.
|
|
|
|
Label adaptor;
|
|
|
|
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
|
|
|
__ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
|
|
|
__ Branch(&adaptor,
|
|
|
|
eq,
|
|
|
|
a3,
|
|
|
|
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
|
|
|
|
|
// Check index (a1) against formal parameters count limit passed in
|
|
|
|
// through register a0. Use unsigned comparison to get negative
|
|
|
|
// check for free.
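  // E.g. a "negative" smi has its sign bit set, so reinterpreted as an
  // unsigned word it exceeds any valid parameter count; the single `hs`
  // branch below thus rejects negative and out-of-range indices alike.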
|
|
|
|
__ Branch(&slow, hs, a1, Operand(a0));
|
|
|
|
|
|
|
|
// Read the argument from the stack and return it.
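  // Both a0 and a1 arrive smi-tagged (value << 1), so shifting their
  // difference left by kPointerSizeLog2 - kSmiTagSize = 1 untags and scales
  // to bytes at once: a3 = fp + (parameter_count - index) * kPointerSize.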
|
|
|
|
__ subu(a3, a0, a1);
|
|
|
|
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
|
|
|
|
__ Addu(a3, fp, Operand(t3));
|
|
|
|
__ Ret(USE_DELAY_SLOT);
|
|
|
|
__ lw(v0, MemOperand(a3, kDisplacement));
|
|
|
|
|
|
|
|
// Arguments adaptor case: Check index (a1) against actual arguments
|
|
|
|
// limit found in the arguments adaptor frame. Use unsigned
|
|
|
|
// comparison to get negative check for free.
|
|
|
|
__ bind(&adaptor);
|
|
|
|
__ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
|
|
|
__ Branch(&slow, Ugreater_equal, a1, Operand(a0));
|
|
|
|
|
|
|
|
// Read the argument from the adaptor frame and return it.
|
|
|
|
__ subu(a3, a0, a1);
|
|
|
|
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
|
|
|
|
__ Addu(a3, a2, Operand(t3));
|
|
|
|
__ Ret(USE_DELAY_SLOT);
|
|
|
|
__ lw(v0, MemOperand(a3, kDisplacement));
|
|
|
|
|
|
|
|
// Slow-case: Handle non-smi or out-of-bounds access to arguments
|
|
|
|
// by calling the runtime system.
|
|
|
|
__ bind(&slow);
|
|
|
|
__ push(a1);
|
|
|
|
__ TailCallRuntime(Runtime::kArguments, 1, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
|
|
|
|
// sp[0] : number of parameters
|
|
|
|
// sp[4] : receiver displacement
|
|
|
|
// sp[8] : function
|
|
|
|
|
|
|
|
// Check if the calling frame is an arguments adaptor frame.
|
|
|
|
Label runtime;
|
|
|
|
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
|
|
|
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
|
|
|
__ Branch(&runtime,
|
|
|
|
ne,
|
|
|
|
a2,
|
|
|
|
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
|
|
|
|
|
// Patch the arguments.length and the parameters pointer in the current frame.
|
|
|
|
__ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
|
|
|
__ sw(a2, MemOperand(sp, 0 * kPointerSize));
|
|
|
|
__ sll(t3, a2, 1);
|
|
|
|
__ Addu(a3, a3, Operand(t3));
|
|
|
|
__ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
|
|
|
|
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
|
|
|
|
|
|
|
__ bind(&runtime);
|
|
|
|
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
|
|
|
// Stack layout:
|
|
|
|
// sp[0] : number of parameters (tagged)
|
|
|
|
// sp[4] : address of receiver argument
|
|
|
|
// sp[8] : function
|
|
|
|
// Registers used over whole function:
|
|
|
|
// t2 : allocated object (tagged)
|
|
|
|
// t5 : mapped parameter count (tagged)
|
|
|
|
|
|
|
|
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
|
|
|
|
// a1 = parameter count (tagged)
|
|
|
|
|
|
|
|
// Check if the calling frame is an arguments adaptor frame.
|
|
|
|
Label runtime;
|
|
|
|
Label adaptor_frame, try_allocate;
|
|
|
|
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
|
|
|
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
|
|
|
__ Branch(&adaptor_frame,
|
|
|
|
eq,
|
|
|
|
a2,
|
|
|
|
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
|
|
|
|
|
// No adaptor, parameter count = argument count.
|
|
|
|
__ mov(a2, a1);
|
|
|
|
__ b(&try_allocate);
|
|
|
|
__ nop(); // Branch delay slot nop.
|
|
|
|
|
|
|
|
// We have an adaptor frame. Patch the parameters pointer.
|
|
|
|
__ bind(&adaptor_frame);
|
|
|
|
__ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
|
|
|
__ sll(t6, a2, 1);
|
|
|
|
__ Addu(a3, a3, Operand(t6));
|
|
|
|
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
|
|
|
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
|
|
|
|
|
|
|
// a1 = parameter count (tagged)
|
|
|
|
// a2 = argument count (tagged)
|
|
|
|
// Compute the mapped parameter count = min(a1, a2) in a1.
|
|
|
|
Label skip_min;
|
|
|
|
__ Branch(&skip_min, lt, a1, Operand(a2));
|
|
|
|
__ mov(a1, a2);
|
|
|
|
__ bind(&skip_min);
|
|
|
|
|
|
|
|
__ bind(&try_allocate);
|
|
|
|
|
|
|
|
// Compute the sizes of backing store, parameter map, and arguments object.
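  // Illustrative total, assuming 2 mapped parameters and 3 arguments:
  //   (kParameterMapHeaderSize + 2 * kPointerSize)   // 1. parameter map
  //   + (FixedArray::kHeaderSize + 3 * kPointerSize) // 2. backing store
  //   + Heap::kSloppyArgumentsObjectSize             // 3. arguments object
  // bytes, which is what t5 accumulates in steps 1-3 below.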
|
|
|
|
// 1. Parameter map, has 2 extra words containing context and backing store.
|
|
|
|
const int kParameterMapHeaderSize =
|
|
|
|
FixedArray::kHeaderSize + 2 * kPointerSize;
|
|
|
|
// If there are no mapped parameters, we do not need the parameter_map.
|
|
|
|
Label param_map_size;
|
|
|
|
DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
|
|
|
|
__ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
|
|
|
|
__ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
|
|
|
|
__ sll(t5, a1, 1);
|
|
|
|
__ addiu(t5, t5, kParameterMapHeaderSize);
|
|
|
|
__ bind(¶m_map_size);
|
|
|
|
|
|
|
|
// 2. Backing store.
|
|
|
|
__ sll(t6, a2, 1);
|
|
|
|
__ Addu(t5, t5, Operand(t6));
|
|
|
|
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
|
|
|
|
|
|
|
|
// 3. Arguments object.
|
|
|
|
__ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
|
|
|
|
|
|
|
|
// Do the allocation of all three objects in one go.
|
|
|
|
__ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
|
|
|
|
|
|
|
|
// v0 = address of new object(s) (tagged)
|
|
|
|
// a2 = argument count (smi-tagged)
|
|
|
|
// Get the arguments boilerplate from the current native context into t0.
|
|
|
|
const int kNormalOffset =
|
|
|
|
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
|
|
|
|
const int kAliasedOffset =
|
|
|
|
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
|
|
|
|
|
|
|
|
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
|
|
|
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
|
|
|
|
Label skip2_ne, skip2_eq;
|
|
|
|
__ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
|
|
|
|
__ lw(t0, MemOperand(t0, kNormalOffset));
|
|
|
|
__ bind(&skip2_ne);
|
|
|
|
|
|
|
|
__ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
|
|
|
|
__ lw(t0, MemOperand(t0, kAliasedOffset));
|
|
|
|
__ bind(&skip2_eq);
|
|
|
|
|
|
|
|
// v0 = address of new object (tagged)
|
|
|
|
// a1 = mapped parameter count (tagged)
|
|
|
|
// a2 = argument count (smi-tagged)
|
|
|
|
// t0 = address of arguments map (tagged)
|
|
|
|
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
|
|
|
|
__ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
|
|
|
|
__ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
|
|
|
|
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
|
|
|
|
|
|
|
|
// Set up the callee in-object property.
|
|
|
|
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
|
|
|
|
__ lw(a3, MemOperand(sp, 2 * kPointerSize));
|
|
|
|
__ AssertNotSmi(a3);
|
|
|
|
const int kCalleeOffset = JSObject::kHeaderSize +
|
|
|
|
Heap::kArgumentsCalleeIndex * kPointerSize;
|
|
|
|
__ sw(a3, FieldMemOperand(v0, kCalleeOffset));
|
|
|
|
|
|
|
|
// Use the length (smi tagged) and set that as an in-object property too.
|
|
|
|
__ AssertSmi(a2);
|
|
|
|
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
|
|
|
|
const int kLengthOffset = JSObject::kHeaderSize +
|
|
|
|
Heap::kArgumentsLengthIndex * kPointerSize;
|
|
|
|
__ sw(a2, FieldMemOperand(v0, kLengthOffset));
|
|
|
|
|
|
|
|
// Set up the elements pointer in the allocated arguments object.
|
|
|
|
// If we allocated a parameter map, t0 will point there, otherwise
|
|
|
|
// it will point to the backing store.
|
|
|
|
__ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
|
|
|
|
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
|
|
|
|
|
|
|
|
// v0 = address of new object (tagged)
|
|
|
|
// a1 = mapped parameter count (tagged)
|
|
|
|
// a2 = argument count (tagged)
|
|
|
|
// t0 = address of parameter map or backing store (tagged)
|
|
|
|
// Initialize parameter map. If there are no mapped arguments, we're done.
|
|
|
|
Label skip_parameter_map;
|
|
|
|
Label skip3;
|
|
|
|
__ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
|
|
|
|
// Move backing store address to a3, because it is
|
|
|
|
// expected there when filling in the unmapped arguments.
|
|
|
|
__ mov(a3, t0);
|
|
|
|
__ bind(&skip3);
|
|
|
|
|
|
|
|
__ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
|
|
|
|
|
|
|
|
__ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
|
|
|
|
__ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
|
|
|
|
__ Addu(t2, a1, Operand(Smi::FromInt(2)));
|
|
|
|
__ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
|
|
|
|
__ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
|
|
|
|
__ sll(t6, a1, 1);
|
|
|
|
__ Addu(t2, t0, Operand(t6));
|
|
|
|
__ Addu(t2, t2, Operand(kParameterMapHeaderSize));
|
|
|
|
__ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
|
|
|
|
|
|
|
|
// Copy the parameter slots and the holes in the arguments.
|
|
|
|
// We need to fill in mapped_parameter_count slots. They index the context,
|
|
|
|
// where parameters are stored in reverse order, at
|
|
|
|
// MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
|
|
|
|
  // The mapped parameters thus need to get indices
|
|
|
|
// MIN_CONTEXT_SLOTS+parameter_count-1 ..
|
|
|
|
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
|
|
|
|
// We loop from right to left.
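  // Worked example: with parameter_count = 3 and mapped_parameter_count = 2,
  // map slot 0 receives context index MIN_CONTEXT_SLOTS + 2 and map slot 1
  // receives MIN_CONTEXT_SLOTS + 1, while the matching backing-store slots
  // are filled with the hole.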
|
|
|
|
Label parameters_loop, parameters_test;
|
|
|
|
__ mov(t2, a1);
|
|
|
|
__ lw(t5, MemOperand(sp, 0 * kPointerSize));
|
|
|
|
__ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
|
|
|
|
__ Subu(t5, t5, Operand(a1));
|
|
|
|
__ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
|
|
|
|
__ sll(t6, t2, 1);
|
|
|
|
__ Addu(a3, t0, Operand(t6));
|
|
|
|
__ Addu(a3, a3, Operand(kParameterMapHeaderSize));
|
|
|
|
|
|
|
|
// t2 = loop variable (tagged)
|
|
|
|
// a1 = mapping index (tagged)
|
|
|
|
// a3 = address of backing store (tagged)
|
|
|
|
// t0 = address of parameter map (tagged)
|
|
|
|
  // t1 = temporary scratch (e.g. for address calculation)
|
|
|
|
// t3 = the hole value
|
|
|
|
__ jmp(¶meters_test);
|
|
|
|
|
|
|
|
__ bind(¶meters_loop);
|
|
|
|
__ Subu(t2, t2, Operand(Smi::FromInt(1)));
|
|
|
|
__ sll(t1, t2, 1);
|
|
|
|
__ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
|
|
|
|
__ Addu(t6, t0, t1);
|
|
|
|
__ sw(t5, MemOperand(t6));
|
|
|
|
__ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
|
|
|
|
__ Addu(t6, a3, t1);
|
|
|
|
__ sw(t3, MemOperand(t6));
|
|
|
|
__ Addu(t5, t5, Operand(Smi::FromInt(1)));
|
|
|
|
__ bind(¶meters_test);
|
|
|
|
__ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
|
|
|
|
|
|
|
|
__ bind(&skip_parameter_map);
|
|
|
|
// a2 = argument count (tagged)
|
|
|
|
// a3 = address of backing store (tagged)
|
|
|
|
// t1 = scratch
|
|
|
|
// Copy arguments header and remaining slots (if there are any).
|
|
|
|
__ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
|
|
|
|
__ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
|
|
|
|
__ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
|
|
|
|
|
|
|
|
Label arguments_loop, arguments_test;
|
|
|
|
__ mov(t5, a1);
|
|
|
|
__ lw(t0, MemOperand(sp, 1 * kPointerSize));
|
|
|
|
__ sll(t6, t5, 1);
|
|
|
|
__ Subu(t0, t0, Operand(t6));
|
|
|
|
__ jmp(&arguments_test);
|
|
|
|
|
|
|
|
__ bind(&arguments_loop);
|
|
|
|
__ Subu(t0, t0, Operand(kPointerSize));
|
|
|
|
__ lw(t2, MemOperand(t0, 0));
|
|
|
|
__ sll(t6, t5, 1);
|
|
|
|
__ Addu(t1, a3, Operand(t6));
|
|
|
|
__ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
|
|
|
|
__ Addu(t5, t5, Operand(Smi::FromInt(1)));
|
|
|
|
|
|
|
|
__ bind(&arguments_test);
|
|
|
|
__ Branch(&arguments_loop, lt, t5, Operand(a2));
|
|
|
|
|
|
|
|
// Return and remove the on-stack parameters.
|
|
|
|
__ DropAndRet(3);
|
|
|
|
|
|
|
|
// Do the runtime call to allocate the arguments object.
|
|
|
|
// a2 = argument count (tagged)
|
|
|
|
__ bind(&runtime);
|
|
|
|
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
|
|
|
|
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
|
|
|
|
// Return address is in ra.
|
|
|
|
Label slow;
|
|
|
|
|
|
|
|
Register receiver = LoadDescriptor::ReceiverRegister();
|
|
|
|
Register key = LoadDescriptor::NameRegister();
|
|
|
|
|
|
|
|
  // Check that the key is an array index, that is, a Uint32.
|
|
|
|
__ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
|
|
|
|
__ Branch(&slow, ne, t0, Operand(zero_reg));
|
|
|
|
|
|
|
|
// Everything is fine, call runtime.
|
|
|
|
__ Push(receiver, key); // Receiver, key.
|
|
|
|
|
|
|
|
// Perform tail call to the entry.
|
|
|
|
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
|
|
|
|
|
|
|
|
__ bind(&slow);
|
|
|
|
PropertyAccessCompiler::TailCallBuiltin(
|
|
|
|
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
|
|
|
// sp[0] : number of parameters
|
|
|
|
// sp[4] : receiver displacement
|
|
|
|
// sp[8] : function
|
|
|
|
// Check if the calling frame is an arguments adaptor frame.
|
|
|
|
Label adaptor_frame, try_allocate, runtime;
|
|
|
|
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
|
|
|
__ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
|
|
|
__ Branch(&adaptor_frame,
|
|
|
|
eq,
|
|
|
|
a3,
|
|
|
|
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
|
|
|
|
|
// Get the length from the frame.
|
|
|
|
__ lw(a1, MemOperand(sp, 0));
|
|
|
|
__ Branch(&try_allocate);
|
|
|
|
|
|
|
|
// Patch the arguments.length and the parameters pointer.
|
|
|
|
__ bind(&adaptor_frame);
|
|
|
|
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
|
|
|
__ sw(a1, MemOperand(sp, 0));
|
|
|
|
__ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
|
|
|
|
__ Addu(a3, a2, Operand(at));
|
|
|
|
|
|
|
|
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
|
|
|
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
|
|
|
|
|
|
|
// Try the new space allocation. Start out with computing the size
|
|
|
|
// of the arguments object and the elements array in words.
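  // Note: unlike the sloppy-arguments path above, this size is accumulated
  // in words, not bytes; hence the SIZE_IN_WORDS flag on the Allocate call
  // below.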
|
|
|
|
Label add_arguments_object;
|
|
|
|
__ bind(&try_allocate);
|
|
|
|
__ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
|
|
|
|
__ srl(a1, a1, kSmiTagSize);
|
|
|
|
|
|
|
|
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
|
|
|
|
__ bind(&add_arguments_object);
|
|
|
|
__ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
|
|
|
|
|
|
|
|
// Do the allocation of both objects in one go.
|
|
|
|
__ Allocate(a1, v0, a2, a3, &runtime,
|
|
|
|
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
|
|
|
|
|
|
|
|
// Get the arguments boilerplate from the current native context.
|
|
|
|
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
|
|
|
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
|
|
|
|
__ lw(t0, MemOperand(
|
|
|
|
t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
|
|
|
|
|
|
|
|
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
|
|
|
|
__ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
|
|
|
|
__ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
|
|
|
|
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
|
|
|
|
|
|
|
|
// Get the length (smi tagged) and set that as an in-object property too.
|
|
|
|
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
|
|
|
|
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
|
|
|
|
__ AssertSmi(a1);
|
|
|
|
__ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
|
|
|
|
Heap::kArgumentsLengthIndex * kPointerSize));
|
|
|
|
|
|
|
|
Label done;
|
|
|
|
__ Branch(&done, eq, a1, Operand(zero_reg));
|
|
|
|
|
|
|
|
// Get the parameters pointer from the stack.
|
|
|
|
__ lw(a2, MemOperand(sp, 1 * kPointerSize));
|
|
|
|
|
|
|
|
// Set up the elements pointer in the allocated arguments object and
|
|
|
|
// initialize the header in the elements fixed array.
|
|
|
|
__ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
|
|
|
|
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
|
|
|
|
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
|
|
|
|
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
|
|
|
|
__ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
|
|
|
|
// Untag the length for the loop.
|
|
|
|
__ srl(a1, a1, kSmiTagSize);
|
|
|
|
|
|
|
|
// Copy the fixed array slots.
|
|
|
|
Label loop;
|
|
|
|
// Set up t0 to point to the first array slot.
|
|
|
|
__ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
|
|
|
__ bind(&loop);
|
|
|
|
// Pre-decrement a2 with kPointerSize on each iteration.
|
|
|
|
// Pre-decrement in order to skip receiver.
|
|
|
|
__ Addu(a2, a2, Operand(-kPointerSize));
|
|
|
|
__ lw(a3, MemOperand(a2));
|
|
|
|
// Post-increment t0 with kPointerSize on each iteration.
|
|
|
|
__ sw(a3, MemOperand(t0));
|
|
|
|
__ Addu(t0, t0, Operand(kPointerSize));
|
|
|
|
__ Subu(a1, a1, Operand(1));
|
|
|
|
__ Branch(&loop, ne, a1, Operand(zero_reg));
|
|
|
|
|
|
|
|
// Return and remove the on-stack parameters.
|
|
|
|
__ bind(&done);
|
|
|
|
__ DropAndRet(3);
|
|
|
|
|
|
|
|
// Do the runtime call to allocate the arguments object.
|
|
|
|
__ bind(&runtime);
|
|
|
|
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
|
|
|
|
// sp[0] : language mode
|
|
|
|
// sp[4] : index of rest parameter
|
|
|
|
// sp[8] : number of parameters
|
|
|
|
// sp[12] : receiver displacement
|
|
|
|
// Check if the calling frame is an arguments adaptor frame.
|
|
|
|
|
|
|
|
Label runtime;
|
|
|
|
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
|
|
|
__ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
|
|
|
__ Branch(&runtime, ne, a3,
|
|
|
|
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
|
|
|
|
|
// Patch the arguments.length and the parameters pointer.
|
|
|
|
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
|
|
|
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
|
|
|
|
__ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
|
|
|
|
__ Addu(a3, a2, Operand(at));
|
|
|
|
|
|
|
|
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
|
|
|
__ sw(a3, MemOperand(sp, 3 * kPointerSize));
|
|
|
|
|
|
|
|
// Do the runtime call to allocate the arguments object.
|
|
|
|
__ bind(&runtime);
|
|
|
|
__ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
|
// Just jump directly to runtime if native RegExp is not selected at compile
|
|
|
|
  // time, or if the regexp entry in generated code is turned off by a runtime
|
|
|
|
  // flag.
|
|
|
|
#ifdef V8_INTERPRETED_REGEXP
|
|
|
|
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
|
|
|
|
#else // V8_INTERPRETED_REGEXP
|
|
|
|
|
|
|
|
// Stack frame on entry.
|
|
|
|
// sp[0]: last_match_info (expected JSArray)
|
|
|
|
// sp[4]: previous index
|
|
|
|
// sp[8]: subject string
|
|
|
|
// sp[12]: JSRegExp object
|
|
|
|
|
|
|
|
const int kLastMatchInfoOffset = 0 * kPointerSize;
|
|
|
|
const int kPreviousIndexOffset = 1 * kPointerSize;
|
|
|
|
const int kSubjectOffset = 2 * kPointerSize;
|
|
|
|
const int kJSRegExpOffset = 3 * kPointerSize;
|
|
|
|
|
|
|
|
Label runtime;
|
|
|
|
// Allocation of registers for this function. These are in callee save
|
|
|
|
// registers and will be preserved by the call to the native RegExp code, as
|
|
|
|
// this code is called using the normal C calling convention. When calling
|
|
|
|
// directly from generated code the native RegExp code will not do a GC and
|
|
|
|
  // therefore the contents of these registers are safe to use after the call.
|
|
|
|
// MIPS - using s0..s2, since we are not using CEntry Stub.
|
|
|
|
Register subject = s0;
|
|
|
|
Register regexp_data = s1;
|
|
|
|
Register last_match_info_elements = s2;
|
|
|
|
|
|
|
|
// Ensure that a RegExp stack is allocated.
|
|
|
|
ExternalReference address_of_regexp_stack_memory_address =
|
|
|
|
ExternalReference::address_of_regexp_stack_memory_address(
|
|
|
|
isolate());
|
|
|
|
ExternalReference address_of_regexp_stack_memory_size =
|
|
|
|
ExternalReference::address_of_regexp_stack_memory_size(isolate());
|
|
|
|
__ li(a0, Operand(address_of_regexp_stack_memory_size));
|
|
|
|
__ lw(a0, MemOperand(a0, 0));
|
|
|
|
__ Branch(&runtime, eq, a0, Operand(zero_reg));
|
|
|
|
|
|
|
|
// Check that the first argument is a JSRegExp object.
|
|
|
|
__ lw(a0, MemOperand(sp, kJSRegExpOffset));
|
|
|
|
STATIC_ASSERT(kSmiTag == 0);
|
|
|
|
__ JumpIfSmi(a0, &runtime);
|
|
|
|
__ GetObjectType(a0, a1, a1);
|
|
|
|
__ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
|
|
|
|
|
|
|
|
// Check that the RegExp has been compiled (data contains a fixed array).
|
|
|
|
__ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
|
|
|
|
if (FLAG_debug_code) {
|
|
|
|
__ SmiTst(regexp_data, t0);
|
|
|
|
__ Check(nz,
|
|
|
|
kUnexpectedTypeForRegExpDataFixedArrayExpected,
|
|
|
|
t0,
|
|
|
|
Operand(zero_reg));
|
|
|
|
__ GetObjectType(regexp_data, a0, a0);
|
|
|
|
__ Check(eq,
|
|
|
|
kUnexpectedTypeForRegExpDataFixedArrayExpected,
|
|
|
|
a0,
|
|
|
|
Operand(FIXED_ARRAY_TYPE));
|
|
|
|
}
|
|
|
|
|
|
|
|
// regexp_data: RegExp data (FixedArray)
|
|
|
|
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
|
|
|
|
__ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
|
|
|
|
__ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
|
|
|
|
|
|
|
|
// regexp_data: RegExp data (FixedArray)
|
|
|
|
  // Check that the number of captures fits in the static offsets vector buffer.
|
|
|
|
__ lw(a2,
|
|
|
|
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
|
|
|
|
// Check (number_of_captures + 1) * 2 <= offsets vector size
|
|
|
|
// Or number_of_captures * 2 <= offsets vector size - 2
|
|
|
|
// Multiplying by 2 comes for free since a2 is smi-tagged.
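  // That is, a2 holds the capture count c as the machine value c * 2, so an
  // unsigned compare against kJSRegexpStaticOffsetsVectorSize - 2 checks
  // (c + 1) * 2 <= vector size with no untagging needed.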
|
|
|
|
STATIC_ASSERT(kSmiTag == 0);
|
|
|
|
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
|
|
|
STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
|
|
|
|
__ Branch(
|
|
|
|
&runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
|
|
|
|
|
|
|
|
// Reset offset for possibly sliced string.
|
|
|
|
__ mov(t0, zero_reg);
|
|
|
|
__ lw(subject, MemOperand(sp, kSubjectOffset));
|
|
|
|
__ JumpIfSmi(subject, &runtime);
|
|
|
|
__ mov(a3, subject); // Make a copy of the original subject string.
|
|
|
|
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
|
|
|
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
|
|
|
|
// subject: subject string
|
|
|
|
// a3: subject string
|
|
|
|
// a0: subject string instance type
|
|
|
|
// regexp_data: RegExp data (FixedArray)
|
|
|
|
// Handle subject string according to its encoding and representation:
|
|
|
|
// (1) Sequential string? If yes, go to (5).
|
|
|
|
// (2) Anything but sequential or cons? If yes, go to (6).
|
|
|
|
// (3) Cons string. If the string is flat, replace subject with first string.
|
|
|
|
  //     Otherwise bail out.
|
|
|
|
// (4) Is subject external? If yes, go to (7).
|
|
|
|
// (5) Sequential string. Load regexp code according to encoding.
|
|
|
|
// (E) Carry on.
|
|
|
|
/// [...]
|
|
|
|
|
|
|
|
// Deferred code at the end of the stub:
|
|
|
|
// (6) Not a long external string? If yes, go to (8).
|
|
|
|
// (7) External string. Make it, offset-wise, look like a sequential string.
|
|
|
|
// Go to (5).
|
|
|
|
// (8) Short external string or not a string? If yes, bail out to runtime.
|
|
|
|
// (9) Sliced string. Replace subject with parent. Go to (4).
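  //
  // Keeping (6) through (9) out of line keeps the common case, a flat
  // sequential string, on the straight path: it passes check (1) and falls
  // directly into (5) without taking any of the deferred branches.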
|
|
|
|
|
|
|
|
Label seq_string /* 5 */, external_string /* 7 */,
|
|
|
|
check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
|
|
|
|
not_long_external /* 8 */;
|
|
|
|
|
|
|
|
// (1) Sequential string? If yes, go to (5).
|
|
|
|
__ And(a1,
|
|
|
|
a0,
|
|
|
|
Operand(kIsNotStringMask |
|
|
|
|
kStringRepresentationMask |
|
|
|
|
kShortExternalStringMask));
|
|
|
|
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
|
|
|
|
__ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
|
|
|
|
|
|
|
|
// (2) Anything but sequential or cons? If yes, go to (6).
|
|
|
|
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
|
|
|
|
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
|
|
|
|
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
|
|
|
|
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
|
|
|
|
// Go to (6).
|
|
|
|
__ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
|
|
|
|
|
|
|
|
// (3) Cons string. Check that it's flat.
|
|
|
|
// Replace subject with first string and reload instance type.
|
|
|
|
__ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
|
|
|
|
__ LoadRoot(a1, Heap::kempty_stringRootIndex);
|
|
|
|
__ Branch(&runtime, ne, a0, Operand(a1));
|
|
|
|
__ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
|
|
|
|
|
|
|
|
// (4) Is subject external? If yes, go to (7).
|
|
|
|
__ bind(&check_underlying);
|
|
|
|
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
|
|
|
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
|
|
|
|
STATIC_ASSERT(kSeqStringTag == 0);
|
|
|
|
__ And(at, a0, Operand(kStringRepresentationMask));
|
|
|
|
// The underlying external string is never a short external string.
|
|
|
|
STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
|
|
|
|
STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
|
|
|
|
__ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
|
|
|
|
|
|
|
|
// (5) Sequential string. Load regexp code according to encoding.
|
|
|
|
__ bind(&seq_string);
|
|
|
|
// subject: sequential subject string (or look-alike, external string)
|
|
|
|
// a3: original subject string
|
|
|
|
// Load previous index and check range before a3 is overwritten. We have to
|
|
|
|
// use a3 instead of subject here because subject might have been only made
|
|
|
|
// to look like a sequential string when it actually is an external string.
|
|
|
|
__ lw(a1, MemOperand(sp, kPreviousIndexOffset));
|
|
|
|
__ JumpIfNotSmi(a1, &runtime);
|
|
|
|
__ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
|
|
|
|
__ Branch(&runtime, ls, a3, Operand(a1));
|
|
|
|
__ sra(a1, a1, kSmiTagSize); // Untag the Smi.
|
|
|
|
|
|
|
|
STATIC_ASSERT(kStringEncodingMask == 4);
|
|
|
|
STATIC_ASSERT(kOneByteStringTag == 4);
|
|
|
|
STATIC_ASSERT(kTwoByteStringTag == 0);
|
|
|
|
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
|
|
|
|
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
|
|
|
|
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
|
|
|
|
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
|
|
|
|
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
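  // Movz(rd, rs, rt) is MIPS's conditional move: rd = rs iff rt == 0. Since
  // a0 is zero exactly for two-byte subjects, t9 ends up holding the code
  // object matching the encoding, with no branch taken.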
|
|
|
|
|
|
|
|
// (E) Carry on. String handling is done.
|
|
|
|
// t9: irregexp code
|
|
|
|
// Check that the irregexp code has been generated for the actual string
|
|
|
|
  // encoding. If it has, the field contains a code object; otherwise it contains
|
|
|
|
// a smi (code flushing support).
|
|
|
|
__ JumpIfSmi(t9, &runtime);
|
|
|
|
|
|
|
|
// a1: previous index
|
|
|
|
// a3: encoding of subject string (1 if one_byte, 0 if two_byte);
|
|
|
|
// t9: code
|
|
|
|
// subject: Subject string
|
|
|
|
// regexp_data: RegExp data (FixedArray)
|
|
|
|
// All checks done. Now push arguments for native regexp code.
|
|
|
|
__ IncrementCounter(isolate()->counters()->regexp_entry_native(),
|
|
|
|
1, a0, a2);
|
|
|
|
|
|
|
|
// Isolates: note we add an additional parameter here (isolate pointer).
|
|
|
|
const int kRegExpExecuteArguments = 9;
|
|
|
|
const int kParameterRegisters = 4;
|
|
|
|
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
|
|
|
|
|
|
|
|
// Stack pointer now points to cell where return address is to be written.
|
|
|
|
// Arguments are before that on the stack or in registers, meaning we
|
|
|
|
// treat the return address as argument 5. Thus every argument after that
|
|
|
|
// needs to be shifted back by 1. Since DirectCEntryStub will handle
|
|
|
|
  // allocating space for the C argument slots, we don't need to calculate
|
|
|
|
// that into the argument positions on the stack. This is how the stack will
|
|
|
|
// look (sp meaning the value of sp at this moment):
|
|
|
|
// [sp + 5] - Argument 9
|
|
|
|
// [sp + 4] - Argument 8
|
|
|
|
// [sp + 3] - Argument 7
|
|
|
|
// [sp + 2] - Argument 6
|
|
|
|
// [sp + 1] - Argument 5
|
|
|
|
// [sp + 0] - saved ra
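  //
  // Under the MIPS o32 calling convention the first four arguments are
  // passed in a0..a3, which is why only arguments 5 through 9 get explicit
  // stack slots here; per the note above, DirectCEntryStub reserves the home
  // slots for the register arguments.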
|
|
|
|
|
|
|
|
// Argument 9: Pass current isolate address.
|
|
|
|
// CFunctionArgumentOperand handles MIPS stack argument slots.
|
|
|
|
__ li(a0, Operand(ExternalReference::isolate_address(isolate())));
|
|
|
|
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
|
|
|
|
|
|
|
|
// Argument 8: Indicate that this is a direct call from JavaScript.
|
|
|
|
__ li(a0, Operand(1));
|
|
|
|
__ sw(a0, MemOperand(sp, 4 * kPointerSize));
|
|
|
|
|
|
|
|
// Argument 7: Start (high end) of backtracking stack memory area.
|
|
|
|
__ li(a0, Operand(address_of_regexp_stack_memory_address));
|
|
|
|
__ lw(a0, MemOperand(a0, 0));
|
|
|
|
__ li(a2, Operand(address_of_regexp_stack_memory_size));
|
|
|
|
__ lw(a2, MemOperand(a2, 0));
|
|
|
|
__ addu(a0, a0, a2);
|
|
|
|
__ sw(a0, MemOperand(sp, 3 * kPointerSize));
|
|
|
|
|
|
|
|
// Argument 6: Set the number of capture registers to zero to force global
|
|
|
|
// regexps to behave as non-global. This does not affect non-global regexps.
|
|
|
|
__ mov(a0, zero_reg);
|
|
|
|
__ sw(a0, MemOperand(sp, 2 * kPointerSize));
|
|
|
|
|
|
|
|
// Argument 5: static offsets vector buffer.
|
|
|
|
__ li(a0, Operand(
|
|
|
|
ExternalReference::address_of_static_offsets_vector(isolate())));
|
|
|
|
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
|
|
|
|
|
|
|
|
// For arguments 4 and 3 get string length, calculate start of string data
|
|
|
|
  // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
|
|
|
|
__ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
|
|
|
|
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
|
|
|
|
// Load the length from the original subject string from the previous stack
|
|
|
|
// frame. Therefore we have to use fp, which points exactly to two pointer
|
|
|
|
// sizes below the previous sp. (Because creating a new stack frame pushes
|
|
|
|
// the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
|
|
|
|
__ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
|
|
|
|
// If slice offset is not 0, load the length from the original sliced string.
|
|
|
|
// Argument 4, a3: End of string data
|
|
|
|
// Argument 3, a2: Start of string data
|
|
|
|
// Prepare start and end index of the input.
|
|
|
|
__ sllv(t1, t0, a3);
|
|
|
|
__ addu(t0, t2, t1);
|
|
|
|
__ sllv(t1, a1, a3);
|
|
|
|
__ addu(a2, t0, t1);
|
|
|
|
|
|
|
|
__ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
|
|
|
|
__ sra(t2, t2, kSmiTagSize);
|
|
|
|
__ sllv(t1, t2, a3);
|
|
|
|
__ addu(a3, t0, t1);
|
|
|
|
// Argument 2 (a1): Previous index.
|
|
|
|
// Already there
|
|
|
|
|
|
|
|
// Argument 1 (a0): Subject string.
|
|
|
|
__ mov(a0, subject);
|
|
|
|
|
|
|
|
// Locate the code entry and call it.
|
|
|
|
__ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
|
|
|
|
DirectCEntryStub stub(isolate());
|
|
|
|
stub.GenerateCall(masm, t9);
|
|
|
|
|
|
|
|
__ LeaveExitFrame(false, no_reg, true);
|
|
|
|
|
|
|
|
// v0: result
|
|
|
|
// subject: subject string (callee saved)
|
|
|
|
// regexp_data: RegExp data (callee saved)
|
|
|
|
// last_match_info_elements: Last match info elements (callee saved)
|
|
|
|
// Check the result.
|
|
|
|
Label success;
|
|
|
|
__ Branch(&success, eq, v0, Operand(1));
|
|
|
|
// We expect exactly one result since we force the called regexp to behave
|
|
|
|
// as non-global.
|
|
|
|
Label failure;
|
|
|
|
__ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
|
|
|
|
  // If not an exception, it can only be retry. Handle that in the runtime system.
|
|
|
|
__ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
|
|
|
|
  // The result must now be an exception. If there is no pending exception, a
|
|
|
|
  // stack overflow (on the backtrack stack) was detected in RegExp code, but
|
|
|
|
  // the exception has not been created yet. Handle that in the runtime system.
|
|
|
|
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
|
|
|
|
__ li(a1, Operand(isolate()->factory()->the_hole_value()));
|
|
|
|
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
|
|
|
isolate())));
|
|
|
|
__ lw(v0, MemOperand(a2, 0));
|
|
|
|
__ Branch(&runtime, eq, v0, Operand(a1));
|
|
|
|
|
|
|
|
// For exception, throw the exception again.
|
|
|
|
__ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
|
|
|
|
|
|
|
|
__ bind(&failure);
|
|
|
|
  // For failure, return null.
|
|
|
|
__ li(v0, Operand(isolate()->factory()->null_value()));
|
|
|
|
__ DropAndRet(4);
|
|
|
|
|
|
|
|
// Process the result from the native regexp code.
|
|
|
|
__ bind(&success);
|
|
|
|
__ lw(a1,
|
|
|
|
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
|
|
|
|
// Calculate number of capture registers (number_of_captures + 1) * 2.
|
|
|
|
  // Multiplying by 2 comes for free since a1 is smi-tagged.
|
|
|
|
STATIC_ASSERT(kSmiTag == 0);
|
|
|
|
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
|
|
|
__ Addu(a1, a1, Operand(2)); // a1 was a smi.
|
|
|
|
|
|
|
|
__ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
|
|
|
|
__ JumpIfSmi(a0, &runtime);
|
|
|
|
__ GetObjectType(a0, a2, a2);
|
|
|
|
__ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
|
|
|
|
// Check that the JSArray is in fast case.
|
|
|
|
__ lw(last_match_info_elements,
|
|
|
|
FieldMemOperand(a0, JSArray::kElementsOffset));
|
|
|
|
__ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
|
|
|
|
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
|
|
|
|
__ Branch(&runtime, ne, a0, Operand(at));
|
|
|
|
// Check that the last match info has space for the capture registers and the
|
|
|
|
// additional information.
|
|
|
|
__ lw(a0,
|
|
|
|
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
|
|
|
|
__ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
|
|
|
|
__ sra(at, a0, kSmiTagSize);
|
|
|
|
__ Branch(&runtime, gt, a2, Operand(at));
|
|
|
|
|
|
|
|
// a1: number of capture registers
|
|
|
|
// subject: subject string
|
|
|
|
// Store the capture count.
|
|
|
|
__ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
|
|
|
|
__ sw(a2, FieldMemOperand(last_match_info_elements,
|
|
|
|
RegExpImpl::kLastCaptureCountOffset));
|
|
|
|
// Store last subject and last input.
|
|
|
|
__ sw(subject,
|
|
|
|
FieldMemOperand(last_match_info_elements,
|
|
|
|
RegExpImpl::kLastSubjectOffset));
|
|
|
|
__ mov(a2, subject);
|
|
|
|
__ RecordWriteField(last_match_info_elements,
|
|
|
|
RegExpImpl::kLastSubjectOffset,
|
|
|
|
subject,
|
|
|
|
t3,
|
|
|
|
kRAHasNotBeenSaved,
|
|
|
|
kDontSaveFPRegs);
|
|
|
|
__ mov(subject, a2);
|
|
|
|
__ sw(subject,
|
|
|
|
FieldMemOperand(last_match_info_elements,
|
|
|
|
RegExpImpl::kLastInputOffset));
|
|
|
|
__ RecordWriteField(last_match_info_elements,
|
|
|
|
RegExpImpl::kLastInputOffset,
|
|
|
|
subject,
|
|
|
|
t3,
|
|
|
|
kRAHasNotBeenSaved,
|
|
|
|
kDontSaveFPRegs);
|
|
|
|
|
|
|
|
// Get the static offsets vector filled by the native regexp code.
|
|
|
|
ExternalReference address_of_static_offsets_vector =
|
|
|
|
ExternalReference::address_of_static_offsets_vector(isolate());
|
|
|
|
__ li(a2, Operand(address_of_static_offsets_vector));
|
|
|
|
|
|
|
|
// a1: number of capture registers
|
|
|
|
// a2: offsets vector
|
|
|
|
Label next_capture, done;
|
|
|
|
// Capture register counter starts from number of capture registers and
|
|
|
|
// counts down until wrapping after zero.
|
|
|
|
__ Addu(a0,
|
|
|
|
last_match_info_elements,
|
|
|
|
Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
|
|
|
|
__ bind(&next_capture);
|
|
|
|
__ Subu(a1, a1, Operand(1));
|
|
|
|
__ Branch(&done, lt, a1, Operand(zero_reg));
|
|
|
|
// Read the value from the static offsets vector buffer.
|
|
|
|
__ lw(a3, MemOperand(a2, 0));
|
|
|
|
__ addiu(a2, a2, kPointerSize);
|
|
|
|
// Store the smi value in the last match info.
|
|
|
|
__ sll(a3, a3, kSmiTagSize); // Convert to Smi.
|
|
|
|
__ sw(a3, MemOperand(a0, 0));
|
|
|
|
__ Branch(&next_capture, USE_DELAY_SLOT);
|
|
|
|
__ addiu(a0, a0, kPointerSize); // In branch delay slot.
|
|
|
|
|
|
|
|
__ bind(&done);
|
|
|
|
|
|
|
|
// Return last match info.
|
|
|
|
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
|
|
|
|
__ DropAndRet(4);
|
|
|
|
|
|
|
|
// Do the runtime call to execute the regexp.
|
|
|
|
__ bind(&runtime);
|
|
|
|
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
|
|
|
|
|
|
|
|
// Deferred code for string handling.
|
|
|
|
// (6) Not a long external string? If yes, go to (8).
|
|
|
|
__ bind(¬_seq_nor_cons);
|
|
|
|
// Go to (8).
|
|
|
|
__ Branch(¬_long_external, gt, a1, Operand(kExternalStringTag));
|
|
|
|
|
|
|
|
// (7) External string. Make it, offset-wise, look like a sequential string.
|
|
|
|
__ bind(&external_string);
|
|
|
|
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
|
|
|
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
|
|
|
|
if (FLAG_debug_code) {
|
|
|
|
// Assert that we do not have a cons or slice (indirect strings) here.
|
|
|
|
// Sequential strings have already been ruled out.
|
|
|
|
__ And(at, a0, Operand(kIsIndirectStringMask));
|
|
|
|
__ Assert(eq,
|
|
|
|
kExternalStringExpectedButNotFound,
|
|
|
|
at,
|
|
|
|
Operand(zero_reg));
|
|
|
|
}
|
|
|
|
__ lw(subject,
|
|
|
|
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
|
|
|
|
// Move the pointer so that offset-wise, it looks like a sequential string.
|
|
|
|
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
|
|
|
|
__ Subu(subject,
|
|
|
|
subject,
|
|
|
|
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
|
|
|
|
__ jmp(&seq_string); // Go to (5).
|
|
|
|
|
|
|
|
// (8) Short external string or not a string? If yes, bail out to runtime.
|
|
|
|
__ bind(¬_long_external);
|
|
|
|
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
|
|
|
|
__ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
|
|
|
|
__ Branch(&runtime, ne, at, Operand(zero_reg));
|
|
|
|
|
|
|
|
// (9) Sliced string. Replace subject with parent. Go to (4).
|
|
|
|
// Load offset into t0 and replace subject string with parent.
|
|
|
|
__ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
|
|
|
|
__ sra(t0, t0, kSmiTagSize);
|
|
|
|
__ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
|
|
|
|
__ jmp(&check_underlying); // Go to (4).
|
|
|
|
#endif // V8_INTERPRETED_REGEXP
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
|
|
|
|
bool is_super) {
|
|
|
|
// a0 : number of arguments to the construct function
|
|
|
|
// a2 : feedback vector
|
|
|
|
// a3 : slot in feedback vector (Smi)
|
|
|
|
// a1 : the function to call
|
|
|
|
// t0 : original constructor (for IsSuperConstructorCall)
|
|
|
|
FrameScope scope(masm, StackFrame::INTERNAL);
|
|
|
|
const RegList kSavedRegs = 1 << 4 | // a0
|
|
|
|
1 << 5 | // a1
|
|
|
|
1 << 6 | // a2
|
|
|
|
1 << 7 | // a3
|
|
|
|
BoolToInt(is_super) << 8; // t0
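  // On MIPS, a0..a3 are general-purpose registers $4..$7 and t0 is $8, so
  // each `1 << n` above selects register $n in the RegList bitmask; t0 is
  // saved only for super constructor calls.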
|
|
|
|
|
|
|
|
// Number-of-arguments register must be smi-tagged to call out.
|
|
|
|
__ SmiTag(a0);
|
|
|
|
__ MultiPush(kSavedRegs);
|
|
|
|
|
|
|
|
__ CallStub(stub);
|
|
|
|
|
|
|
|
__ MultiPop(kSavedRegs);
|
|
|
|
__ SmiUntag(a0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
|
|
|
|
// Cache the called function in a feedback vector slot. Cache states
|
|
|
|
// are uninitialized, monomorphic (indicated by a JSFunction), and
|
|
|
|
// megamorphic.
|
|
|
|
// a0 : number of arguments to the construct function
|
|
|
|
// a1 : the function to call
|
|
|
|
// a2 : feedback vector
|
|
|
|
// a3 : slot in feedback vector (Smi)
|
|
|
|
// t0 : original constructor (for IsSuperConstructorCall)
|
|
|
|
Label initialize, done, miss, megamorphic, not_array_function;
|
|
|
|
|
|
|
|
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
|
|
|
|
masm->isolate()->heap()->megamorphic_symbol());
|
|
|
|
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
|
|
|
|
masm->isolate()->heap()->uninitialized_symbol());
|
|
|
|
|
|
|
|
// Load the cache state into t2.
|
|
|
|
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
|
|
|
|
__ Addu(t2, a2, Operand(t2));
|
|
|
|
__ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
|
|
|
|
|
|
|
|
// A monomorphic cache hit or an already megamorphic state: invoke the
|
|
|
|
// function without changing the state.
|
|
|
|
// We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
|
|
|
|
// this position in a symbol (see static asserts in type-feedback-vector.h).
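  // Judging from the checks below, the slot holds one of: a WeakCell whose
  // value is the cached function (monomorphic), the megamorphic or
  // uninitialized sentinel symbol, or an AllocationSite when the Array
  // function has been cached.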
|
|
|
|
Label check_allocation_site;
|
|
|
|
Register feedback_map = t1;
|
|
|
|
Register weak_value = t4;
|
|
|
|
__ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
|
|
|
|
__ Branch(&done, eq, a1, Operand(weak_value));
|
|
|
|
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
|
|
|
|
__ Branch(&done, eq, t2, Operand(at));
|
|
|
|
__ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
|
|
|
|
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
|
|
|
|
__ Branch(FLAG_pretenuring_call_new ? &miss : &check_allocation_site, ne,
|
|
|
|
feedback_map, Operand(at));
|
|
|
|
|
|
|
|
// If the weak cell is cleared, we have a new chance to become monomorphic.
|
|
|
|
__ JumpIfSmi(weak_value, &initialize);
|
|
|
|
__ jmp(&megamorphic);
|
|
|
|
|
|
|
|
if (!FLAG_pretenuring_call_new) {
|
|
|
|
__ bind(&check_allocation_site);
|
|
|
|
// If we came here, we need to see if we are the array function.
|
|
|
|
    // If we didn't have a matching function, and we didn't find the megamorphic
|
|
|
|
// sentinel, then we have in the slot either some other function or an
|
|
|
|
// AllocationSite.
|
|
|
|
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
|
|
|
|
__ Branch(&miss, ne, feedback_map, Operand(at));
|
|
|
|
|
|
|
|
// Make sure the function is the Array() function
|
|
|
|
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
|
|
|
|
__ Branch(&megamorphic, ne, a1, Operand(t2));
|
|
|
|
__ jmp(&done);
|
|
|
|
}
|
|
|
|
|
|
|
|
__ bind(&miss);
|
|
|
|
|
|
|
|
  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
|
|
|
|
// megamorphic.
|
|
|
|
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
|
|
|
|
__ Branch(&initialize, eq, t2, Operand(at));
|
|
|
|
// MegamorphicSentinel is an immortal immovable object (undefined) so no
|
|
|
|
// write-barrier is needed.
|
|
|
|
__ bind(&megamorphic);
|
|
|
|
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
|
|
|
|
__ Addu(t2, a2, Operand(t2));
|
|
|
|
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
|
|
|
|
__ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
|
|
|
|
__ jmp(&done);
|
|
|
|
|
|
|
|
// An uninitialized cache is patched with the function.
|
|
|
|
__ bind(&initialize);
|
|
|
|
if (!FLAG_pretenuring_call_new) {
|
|
|
|
// Make sure the function is the Array() function.
|
|
|
|
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
|
|
|
|
__ Branch(¬_array_function, ne, a1, Operand(t2));
|
|
|
|
|
|
|
|
    // The target function is the Array constructor.
|
|
|
|
// Create an AllocationSite if we don't already have it, store it in the
|
|
|
|
// slot.
|
|
|
|
CreateAllocationSiteStub create_stub(masm->isolate());
|
|
|
|
CallStubInRecordCallTarget(masm, &create_stub, is_super);
|
|
|
|
__ Branch(&done);
|
|
|
|
|
|
|
|
__ bind(¬_array_function);
|
|
|
|
}
|
|
|
|
|
|
|
|
CreateWeakCellStub create_stub(masm->isolate());
|
|
|
|
CallStubInRecordCallTarget(masm, &create_stub, is_super);
|
|
|
|
__ bind(&done);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
|
|
|
|
__ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
|
|
|
__ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
|
|
|
|
|
|
|
|
// Do not transform the receiver for strict mode functions.
|
|
|
|
int32_t strict_mode_function_mask =
|
|
|
|
1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
|
|
|
|
  // Do not transform the receiver for native (compiler hints already in t0).
|
|
|
|
int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
|
|
|
|
__ And(at, t0, Operand(strict_mode_function_mask | native_mask));
|
|
|
|
__ Branch(cont, ne, at, Operand(zero_reg));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void EmitSlowCase(MacroAssembler* masm,
|
|
|
|
int argc,
|
|
|
|
Label* non_function) {
|
|
|
|
// Check for function proxy.
|
|
|
|
__ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
|
|
|
|
__ push(a1); // put proxy as additional argument
|
|
|
|
__ li(a0, Operand(argc + 1, RelocInfo::NONE32));
|
|
|
|
__ mov(a2, zero_reg);
|
|
|
|
__ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
|
|
|
|
{
|
|
|
|
Handle<Code> adaptor =
|
|
|
|
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
|
|
|
|
__ Jump(adaptor, RelocInfo::CODE_TARGET);
|
|
|
|
}
|
|
|
|
|
|
|
|
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
|
|
|
|
// of the original receiver from the call site).
|
|
|
|
__ bind(non_function);
|
|
|
|
__ sw(a1, MemOperand(sp, argc * kPointerSize));
|
|
|
|
__ li(a0, Operand(argc)); // Set up the number of arguments.
|
|
|
|
__ mov(a2, zero_reg);
|
|
|
|
__ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
|
|
|
|
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
|
|
|
RelocInfo::CODE_TARGET);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
|
|
|
|
// Wrap the receiver and patch it back onto the stack.
|
|
|
|
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
|
|
|
|
__ Push(a1);
|
|
|
|
__ mov(a0, a3);
|
|
|
|
ToObjectStub stub(masm->isolate());
|
|
|
|
__ CallStub(&stub);
|
|
|
|
__ pop(a1);
|
|
|
|
}
|
|
|
|
__ Branch(USE_DELAY_SLOT, cont);
|
|
|
|
__ sw(v0, MemOperand(sp, argc * kPointerSize));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void CallFunctionNoFeedback(MacroAssembler* masm,
|
|
|
|
int argc, bool needs_checks,
|
|
|
|
bool call_as_method) {
|
|
|
|
// a1 : the function to call
|
|
|
|
Label slow, non_function, wrap, cont;
|
|
|
|
|
|
|
|
if (needs_checks) {
|
|
|
|
// Check that the function is really a JavaScript function.
|
|
|
|
// a1: pushed function (to be verified)
|
|
|
|
__ JumpIfSmi(a1, &non_function);
|
|
|
|
|
|
|
|
    // Go to the slow case if we do not have a function.
|
|
|
|
__ GetObjectType(a1, t0, t0);
|
|
|
|
__ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fast-case: Invoke the function now.
|
|
|
|
// a1: pushed function
|
|
|
|
ParameterCount actual(argc);
|
|
|
|
|
|
|
|
if (call_as_method) {
|
|
|
|
if (needs_checks) {
|
|
|
|
EmitContinueIfStrictOrNative(masm, &cont);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the receiver in sloppy mode.
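  // A primitive receiver (a smi, or any type below FIRST_SPEC_OBJECT_TYPE)
  // must be boxed in sloppy mode: such receivers take the wrap path, where
  // EmitWrapCase invokes ToObjectStub and patches the boxed result back into
  // the receiver's stack slot.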
|
|
|
|
__ lw(a3, MemOperand(sp, argc * kPointerSize));
|
|
|
|
|
|
|
|
if (needs_checks) {
|
|
|
|
__ JumpIfSmi(a3, &wrap);
|
|
|
|
__ GetObjectType(a3, t0, t0);
|
|
|
|
__ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
|
|
|
} else {
|
|
|
|
__ jmp(&wrap);
|
|
|
|
}
|
|
|
|
|
|
|
|
__ bind(&cont);
|
|
|
|
}
|
|
|
|
|
|
|
|
__ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
|
|
|
|
|
|
|
|
if (needs_checks) {
|
|
|
|
// Slow-case: Non-function called.
|
|
|
|
__ bind(&slow);
|
|
|
|
EmitSlowCase(masm, argc, &non_function);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (call_as_method) {
|
|
|
|
__ bind(&wrap);
|
|
|
|
// Wrap the receiver and patch it back onto the stack.
|
|
|
|
EmitWrapCase(masm, argc, &cont);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void CallFunctionStub::Generate(MacroAssembler* masm) {
  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}


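// Handles 'new' calls: verifies the callee is a JSFunction, optionally
// records the call target in the feedback vector, and tail-calls the
// function-specific construct stub. Non-functions are dispatched through
// the CALL_*_AS_CONSTRUCTOR builtins.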
void CallConstructStub::Generate(MacroAssembler* masm) {
  // a0 : number of arguments
  // a1 : the function to call
  // a2 : feedback vector
  // a3 : slot in feedback vector (Smi, for RecordCallTarget)
  // t0 : original constructor (for IsSuperConstructorCall)
  Label slow, non_function_call;

  // Check that the function is not a smi.
  __ JumpIfSmi(a1, &non_function_call);
  // Check that the function is a JSFunction.
  __ GetObjectType(a1, t1, t1);
  __ Branch(&slow, ne, t1, Operand(JS_FUNCTION_TYPE));

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm, IsSuperConstructorCall());

    __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(t1, a2, at);
    if (FLAG_pretenuring_call_new) {
      // Put the AllocationSite from the feedback vector into a2.
      // By adding kPointerSize we encode that we know the AllocationSite
      // entry is at the feedback vector slot given by a3 + 1.
      __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
    } else {
      Label feedback_register_initialized;
      // Put the AllocationSite from the feedback vector into a2, or undefined.
      __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
      __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
      __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
      __ bind(&feedback_register_initialized);
    }

    __ AssertUndefinedOrAllocationSite(a2, t1);
  }

  // Pass function as original constructor.
  if (IsSuperConstructorCall()) {
    __ mov(a3, t0);
  } else {
    __ mov(a3, a1);
  }

  // Jump to the function-specific construct stub.
  Register jmp_reg = t0;
  __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ lw(jmp_reg, FieldMemOperand(jmp_reg,
                                 SharedFunctionInfo::kConstructStubOffset));
  __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // a0: number of arguments
  // a1: called object
  // t1: object type
  Label do_call;
  __ bind(&slow);
  __ Branch(&non_function_call, ne, t1, Operand(JS_FUNCTION_PROXY_TYPE));
  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);

  __ bind(&non_function_call);
  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing a0).
  __ li(a2, Operand(0, RelocInfo::NONE32));
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


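// Loads the type feedback vector of the function executing in the current
// JavaScript frame into |vector| by following the pointer chain:
//   vector = frame.function.shared_function_info.feedback_vector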
static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
  __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(vector, FieldMemOperand(vector,
                                JSFunction::kSharedFunctionInfoOffset));
  __ lw(vector, FieldMemOperand(vector,
                                SharedFunctionInfo::kFeedbackVectorOffset));
}


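// Specialized CallIC for calls to the Array function: verifies that the
// callee really is the Array function and that the feedback slot holds an
// AllocationSite, then tail-calls ArrayConstructorStub. Anything else is a
// miss.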
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
  // a1 - function
  // a3 - slot id
  // a2 - vector
  Label miss;

  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
  __ Branch(&miss, ne, a1, Operand(at));

  __ li(a0, Operand(arg_count()));
  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(at, a2, Operand(at));
  __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));

  // Verify that t0 contains an AllocationSite.
  __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
  __ Branch(&miss, ne, t1, Operand(at));

  // Increment the call count for monomorphic function calls.
  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(at, a2, Operand(at));
  __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
  __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));

  __ mov(a2, t0);
  __ mov(a3, a1);
  ArrayConstructorStub stub(masm->isolate(), arg_count());
  __ TailCallStub(&stub);

  __ bind(&miss);
  GenerateMiss(masm);

  // The slow case; we need this no matter what to complete a call after a
  // miss.
  CallFunctionNoFeedback(masm,
                         arg_count(),
                         true,
                         CallAsMethod());

  // Unreachable.
  __ stop("Unexpected code address");
}


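// The generic CallIC: dispatches on the current feedback state (monomorphic
// WeakCell, megamorphic, or uninitialized), keeps the call count and the
// vector's WITH_TYPES/GENERIC statistics up to date, and falls back to
// GenerateMiss for cases it cannot handle inline.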
void CallICStub::Generate(MacroAssembler* masm) {
  // a1 - function
  // a3 - slot id (Smi)
  // a2 - vector
  const int with_types_offset =
      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
  const int generic_offset =
      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
  Label extra_checks_or_miss, slow_start;
  Label slow, non_function, wrap, cont;
  Label have_js_function;
  int argc = arg_count();
  ParameterCount actual(argc);

  // The checks. First, does a1 match the recorded monomorphic target?
  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a2, Operand(t0));
  __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));

  // We don't know that we have a weak cell. We might have a private symbol
  // or an AllocationSite, but the memory is safe to examine.
  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
  // FixedArray.
  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
  // computed, meaning that it can't appear to be a pointer. If the low bit is
  // 0, then hash is computed, but the 0 bit prevents the field from appearing
  // to be a pointer.
  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
                    WeakCell::kValueOffset &&
                WeakCell::kValueOffset == Symbol::kHashFieldSlot);

  __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
  __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));

  // The compare above could have been a SMI/SMI comparison. Guard against this
  // convincing us that we have a monomorphic JSFunction.
  __ JumpIfSmi(a1, &extra_checks_or_miss);

  // Increment the call count for monomorphic function calls.
  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(at, a2, Operand(at));
  __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
  __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));

  __ bind(&have_js_function);
  if (CallAsMethod()) {
    EmitContinueIfStrictOrNative(masm, &cont);
    // Compute the receiver in sloppy mode.
    __ lw(a3, MemOperand(sp, argc * kPointerSize));

    __ JumpIfSmi(a3, &wrap);
    __ GetObjectType(a3, t0, t0);
    __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));

    __ bind(&cont);
  }

  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());

  __ bind(&slow);
  EmitSlowCase(masm, argc, &non_function);

  if (CallAsMethod()) {
    __ bind(&wrap);
    EmitWrapCase(masm, argc, &cont);
  }

  __ bind(&extra_checks_or_miss);
  Label uninitialized, miss;

  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ Branch(&slow_start, eq, t0, Operand(at));

  // The following cases attempt to handle MISS cases without going to the
  // runtime.
  if (FLAG_trace_ic) {
    __ Branch(&miss);
  }

  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
  __ Branch(&uninitialized, eq, t0, Operand(at));

  // We are going megamorphic. If the feedback is a JSFunction, it is fine
  // to handle it here. More complex cases are dealt with in the runtime.
  __ AssertNotSmi(t0);
  __ GetObjectType(t0, t1, t1);
  __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a2, Operand(t0));
  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
  // We have to update statistics for runtime profiling.
  __ lw(t0, FieldMemOperand(a2, with_types_offset));
  __ Subu(t0, t0, Operand(Smi::FromInt(1)));
  __ sw(t0, FieldMemOperand(a2, with_types_offset));
  __ lw(t0, FieldMemOperand(a2, generic_offset));
  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
  __ Branch(USE_DELAY_SLOT, &slow_start);
  __ sw(t0, FieldMemOperand(a2, generic_offset));  // In delay slot.

  __ bind(&uninitialized);

  // We are going monomorphic, provided we actually have a JSFunction.
  __ JumpIfSmi(a1, &miss);

  // Go to the miss case if we do not have a function.
  __ GetObjectType(a1, t0, t0);
  __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));

  // Make sure the function is not the Array() function, which requires special
  // behavior on MISS.
  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
  __ Branch(&miss, eq, a1, Operand(t0));

  // Update stats.
  __ lw(t0, FieldMemOperand(a2, with_types_offset));
  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
  __ sw(t0, FieldMemOperand(a2, with_types_offset));

  // Initialize the call counter.
  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(at, a2, Operand(at));
  __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));

  // Store the function. Use a stub since we need a frame for allocation.
  // a2 - vector
  // a3 - slot
  // a1 - function
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    CreateWeakCellStub create_stub(masm->isolate());
    __ Push(a1);
    __ CallStub(&create_stub);
    __ Pop(a1);
  }

  __ Branch(&have_js_function);

  // We are here because tracing is on or we encountered a MISS case we can't
  // handle here.
  __ bind(&miss);
  GenerateMiss(masm);

  // The slow case.
  __ bind(&slow_start);
  // Check that the function is really a JavaScript function.
  // a1: pushed function (to be verified)
  __ JumpIfSmi(a1, &non_function);

  // Go to the slow case if we do not have a function.
  __ GetObjectType(a1, t0, t0);
  __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
  __ Branch(&have_js_function);
}


void CallICStub::GenerateMiss(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::INTERNAL);

  // Push the function and the feedback info (vector and slot).
  __ Push(a1, a2, a3);

  // Call the entry.
  Runtime::FunctionId id = GetICState() == DEFAULT
                               ? Runtime::kCallIC_Miss
                               : Runtime::kCallIC_Customization_Miss;
  __ CallRuntime(id, 3);

  // Move result to a1 and exit the internal frame.
  __ mov(a1, v0);
}


// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  DCHECK(!t0.is(index_));
  DCHECK(!t0.is(result_));
  DCHECK(!t0.is(object_));
  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
    // If the receiver is a smi, trigger the non-string case.
    __ JumpIfSmi(object_, receiver_not_string_);

    // Fetch the instance type of the receiver into the result register.
    __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
    __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
    // If the receiver is not a string, trigger the non-string case.
    __ And(t0, result_, Operand(kIsNotStringMask));
    __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
  }

  // If the index is not a smi, trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);

  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
  __ Branch(index_out_of_range_, ls, t0, Operand(index_));

  __ sra(index_, index_, kSmiTagSize);

  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_,
                                    result_,
                                    &call_runtime_);

  __ sll(result_, result_, kSmiTagSize);
  __ bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm, EmbedMode embed_mode,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              result_,
              Heap::kHeapNumberMapRootIndex,
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  // Consumed by runtime conversion function:
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Push(LoadWithVectorDescriptor::VectorRegister(),
            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
  } else {
    __ Push(object_, index_);
  }
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }

  // Save the conversion result before the pop instructions below
  // have a chance to overwrite it.
  __ Move(index_, v0);
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Pop(LoadWithVectorDescriptor::VectorRegister(),
           LoadWithVectorDescriptor::SlotRegister(), object_);
  } else {
    __ pop(object_);
  }
  // Reload the instance type.
  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ Branch(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ sll(index_, index_, kSmiTagSize);
  __ Push(object_, index_);
  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);

  __ Move(result_, v0);

  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}


// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.

  DCHECK(!t0.is(result_));
  DCHECK(!t0.is(code_));

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
  __ And(t0, code_, Operand(kSmiTagMask |
                            ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));

  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
  // At this point the code register contains a smi-tagged one-byte char code.
  STATIC_ASSERT(kSmiTag == 0);
  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(result_, result_, t0);
  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case_, eq, result_, Operand(t0));
  __ bind(&exit_);
}


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  __ Move(result_, v0);

  call_helper.AfterCall(masm);
  __ Branch(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}


enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };


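// Copies |count| characters from |src| to |dest| one byte at a time. For
// two-byte strings the count is doubled up front so the same byte loop can
// be used. |count| is clobbered: it is reused as the end-of-copy limit.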
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          String::Encoding encoding) {
  if (FLAG_debug_code) {
    // Check that destination is word aligned.
    __ And(scratch, dest, Operand(kPointerAlignmentMask));
    __ Check(eq,
             kDestinationOfCopyNotAligned,
             scratch,
             Operand(zero_reg));
  }

  // Assumes word reads and writes are little endian.
  // Nothing to do for zero characters.
  Label done;

  if (encoding == String::TWO_BYTE_ENCODING) {
    __ Addu(count, count, count);
  }

  Register limit = count;  // Read until dest equals this.
  __ Addu(limit, dest, Operand(count));

  Label loop_entry, loop;
  // Copy bytes from src to dest until dest hits limit.
  __ Branch(&loop_entry);
  __ bind(&loop);
  __ lbu(scratch, MemOperand(src));
  __ Addu(src, src, Operand(1));
  __ sb(scratch, MemOperand(dest));
  __ Addu(dest, dest, Operand(1));
  __ bind(&loop_entry);
  __ Branch(&loop, lt, dest, Operand(limit));

  __ bind(&done);
}


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;
  // Stack frame on entry.
  //  ra: return address
  //  sp[0]: to
  //  sp[4]: from
  //  sp[8]: string

  // This stub is called from the native-call %_SubString(...), so
  // nothing can be assumed about the arguments. It is tested that:
  //  "string" is a sequential string,
  //  both "from" and "to" are smis, and
  //  0 <= from <= to <= string.length.
  // If any of these assumptions fail, we call the runtime system.

  const int kToOffset = 0 * kPointerSize;
  const int kFromOffset = 1 * kPointerSize;
  const int kStringOffset = 2 * kPointerSize;

  __ lw(a2, MemOperand(sp, kToOffset));
  __ lw(a3, MemOperand(sp, kFromOffset));
  STATIC_ASSERT(kFromOffset == kToOffset + 4);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);

  // Utilize delay slots. SmiUntag doesn't emit a jump; everything else is
  // safe in this case.
  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
  // Both a2 and a3 are untagged integers.

  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.

  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
  __ Subu(a2, a2, a3);

  // Make sure first argument is a string.
  __ lw(v0, MemOperand(sp, kStringOffset));
  __ JumpIfSmi(v0, &runtime);
  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
  __ And(t0, a1, Operand(kIsNotStringMask));

  __ Branch(&runtime, ne, t0, Operand(zero_reg));

  Label single_char;
  __ Branch(&single_char, eq, a2, Operand(1));

  // Short-cut for the case of trivial substring.
  Label return_v0;
  // v0: original string
  // a2: result string length
  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
  __ sra(t0, t0, 1);
  // Return original string.
  __ Branch(&return_v0, eq, a2, Operand(t0));
  // Longer than original string's length or negative: unsafe arguments.
  __ Branch(&runtime, hi, a2, Operand(t0));
  // Shorter than original string's length: an actual substring.

  // Deal with different string types: update the index if necessary
  // and put the underlying string into t1.
  // v0: original string
  // a1: instance type
  // a2: length
  // a3: from index (untagged)
  Label underlying_unpacked, sliced_string, seq_or_external_string;
  // If the string is not indirect, it can only be sequential or external.
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);
  __ And(t0, a1, Operand(kIsIndirectStringMask));
  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
  // t0 is used as a scratch register and can be overwritten in either case.
  __ And(t0, a1, Operand(kSlicedNotConsMask));
  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
  // Cons string. Check whether it is flat, then fetch first part.
  __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
  __ LoadRoot(t0, Heap::kempty_stringRootIndex);
  __ Branch(&runtime, ne, t1, Operand(t0));
  __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
  // Update instance type.
  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&sliced_string);
  // Sliced string. Fetch parent and correct start index by offset.
  __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
  __ sra(t0, t0, 1);  // Add offset to index.
  __ Addu(a3, a3, t0);
  // Update instance type.
  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&seq_or_external_string);
  // Sequential or external string. Just move string to the expected register.
  __ mov(t1, v0);

  __ bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    // t1: underlying subject string
    // a1: instance type of underlying subject string
    // a2: length
    // a3: adjusted start index (untagged)
    // Short slice. Copy instead of slicing.
    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
    // Allocate new sliced string. At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string. It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyways due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ And(t0, a1, Operand(kStringEncodingMask));
    __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
    __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
    __ jmp(&set_slice_header);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
    __ bind(&set_slice_header);
    __ sll(a3, a3, 1);
    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
    __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
    __ jmp(&return_v0);

    __ bind(&copy_routine);
  }

  // t1: underlying subject string
  // a1: instance type of underlying subject string
  // a2: length
  // a3: adjusted start index (untagged)
  Label two_byte_sequential, sequential_string, allocate_result;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(t0, a1, Operand(kExternalStringTag));
  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));

  // Handle external string.
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ And(t0, a1, Operand(kShortExternalStringTag));
  __ Branch(&runtime, ne, t0, Operand(zero_reg));
  __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
  // t1 already points to the first character of underlying string.
  __ jmp(&allocate_result);

  __ bind(&sequential_string);
  // Locate first character of underlying subject string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

  __ bind(&allocate_result);
  // Sequential one-byte string. Allocate the result.
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ And(t0, a1, Operand(kStringEncodingMask));
  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));

  // Allocate and copy the resulting one-byte string.
  __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);

  // Locate first character of substring to copy.
  __ Addu(t1, t1, a3);

  // Locate first character of result.
  __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

  // v0: result string
  // a1: first character of result string
  // a2: result string length
  // t1: first character of substring to copy
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharacters(
      masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
  __ jmp(&return_v0);

  // Allocate and copy the resulting two-byte string.
  __ bind(&two_byte_sequential);
  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);

  // Locate first character of substring to copy.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ sll(t0, a3, 1);
  __ Addu(t1, t1, t0);
  // Locate first character of result.
  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // v0: result string.
  // a1: first character of result.
  // a2: result length.
  // t1: first character of substring to copy.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharacters(
      masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);

  __ bind(&return_v0);
  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
  __ DropAndRet(3);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);

  __ bind(&single_char);
  // v0: original string
  // a1: instance type
  // a2: length
  // a3: from index (untagged)
  __ SmiTag(a3, a3);
  StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
  generator.GenerateFast(masm);
  __ DropAndRet(3);
  generator.SkipSlow(masm, &runtime);
}


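// ToNumber fast paths: smis and heap numbers are returned unchanged, strings
// with a cached array index use that index, and oddballs load their cached
// to-number value. Everything else goes to the runtime or the TO_NUMBER
// builtin.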
void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label not_smi;
  __ JumpIfNotSmi(a0, &not_smi);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
  __ bind(&not_smi);

  Label not_heap_number;
  __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
  // a0: object
  // a1: instance type.
  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
  __ bind(&not_heap_number);

  Label not_string, slow_string;
  __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
  // Check if string has a cached array index.
  __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
  __ Branch(&slow_string, ne, at, Operand(zero_reg));
  __ IndexFromHash(a2, a0);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
  __ bind(&slow_string);
  __ push(a0);  // Push argument.
  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
  __ bind(&not_string);

  Label not_oddball;
  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
  __ bind(&not_oddball);

  __ push(a0);  // Push argument.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


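// Compares two flat one-byte strings for equality only: returns the smi for
// NOT_EQUAL on a length mismatch or at the first differing character, and
// the smi for EQUAL otherwise.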
void StringHelper::GenerateFlatOneByteStringEquals(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
  __ bind(&strings_not_equal);
  DCHECK(is_int16(NOT_EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
  DCHECK(is_int16(EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));

  // Compare characters.
  __ bind(&compare_chars);

  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
                                  v0, &strings_not_equal);

  // Characters are equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
}


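// Three-way comparison of two flat one-byte strings: compares up to the
// shorter length, then falls back to the length difference. Returns the smi
// for LESS, EQUAL, or GREATER in v0.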
void StringHelper::GenerateCompareFlatOneByteStrings(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3, Register scratch4) {
  Label result_not_equal, compare_lengths;
  // Find minimum length and length difference.
  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Subu(scratch3, scratch1, Operand(scratch2));
  Register length_delta = scratch3;
  __ slt(scratch4, scratch2, scratch1);
  __ Movn(scratch1, scratch2, scratch4);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));

  // Compare loop.
  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
                                  scratch4, v0, &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use length_delta as result if it's zero.
  __ mov(scratch2, length_delta);
  __ mov(scratch4, zero_reg);
  __ mov(v0, zero_reg);

  __ bind(&result_not_equal);
  // Conditionally update the result based either on length_delta or
  // the last comparison performed in the loop above.
  Label ret;
  __ Branch(&ret, eq, scratch2, Operand(scratch4));
  __ li(v0, Operand(Smi::FromInt(GREATER)));
  __ Branch(&ret, gt, scratch2, Operand(scratch4));
  __ li(v0, Operand(Smi::FromInt(LESS)));
  __ bind(&ret);
  __ Ret();
}


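// Byte-wise comparison loop over |length| characters of two one-byte
// strings. Jumps to |chars_not_equal| at the first difference and falls
// through when all characters match.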
void StringHelper::GenerateOneByteCharsCompareLoop(
    MacroAssembler* masm, Register left, Register right, Register length,
    Register scratch1, Register scratch2, Register scratch3,
    Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ Addu(scratch1, length,
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ Addu(left, left, Operand(scratch1));
  __ Addu(right, right, Operand(scratch1));
  __ Subu(length, zero_reg, length);
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ Addu(scratch3, left, index);
  __ lbu(scratch1, MemOperand(scratch3));
  __ Addu(scratch3, right, index);
  __ lbu(scratch2, MemOperand(scratch3));
  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
  __ Addu(index, index, 1);
  __ Branch(&loop, ne, index, Operand(zero_reg));
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  Counters* counters = isolate()->counters();

  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[4]: left string
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.

  Label not_same;
  __ Branch(&not_same, ne, a0, Operand(a1));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
  __ DropAndRet(2);

  __ bind(&not_same);

  // Check that both objects are sequential one-byte strings.
  __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);

  // Compare flat one-byte strings natively. Remove arguments from stack first.
  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a1    : left
  //  -- a0    : right
  //  -- ra    : return address
  // -----------------------------------

  // Load a2 with the allocation site. We stick an undefined dummy value here
  // and replace it with the real allocation site later when we instantiate
  // this stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
  __ li(a2, handle(isolate()->heap()->undefined_value()));

  // Make sure that we actually patched the allocation site.
  if (FLAG_debug_code) {
    __ And(at, a2, Operand(kSmiTagMask));
    __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
    __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
    __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
  }

  // Tail call into the stub that handles binary operations with allocation
  // sites.
  BinaryOpWithAllocationSiteStub stub(isolate(), state());
  __ TailCallStub(&stub);
}


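// CompareIC fast path for two smis: for equality the raw tagged difference
// is returned; for ordered comparisons both operands are untagged first so
// the subtraction cannot overflow.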
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::SMI);
  Label miss;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ Ret(USE_DELAY_SLOT);
    __ Subu(v0, a0, a1);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(a1);
    __ SmiUntag(a0);
    __ Ret(USE_DELAY_SLOT);
    __ Subu(v0, a1, a0);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::NUMBER);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;

  if (left() == CompareICState::SMI) {
    __ JumpIfNotSmi(a1, &miss);
  }
  if (right() == CompareICState::SMI) {
    __ JumpIfNotSmi(a0, &miss);
  }

  // Inline the double comparison and fall back to the general compare stub
  // if NaN is involved.
  // Load left and right operand.
  Label done, left, left_smi, right_smi;
  __ JumpIfSmi(a0, &right_smi);
  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a0, Operand(kHeapObjectTag));
  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&left);
  __ bind(&right_smi);
  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
  FPURegister single_scratch = f6;
  __ mtc1(a2, single_scratch);
  __ cvt_d_w(f2, single_scratch);

  __ bind(&left);
  __ JumpIfSmi(a1, &left_smi);
  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a1, Operand(kHeapObjectTag));
  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&done);
  __ bind(&left_smi);
  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
  single_scratch = f8;
  __ mtc1(a2, single_scratch);
  __ cvt_d_w(f0, single_scratch);

  __ bind(&done);

  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
  Label fpu_eq, fpu_lt;
  // Test if equal, and also handle the unordered/NaN case.
  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);

  // Test if less (unordered case is already handled).
  __ BranchF(&fpu_lt, NULL, lt, f0, f2);

  // Otherwise it's greater, so just fall through, and return.
  DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(GREATER));

  __ bind(&fpu_eq);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));

  __ bind(&fpu_lt);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(LESS));

  __ bind(&unordered);
  __ bind(&generic_stub);
  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
                     CompareICState::GENERIC, CompareICState::GENERIC);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&miss, ne, a0, Operand(at));
    __ JumpIfSmi(a1, &unordered);
    __ GetObjectType(a1, a2, a2);
    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&unordered, eq, a1, Operand(at));
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are internalized strings.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(tmp1, tmp1, Operand(tmp2));
  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&miss, ne, at, Operand(zero_reg));

  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(v0, right);
  // Internalized strings are compared by identity.
  __ Ret(ne, left, Operand(right));
  DCHECK(is_int16(EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::UNIQUE_NAME);
  DCHECK(GetCondition() == eq);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));

  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);

  // Use a0 as result.
  __ mov(v0, a0);

  // Unique names are compared by identity.
  Label done;
  __ Branch(&done, ne, left, Operand(right));
  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::STRING);
  Label miss;

  bool equality = Token::IsEqualityOp(op());

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;
  Register tmp3 = t0;
  Register tmp4 = t1;
  Register tmp5 = t2;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ Or(tmp3, tmp1, tmp2);
  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
  __ Branch(&miss, ne, tmp5, Operand(zero_reg));

  // Fast check for identical strings.
  Label left_ne_right;
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&left_ne_right, ne, left, Operand(right));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, zero_reg);  // In the delay slot.
  __ bind(&left_ne_right);

  // Handle not identical strings.

  // Check that both strings are internalized strings. If they are, we're done
  // because we already know they are not identical. We know they are both
  // strings.
  if (equality) {
    DCHECK(GetCondition() == eq);
    STATIC_ASSERT(kInternalizedTag == 0);
    __ Or(tmp3, tmp1, Operand(tmp2));
    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
    Label is_symbol;
    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
    // Make sure a0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    DCHECK(right.is(a0));
    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, a0);  // In the delay slot.
    __ bind(&is_symbol);
  }

  // Check that both strings are sequential one-byte.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
                                                    &runtime);

  // Compare flat one-byte strings. Returns when done.
  if (equality) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
                                                  tmp3);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
                                                    tmp2, tmp3, tmp4);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ Push(left, right);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateObjects(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::OBJECT);
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &miss);

  __ GetObjectType(a0, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
  __ GetObjectType(a1, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));

  DCHECK(GetCondition() == eq);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a0, a1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
  __ And(a2, a1, a0);
  __ JumpIfSmi(a2, &miss);
  __ GetWeakValue(t0, cell);
  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ Branch(&miss, ne, a2, Operand(t0));
  __ Branch(&miss, ne, a3, Operand(t0));

  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a0, a1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a1, a0);
    __ Push(ra, a1, a0);
    __ li(t0, Operand(Smi::FromInt(op())));
    __ addiu(sp, sp, -kPointerSize);
    __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
                   USE_DELAY_SLOT);
    __ sw(t0, MemOperand(sp));  // In the delay slot.
    // Compute the entry point of the rewritten stub.
    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore registers.
    __ Pop(a1, a0, ra);
  }
  __ Jump(a2);
}


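// Trampoline for calling a C++ function whose address is in t9. The return
// address is kept on the stack rather than in ra across the call so that,
// as the comments below explain, the call stays GC safe.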
void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // Make room for the arguments to fit the C calling convention. Most of the
  // callers of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame
  // so they handle stack restoring and we don't have to do that here.
  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
  // kCArgsSlotsSize stack space after the call.
  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
  // Place the return address on the stack, making the call
  // GC safe. The RegExp backend also relies on this.
  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
  __ Call(t9);  // Call the C++ function.
  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));

  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    // In case of an error the return address may point to a memory area
    // filled with kZapValue by the GC.
    // Dereference the address and check for this.
    __ lw(t0, MemOperand(t9));
    __ Assert(ne, kReceivedInvalidReturnAddress, t0,
              Operand(reinterpret_cast<uint32_t>(kZapValue)));
  }
  __ Jump(t9);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  intptr_t loc =
      reinterpret_cast<intptr_t>(GetCode().location());
  __ Move(t9, target);
  __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
  __ Call(at);
}


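// Proves that |name| is *not* present in |receiver|'s property dictionary by
// probing a few slots inline and, if inconclusive, calling the out-of-line
// NameDictionaryLookupStub in NEGATIVE_LOOKUP mode.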
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register receiver,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register scratch0) {
  DCHECK(name->IsUniqueName());
  // If names of slots in the range from 1 to kProbes - 1 for the hash value
  // are not equal to the name and the kProbes-th slot is not used (its name
  // is the undefined value), it guarantees that the hash table does not
  // contain the property. This holds even if some slots represent deleted
  // properties (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
    __ Subu(index, index, Operand(1));
    __ And(index, index, Operand(
        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ sll(at, index, 1);
    __ Addu(index, index, at);

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    STATIC_ASSERT(kSmiTagSize == 1);
    Register tmp = properties;
    __ sll(scratch0, index, 1);
    __ Addu(tmp, properties, scratch0);
    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    DCHECK(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ Branch(done, eq, entity_name, Operand(tmp));

    // Load the hole ready for use below:
    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

    // Stop if we found the property.
    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));

    Label good;
    __ Branch(&good, eq, entity_name, Operand(tmp));

    // Check if the entry name is not a unique name.
    __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
    __ lbu(entity_name,
           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
    __ bind(&good);

    // Restore the properties.
    __ lw(properties,
          FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
       a2.bit() | a1.bit() | a0.bit() | v0.bit());

  __ MultiPush(spill_mask);
  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ li(a1, Operand(Handle<Name>(name)));
  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, eq, at, Operand(zero_reg));
  __ Branch(miss, ne, at, Operand(zero_reg));
}


// Probe the name dictionary in the |elements| register. Jump to the
|
|
|
|
// |done| label if a property with the given name is found. Jump to
|
|
|
|
// the |miss| label otherwise.
|
|
|
|
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
|
|
|
|
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
|
|
|
|
Label* miss,
|
|
|
|
Label* done,
|
|
|
|
Register elements,
|
|
|
|
Register name,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2) {
|
|
|
|
DCHECK(!elements.is(scratch1));
|
|
|
|
DCHECK(!elements.is(scratch2));
|
|
|
|
DCHECK(!name.is(scratch1));
|
|
|
|
DCHECK(!name.is(scratch2));
|
|
|
|
|
|
|
|
__ AssertName(name);
|
|
|
|
|
|
|
|
// Compute the capacity mask.
|
|
|
|
__ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
|
|
|
|
__ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
|
|
|
|
__ Subu(scratch1, scratch1, Operand(1));

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Addu(scratch2, scratch2, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    }
    __ srl(scratch2, scratch2, Name::kHashShift);
    __ And(scratch2, scratch1, scratch2);

    // Scale the index by multiplying by the element size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.

    __ sll(at, scratch2, 1);
    __ Addu(scratch2, scratch2, at);

    // Check if the key is identical to the name.
    __ sll(at, scratch2, 2);
    __ Addu(scratch2, elements, at);
    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
    __ Branch(done, eq, name, Operand(at));
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
      ~(scratch1.bit() | scratch2.bit());

  __ MultiPush(spill_mask);
  if (name.is(a0)) {
    DCHECK(!elements.is(a1));
    __ Move(a1, name);
    __ Move(a0, elements);
  } else {
    __ Move(a0, elements);
    __ Move(a1, name);
  }
  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(scratch2, a2);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, ne, at, Operand(zero_reg));
  __ Branch(miss, eq, at, Operand(zero_reg));
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  result: holds the result of the lookup; might alias with index.
  //  dictionary: NameDictionary to probe.
  //  key: the name being looked up.
  //  index: will hold an index of entry if lookup is successful.
  // Returns:
  //  result is zero if lookup failed, non zero otherwise.

  Register result = v0;
  Register dictionary = a0;
  Register key = a1;
  Register index = a2;
  Register mask = a3;
  Register hash = t0;
  Register undefined = t1;
  Register entry_key = t2;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ sra(mask, mask, kSmiTagSize);
  __ Subu(mask, mask, Operand(1));

  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Addu(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, hash);
    }
    __ srl(index, index, Name::kHashShift);
    __ And(index, mask, index);

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    // index *= 3.
    __ mov(at, index);
    __ sll(index, index, 1);
    __ Addu(index, index, at);

    STATIC_ASSERT(kSmiTagSize == 1);
    __ sll(index, index, 2);
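    // The shift by 2 converts the untagged entry index into a byte offset
    // (kPointerSize == 4 on MIPS32) before the dictionary base is added.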
    __ Addu(index, index, dictionary);
    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));

    // Stop if we found the property.
    __ Branch(&in_dictionary, eq, entry_key, Operand(key));

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ lbu(entry_key,
             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode() == POSITIVE_LOOKUP) {
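    // The mov below executes in the branch delay slot of the Ret, so the
    // result register is written before control actually returns.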
    __ Ret(USE_DELAY_SLOT);
    __ mov(result, zero_reg);
  }

  __ bind(&in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ li(result, 1);

  __ bind(&not_in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ mov(result, zero_reg);
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch+nop instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call. We patch it
  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
  // incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
  __ nop();
  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
  __ nop();

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.

  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(a0));
  __ Move(address, regs_.address());
  __ Move(a0, regs_.object());
  __ Move(a1, address);
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
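  // The three arguments (object, slot address, isolate) are passed in a0-a2
  // per the MIPS O32 C calling convention.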

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
  __ lw(regs_.scratch1(),
        MemOperand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
  __ sw(regs_.scratch1(),
        MemOperand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));

  // Let's look at the color of the object: if it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),    // Scratch.
                    regs_.address(),   // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : element value to store
  //  -- a3    : element index as smi
  //  -- sp[0] : array literal index in function as smi
  //  -- sp[4] : array literal
  //  clobbers a1, a2, t0
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  // Get array literal index, array literal and its map.
  __ lw(t0, MemOperand(sp, 0 * kPointerSize));
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
  __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));

  __ CheckFastElements(a2, t1, &double_elements);
  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
  __ JumpIfSmi(a0, &smi_element);
  __ CheckFastSmiElements(a2, t1, &fast_elements);

  // Store into the array literal requires an elements transition. Call into
  // the runtime.
  __ bind(&slow_elements);
  __ Push(a1, a3, a0);
  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
  __ Push(t1, t0);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
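  // t2 now holds the untagged address of the element: elements base plus the
  // smi index scaled to bytes, plus the header size, minus the tag.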
  __ sw(a0, MemOperand(t2, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is Smi.
  __ bind(&smi_element);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ lw(a1, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ Addu(a1, a1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ sll(a1, a1, kPointerSizeLog2);
  __ Ret(USE_DELAY_SLOT);
  __ Addu(sp, sp, a1);
}


void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
  LoadICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
  KeyedLoadICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, a2);
  CallICStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, a2);
  CallIC_ArrayStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }


void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandleArrayCases(MacroAssembler* masm, Register receiver,
                             Register key, Register vector, Register slot,
                             Register feedback, Register receiver_map,
                             Register scratch1, Register scratch2,
                             bool is_polymorphic, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ lw(cached_map,
        FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
  // found, now call handler.
  Register handler = feedback;
  __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1     2        len-1
  //                 ^                       ^
  //                 |                       |
  //            pointer_reg              too_far
  //            aka feedback             scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(too_far, feedback, Operand(at));
  __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
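  // pointer_reg now points at the first (weak cell, handler) pair, element 2
  // of the feedback array, and too_far is one past the last element; the
  // loop below advances two pointers per iteration.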

  __ bind(&next_loop);
  __ lw(cached_map, MemOperand(pointer_reg));
  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
  __ lw(handler, MemOperand(pointer_reg, kPointerSize));
  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  __ bind(&prepare_next);
  __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ Branch(try_array, ne, cached_map, Operand(receiver_map));
  Register handler = feedback;

  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(handler, vector, Operand(at));
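  // The handler is stored in the feedback vector slot immediately after the
  // weak cell, hence the extra kPointerSize in the load below.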
  __ lw(handler,
        FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);
}


void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
  Register name = LoadWithVectorDescriptor::NameRegister();          // a2
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
  Register feedback = t0;
  Register receiver_map = t1;
  Register scratch1 = t4;

  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(feedback, vector, Operand(at));
  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  // Is it a fixed array?
  __ bind(&try_array);
  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
  __ Branch(&not_array, ne, at, Operand(scratch1));
  HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
                   scratch1, t5, true, &miss);

  __ bind(&not_array);
  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ Branch(&miss, ne, at, Operand(feedback));
  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                               receiver, name, feedback,
                                               receiver_map, scratch1, t5);

  __ bind(&miss);
  LoadIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void KeyedLoadICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
  Register key = LoadWithVectorDescriptor::NameRegister();           // a2
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
  Register feedback = t0;
  Register receiver_map = t1;
  Register scratch1 = t4;

  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(feedback, vector, Operand(at));
  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
  __ Branch(&not_array, ne, at, Operand(scratch1));
  // We have a polymorphic element handler.
  __ JumpIfNotSmi(key, &miss);

  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);
  HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
                   scratch1, t5, true, &miss);

  __ bind(&not_array);
  // Is it generic?
  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ Branch(&try_poly_name, ne, at, Operand(feedback));
  Handle<Code> megamorphic_stub =
      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ Branch(&miss, ne, key, Operand(feedback));
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(feedback, vector, Operand(at));
  __ lw(feedback,
        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
                   scratch1, t5, false, &miss);

  __ bind(&miss);
  KeyedLoadIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
  VectorStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
  VectorKeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void VectorStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Label miss;

  // TODO(mvstanton): Implement.
  __ bind(&miss);
  StoreIC::GenerateMiss(masm);
}


void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm,
                                          bool in_frame) {
  Label miss;

  // TODO(mvstanton): Implement.
  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    __ push(ra);
    __ CallStub(&stub);
    __ pop(ra);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push ra" instruction, followed by a call.
  // Note: on MIPS "push" is 2 instructions.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs =
      kJSCallerSaved |  // Caller saved registers.
      s5.bit();         // Saved stack pointer.

  // We also save ra, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;

  // Save all caller-save registers as this may be called from anywhere.
  __ MultiPush(kSavedRegs | ra.bit());

  // Compute the function's address for the first argument.
  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(s5, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ And(sp, sp, Operand(-frame_alignment));
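    // ANDing sp with -frame_alignment rounds it down to an aligned boundary;
    // this relies on the alignment being a power of two (DCHECKed above).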
  }
  __ Subu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS)
  int32_t entry_hook =
      reinterpret_cast<int32_t>(isolate()->function_entry_hook());
  __ li(t9, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ li(t9, Operand(ExternalReference(&dispatcher,
                                      ExternalReference::BUILTIN_CALL,
                                      isolate())));
#endif
  // Call C function through t9 to conform ABI for PIC.
  __ Call(t9);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, s5);
  } else {
    __ Addu(sp, sp, kCArgsSlotsSize);
  }

  // Also pop ra to get Ret(0).
  __ MultiPop(kSavedRegs | ra.bit());
  __ Ret();
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // a0 - number of arguments
  // a1 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // Is the low bit set? If so, we are holey and that is good.
    __ And(at, a3, Operand(1));
    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
  }

  // Look at the first argument.
  __ lw(t1, MemOperand(sp, 0));
  __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ Addu(a3, a3, Operand(1));

    if (FLAG_debug_code) {
      __ lw(t1, FieldMemOperand(a2, 0));
      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
    }

    // Save the resulting elements kind in type info. We can't just store a3
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field...upper bits need to be left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
    __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
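    // Packed kinds have even encodings and their holey counterparts are one
    // higher (see the STATIC_ASSERTs at the top of this function), so adding
    // the smi-tagged packed-to-holey delta only touches the kind bits.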
    __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
    stubh3.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  if (argument_count() == ANY) {
    Label not_zero_case, not_one_case;
    __ And(at, a0, a0);
    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

    __ bind(&not_zero_case);
    __ Branch(&not_one_case, gt, a0, Operand(1));
    CreateArrayDispatchOneArgument(masm, mode);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else if (argument_count() == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
  } else if (argument_count() == ONE) {
    CreateArrayDispatchOneArgument(masm, mode);
  } else if (argument_count() == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else {
    UNREACHABLE();
  }
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : argc (only if argument_count() is ANY or MORE_THAN_ONE)
  //  -- a1    : constructor
  //  -- a2    : AllocationSite or undefined
  //  -- a3    : Original constructor
  //  -- sp[0] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ SmiTst(t0, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
              at, Operand(zero_reg));
    __ GetObjectType(t0, t0, t1);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
              t1, Operand(MAP_TYPE));

    // We should either have undefined in a2 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(a2, t0);
  }

  Label subclassing;
  __ Branch(&subclassing, ne, a1, Operand(a3));

  Label no_info;
  // Get the elements kind and case on that.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&no_info, eq, a2, Operand(at));

  __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  // Subclassing.
  __ bind(&subclassing);
  __ Push(a1);
  __ Push(a3);

  // Adjust argc.
  switch (argument_count()) {
    case ANY:
    case MORE_THAN_ONE:
      __ li(at, Operand(2));
      __ addu(a0, a0, at);
      break;
    case NONE:
      __ li(a0, Operand(2));
      break;
    case ONE:
      __ li(a0, Operand(3));
      break;
  }

  __ JumpToExternalReference(
      ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo, a0, Operand(1));

  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
  __ TailCallStub(&stubN, hi, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array; look at the first argument.
    __ lw(at, MemOperand(sp, 0));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : argc
  //  -- a1    : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
              at, Operand(zero_reg));
    __ GetObjectType(a3, a3, t0);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
              t0, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = cp;
  Register slot_reg = a2;
  Register result_reg = v0;
  Label slow_case;

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ lw(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = result_reg;
  }

  // Load the PropertyCell value at the specified slot.
  __ sll(at, slot_reg, kPointerSizeLog2);
  __ Addu(at, at, Operand(context_reg));
  __ lw(result_reg, ContextOperand(at, 0));
  __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
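  // The script context slot holds a PropertyCell; the global's current value
  // lives in the cell's value field, hence the second load.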

  // Check that value is not the_hole.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow_case, eq, result_reg, Operand(at));
  __ Ret();

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);
  __ Push(slot_reg);
  __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
}


void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = cp;
  Register slot_reg = a2;
  Register value_reg = a0;
  Register cell_reg = t0;
  Register cell_value_reg = t1;
  Register cell_details_reg = t2;
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
  }

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ lw(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = cell_reg;
  }

  // Load the PropertyCell at the specified slot.
  __ sll(at, slot_reg, kPointerSizeLog2);
  __ Addu(at, at, Operand(context_reg));
  __ lw(cell_reg, ContextOperand(at, 0));

  // Load PropertyDetails for the cell (actually only the cell_type and kind).
  __ lw(cell_details_reg,
        FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
  __ SmiUntag(cell_details_reg);
  __ And(cell_details_reg, cell_details_reg,
         PropertyDetails::PropertyCellTypeField::kMask |
         PropertyDetails::KindField::kMask |
         PropertyDetails::kAttributesReadOnlyMask);
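  // Masking the untagged details down to cell type, kind and the read-only
  // attribute lets the code below compare against fully encoded constants.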

  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ Branch(&not_mutable_data, ne, cell_details_reg,
            Operand(PropertyDetails::PropertyCellTypeField::encode(
                        PropertyCellType::kMutable) |
                    PropertyDetails::KindField::encode(kData)));
  __ JumpIfSmi(value_reg, &fast_smi_case);
  __ bind(&fast_heapobject_case);
  __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
                      cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  // RecordWriteField clobbers the value register, so we need to reload.
  __ Ret(USE_DELAY_SLOT);
  __ lw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ bind(&not_mutable_data);

  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ lw(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
  // Make sure the PropertyCell is not marked READ_ONLY.
  __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
  __ Branch(&slow_case, ne, at, Operand(zero_reg));
  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ Branch(&done, eq, cell_details_reg,
              Operand(PropertyDetails::PropertyCellTypeField::encode(
                          PropertyCellType::kConstant) |
                      PropertyDetails::KindField::encode(kData)));
    __ Branch(&done, eq, cell_details_reg,
              Operand(PropertyDetails::PropertyCellTypeField::encode(
                          PropertyCellType::kConstantType) |
                      PropertyDetails::KindField::encode(kData)));
    __ Check(eq, kUnexpectedValue, cell_details_reg,
             Operand(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kUndefined) |
                     PropertyDetails::KindField::encode(kData)));
    __ bind(&done);
  }
  __ Ret();
  __ bind(&not_same_value);

  // Check if PropertyCell contains data with constant type (and is not
  // READ_ONLY).
  __ Branch(&slow_case, ne, cell_details_reg,
            Operand(PropertyDetails::PropertyCellTypeField::encode(
                        PropertyCellType::kConstantType) |
                    PropertyDetails::KindField::encode(kData)));

  // Now either both old and new values must be SMIs or both must be heap
  // objects with same map.
  Label value_is_heap_object;
  __ JumpIfNotSmi(value_reg, &value_is_heap_object);
  __ JumpIfNotSmi(cell_value_reg, &slow_case);
  // Old and new values are SMIs, no need for a write barrier here.
  __ bind(&fast_smi_case);
  __ Ret(USE_DELAY_SLOT);
  __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value_reg, &slow_case);
  Register cell_value_map_reg = cell_value_reg;
  __ lw(cell_value_map_reg,
        FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
  __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
            FieldMemOperand(value_reg, HeapObject::kMapOffset));

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);
  __ Push(slot_reg, value_reg);
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy,
                     2, 1);
}


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(a1) || function_address.is(a2));

  Label profiler_disabled;
  Label end_profiler_check;
  __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ lb(t9, MemOperand(t9, 0));
  __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));

  // Additional parameter is the address of the actual callback.
  __ li(t9, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ mov(t9, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  __ li(s3, Operand(next_address));
  __ lw(s0, MemOperand(s3, kNextOffset));
  __ lw(s1, MemOperand(s3, kLimitOffset));
  __ lw(s2, MemOperand(s3, kLevelOffset));
  __ Addu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
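  // The handle scope's next, limit and level fields are cached in
  // callee-saved s0-s2 (base address in s3) so they survive the C call and
  // can be checked and restored afterwards.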

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, t9);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ lw(v0, return_value_operand);
  __ bind(&return_value_loaded);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ sw(s0, MemOperand(s3, kNextOffset));
  if (__ emit_debug_code()) {
    __ lw(a1, MemOperand(s3, kLevelOffset));
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  }
  __ Subu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
  __ lw(at, MemOperand(s3, kLimitOffset));
  __ Branch(&delete_allocated_handles, ne, s1, Operand(at));

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ lw(cp, *context_restore_operand);
  }
  if (stack_space_offset != kInvalidStackOffset) {
    // ExitFrame contains four MIPS argument slots after DirectCEntryStub call
    // so this must be accounted for.
    __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
  } else {
    __ li(s0, Operand(stack_space));
  }
  __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
                    stack_space_offset != kInvalidStackOffset);

  // Check if the function scheduled an exception.
  __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ lw(t1, MemOperand(at));
  __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));

  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ sw(s1, MemOperand(s3, kLimitOffset));
  __ mov(s0, v0);
  __ mov(a0, v0);
  __ PrepareCallCFunction(1, s1);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(v0, s0);
  __ jmp(&leave_exit_frame);
}


static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                      const ParameterCount& argc,
                                      bool return_first_arg,
                                      bool call_data_undefined) {
  // ----------- S t a t e -------------
  //  -- a0                  : callee
  //  -- t0                  : call_data
  //  -- a2                  : holder
  //  -- a1                  : api_function_address
  //  -- a3                  : number of arguments if argc is a register
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4]  : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = t0;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);
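  // The seven implicit arguments are pushed below from highest index to
  // lowest, so after the pushes sp[0] holds the holder (index 0) and sp[6]
  // holds the saved context (index 6).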
|
|
|
|
|
|
|
|
DCHECK(argc.is_immediate() || a3.is(argc.reg()));
|
|
|
|
|
|
|
|
// Save context, callee and call data.
|
|
|
|
__ Push(context, callee, call_data);
|
|
|
|
// Load context from callee.
|
|
|
|
__ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
|
|
|
|
|
|
|
|
  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by GC.
  const int kApiStackSpace = 4;
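  // Its four words hold implicit_args_, values_, length_ and
  // is_construct_call_, which are filled in below.
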
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // The struct is allocated just above the return address slot in the frame.
  __ Addu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
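  // FunctionCallbackInfo::values_ must point at the first JS argument, i.e.
  // FCA::kArgsLength - 1 + argc slots above the implicit args' base.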
  if (argc.is_immediate()) {
    // FunctionCallbackInfo::values_
    __ Addu(at, scratch,
            Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
    __ sw(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    __ li(at, Operand(argc.immediate()));
    __ sw(at, MemOperand(a0, 2 * kPointerSize));
    // FunctionCallbackInfo::is_construct_call_ = 0
    __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
  } else {
    // FunctionCallbackInfo::values_
    __ sll(at, argc.reg(), kPointerSizeLog2);
    __ Addu(at, at, scratch);
    __ Addu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
    __ sw(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
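    // The is_construct_call_ slot doubles as the frame drop in bytes,
    // (FCA::kArgsLength + 1 + argc) words, read back later through
    // stack_space_offset when leaving the exit frame.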
    // FunctionCallbackInfo::is_construct_call_
    __ Addu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
    __ sll(at, argc.reg(), kPointerSizeLog2);
    __ sw(at, MemOperand(a0, 3 * kPointerSize));
  }

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
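  // The FCA array starts at the caller's sp, i.e. fp + 2 * kPointerSize in
  // the exit frame, hence the +2 in the fp-relative indices here and below.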
  // Store accessors return the first JS argument.
  int return_value_offset = 0;
  if (return_first_arg) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  int32_t stack_space_offset = 4 * kPointerSize;
  if (argc.is_immediate()) {
    stack_space = argc.immediate() + FCA::kArgsLength + 1;
    stack_space_offset = kInvalidStackOffset;
  }
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_offset, return_value_operand,
                           &context_restore_operand);
}


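// CallApiFunctionStub passes a run-time argument count in a3, while
// CallApiAccessorStub below bakes an immediate count into the stub.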
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
                            call_data_undefined);
}


void CallApiAccessorStub::Generate(MacroAssembler* masm) {
  bool is_store = this->is_store();
  int argc = this->argc();
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
                            call_data_undefined);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0]                     : name
  //  -- sp[4 .. kArgsLength * 4]  : PropertyCallbackArguments object
  //  -- ...
  //  -- a2                        : api_function_address
  // -----------------------------------

  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(a2));

  __ mov(a0, sp);  // a0 = Handle<Name>
  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create PropertyAccessorInfo instance on the stack above the exit frame
  // with a1 (internal::Object** args_) as the data.
  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
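  // The +1 unwinds the name handle at sp[0] below the PCA slots.
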
  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());
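  // fp[6 * kPointerSize] is the PCA ReturnValue slot: the caller's sp is at
  // fp + 2 * kPointerSize, the name occupies one slot, and
  // kReturnValueOffset == 3 within the PCA.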
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kInvalidStackOffset,
                           MemOperand(fp, 6 * kPointerSize), NULL);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS