Browse Source

Upgrade V8 to 2.3.3

v0.7.4-release
Ryan Dahl 15 years ago
parent
commit
552cf28260
  1. 15
      deps/v8/ChangeLog
  2. 5
      deps/v8/SConstruct
  3. 34
      deps/v8/src/arm/assembler-arm.cc
  4. 4
      deps/v8/src/arm/assembler-arm.h
  5. 295
      deps/v8/src/arm/codegen-arm.cc
  6. 7
      deps/v8/src/arm/codegen-arm.h
  7. 45
      deps/v8/src/arm/ic-arm.cc
  8. 328
      deps/v8/src/ia32/codegen-ia32.cc
  9. 9
      deps/v8/src/ia32/codegen-ia32.h
  10. 3
      deps/v8/src/jump-target-light.h
  11. 6
      deps/v8/src/objects.cc
  12. 64
      deps/v8/src/runtime.cc
  13. 1
      deps/v8/src/runtime.h
  14. 2
      deps/v8/src/v8-counters.h
  15. 82
      deps/v8/src/v8natives.js
  16. 2
      deps/v8/src/version.cc
  17. 4
      deps/v8/src/x64/builtins-x64.cc
  18. 726
      deps/v8/src/x64/codegen-x64.cc
  19. 11
      deps/v8/src/x64/codegen-x64.h
  20. 61
      deps/v8/src/x64/ic-x64.cc
  21. 145
      deps/v8/src/x64/virtual-frame-x64.cc
  22. 33
      deps/v8/src/x64/virtual-frame-x64.h
  23. 23
      deps/v8/test/mjsunit/debug-setbreakpoint.js
  24. 184
      deps/v8/test/mjsunit/function-bind.js
  25. 4
      deps/v8/tools/js2c.py

15
deps/v8/ChangeLog

@ -1,3 +1,17 @@
2010-07-26: Version 2.3.3
Fixed error when building the d8 shell in a fresh checkout.
Implemented Function.prototype.bind (ES5 15.3.4.5).
Fixed an error in inlined stores on ia32.
Fixed an error when setting a breakpoint at the end of a function
that does not end with a newline character.
Performance improvements on all platforms.
2010-07-21: Version 2.3.2 2010-07-21: Version 2.3.2
Fixed compiler warnings when building with LLVM. Fixed compiler warnings when building with LLVM.
@ -6,6 +20,7 @@
Performance improvements on all platforms. Performance improvements on all platforms.
2010-07-19: Version 2.3.1 2010-07-19: Version 2.3.1
Fixed compilation and linking with V8_INTERPRETED_REGEXP flag. Fixed compilation and linking with V8_INTERPRETED_REGEXP flag.

5
deps/v8/SConstruct

@ -43,7 +43,7 @@ if ANDROID_TOP is None:
ANDROID_TOP="" ANDROID_TOP=""
# ARM_TARGET_LIB is the path to the dynamic library to use on the target # ARM_TARGET_LIB is the path to the dynamic library to use on the target
# machine if cross-compiling to an arm machine. You will also need to set # machine if cross-compiling to an arm machine. You will also need to set
# the additional cross-compiling environment variables to the cross compiler. # the additional cross-compiling environment variables to the cross compiler.
ARM_TARGET_LIB = os.environ.get('ARM_TARGET_LIB') ARM_TARGET_LIB = os.environ.get('ARM_TARGET_LIB')
if ARM_TARGET_LIB: if ARM_TARGET_LIB:
@ -628,6 +628,9 @@ D8_FLAGS = {
'os:win32': { 'os:win32': {
'LIBS': ['winmm', 'ws2_32'], 'LIBS': ['winmm', 'ws2_32'],
}, },
'arch:arm': {
'LINKFLAGS': ARM_LINK_FLAGS
},
}, },
'msvc': { 'msvc': {
'all': { 'all': {

34
deps/v8/src/arm/assembler-arm.cc

@ -445,6 +445,37 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
} }
bool Assembler::IsStrRegisterImmediate(Instr instr) {
  // Recognize STR rd, [rn, #imm]: bits 27..26 are 01 (single data transfer)
  // while the register-offset (B25), byte (B22) and load (B20) bits are all
  // clear.  B26 alone is the expected value under this mask.
  const Instr kPatternMask = B27 | B26 | B25 | B22 | B20;
  const Instr kStrImmediatePattern = B26;
  return (instr & kPatternMask) == kStrImmediatePattern;
}
Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  // Split the signed offset into a magnitude plus an add/subtract direction.
  const bool add_offset = offset >= 0;
  int magnitude = offset;
  if (!add_offset) magnitude = -magnitude;
  // The immediate offset field of STR is only 12 bits wide.
  ASSERT(is_uint12(magnitude));
  // The U bit (B23) indicates whether the offset is added to or subtracted
  // from the base register.
  if (add_offset) {
    instr = instr | B23;
  } else {
    instr = instr & ~B23;
  }
  // Replace the 12-bit immediate offset field with the new magnitude.
  return (instr & ~Off12Mask) | magnitude;
}
bool Assembler::IsAddRegisterImmediate(Instr instr) {
  // Recognize ADD rd, rn, #imm: bits 27..25 are 001 (data processing,
  // immediate operand) and the opcode field (bits 24..21) is 0100 (ADD).
  // The S bit (B20) is deliberately left out of the mask, so both flag-
  // setting and non-flag-setting forms match.
  const Instr kPatternMask = B27 | B26 | B25 | B24 | B23 | B22 | B21;
  const Instr kAddImmediatePattern = B25 | B23;
  return (instr & kPatternMask) == kAddImmediatePattern;
}
Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  // Only non-negative offsets are supported; the direction is fixed by the
  // ADD opcode itself.
  ASSERT(offset >= 0);
  // NOTE(review): the value is written raw into the 12-bit operand field,
  // which assumes a zero rotation — presumably only small offsets are ever
  // patched in here.
  ASSERT(is_uint12(offset));
  // Clear the immediate operand field, then install the new offset.
  const Instr cleared = instr & ~Off12Mask;
  return cleared | offset;
}
Register Assembler::GetRd(Instr instr) { Register Assembler::GetRd(Instr instr) {
Register reg; Register reg;
reg.code_ = ((instr & kRdMask) >> kRdShift); reg.code_ = ((instr & kRdMask) >> kRdShift);
@ -796,9 +827,10 @@ void Assembler::addrmod1(Instr instr,
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
} }
emit(instr | rn.code()*B16 | rd.code()*B12); emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc. // Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize); BlockConstPoolBefore(pc_offset() + kInstrSize);
}
} }

4
deps/v8/src/arm/assembler-arm.h

@ -1120,6 +1120,10 @@ class Assembler : public Malloced {
static bool IsLdrRegisterImmediate(Instr instr); static bool IsLdrRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr); static int GetLdrRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset); static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
static bool IsStrRegisterImmediate(Instr instr);
static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
static bool IsAddRegisterImmediate(Instr instr);
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr); static Register GetRd(Instr instr);
static bool IsPush(Instr instr); static bool IsPush(Instr instr);
static bool IsPop(Instr instr); static bool IsPop(Instr instr);

295
deps/v8/src/arm/codegen-arm.cc

@ -151,6 +151,8 @@ TypeInfoCodeGenState::~TypeInfoCodeGenState() {
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// CodeGenerator implementation // CodeGenerator implementation
int CodeGenerator::inlined_write_barrier_size_ = -1;
CodeGenerator::CodeGenerator(MacroAssembler* masm) CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8), : deferred_(8),
masm_(masm), masm_(masm),
@ -815,7 +817,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
// Check they are both small and positive. // Check they are both small and positive.
__ tst(scratch, Operand(kSmiTagMask | 0xc0000000)); __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now. ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
if (op == Token::ADD) { if (op == Token::ADD) {
__ add(r0, lhs, Operand(rhs), LeaveCC, eq); __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
} else { } else {
@ -863,7 +865,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
__ and_(r0, lhs, Operand(rhs), LeaveCC, cond); __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
} else { } else {
ASSERT(op == Token::BIT_XOR); ASSERT(op == Token::BIT_XOR);
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ eor(r0, lhs, Operand(rhs), LeaveCC, cond); __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
} }
if (cond != al) { if (cond != al) {
@ -1520,8 +1522,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// JS_FUNCTION_TYPE is the last instance type and it is right // JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound. // bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE); __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &build_args); __ b(lt, &build_args);
@ -2610,7 +2612,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// The next handler address is on top of the frame. Unlink from // The next handler address is on top of the frame. Unlink from
// the handler list and drop the rest of this handler from the // the handler list and drop the rest of this handler from the
// frame. // frame.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1); frame_->EmitPop(r1);
__ mov(r3, Operand(handler_address)); __ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3)); __ str(r1, MemOperand(r3));
@ -2636,7 +2638,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
__ ldr(sp, MemOperand(r3)); __ ldr(sp, MemOperand(r3));
frame_->Forget(frame_->height() - handler_height); frame_->Forget(frame_->height() - handler_height);
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1); frame_->EmitPop(r1);
__ str(r1, MemOperand(r3)); __ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@ -2723,7 +2725,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
// chain and set the state on the frame to FALLING. // chain and set the state on the frame to FALLING.
if (has_valid_frame()) { if (has_valid_frame()) {
// The next handler address is on top of the frame. // The next handler address is on top of the frame.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1); frame_->EmitPop(r1);
__ mov(r3, Operand(handler_address)); __ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3)); __ str(r1, MemOperand(r3));
@ -2762,7 +2764,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
// Unlink this handler and drop it from the frame. The next // Unlink this handler and drop it from the frame. The next
// handler address is currently on top of the frame. // handler address is currently on top of the frame.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1); frame_->EmitPop(r1);
__ str(r1, MemOperand(r3)); __ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@ -4181,8 +4183,8 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// As long as JS_FUNCTION_TYPE is the last instance type and it is // As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
// LAST_JS_OBJECT_TYPE. // LAST_JS_OBJECT_TYPE.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ cmp(r1, Operand(JS_FUNCTION_TYPE)); __ cmp(r1, Operand(JS_FUNCTION_TYPE));
function.Branch(eq); function.Branch(eq);
@ -5128,7 +5130,7 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
const int kFingerOffset = const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex); FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ ldr(r0, FieldMemOperand(r1, kFingerOffset)); __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
// r0 now holds finger offset as a smi. // r0 now holds finger offset as a smi.
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -6207,6 +6209,60 @@ void DeferredReferenceSetKeyedValue::Generate() {
} }
// Deferred (slow-path) code for an inlined named property store.  When the
// inlined fast case fails (receiver is a smi, or its map does not match the
// patched-in expected map), control falls through to this deferred code,
// which performs the store via the generic store IC instead.
class DeferredReferenceSetNamedValue: public DeferredCode {
public:
// value/receiver: the registers holding the value being stored and the
// receiver object; name: the property name being stored.
DeferredReferenceSetNamedValue(Register value,
Register receiver,
Handle<String> name)
: value_(value), receiver_(receiver), name_(name) {
set_comment("[ DeferredReferenceSetNamedValue");
}
virtual void Generate();
private:
Register value_;
Register receiver_;
Handle<String> name_;
};
// Slow path for an inlined named store.  Takes value in r0, receiver in r1
// and returns the result (the value) in r0.
void DeferredReferenceSetNamedValue::Generate() {
// Record the entry frame and spill.
VirtualFrame copied_frame(*frame_state()->frame());
copied_frame.SpillAll();
// Ensure value in r0, receiver in r1 to match store ic calling
// convention.
ASSERT(value_.is(r0) && receiver_.is(r1));
// Load the property name into r2, the third argument of the store IC.
__ mov(r2, Operand(name_));
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call the named store IC.  It takes the value, the receiver and the
// property name in r0, r1 and r2 respectively.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// named store has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
// Go back to the frame we entered with. The instructions
// generated by this merge are skipped over by the inline store
// patching mechanism when looking for the branch instruction that
// tells it where the code to patch is.
copied_frame.MergeTo(frame_state()->frame());
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
}
// Consumes the top of stack (the receiver) and pushes the result instead. // Consumes the top of stack (the receiver) and pushes the result instead.
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
@ -6277,11 +6333,88 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) { void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
#ifdef DEBUG #ifdef DEBUG
int expected_height = frame_->height() - (is_contextual ? 1 : 2); int expected_height = frame()->height() - (is_contextual ? 1 : 2);
#endif
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
frame()->CallStoreIC(name, is_contextual);
} else {
// Inline the in-object property case.
JumpTarget slow, done;
// Get the value and receiver from the stack.
frame()->PopToR0();
Register value = r0;
frame()->PopToR1();
Register receiver = r1;
DeferredReferenceSetNamedValue* deferred =
new DeferredReferenceSetNamedValue(value, receiver, name);
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
// The following instructions are the part of the inlined
// in-object property store code which can be patched. Therefore
// the exact number of instructions generated must be fixed, so
// the constant pool is blocked while generating this code.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
Register scratch0 = VirtualFrame::scratch0();
Register scratch1 = VirtualFrame::scratch1();
// Check the map. Initially use an invalid map to force a
// failure. The map check will be patched in the runtime system.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
#ifdef DEBUG
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif #endif
frame_->CallStoreIC(name, is_contextual); __ mov(scratch0, Operand(Factory::null_value()));
__ cmp(scratch0, scratch1);
deferred->Branch(ne);
ASSERT_EQ(expected_height, frame_->height()); int offset = 0;
__ str(value, MemOperand(receiver, offset));
// Update the write barrier and record its size. We do not use
// the RecordWrite macro here because we want the offset
// addition instruction first to make it easy to patch.
Label record_write_start, record_write_done;
__ bind(&record_write_start);
// Add offset into the object.
__ add(scratch0, receiver, Operand(offset));
// Test that the object is not in the new space. We cannot set
// region marks for new space pages.
__ InNewSpace(receiver, scratch1, eq, &record_write_done);
// Record the actual write.
__ RecordWriteHelper(receiver, scratch0, scratch1);
__ bind(&record_write_done);
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
__ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
__ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
__ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
// Check that this is the first inlined write barrier or that
// this inlined write barrier has the same size as all the other
// inlined write barriers.
ASSERT((inlined_write_barrier_size_ == -1) ||
(inlined_write_barrier_size_ ==
masm()->InstructionsGeneratedSince(&record_write_start)));
inlined_write_barrier_size_ =
masm()->InstructionsGeneratedSince(&record_write_start);
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
masm()->InstructionsGeneratedSince(&check_inlined_codesize));
}
deferred->BindExit();
}
ASSERT_EQ(expected_height, frame()->height());
} }
@ -6848,7 +6981,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
// Move sign bit from source to destination. This works because the sign bit // Move sign bit from source to destination. This works because the sign bit
// in the exponent word of the double has the same position and polarity as // in the exponent word of the double has the same position and polarity as
// the 2's complement sign bit in a Smi. // the 2's complement sign bit in a Smi.
ASSERT(HeapNumber::kSignMask == 0x80000000u); STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
__ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
// Subtract from 0 if source was negative. // Subtract from 0 if source was negative.
__ rsb(source_, source_, Operand(0), LeaveCC, ne); __ rsb(source_, source_, Operand(0), LeaveCC, ne);
@ -6901,7 +7034,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// the_int_ has the answer which is a signed int32 but not a Smi. // the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test // We test for the special value that has a different exponent. This test
// has the neat side effect of setting the flags according to the sign. // has the neat side effect of setting the flags according to the sign.
ASSERT(HeapNumber::kSignMask == 0x80000000u); STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
__ cmp(the_int_, Operand(0x80000000u)); __ cmp(the_int_, Operand(0x80000000u));
__ b(eq, &max_negative_int); __ b(eq, &max_negative_int);
// Set up the correct exponent in scratch_. All non-Smi int32s have the same. // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
@ -7246,7 +7379,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JSObject or an oddball value, then they are // If either operand is a JSObject or an oddball value, then they are
// not equal since their pointers are different. // not equal since their pointers are different.
// There is no test for undetectability in strict equality. // There is no test for undetectability in strict equality.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object; Label first_non_object;
// Get the type of the first operand into r2 and compare it with // Get the type of the first operand into r2 and compare it with
// FIRST_JS_OBJECT_TYPE. // FIRST_JS_OBJECT_TYPE.
@ -7272,8 +7405,8 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Now that we have the types we might as well check for symbol-symbol. // Now that we have the types we might as well check for symbol-symbol.
// Ensure that no non-strings have the symbol bit set. // Ensure that no non-strings have the symbol bit set.
ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
ASSERT(kSymbolTag != 0); STATIC_ASSERT(kSymbolTag != 0);
__ and_(r2, r2, Operand(r3)); __ and_(r2, r2, Operand(r3));
__ tst(r2, Operand(kIsSymbolMask)); __ tst(r2, Operand(kIsSymbolMask));
__ b(ne, &return_not_equal); __ b(ne, &return_not_equal);
@ -7324,7 +7457,7 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
// r2 is object type of rhs. // r2 is object type of rhs.
// Ensure that no non-strings have the symbol bit set. // Ensure that no non-strings have the symbol bit set.
Label object_test; Label object_test;
ASSERT(kSymbolTag != 0); STATIC_ASSERT(kSymbolTag != 0);
__ tst(r2, Operand(kIsNotStringMask)); __ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test); __ b(ne, &object_test);
__ tst(r2, Operand(kIsSymbolMask)); __ tst(r2, Operand(kIsSymbolMask));
@ -7395,7 +7528,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
not_found, not_found,
true); true);
ASSERT_EQ(8, kDoubleSize); STATIC_ASSERT(8 == kDoubleSize);
__ add(scratch1, __ add(scratch1,
object, object,
Operand(HeapNumber::kValueOffset - kHeapObjectTag)); Operand(HeapNumber::kValueOffset - kHeapObjectTag));
@ -7494,7 +7627,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only // If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber. // be strictly equal if the other is a HeapNumber.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0)); ASSERT_EQ(0, Smi::FromInt(0));
__ and_(r2, lhs_, Operand(rhs_)); __ and_(r2, lhs_, Operand(rhs_));
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
@ -8497,7 +8630,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_smi; Label not_smi;
// Fast path. // Fast path.
if (ShouldGenerateSmiCode()) { if (ShouldGenerateSmiCode()) {
ASSERT(kSmiTag == 0); // Adjust code below. STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(smi_test_reg, Operand(kSmiTagMask)); __ tst(smi_test_reg, Operand(kSmiTagMask));
__ b(ne, &not_smi); __ b(ne, &not_smi);
__ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
@ -8513,7 +8646,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_smi; Label not_smi;
// Fast path. // Fast path.
if (ShouldGenerateSmiCode()) { if (ShouldGenerateSmiCode()) {
ASSERT(kSmiTag == 0); // Adjust code below. STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(smi_test_reg, Operand(kSmiTagMask)); __ tst(smi_test_reg, Operand(kSmiTagMask));
__ b(ne, &not_smi); __ b(ne, &not_smi);
if (lhs.is(r1)) { if (lhs.is(r1)) {
@ -8535,7 +8668,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::MUL: { case Token::MUL: {
Label not_smi, slow; Label not_smi, slow;
if (ShouldGenerateSmiCode()) { if (ShouldGenerateSmiCode()) {
ASSERT(kSmiTag == 0); // adjust code below STATIC_ASSERT(kSmiTag == 0); // adjust code below
__ tst(smi_test_reg, Operand(kSmiTagMask)); __ tst(smi_test_reg, Operand(kSmiTagMask));
Register scratch2 = smi_test_reg; Register scratch2 = smi_test_reg;
smi_test_reg = no_reg; smi_test_reg = no_reg;
@ -8671,7 +8804,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label slow; Label slow;
Label not_power_of_2; Label not_power_of_2;
ASSERT(!ShouldGenerateSmiCode()); ASSERT(!ShouldGenerateSmiCode());
ASSERT(kSmiTag == 0); // Adjust code below. STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
// Check for two positive smis. // Check for two positive smis.
__ orr(smi_test_reg, lhs, Operand(rhs)); __ orr(smi_test_reg, lhs, Operand(rhs));
__ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
@ -8731,7 +8864,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHR: case Token::SHR:
case Token::SHL: { case Token::SHL: {
Label slow; Label slow;
ASSERT(kSmiTag == 0); // adjust code below STATIC_ASSERT(kSmiTag == 0); // adjust code below
__ tst(smi_test_reg, Operand(kSmiTagMask)); __ tst(smi_test_reg, Operand(kSmiTagMask));
__ b(ne, &slow); __ b(ne, &slow);
Register scratch2 = smi_test_reg; Register scratch2 = smi_test_reg;
@ -9045,17 +9178,17 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// r0 holds the exception. // r0 holds the exception.
// Adjust this code if not the case. // Adjust this code if not the case.
ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler. // Drop the sp to the top of the handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address))); __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3)); __ ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state. // Restore the next handler and frame pointer, discard handler state.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2); __ pop(r2);
__ str(r2, MemOperand(r3)); __ str(r2, MemOperand(r3));
ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
// Before returning we restore the context from the frame pointer if // Before returning we restore the context from the frame pointer if
@ -9071,7 +9204,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ mov(lr, Operand(pc)); __ mov(lr, Operand(pc));
} }
#endif #endif
ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc); __ pop(pc);
} }
@ -9079,7 +9212,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) { UncatchableExceptionType type) {
// Adjust this code if not the case. // Adjust this code if not the case.
ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler. // Drop sp to the top stack handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address))); __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
@ -9100,7 +9233,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
__ bind(&done); __ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler. // Set the top handler address to next handler past the current ENTRY handler.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2); __ pop(r2);
__ str(r2, MemOperand(r3)); __ str(r2, MemOperand(r3));
@ -9124,7 +9257,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
// lr // lr
// Discard handler state (r2 is not used) and restore frame pointer. // Discard handler state (r2 is not used) and restore frame pointer.
ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
// Before returning we restore the context from the frame pointer if // Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a // not NULL. The frame pointer is NULL in the exception handler of a
@ -9139,7 +9272,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
__ mov(lr, Operand(pc)); __ mov(lr, Operand(pc));
} }
#endif #endif
ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc); __ pop(pc);
} }
@ -9234,7 +9367,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// check for failure result // check for failure result
Label failure_returned; Label failure_returned;
ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
// Lower 2 bits of r2 are 0 iff r0 has failure tag. // Lower 2 bits of r2 are 0 iff r0 has failure tag.
__ add(r2, r0, Operand(1)); __ add(r2, r0, Operand(1));
__ tst(r2, Operand(kFailureTagMask)); __ tst(r2, Operand(kFailureTagMask));
@ -9249,7 +9382,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// check if we should retry or throw exception // check if we should retry or throw exception
Label retry; Label retry;
__ bind(&failure_returned); __ bind(&failure_returned);
ASSERT(Failure::RETRY_AFTER_GC == 0); STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ b(eq, &retry); __ b(eq, &retry);
@ -9652,12 +9785,12 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
} }
// Setup the callee in-object property. // Setup the callee in-object property.
ASSERT(Heap::arguments_callee_index == 0); STATIC_ASSERT(Heap::arguments_callee_index == 0);
__ ldr(r3, MemOperand(sp, 2 * kPointerSize)); __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
__ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
// Get the length (smi tagged) and set that as an in-object property too. // Get the length (smi tagged) and set that as an in-object property too.
ASSERT(Heap::arguments_length_index == 1); STATIC_ASSERT(Heap::arguments_length_index == 1);
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
@ -9749,7 +9882,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the first argument is a JSRegExp object. // Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset)); __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
__ b(eq, &runtime); __ b(eq, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
@ -9776,8 +9909,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. This // Calculate number of capture registers (number_of_captures + 1) * 2. This
// uses the assumption that smis are 2 * their untagged value. // uses the assumption that smis are 2 * their untagged value.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r2, r2, Operand(2)); // r2 was a smi. __ add(r2, r2, Operand(2)); // r2 was a smi.
// Check that the static offsets vector buffer is large enough. // Check that the static offsets vector buffer is large enough.
__ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
@ -9838,7 +9971,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
// First check for flat string. // First check for flat string.
__ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask)); __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
ASSERT_EQ(0, kStringTag | kSeqStringTag); STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string); __ b(eq, &seq_string);
// subject: Subject string // subject: Subject string
@ -9848,8 +9981,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. In that case the subject string is just the first part of the cons // string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be // string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string. // a sequential string or an external string.
ASSERT(kExternalStringTag !=0); STATIC_ASSERT(kExternalStringTag !=0);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag); STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
__ tst(r0, Operand(kIsNotStringMask | kExternalStringTag)); __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
__ b(ne, &runtime); __ b(ne, &runtime);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
@ -9860,7 +9993,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
// Is first part a flat string? // Is first part a flat string?
ASSERT_EQ(0, kSeqStringTag); STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask)); __ tst(r0, Operand(kStringRepresentationMask));
__ b(nz, &runtime); __ b(nz, &runtime);
@ -9868,8 +10001,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string // subject: Subject string
// regexp_data: RegExp data (FixedArray) // regexp_data: RegExp data (FixedArray)
// r0: Instance type of subject string // r0: Instance type of subject string
ASSERT_EQ(4, kAsciiStringTag); STATIC_ASSERT(4 == kAsciiStringTag);
ASSERT_EQ(0, kTwoByteStringTag); STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above. // Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask)); __ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC); __ mov(r3, Operand(r0, ASR, 2), SetCC);
@ -9923,7 +10056,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// calculate the shift of the index (0 for ASCII and 1 for two byte). // calculate the shift of the index (0 for ASCII and 1 for two byte).
__ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
__ mov(r0, Operand(r0, ASR, kSmiTagSize)); __ mov(r0, Operand(r0, ASR, kSmiTagSize));
ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize); STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1)); __ eor(r3, r3, Operand(1));
// Argument 4 (r3): End of string data // Argument 4 (r3): End of string data
@ -9978,8 +10111,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r1, __ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. // Calculate number of capture registers (number_of_captures + 1) * 2.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi. __ add(r1, r1, Operand(2)); // r1 was a smi.
// r1: number of capture registers // r1: number of capture registers
@ -10191,7 +10324,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ b(ls, index_out_of_range_); __ b(ls, index_out_of_range_);
// We need special handling for non-flat strings. // We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0); STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask)); __ tst(result_, Operand(kStringRepresentationMask));
__ b(eq, &flat_string); __ b(eq, &flat_string);
@ -10213,13 +10346,13 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime. // If the first cons component is also non-flat, then go to runtime.
ASSERT(kSeqStringTag == 0); STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask)); __ tst(result_, Operand(kStringRepresentationMask));
__ b(nz, &call_runtime_); __ b(nz, &call_runtime_);
// Check for 1-byte or 2-byte string. // Check for 1-byte or 2-byte string.
__ bind(&flat_string); __ bind(&flat_string);
ASSERT(kAsciiStringTag != 0); STATIC_ASSERT(kAsciiStringTag != 0);
__ tst(result_, Operand(kStringEncodingMask)); __ tst(result_, Operand(kStringEncodingMask));
__ b(nz, &ascii_string); __ b(nz, &ascii_string);
@ -10227,7 +10360,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// Load the 2-byte character code into the result register. We can // Load the 2-byte character code into the result register. We can
// add without shifting since the smi tag size is the log2 of the // add without shifting since the smi tag size is the log2 of the
// number of bytes in a two-byte character. // number of bytes in a two-byte character.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
__ add(scratch_, object_, Operand(scratch_)); __ add(scratch_, object_, Operand(scratch_));
__ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code); __ jmp(&got_char_code);
@ -10304,8 +10437,8 @@ void StringCharCodeAtGenerator::GenerateSlow(
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode. // Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ tst(code_, __ tst(code_,
Operand(kSmiTagMask | Operand(kSmiTagMask |
@ -10314,7 +10447,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ascii char code. // At this point code register contains smi tagged ascii char code.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@ -10419,7 +10552,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
// Ensure that reading an entire aligned word containing the last character // Ensure that reading an entire aligned word containing the last character
// of a string will not read outside the allocated area (because we pad up // of a string will not read outside the allocated area (because we pad up
// to kObjectAlignment). // to kObjectAlignment).
ASSERT(kObjectAlignment >= kReadAlignment); STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
// Assumes word reads and writes are little endian. // Assumes word reads and writes are little endian.
// Nothing to do for zero characters. // Nothing to do for zero characters.
Label done; Label done;
@ -10623,7 +10756,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ and_(candidate, candidate, Operand(mask)); __ and_(candidate, candidate, Operand(mask));
// Load the entry from the symbol table. // Load the entry from the symbol table.
ASSERT_EQ(1, SymbolTable::kEntrySize); STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ ldr(candidate, __ ldr(candidate,
MemOperand(first_symbol_table_element, MemOperand(first_symbol_table_element,
candidate, candidate,
@ -10723,8 +10856,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Check bounds and smi-ness. // Check bounds and smi-ness.
__ ldr(r7, MemOperand(sp, kToOffset)); __ ldr(r7, MemOperand(sp, kToOffset));
__ ldr(r6, MemOperand(sp, kFromOffset)); __ ldr(r6, MemOperand(sp, kFromOffset));
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
// I.e., arithmetic shift right by one un-smi-tags. // I.e., arithmetic shift right by one un-smi-tags.
__ mov(r2, Operand(r7, ASR, 1), SetCC); __ mov(r2, Operand(r7, ASR, 1), SetCC);
__ mov(r3, Operand(r6, ASR, 1), SetCC, cc); __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
@ -10747,7 +10880,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Make sure first argument is a sequential (or flat) string. // Make sure first argument is a sequential (or flat) string.
__ ldr(r5, MemOperand(sp, kStringOffset)); __ ldr(r5, MemOperand(sp, kStringOffset));
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ tst(r5, Operand(kSmiTagMask)); __ tst(r5, Operand(kSmiTagMask));
__ b(eq, &runtime); __ b(eq, &runtime);
Condition is_string = masm->IsObjectStringType(r5, r1); Condition is_string = masm->IsObjectStringType(r5, r1);
@ -10761,8 +10894,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r7: to (smi) // r7: to (smi)
Label seq_string; Label seq_string;
__ and_(r4, r1, Operand(kStringRepresentationMask)); __ and_(r4, r1, Operand(kStringRepresentationMask));
ASSERT(kSeqStringTag < kConsStringTag); STATIC_ASSERT(kSeqStringTag < kConsStringTag);
ASSERT(kExternalStringTag > kConsStringTag); STATIC_ASSERT(kConsStringTag < kExternalStringTag);
__ cmp(r4, Operand(kConsStringTag)); __ cmp(r4, Operand(kConsStringTag));
__ b(gt, &runtime); // External strings go to runtime. __ b(gt, &runtime); // External strings go to runtime.
__ b(lt, &seq_string); // Sequential strings are handled directly. __ b(lt, &seq_string); // Sequential strings are handled directly.
@ -10774,7 +10907,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ tst(r1, Operand(kStringRepresentationMask)); __ tst(r1, Operand(kStringRepresentationMask));
ASSERT_EQ(0, kSeqStringTag); STATIC_ASSERT(kSeqStringTag == 0);
__ b(ne, &runtime); // Cons and External strings go to runtime. __ b(ne, &runtime); // Cons and External strings go to runtime.
// Definitely a sequential string. // Definitely a sequential string.
@ -10798,7 +10931,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Check for flat ascii string. // Check for flat ascii string.
Label non_ascii_flat; Label non_ascii_flat;
__ tst(r1, Operand(kStringEncodingMask)); __ tst(r1, Operand(kStringEncodingMask));
ASSERT_EQ(0, kTwoByteStringTag); STATIC_ASSERT(kTwoByteStringTag == 0);
__ b(eq, &non_ascii_flat); __ b(eq, &non_ascii_flat);
Label result_longer_than_two; Label result_longer_than_two;
@ -10847,7 +10980,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: first character of result string. // r1: first character of result string.
// r2: result string length. // r2: result string length.
// r5: first character of sub string to copy. // r5: first character of sub string to copy.
ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask); STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED); COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@ -10878,7 +11011,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: first character of result. // r1: first character of result.
// r2: result length. // r2: result length.
// r5: first character of string to copy. // r5: first character of string to copy.
ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask); STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
DEST_ALWAYS_ALIGNED); DEST_ALWAYS_ALIGNED);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@ -10906,7 +11039,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register length_delta = scratch3; Register length_delta = scratch3;
__ mov(scratch1, scratch2, LeaveCC, gt); __ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1; Register min_length = scratch1;
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ tst(min_length, Operand(min_length)); __ tst(min_length, Operand(min_length));
__ b(eq, &compare_lengths); __ b(eq, &compare_lengths);
@ -10962,8 +11095,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
Label not_same; Label not_same;
__ cmp(r0, r1); __ cmp(r0, r1);
__ b(ne, &not_same); __ b(ne, &not_same);
ASSERT_EQ(0, EQUAL); STATIC_ASSERT(EQUAL == 0);
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL))); __ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ IncrementCounter(&Counters::string_compare_native, 1, r1, r2); __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
__ add(sp, sp, Operand(2 * kPointerSize)); __ add(sp, sp, Operand(2 * kPointerSize));
@ -10998,14 +11131,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Make sure that both arguments are strings if not known in advance. // Make sure that both arguments are strings if not known in advance.
if (string_check_) { if (string_check_) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ JumpIfEitherSmi(r0, r1, &string_add_runtime); __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
// Load instance types. // Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
ASSERT_EQ(0, kStringTag); STATIC_ASSERT(kStringTag == 0);
// If either is not a string, go to runtime. // If either is not a string, go to runtime.
__ tst(r4, Operand(kIsNotStringMask)); __ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq); __ tst(r5, Operand(kIsNotStringMask), eq);
@ -11022,10 +11155,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Check if either of the strings are empty. In that case return the other. // Check if either of the strings are empty. In that case return the other.
__ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
__ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
__ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
// Else test if second string is empty. // Else test if second string is empty.
__ cmp(r3, Operand(Smi::FromInt(0)), ne); __ cmp(r3, Operand(Smi::FromInt(0)), ne);
__ b(ne, &strings_not_empty); // If either string was empty, return r0. __ b(ne, &strings_not_empty); // If either string was empty, return r0.
@ -11049,7 +11182,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings. // Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two; Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow. // Adding two lengths can't overflow.
ASSERT(String::kMaxLength * 2 > String::kMaxLength); STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ add(r6, r2, Operand(r3)); __ add(r6, r2, Operand(r3));
// Use the runtime system when adding two one character strings, as it // Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table. // contains optimizations for this specific case using the symbol table.
@ -11097,7 +11230,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ cmp(r6, Operand(String::kMinNonFlatLength)); __ cmp(r6, Operand(String::kMinNonFlatLength));
__ b(lt, &string_add_flat_result); __ b(lt, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system. // Handle exceptionally long strings in the runtime system.
ASSERT((String::kMaxLength & 0x80000000) == 0); STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1)); ASSERT(IsPowerOf2(String::kMaxLength + 1));
// kMaxLength + 1 is representable as shifted literal, kMaxLength is not. // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
__ cmp(r6, Operand(String::kMaxLength + 1)); __ cmp(r6, Operand(String::kMaxLength + 1));
@ -11112,7 +11245,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
} }
Label non_ascii, allocated, ascii_data; Label non_ascii, allocated, ascii_data;
ASSERT_EQ(0, kTwoByteStringTag); STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(r4, Operand(kStringEncodingMask)); __ tst(r4, Operand(kStringEncodingMask));
__ tst(r5, Operand(kStringEncodingMask), ne); __ tst(r5, Operand(kStringEncodingMask), ne);
__ b(eq, &non_ascii); __ b(eq, &non_ascii);
@ -11138,7 +11271,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r5, Operand(kAsciiDataHintMask), ne); __ tst(r5, Operand(kAsciiDataHintMask), ne);
__ b(ne, &ascii_data); __ b(ne, &ascii_data);
__ eor(r4, r4, Operand(r5)); __ eor(r4, r4, Operand(r5));
ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
__ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
__ b(eq, &ascii_data); __ b(eq, &ascii_data);
@ -11164,7 +11297,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
} }
// Check that both strings are sequential. // Check that both strings are sequential.
ASSERT_EQ(0, kSeqStringTag); STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask)); __ tst(r4, Operand(kStringRepresentationMask));
__ tst(r5, Operand(kStringRepresentationMask), eq); __ tst(r5, Operand(kStringRepresentationMask), eq);
__ b(ne, &string_add_runtime); __ b(ne, &string_add_runtime);

7
deps/v8/src/arm/codegen-arm.h

@ -281,6 +281,10 @@ class CodeGenerator: public AstVisitor {
return FLAG_debug_code ? 27 : 13; return FLAG_debug_code ? 27 : 13;
} }
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5; static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
ASSERT(inlined_write_barrier_size_ != -1);
return inlined_write_barrier_size_ + 4;
}
private: private:
// Construction/Destruction // Construction/Destruction
@ -586,6 +590,9 @@ class CodeGenerator: public AstVisitor {
// to some unlinking code). // to some unlinking code).
bool function_return_is_shadowed_; bool function_return_is_shadowed_;
// Size of inlined write barriers generated by EmitNamedStore.
static int inlined_write_barrier_size_;
static InlineRuntimeLUT kInlineRuntimeLUT[]; static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame; friend class VirtualFrame;

45
deps/v8/src/arm/ic-arm.cc

@ -989,8 +989,49 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) { bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// TODO(787): Implement inline stores on arm. // Find the end of the inlined code for the store if there is an
return false; // inlined version of the store.
Address inline_end_address;
if (!IsInlinedICSite(address, &inline_end_address)) return false;
// Compute the address of the map load instruction.
Address ldr_map_instr_address =
inline_end_address -
(CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
Assembler::kInstrSize);
// Update the offsets if initializing the inlined store. No reason
// to update the offsets when clearing the inlined version because
// it will bail out in the map check.
if (map != Heap::null_value()) {
// Patch the offset in the actual store instruction.
Address str_property_instr_address =
ldr_map_instr_address + 3 * Assembler::kInstrSize;
Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
str_property_instr = Assembler::SetStrRegisterImmediateOffset(
str_property_instr, offset - kHeapObjectTag);
Assembler::instr_at_put(str_property_instr_address, str_property_instr);
// Patch the offset in the add instruction that is part of the
// write barrier.
Address add_offset_instr_address =
str_property_instr_address + Assembler::kInstrSize;
Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
add_offset_instr, offset - kHeapObjectTag);
Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
// Indicate that code has changed.
CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
}
// Patch the map check.
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
} }

328
deps/v8/src/ia32/codegen-ia32.cc

@ -905,7 +905,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
__ AbortIfNotNumber(value.reg()); __ AbortIfNotNumber(value.reg());
} }
// Smi => false iff zero. // Smi => false iff zero.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(value.reg(), Operand(value.reg())); __ test(value.reg(), Operand(value.reg()));
dest->false_target()->Branch(zero); dest->false_target()->Branch(zero);
__ test(value.reg(), Immediate(kSmiTagMask)); __ test(value.reg(), Immediate(kSmiTagMask));
@ -930,7 +930,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
dest->false_target()->Branch(equal); dest->false_target()->Branch(equal);
// Smi => false iff zero. // Smi => false iff zero.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(value.reg(), Operand(value.reg())); __ test(value.reg(), Operand(value.reg()));
dest->false_target()->Branch(zero); dest->false_target()->Branch(zero);
__ test(value.reg(), Immediate(kSmiTagMask)); __ test(value.reg(), Immediate(kSmiTagMask));
@ -1169,7 +1169,7 @@ static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
const Result& left) { const Result& left) {
// Set TypeInfo of result according to the operation performed. // Set TypeInfo of result according to the operation performed.
// Rely on the fact that smis have a 31 bit payload on ia32. // Rely on the fact that smis have a 31 bit payload on ia32.
ASSERT(kSmiValueSize == 31); STATIC_ASSERT(kSmiValueSize == 31);
switch (op) { switch (op) {
case Token::COMMA: case Token::COMMA:
return right.type_info(); return right.type_info();
@ -1445,6 +1445,55 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
} }
void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
Result* right,
JumpTarget* both_smi) {
TypeInfo left_info = left->type_info();
TypeInfo right_info = right->type_info();
if (left_info.IsDouble() || left_info.IsString() ||
right_info.IsDouble() || right_info.IsString()) {
// We know that left and right are not both smi. Don't do any tests.
return;
}
if (left->reg().is(right->reg())) {
if (!left_info.IsSmi()) {
__ test(left->reg(), Immediate(kSmiTagMask));
both_smi->Branch(zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
left->Unuse();
right->Unuse();
both_smi->Jump();
}
} else if (!left_info.IsSmi()) {
if (!right_info.IsSmi()) {
Result temp = allocator_->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), left->reg());
__ or_(temp.reg(), Operand(right->reg()));
__ test(temp.reg(), Immediate(kSmiTagMask));
temp.Unuse();
both_smi->Branch(zero);
} else {
__ test(left->reg(), Immediate(kSmiTagMask));
both_smi->Branch(zero);
}
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
if (!right_info.IsSmi()) {
__ test(right->reg(), Immediate(kSmiTagMask));
both_smi->Branch(zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
left->Unuse();
right->Unuse();
both_smi->Jump();
}
}
}
void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right, Register right,
Register scratch, Register scratch,
@ -1599,7 +1648,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// Check for the corner case of dividing the most negative smi by // Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by // -1. We cannot use the overflow flag, since it is not set by
// idiv instruction. // idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000); __ cmp(eax, 0x40000000);
deferred->Branch(equal); deferred->Branch(equal);
// Check that the remainder is zero. // Check that the remainder is zero.
@ -1789,7 +1838,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
case Token::MUL: { case Token::MUL: {
// If the smi tag is 0 we can just leave the tag on one operand. // If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // Adjust code below if not the case. STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign). // Remove smi tag from the left operand (but keep sign).
// Left-hand operand has been copied into answer. // Left-hand operand has been copied into answer.
__ SmiUntag(answer.reg()); __ SmiUntag(answer.reg());
@ -2296,13 +2345,13 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
__ AbortIfNotSmi(operand->reg()); __ AbortIfNotSmi(operand->reg());
} }
__ mov(answer.reg(), operand->reg()); __ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case STATIC_ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1. // We do no shifts, only the Smi conversion, if shift_value is 1.
if (shift_value > 1) { if (shift_value > 1) {
__ shl(answer.reg(), shift_value - 1); __ shl(answer.reg(), shift_value - 1);
} }
// Convert int result to Smi, checking that it is in int range. // Convert int result to Smi, checking that it is in int range.
ASSERT(kSmiTagSize == 1); // adjust code if not the case STATIC_ASSERT(kSmiTagSize == 1); // adjust code if not the case
__ add(answer.reg(), Operand(answer.reg())); __ add(answer.reg(), Operand(answer.reg()));
deferred->Branch(overflow); deferred->Branch(overflow);
deferred->BindExit(); deferred->BindExit();
@ -2370,8 +2419,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
overwrite_mode); overwrite_mode);
// Check that lowest log2(value) bits of operand are zero, and test // Check that lowest log2(value) bits of operand are zero, and test
// smi tag at the same time. // smi tag at the same time.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagSize); STATIC_ASSERT(kSmiTagSize == 1);
__ test(operand->reg(), Immediate(3)); __ test(operand->reg(), Immediate(3));
deferred->Branch(not_zero); // Branch if non-smi or odd smi. deferred->Branch(not_zero); // Branch if non-smi or odd smi.
__ sar(operand->reg(), 1); __ sar(operand->reg(), 1);
@ -2605,9 +2654,9 @@ void CodeGenerator::Comparison(AstNode* node,
// side (which is always a symbol). // side (which is always a symbol).
if (cc == equal) { if (cc == equal) {
Label not_a_symbol; Label not_a_symbol;
ASSERT(kSymbolTag != 0); STATIC_ASSERT(kSymbolTag != 0);
// Ensure that no non-strings have the symbol bit set. // Ensure that no non-strings have the symbol bit set.
ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
__ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit. __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
__ j(zero, &not_a_symbol); __ j(zero, &not_a_symbol);
// They are symbols, so do identity compare. // They are symbols, so do identity compare.
@ -2735,42 +2784,44 @@ void CodeGenerator::Comparison(AstNode* node,
Register right_reg = right_side.reg(); Register right_reg = right_side.reg();
// In-line check for comparing two smis. // In-line check for comparing two smis.
Result temp = allocator_->Allocate(); JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
ASSERT(temp.is_valid());
__ mov(temp.reg(), left_side.reg());
__ or_(temp.reg(), Operand(right_side.reg()));
__ test(temp.reg(), Immediate(kSmiTagMask));
temp.Unuse();
is_smi.Branch(zero, taken);
// Inline the equality check if both operands can't be a NaN. If both if (has_valid_frame()) {
// objects are the same they are equal. // Inline the equality check if both operands can't be a NaN. If both
if (nan_info == kCantBothBeNaN && cc == equal) { // objects are the same they are equal.
__ cmp(left_side.reg(), Operand(right_side.reg())); if (nan_info == kCantBothBeNaN && cc == equal) {
dest->true_target()->Branch(equal); __ cmp(left_side.reg(), Operand(right_side.reg()));
} dest->true_target()->Branch(equal);
}
// Inlined number comparison: // Inlined number comparison:
if (inline_number_compare) { if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
} }
// End of in-line compare, call out to the compare stub. Don't include // End of in-line compare, call out to the compare stub. Don't include
// number comparison in the stub if it was inlined. // number comparison in the stub if it was inlined.
CompareStub stub(cc, strict, nan_info, !inline_number_compare); CompareStub stub(cc, strict, nan_info, !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side); Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ test(answer.reg(), Operand(answer.reg())); __ test(answer.reg(), Operand(answer.reg()));
answer.Unuse(); answer.Unuse();
dest->true_target()->Branch(cc); if (is_smi.is_linked()) {
dest->false_target()->Jump(); dest->true_target()->Branch(cc);
dest->false_target()->Jump();
} else {
dest->Split(cc);
}
}
is_smi.Bind(); if (is_smi.is_linked()) {
left_side = Result(left_reg); is_smi.Bind();
right_side = Result(right_reg); left_side = Result(left_reg);
__ cmp(left_side.reg(), Operand(right_side.reg())); right_side = Result(right_reg);
right_side.Unuse(); __ cmp(left_side.reg(), Operand(right_side.reg()));
left_side.Unuse(); right_side.Unuse();
dest->Split(cc); left_side.Unuse();
dest->Split(cc);
}
} }
} }
} }
@ -3151,8 +3202,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// JS_FUNCTION_TYPE is the last instance type and it is right // JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound. // bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &build_args); __ j(below, &build_args);
@ -4476,7 +4527,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// The next handler address is on top of the frame. Unlink from // The next handler address is on top of the frame. Unlink from
// the handler list and drop the rest of this handler from the // the handler list and drop the rest of this handler from the
// frame. // frame.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address)); frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
if (has_unlinks) { if (has_unlinks) {
@ -4507,7 +4558,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
__ mov(esp, Operand::StaticVariable(handler_address)); __ mov(esp, Operand::StaticVariable(handler_address));
frame_->Forget(frame_->height() - handler_height); frame_->Forget(frame_->height() - handler_height);
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address)); frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@ -4593,7 +4644,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
// chain and set the state on the frame to FALLING. // chain and set the state on the frame to FALLING.
if (has_valid_frame()) { if (has_valid_frame()) {
// The next handler address is on top of the frame. // The next handler address is on top of the frame.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address)); frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@ -4632,7 +4683,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
frame_->Forget(frame_->height() - handler_height); frame_->Forget(frame_->height() - handler_height);
// Unlink this handler and drop it from the frame. // Unlink this handler and drop it from the frame.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address)); frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@ -5339,13 +5390,13 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Duplicate the object as the IC receiver. // Duplicate the object as the IC receiver.
frame_->Dup(); frame_->Dup();
Load(property->value()); Load(property->value());
Result dummy = frame_->CallStoreIC(Handle<String>::cast(key), false); Result ignored =
frame_->CallStoreIC(Handle<String>::cast(key), false);
// A test eax instruction following the store IC call would // A test eax instruction following the store IC call would
// indicate the presence of an inlined version of the // indicate the presence of an inlined version of the
// store. Add a nop to indicate that there is no such // store. Add a nop to indicate that there is no such
// inlined version. // inlined version.
__ nop(); __ nop();
dummy.Unuse();
break; break;
} }
// Fall through // Fall through
@ -6573,8 +6624,8 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// As long as JS_FUNCTION_TYPE is the last instance type and it is // As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
// LAST_JS_OBJECT_TYPE. // LAST_JS_OBJECT_TYPE.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE); __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
function.Branch(equal); function.Branch(equal);
@ -6715,7 +6766,7 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) { void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0); ASSERT(args->length() == 0);
ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi. STATIC_ASSERT(kSmiTag == 0); // EBP value is aligned, so it looks like a Smi.
Result ebp_as_smi = allocator_->Allocate(); Result ebp_as_smi = allocator_->Allocate();
ASSERT(ebp_as_smi.is_valid()); ASSERT(ebp_as_smi.is_valid());
__ mov(ebp_as_smi.reg(), Operand(ebp)); __ mov(ebp_as_smi.reg(), Operand(ebp));
@ -7069,7 +7120,7 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
key.reg()); key.reg());
// tmp.reg() now holds finger offset as a smi. // tmp.reg() now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp.reg(), FieldOperand(cache.reg(), __ mov(tmp.reg(), FieldOperand(cache.reg(),
JSFunctionResultCache::kFingerOffset)); JSFunctionResultCache::kFingerOffset));
__ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg())); __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
@ -8917,16 +8968,21 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
// Allocate scratch register for write barrier. // Allocate scratch register for write barrier.
Result scratch = allocator()->Allocate(); Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid() && ASSERT(scratch.is_valid());
result.is_valid() &&
receiver.is_valid() &&
value.is_valid());
// The write barrier clobbers all input registers, so spill the // The write barrier clobbers all input registers, so spill the
// receiver and the value. // receiver and the value.
frame_->Spill(receiver.reg()); frame_->Spill(receiver.reg());
frame_->Spill(value.reg()); frame_->Spill(value.reg());
// If the receiver and the value share a register allocate a new
// register for the receiver.
if (receiver.reg().is(value.reg())) {
receiver = allocator()->Allocate();
ASSERT(receiver.is_valid());
__ mov(receiver.reg(), Operand(value.reg()));
}
// Update the write barrier. To save instructions in the inlined // Update the write barrier. To save instructions in the inlined
// version we do not filter smis. // version we do not filter smis.
Label skip_write_barrier; Label skip_write_barrier;
@ -9031,7 +9087,7 @@ Result CodeGenerator::EmitKeyedLoad() {
// Load and check that the result is not the hole. // Load and check that the result is not the hole.
// Key holds a smi. // Key holds a smi.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(elements.reg(), __ mov(elements.reg(),
FieldOperand(elements.reg(), FieldOperand(elements.reg(),
key.reg(), key.reg(),
@ -9407,7 +9463,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
Label slow_case; Label slow_case;
__ mov(ecx, Operand(esp, 3 * kPointerSize)); __ mov(ecx, Operand(esp, 3 * kPointerSize));
__ mov(eax, Operand(esp, 2 * kPointerSize)); __ mov(eax, Operand(esp, 2 * kPointerSize));
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0)); STATIC_ASSERT(kPointerSize == 4);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax)); __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
__ cmp(ecx, Factory::undefined_value()); __ cmp(ecx, Factory::undefined_value());
__ j(equal, &slow_case); __ j(equal, &slow_case);
@ -9471,7 +9529,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
// String value => false iff empty. // String value => false iff empty.
__ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string); __ j(above_equal, &not_string);
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0)); __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result); __ j(zero, &false_result);
__ jmp(&true_result); __ jmp(&true_result);
@ -9721,7 +9779,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
} }
// 3. Perform the smi check of the operands. // 3. Perform the smi check of the operands.
ASSERT(kSmiTag == 0); // Adjust zero check if not the case. STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
__ test(combined, Immediate(kSmiTagMask)); __ test(combined, Immediate(kSmiTagMask));
__ j(not_zero, &not_smis, not_taken); __ j(not_zero, &not_smis, not_taken);
@ -9802,7 +9860,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::MUL: case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand. // If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // Adjust code below if not the case. STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// We can't revert the multiplication if the result is not a smi // We can't revert the multiplication if the result is not a smi
// so save the right operand. // so save the right operand.
__ mov(ebx, right); __ mov(ebx, right);
@ -9830,7 +9888,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Check for the corner case of dividing the most negative smi by // Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by idiv // -1. We cannot use the overflow flag, since it is not set by idiv
// instruction. // instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000); __ cmp(eax, 0x40000000);
__ j(equal, &use_fp_on_smis); __ j(equal, &use_fp_on_smis);
// Check for negative zero result. Use combined = left | right. // Check for negative zero result. Use combined = left | right.
@ -10403,7 +10461,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &input_not_smi); __ j(not_zero, &input_not_smi);
// Input is a smi. Untag and load it onto the FPU stack. // Input is a smi. Untag and load it onto the FPU stack.
// Then load the low and high words of the double into ebx, edx. // Then load the low and high words of the double into ebx, edx.
ASSERT_EQ(1, kSmiTagSize); STATIC_ASSERT(kSmiTagSize == 1);
__ sar(eax, 1); __ sar(eax, 1);
__ sub(Operand(esp), Immediate(2 * kPointerSize)); __ sub(Operand(esp), Immediate(2 * kPointerSize));
__ mov(Operand(esp, 0), eax); __ mov(Operand(esp, 0), eax);
@ -11122,7 +11180,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ j(sign, &try_float, not_taken); __ j(sign, &try_float, not_taken);
// Tag the result as a smi and we're done. // Tag the result as a smi and we're done.
ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTagSize == 1);
__ lea(eax, Operand(ecx, times_2, kSmiTag)); __ lea(eax, Operand(ecx, times_2, kSmiTag));
__ jmp(&done); __ jmp(&done);
@ -11198,7 +11256,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow, not_taken); __ j(above_equal, &slow, not_taken);
// Read the argument from the stack and return it. // Read the argument from the stack and return it.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
__ lea(ebx, Operand(ebp, eax, times_2, 0)); __ lea(ebx, Operand(ebp, eax, times_2, 0));
__ neg(edx); __ neg(edx);
__ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@ -11213,7 +11272,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow, not_taken); __ j(above_equal, &slow, not_taken);
// Read the argument from the stack and return it. // Read the argument from the stack and return it.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
__ lea(ebx, Operand(ebx, ecx, times_2, 0)); __ lea(ebx, Operand(ebx, ecx, times_2, 0));
__ neg(edx); __ neg(edx);
__ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@ -11284,12 +11344,12 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
} }
// Setup the callee in-object property. // Setup the callee in-object property.
ASSERT(Heap::arguments_callee_index == 0); STATIC_ASSERT(Heap::arguments_callee_index == 0);
__ mov(ebx, Operand(esp, 3 * kPointerSize)); __ mov(ebx, Operand(esp, 3 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx); __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
// Get the length (smi tagged) and set that as an in-object property too. // Get the length (smi tagged) and set that as an in-object property too.
ASSERT(Heap::arguments_length_index == 1); STATIC_ASSERT(Heap::arguments_length_index == 1);
__ mov(ecx, Operand(esp, 1 * kPointerSize)); __ mov(ecx, Operand(esp, 1 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx); __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
@ -11368,7 +11428,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the first argument is a JSRegExp object. // Check that the first argument is a JSRegExp object.
__ mov(eax, Operand(esp, kJSRegExpOffset)); __ mov(eax, Operand(esp, kJSRegExpOffset));
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask)); __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime); __ j(zero, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx); __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
@ -11393,8 +11453,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. This // Calculate number of capture registers (number_of_captures + 1) * 2. This
// uses the asumption that smis are 2 * their untagged value. // uses the asumption that smis are 2 * their untagged value.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(Operand(edx), Immediate(2)); // edx was a smi. __ add(Operand(edx), Immediate(2)); // edx was a smi.
// Check that the static offsets vector buffer is large enough. // Check that the static offsets vector buffer is large enough.
__ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
@ -11452,7 +11512,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// First check for flat two byte string. // First check for flat two byte string.
__ and_(ebx, __ and_(ebx,
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag); STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string); __ j(zero, &seq_two_byte_string);
// Any other flat string must be a flat ascii string. // Any other flat string must be a flat ascii string.
__ test(Operand(ebx), __ test(Operand(ebx),
@ -11464,8 +11524,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. In that case the subject string is just the first part of the cons // string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be // string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string. // a sequential string or an external string.
ASSERT(kExternalStringTag !=0); STATIC_ASSERT(kExternalStringTag != 0);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag); STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
__ test(Operand(ebx), __ test(Operand(ebx),
Immediate(kIsNotStringMask | kExternalStringTag)); Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime); __ j(not_zero, &runtime);
@ -11481,7 +11541,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Is first part a flat two byte string? // Is first part a flat two byte string?
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
kStringRepresentationMask | kStringEncodingMask); kStringRepresentationMask | kStringEncodingMask);
ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag); STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string); __ j(zero, &seq_two_byte_string);
// Any other flat string must be ascii. // Any other flat string must be ascii.
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
@ -11552,7 +11612,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ jmp(&setup_rest); __ jmp(&setup_rest);
__ bind(&setup_two_byte); __ bind(&setup_two_byte);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // edi is smi (powered by 2). STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2).
__ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize)); __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize)); __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
@ -11600,8 +11661,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. // Calculate number of capture registers (number_of_captures + 1) * 2.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(Operand(edx), Immediate(2)); // edx was a smi. __ add(Operand(edx), Immediate(2)); // edx was a smi.
// edx: Number of capture registers // edx: Number of capture registers
@ -11696,7 +11757,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ SmiUntag(scratch); __ SmiUntag(scratch);
} else { } else {
Label not_smi, hash_calculated; Label not_smi, hash_calculated;
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(object, Immediate(kSmiTagMask)); __ test(object, Immediate(kSmiTagMask));
__ j(not_zero, &not_smi); __ j(not_zero, &not_smi);
__ mov(scratch, object); __ mov(scratch, object);
@ -11706,7 +11767,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ cmp(FieldOperand(object, HeapObject::kMapOffset), __ cmp(FieldOperand(object, HeapObject::kMapOffset),
Factory::heap_number_map()); Factory::heap_number_map());
__ j(not_equal, not_found); __ j(not_equal, not_found);
ASSERT_EQ(8, kDoubleSize); STATIC_ASSERT(8 == kDoubleSize);
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
// Object is heap number and hash is now in scratch. Calculate cache index. // Object is heap number and hash is now in scratch. Calculate cache index.
@ -11837,7 +11898,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
// all bits in the mask are set. We only need to check the word // all bits in the mask are set. We only need to check the word
// that contains the exponent and high bit of the mantissa. // that contains the exponent and high bit of the mantissa.
ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u); STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
__ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(eax, Operand(eax)); __ xor_(eax, Operand(eax));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
@ -11845,7 +11906,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ add(edx, Operand(edx)); __ add(edx, Operand(edx));
__ cmp(edx, kQuietNaNHighBitsMask << 1); __ cmp(edx, kQuietNaNHighBitsMask << 1);
if (cc_ == equal) { if (cc_ == equal) {
ASSERT_NE(1, EQUAL); STATIC_ASSERT(EQUAL != 1);
__ setcc(above_equal, eax); __ setcc(above_equal, eax);
__ ret(0); __ ret(0);
} else { } else {
@ -11873,7 +11934,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// slow-case code. // slow-case code.
// If either is a Smi (we know that not both are), then they can only // If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case. // be equal if the other is a HeapNumber. If so, use the slow case.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0)); ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask)); __ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, Operand(eax)); __ and_(ecx, Operand(eax));
@ -11882,7 +11943,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// One operand is a smi. // One operand is a smi.
// Check whether the non-smi is a heap number. // Check whether the non-smi is a heap number.
ASSERT_EQ(1, kSmiTagMask); STATIC_ASSERT(kSmiTagMask == 1);
// ecx still holds eax & kSmiTag, which is either zero or one. // ecx still holds eax & kSmiTag, which is either zero or one.
__ sub(Operand(ecx), Immediate(0x01)); __ sub(Operand(ecx), Immediate(0x01));
__ mov(ebx, edx); __ mov(ebx, edx);
@ -11908,13 +11969,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Get the type of the first operand. // Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison. // If the first object is a JS object, we have done pointer comparison.
Label first_non_object; Label first_non_object;
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object); __ j(below, &first_non_object);
// Return non-zero (eax is not zero) // Return non-zero (eax is not zero)
Label return_not_equal; Label return_not_equal;
ASSERT(kHeapObjectTag != 0); STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal); __ bind(&return_not_equal);
__ ret(0); __ ret(0);
@ -12034,8 +12095,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// At most one is a smi, so we can test for smi by adding the two. // At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus // A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear. // a heap object has the low bit clear.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagMask); STATIC_ASSERT(kSmiTagMask == 1);
__ lea(ecx, Operand(eax, edx, times_1, 0)); __ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask)); __ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects); __ j(not_zero, &not_both_objects);
@ -12175,16 +12236,16 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception. // eax holds the exception.
// Adjust this code if not the case. // Adjust this code if not the case.
ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler. // Drop the sp to the top of the handler.
ExternalReference handler_address(Top::k_handler_address); ExternalReference handler_address(Top::k_handler_address);
__ mov(esp, Operand::StaticVariable(handler_address)); __ mov(esp, Operand::StaticVariable(handler_address));
// Restore next handler and frame pointer, discard handler state. // Restore next handler and frame pointer, discard handler state.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address)); __ pop(Operand::StaticVariable(handler_address));
ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp); __ pop(ebp);
__ pop(edx); // Remove state. __ pop(edx); // Remove state.
@ -12198,7 +12259,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&skip); __ bind(&skip);
ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0); __ ret(0);
} }
@ -12218,7 +12279,7 @@ void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
Label prologue; Label prologue;
Label promote_scheduled_exception; Label promote_scheduled_exception;
__ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc); __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
ASSERT_EQ(kArgc, 4); STATIC_ASSERT(kArgc == 4);
if (kPassHandlesDirectly) { if (kPassHandlesDirectly) {
// When handles as passed directly we don't have to allocate extra // When handles as passed directly we don't have to allocate extra
// space for and pass an out parameter. // space for and pass an out parameter.
@ -12333,7 +12394,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Check for failure result. // Check for failure result.
Label failure_returned; Label failure_returned;
ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
__ lea(ecx, Operand(eax, 1)); __ lea(ecx, Operand(eax, 1));
// Lower 2 bits of ecx are 0 iff eax has failure tag. // Lower 2 bits of ecx are 0 iff eax has failure tag.
__ test(ecx, Immediate(kFailureTagMask)); __ test(ecx, Immediate(kFailureTagMask));
@ -12348,7 +12409,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label retry; Label retry;
// If the returned exception is RETRY_AFTER_GC continue at retry label // If the returned exception is RETRY_AFTER_GC continue at retry label
ASSERT(Failure::RETRY_AFTER_GC == 0); STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry, taken); __ j(zero, &retry, taken);
@ -12379,7 +12440,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) { UncatchableExceptionType type) {
// Adjust this code if not the case. // Adjust this code if not the case.
ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler. // Drop sp to the top stack handler.
ExternalReference handler_address(Top::k_handler_address); ExternalReference handler_address(Top::k_handler_address);
@ -12399,7 +12460,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
__ bind(&done); __ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler. // Set the top handler address to next handler past the current ENTRY handler.
ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address)); __ pop(Operand::StaticVariable(handler_address));
if (type == OUT_OF_MEMORY) { if (type == OUT_OF_MEMORY) {
@ -12418,11 +12479,11 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
__ xor_(esi, Operand(esi)); __ xor_(esi, Operand(esi));
// Restore fp from handler and discard handler state. // Restore fp from handler and discard handler state.
ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp); __ pop(ebp);
__ pop(edx); // State. __ pop(edx); // State.
ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0); __ ret(0);
} }
@ -12733,7 +12794,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label got_char_code; Label got_char_code;
// If the receiver is a smi trigger the non-string case. // If the receiver is a smi trigger the non-string case.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(object_, Immediate(kSmiTagMask)); __ test(object_, Immediate(kSmiTagMask));
__ j(zero, receiver_not_string_); __ j(zero, receiver_not_string_);
@ -12745,7 +12806,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ j(not_zero, receiver_not_string_); __ j(not_zero, receiver_not_string_);
// If the index is non-smi trigger the non-smi case. // If the index is non-smi trigger the non-smi case.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(index_, Immediate(kSmiTagMask)); __ test(index_, Immediate(kSmiTagMask));
__ j(not_zero, &index_not_smi_); __ j(not_zero, &index_not_smi_);
@ -12758,7 +12819,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ j(above_equal, index_out_of_range_); __ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings. // We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0); STATIC_ASSERT(kSeqStringTag == 0);
__ test(result_, Immediate(kStringRepresentationMask)); __ test(result_, Immediate(kStringRepresentationMask));
__ j(zero, &flat_string); __ j(zero, &flat_string);
@ -12779,19 +12840,19 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime. // If the first cons component is also non-flat, then go to runtime.
ASSERT(kSeqStringTag == 0); STATIC_ASSERT(kSeqStringTag == 0);
__ test(result_, Immediate(kStringRepresentationMask)); __ test(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_); __ j(not_zero, &call_runtime_);
// Check for 1-byte or 2-byte string. // Check for 1-byte or 2-byte string.
__ bind(&flat_string); __ bind(&flat_string);
ASSERT(kAsciiStringTag != 0); STATIC_ASSERT(kAsciiStringTag != 0);
__ test(result_, Immediate(kStringEncodingMask)); __ test(result_, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string); __ j(not_zero, &ascii_string);
// 2-byte string. // 2-byte string.
// Load the 2-byte character code into the result register. // Load the 2-byte character code into the result register.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movzx_w(result_, FieldOperand(object_, __ movzx_w(result_, FieldOperand(object_,
scratch_, times_1, // Scratch is smi-tagged. scratch_, times_1, // Scratch is smi-tagged.
SeqTwoByteString::kHeaderSize)); SeqTwoByteString::kHeaderSize));
@ -12841,7 +12902,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm); call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range. // If index is still not a smi, it must be out of range.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(scratch_, Immediate(kSmiTagMask)); __ test(scratch_, Immediate(kSmiTagMask));
__ j(not_zero, index_out_of_range_); __ j(not_zero, index_out_of_range_);
// Otherwise, return to the fast path. // Otherwise, return to the fast path.
@ -12870,8 +12931,8 @@ void StringCharCodeAtGenerator::GenerateSlow(
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode. // Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ test(code_, __ test(code_,
Immediate(kSmiTagMask | Immediate(kSmiTagMask |
@ -12879,9 +12940,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ j(not_zero, &slow_case_, not_taken); __ j(not_zero, &slow_case_, not_taken);
__ Set(result_, Immediate(Factory::single_character_string_cache())); __ Set(result_, Immediate(Factory::single_character_string_cache()));
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTagSize == 1);
ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code. // At this point code register contains smi tagged ascii char code.
__ mov(result_, FieldOperand(result_, __ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size, code_, times_half_pointer_size,
@ -12953,7 +13014,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Check if either of the strings are empty. In that case return the other. // Check if either of the strings are empty. In that case return the other.
Label second_not_zero_length, both_not_zero_length; Label second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset)); __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(ecx, Operand(ecx)); __ test(ecx, Operand(ecx));
__ j(not_zero, &second_not_zero_length); __ j(not_zero, &second_not_zero_length);
// Second string is empty, result is first string which is already in eax. // Second string is empty, result is first string which is already in eax.
@ -12961,7 +13022,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ret(2 * kPointerSize); __ ret(2 * kPointerSize);
__ bind(&second_not_zero_length); __ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset)); __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(ebx, Operand(ebx)); __ test(ebx, Operand(ebx));
__ j(not_zero, &both_not_zero_length); __ j(not_zero, &both_not_zero_length);
// First string is empty, result is second string which is in edx. // First string is empty, result is second string which is in edx.
@ -12978,7 +13039,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label string_add_flat_result, longer_than_two; Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length); __ bind(&both_not_zero_length);
__ add(ebx, Operand(ecx)); __ add(ebx, Operand(ecx));
ASSERT(Smi::kMaxValue == String::kMaxLength); STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system. // Handle exceptionally long strings in the runtime system.
__ j(overflow, &string_add_runtime); __ j(overflow, &string_add_runtime);
// Use the runtime system when adding two one character strings, as it // Use the runtime system when adding two one character strings, as it
@ -13019,7 +13080,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, Operand(edi)); __ and_(ecx, Operand(edi));
ASSERT(kStringEncodingMask == kAsciiStringTag); STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag)); __ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii); __ j(zero, &non_ascii);
__ bind(&ascii_data); __ bind(&ascii_data);
@ -13046,7 +13107,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ xor_(edi, Operand(ecx)); __ xor_(edi, Operand(ecx));
ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ and_(edi, kAsciiStringTag | kAsciiDataHintTag); __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
__ j(equal, &ascii_data); __ j(equal, &ascii_data);
@ -13075,7 +13136,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ebx: length of resulting flat string as a smi // ebx: length of resulting flat string as a smi
// edx: second string // edx: second string
Label non_ascii_string_add_flat_result; Label non_ascii_string_add_flat_result;
ASSERT(kStringEncodingMask == kAsciiStringTag); STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &non_ascii_string_add_flat_result); __ j(zero, &non_ascii_string_add_flat_result);
@ -13194,9 +13255,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
Register count, Register count,
Register scratch, Register scratch,
bool ascii) { bool ascii) {
// Copy characters using rep movs of doublewords. Align destination on 4 byte // Copy characters using rep movs of doublewords.
// boundary before starting rep movs. Copy remaining characters after running // The destination is aligned on a 4 byte boundary because we are
// rep movs. // copying to the beginning of a newly allocated string.
ASSERT(dest.is(edi)); // rep movs destination ASSERT(dest.is(edi)); // rep movs destination
ASSERT(src.is(esi)); // rep movs source ASSERT(src.is(esi)); // rep movs source
ASSERT(count.is(ecx)); // rep movs count ASSERT(count.is(ecx)); // rep movs count
@ -13317,9 +13378,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
} }
__ and_(scratch, Operand(mask)); __ and_(scratch, Operand(mask));
// Load the entry from the symble table. // Load the entry from the symbol table.
Register candidate = scratch; // Scratch register contains candidate. Register candidate = scratch; // Scratch register contains candidate.
ASSERT_EQ(1, SymbolTable::kEntrySize); STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ mov(candidate, __ mov(candidate,
FieldOperand(symbol_table, FieldOperand(symbol_table,
scratch, scratch,
@ -13362,7 +13423,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here. // Scratch register contains result when we fall through to here.
Register result = scratch; Register result = scratch;
__ bind(&found_in_symbol_table); __ bind(&found_in_symbol_table);
__ pop(mask); // Pop temporally saved mask from the stack. __ pop(mask); // Pop saved mask from the stack.
if (!result.is(eax)) { if (!result.is(eax)) {
__ mov(eax, result); __ mov(eax, result);
} }
@ -13437,7 +13498,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Make sure first argument is a string. // Make sure first argument is a string.
__ mov(eax, Operand(esp, 3 * kPointerSize)); __ mov(eax, Operand(esp, 3 * kPointerSize));
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask)); __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime); __ j(zero, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@ -13445,6 +13506,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// eax: string // eax: string
// ebx: instance type // ebx: instance type
// Calculate length of sub string using the smi values. // Calculate length of sub string using the smi values.
Label result_longer_than_two; Label result_longer_than_two;
__ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index. __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
@ -13550,8 +13612,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
// As from is a smi it is 2 times the value which matches the size of a two // As from is a smi it is 2 times the value which matches the size of a two
// byte character. // byte character.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(esi, Operand(ebx)); __ add(esi, Operand(ebx));
// eax: result string // eax: result string
@ -13637,8 +13699,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ j(not_zero, &result_not_equal); __ j(not_zero, &result_not_equal);
// Result is EQUAL. // Result is EQUAL.
ASSERT_EQ(0, EQUAL); STATIC_ASSERT(EQUAL == 0);
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0); __ ret(0);
@ -13670,8 +13732,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
Label not_same; Label not_same;
__ cmp(edx, Operand(eax)); __ cmp(edx, Operand(eax));
__ j(not_equal, &not_same); __ j(not_equal, &not_same);
ASSERT_EQ(0, EQUAL); STATIC_ASSERT(EQUAL == 0);
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(&Counters::string_compare_native, 1); __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize); __ ret(2 * kPointerSize);

9
deps/v8/src/ia32/codegen-ia32.h

@ -519,6 +519,15 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr, void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode); OverwriteMode overwrite_mode);
// Emits code sequence that jumps to a JumpTarget if the inputs
// are both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks.
// Allocates a temporary register, possibly spilling from the frame,
// if it needs to check both left and right.
void JumpIfBothSmiUsingTypeInfo(Result* left,
Result* right,
JumpTarget* both_smi);
// Emits code sequence that jumps to deferred code if the inputs // Emits code sequence that jumps to deferred code if the inputs
// are not both smis. Cannot be in MacroAssembler because it takes // are not both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks. // advantage of TypeInfo to skip unneeded checks.

3
deps/v8/src/jump-target-light.h

@ -101,8 +101,7 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// Emit a conditional branch to the target. There must be a current // Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the // frame at the branch. The current frame will fall through to the
// code after the branch. The arg is a result that is live both at // code after the branch.
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint); virtual void Branch(Condition cc, Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding // Bind a jump target. If there is no current frame at the binding

6
deps/v8/src/objects.cc

@ -2966,7 +2966,8 @@ Object* JSObject::DefineAccessor(AccessorInfo* info) {
break; break;
} }
SetElementCallback(index, info, info->property_attributes()); Object* ok = SetElementCallback(index, info, info->property_attributes());
if (ok->IsFailure()) return ok;
} else { } else {
// Lookup the name. // Lookup the name.
LookupResult result; LookupResult result;
@ -2976,7 +2977,8 @@ Object* JSObject::DefineAccessor(AccessorInfo* info) {
if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) { if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
return Heap::undefined_value(); return Heap::undefined_value();
} }
SetPropertyCallback(name, info, info->property_attributes()); Object* ok = SetPropertyCallback(name, info, info->property_attributes());
if (ok->IsFailure()) return ok;
} }
return this; return this;

64
deps/v8/src/runtime.cc

@ -212,23 +212,42 @@ static Handle<Map> ComputeObjectLiteralMap(
Handle<Context> context, Handle<Context> context,
Handle<FixedArray> constant_properties, Handle<FixedArray> constant_properties,
bool* is_result_from_cache) { bool* is_result_from_cache) {
int number_of_properties = constant_properties->length() / 2; int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
if (FLAG_canonicalize_object_literal_maps) { if (FLAG_canonicalize_object_literal_maps) {
// First find prefix of consecutive symbol keys. // Check that there are only symbols and array indices among keys.
int number_of_symbol_keys = 0; int number_of_symbol_keys = 0;
while ((number_of_symbol_keys < number_of_properties) && for (int p = 0; p != properties_length; p += 2) {
(constant_properties->get(number_of_symbol_keys*2)->IsSymbol())) { Object* key = constant_properties->get(p);
number_of_symbol_keys++; uint32_t element_index = 0;
if (key->IsSymbol()) {
number_of_symbol_keys++;
} else if (key->ToArrayIndex(&element_index)) {
// An index key does not require space in the property backing store.
number_of_properties--;
} else {
// Bail out as a non-symbol non-index key makes caching impossible.
// ASSERT to make sure that the if condition after the loop is false.
ASSERT(number_of_symbol_keys != number_of_properties);
break;
}
} }
// Based on the number of prefix symbols key we decide whether // If we only have symbols and array indices among keys then we can
// to use the map cache in the global context. // use the map cache in the global context.
const int kMaxKeys = 10; const int kMaxKeys = 10;
if ((number_of_symbol_keys == number_of_properties) && if ((number_of_symbol_keys == number_of_properties) &&
(number_of_symbol_keys < kMaxKeys)) { (number_of_symbol_keys < kMaxKeys)) {
// Create the fixed array with the key. // Create the fixed array with the key.
Handle<FixedArray> keys = Factory::NewFixedArray(number_of_symbol_keys); Handle<FixedArray> keys = Factory::NewFixedArray(number_of_symbol_keys);
for (int i = 0; i < number_of_symbol_keys; i++) { if (number_of_symbol_keys > 0) {
keys->set(i, constant_properties->get(i*2)); int index = 0;
for (int p = 0; p < properties_length; p += 2) {
Object* key = constant_properties->get(p);
if (key->IsSymbol()) {
keys->set(index++, key);
}
}
ASSERT(index == number_of_symbol_keys);
} }
*is_result_from_cache = true; *is_result_from_cache = true;
return Factory::ObjectLiteralMapFromCache(context, keys); return Factory::ObjectLiteralMapFromCache(context, keys);
@ -6732,6 +6751,26 @@ static Object* Runtime_NewClosure(Arguments args) {
return *result; return *result;
} }
static Object* Runtime_NewObjectFromBound(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_ARG_CHECKED(JSArray, params, 1);
FixedArray* fixed = FixedArray::cast(params->elements());
bool exception = false;
Object*** param_data = NewArray<Object**>(fixed->length());
for (int i = 0; i < fixed->length(); i++) {
Handle<Object> val = Handle<Object>(fixed->get(i));
param_data[i] = val.location();
}
Handle<Object> result = Execution::New(
function, fixed->length(), param_data, &exception);
return *result;
}
static Code* ComputeConstructStub(Handle<JSFunction> function) { static Code* ComputeConstructStub(Handle<JSFunction> function) {
Handle<Object> prototype = Factory::null_value(); Handle<Object> prototype = Factory::null_value();
@ -9342,6 +9381,13 @@ static Object* Runtime_SetScriptBreakPoint(Arguments args) {
} }
Debug::SetBreakPoint(shared, break_point_object_arg, &position); Debug::SetBreakPoint(shared, break_point_object_arg, &position);
position += shared->start_position(); position += shared->start_position();
// The result position may become beyond script source end.
// This is expected when the function is toplevel. This may become
// a problem later when actual position gets converted into line/column.
if (shared->is_toplevel() && position == shared->end_position()) {
position = shared->end_position() - 1;
}
return Smi::FromInt(position); return Smi::FromInt(position);
} }
return Heap::undefined_value(); return Heap::undefined_value();

1
deps/v8/src/runtime.h

@ -258,6 +258,7 @@ namespace internal {
/* Statements */ \ /* Statements */ \
F(NewClosure, 2, 1) \ F(NewClosure, 2, 1) \
F(NewObject, 1, 1) \ F(NewObject, 1, 1) \
F(NewObjectFromBound, 2, 1) \
F(Throw, 1, 1) \ F(Throw, 1, 1) \
F(ReThrow, 1, 1) \ F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \ F(ThrowReferenceError, 1, 1) \

2
deps/v8/src/v8-counters.h

@ -169,7 +169,7 @@ namespace internal {
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \ SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \ SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(negative_lookups, V8.NegativeLookups) \ SC(negative_lookups, V8.NegativeLookups) \
SC(negative_lookups_miss, V8.NegativeLookupsMiss) \ SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \ SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \ SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \ SC(for_in, V8.ForIn) \

82
deps/v8/src/v8natives.js

@ -539,21 +539,21 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
throw MakeTypeError("define_disallowed", ["defineProperty"]); throw MakeTypeError("define_disallowed", ["defineProperty"]);
if (!IS_UNDEFINED(current) && !current.isConfigurable()) { if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
// Step 5 and 6 // Step 5 and 6
if ((!desc.hasEnumerable() || if ((!desc.hasEnumerable() ||
SameValue(desc.isEnumerable() && current.isEnumerable())) && SameValue(desc.isEnumerable() && current.isEnumerable())) &&
(!desc.hasConfigurable() || (!desc.hasConfigurable() ||
SameValue(desc.isConfigurable(), current.isConfigurable())) && SameValue(desc.isConfigurable(), current.isConfigurable())) &&
(!desc.hasWritable() || (!desc.hasWritable() ||
SameValue(desc.isWritable(), current.isWritable())) && SameValue(desc.isWritable(), current.isWritable())) &&
(!desc.hasValue() || (!desc.hasValue() ||
SameValue(desc.getValue(), current.getValue())) && SameValue(desc.getValue(), current.getValue())) &&
(!desc.hasGetter() || (!desc.hasGetter() ||
SameValue(desc.getGet(), current.getGet())) && SameValue(desc.getGet(), current.getGet())) &&
(!desc.hasSetter() || (!desc.hasSetter() ||
SameValue(desc.getSet(), current.getSet()))) { SameValue(desc.getSet(), current.getSet()))) {
return true; return true;
} }
// Step 7 // Step 7
if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable()) if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
@ -1099,6 +1099,57 @@ function FunctionToString() {
} }
// ES5 15.3.4.5
function FunctionBind(this_arg) { // Length is 1.
if (!IS_FUNCTION(this)) {
throw new $TypeError('Bind must be called on a function');
}
// this_arg is not an argument that should be bound.
var argc_bound = %_ArgumentsLength() - 1;
if (argc_bound > 0) {
var bound_args = new $Array(argc_bound);
for(var i = 0; i < argc_bound; i++) {
bound_args[i] = %_Arguments(i+1);
}
}
global.print(argc_bound);
var fn = this;
var result = function() {
// Combine the args we got from the bind call with the args
// given as argument to the invocation.
var argc = %_ArgumentsLength();
var args = new $Array(argc + argc_bound);
// Add bound arguments.
for (var i = 0; i < argc_bound; i++) {
args[i] = bound_args[i];
}
// Add arguments from call.
for (var i = 0; i < argc; i++) {
args[argc_bound + i] = %_Arguments(i);
}
// If this is a construct call we use a special runtime method
// to generate the actual object using the bound function.
if (%_IsConstructCall()) {
return %NewObjectFromBound(fn, args);
}
return fn.apply(this_arg, args);
};
// We already have caller and arguments properties on functions,
// which are non-configurable. It therefore makes no sence to
// try to redefine these as defined by the spec. The spec says
// that bind should make these throw a TypeError if get or set
// is called and make them non-enumerable and non-configurable.
// To be consistent with our normal functions we leave this as it is.
// Set the correct length.
var length = (this.length - argc_bound) > 0 ? this.length - argc_bound : 0;
%FunctionSetLength(result, length);
return result;
}
function NewFunction(arg1) { // length == 1 function NewFunction(arg1) { // length == 1
var n = %_ArgumentsLength(); var n = %_ArgumentsLength();
var p = ''; var p = '';
@ -1130,6 +1181,7 @@ function NewFunction(arg1) { // length == 1
function SetupFunction() { function SetupFunction() {
InstallFunctions($Function.prototype, DONT_ENUM, $Array( InstallFunctions($Function.prototype, DONT_ENUM, $Array(
"bind", FunctionBind,
"toString", FunctionToString "toString", FunctionToString
)); ));
} }

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script. // cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2 #define MAJOR_VERSION 2
#define MINOR_VERSION 3 #define MINOR_VERSION 3
#define BUILD_NUMBER 2 #define BUILD_NUMBER 3
#define PATCH_LEVEL 0 #define PATCH_LEVEL 0
#define CANDIDATE_VERSION false #define CANDIDATE_VERSION false

4
deps/v8/src/x64/builtins-x64.cc

@ -895,8 +895,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ lea(rbx, FieldOperand(rbx, Code::kHeaderSize)); __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
__ jmp(rbx); __ jmp(rbx);
// edi: called object // rdi: called object
// eax: number of arguments // rax: number of arguments
__ bind(&non_function_call); __ bind(&non_function_call);
// CALL_NON_FUNCTION expects the non-function constructor as receiver // CALL_NON_FUNCTION expects the non-function constructor as receiver
// (instead of the original receiver from the call site). The receiver is // (instead of the original receiver from the call site). The receiver is

726
deps/v8/src/x64/codegen-x64.cc

File diff suppressed because it is too large

11
deps/v8/src/x64/codegen-x64.h

@ -457,6 +457,7 @@ class CodeGenerator: public AstVisitor {
// Support for compiling assignment expressions. // Support for compiling assignment expressions.
void EmitSlotAssignment(Assignment* node); void EmitSlotAssignment(Assignment* node);
void EmitNamedPropertyAssignment(Assignment* node); void EmitNamedPropertyAssignment(Assignment* node);
void EmitKeyedPropertyAssignment(Assignment* node);
// Receiver is passed on the frame and not consumed. // Receiver is passed on the frame and not consumed.
Result EmitNamedLoad(Handle<String> name, bool is_contextual); Result EmitNamedLoad(Handle<String> name, bool is_contextual);
@ -470,6 +471,9 @@ class CodeGenerator: public AstVisitor {
// not changed. // not changed.
Result EmitKeyedLoad(); Result EmitKeyedLoad();
// Receiver, key, and value are passed on the frame and consumed.
Result EmitKeyedStore(StaticType* key_type);
// Special code for typeof expressions: Unfortunately, we must // Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof' // be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for // expressions. We are not allowed to throw reference errors for
@ -488,6 +492,13 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr, void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode); OverwriteMode overwrite_mode);
// Emits code sequence that jumps to a JumpTarget if the inputs
// are both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks.
void JumpIfBothSmiUsingTypeInfo(Result* left,
Result* right,
JumpTarget* both_smi);
// Emits code sequence that jumps to deferred code if the input // Emits code sequence that jumps to deferred code if the input
// is not a smi. Cannot be in MacroAssembler because it takes // is not a smi. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks. // advantage of TypeInfo to skip unneeded checks.

61
deps/v8/src/x64/ic-x64.cc

@ -379,7 +379,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
} }
// One byte opcode for test eax,0xXXXXXXXX. // One byte opcode for test rax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9; static const byte kTestEaxByte = 0xA9;
@ -1520,8 +1520,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateFunctionTailCall(masm, argc, &slow_call); GenerateFunctionTailCall(masm, argc, &slow_call);
__ bind(&check_number_dictionary); __ bind(&check_number_dictionary);
// eax: elements // rax: elements
// ecx: smi key // rcx: smi key
// Check whether the elements is a number dictionary. // Check whether the elements is a number dictionary.
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex); Heap::kHashTableMapRootIndex);
@ -1603,8 +1603,8 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
} }
// The offset from the inlined patch site to the start of the // The offset from the inlined patch site to the start of the inlined
// inlined load instruction. // load instruction.
const int LoadIC::kOffsetToLoadInstruction = 20; const int LoadIC::kOffsetToLoadInstruction = 20;
@ -1713,7 +1713,7 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call. // The address of the instruction following the call.
Address test_instruction_address = Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset; address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test eax, nothing // If the instruction following the call is not a test rax, nothing
// was inlined. // was inlined.
if (*test_instruction_address != kTestEaxByte) return false; if (*test_instruction_address != kTestEaxByte) return false;
@ -1737,9 +1737,54 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
} }
// The offset from the inlined patch site to the start of the inlined
// store instruction.
const int StoreIC::kOffsetToStoreInstruction = 20;
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) { bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// TODO(787): Implement inline stores on x64. // The address of the instruction following the call.
return false; Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test rax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
// Extract the encoded deltas from the test rax instruction.
Address encoded_offsets_address = test_instruction_address + 1;
int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
int delta_to_map_check = -(encoded_offsets & 0xFFFF);
int delta_to_record_write = encoded_offsets >> 16;
// Patch the map to check. The map address is the last 8 bytes of
// the 10-byte immediate move instruction.
Address map_check_address = test_instruction_address + delta_to_map_check;
Address map_address = map_check_address + 2;
*(reinterpret_cast<Object**>(map_address)) = map;
// Patch the offset in the store instruction. The offset is in the
// last 4 bytes of a 7 byte register-to-memory move instruction.
Address offset_address =
map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
// The offset should have initial value (kMaxInt - 1), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == Heap::null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
// Patch the offset in the write-barrier code. The offset is the
// last 4 bytes of a 7 byte lea instruction.
offset_address = map_check_address + delta_to_record_write + 3;
// The offset should have initial value (kMaxInt), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == Heap::null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
} }

145
deps/v8/src/x64/virtual-frame-x64.cc

@ -997,60 +997,60 @@ void VirtualFrame::SyncRange(int begin, int end) {
} }
Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id, //------------------------------------------------------------------------------
InvokeFlag flag, // Virtual frame stub and IC calling functions.
int arg_count) {
Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count); PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters()); ASSERT(cgen()->HasValidEntryRegisters());
__ InvokeBuiltin(id, flag); __ CallRuntime(f, arg_count);
Result result = cgen()->allocator()->Allocate(rax); Result result = cgen()->allocator()->Allocate(rax);
ASSERT(result.is_valid()); ASSERT(result.is_valid());
return result; return result;
} }
//------------------------------------------------------------------------------ Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
// Virtual frame stub and IC calling functions. PrepareForCall(arg_count, arg_count);
Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
ASSERT(cgen()->HasValidEntryRegisters()); ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode); __ CallRuntime(id, arg_count);
Result result = cgen()->allocator()->Allocate(rax); Result result = cgen()->allocator()->Allocate(rax);
ASSERT(result.is_valid()); ASSERT(result.is_valid());
return result; return result;
} }
Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) { #ifdef ENABLE_DEBUGGER_SUPPORT
PrepareForCall(arg_count, arg_count); void VirtualFrame::DebugBreak() {
PrepareForCall(0, 0);
ASSERT(cgen()->HasValidEntryRegisters()); ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count); __ DebugBreak();
Result result = cgen()->allocator()->Allocate(rax); Result result = cgen()->allocator()->Allocate(rax);
ASSERT(result.is_valid()); ASSERT(result.is_valid());
return result;
} }
#endif
Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) { Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
int arg_count) {
PrepareForCall(arg_count, arg_count); PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters()); ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count); __ InvokeBuiltin(id, flag);
Result result = cgen()->allocator()->Allocate(rax); Result result = cgen()->allocator()->Allocate(rax);
ASSERT(result.is_valid()); ASSERT(result.is_valid());
return result; return result;
} }
#ifdef ENABLE_DEBUGGER_SUPPORT Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
void VirtualFrame::DebugBreak() { RelocInfo::Mode rmode) {
PrepareForCall(0, 0);
ASSERT(cgen()->HasValidEntryRegisters()); ASSERT(cgen()->HasValidEntryRegisters());
__ DebugBreak(); __ Call(code, rmode);
Result result = cgen()->allocator()->Allocate(rax); Result result = cgen()->allocator()->Allocate(rax);
ASSERT(result.is_valid()); ASSERT(result.is_valid());
return result;
} }
#endif
// This function assumes that the only results that could be in a_reg or b_reg // This function assumes that the only results that could be in a_reg or b_reg
@ -1107,83 +1107,82 @@ Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) { Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
// Key and receiver are on top of the frame. The IC expects them on // Key and receiver are on top of the frame. Put them in rax and rdx.
// the stack. It does not drop them. Result key = Pop();
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
Result name = Pop();
Result receiver = Pop(); Result receiver = Pop();
PrepareForCall(0, 0); PrepareForCall(0, 0);
MoveResultsToRegisters(&name, &receiver, rax, rdx); MoveResultsToRegisters(&key, &receiver, rax, rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
return RawCallCodeObject(ic, mode); return RawCallCodeObject(ic, mode);
} }
Result VirtualFrame::CallCommonStoreIC(Handle<Code> ic, Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
Result* value, // Value and (if not contextual) receiver are on top of the frame.
Result* key, // The IC expects name in rcx, value in rax, and receiver in rdx.
Result* receiver) { Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
// The IC expects value in rax, key in rcx, and receiver in rdx. Result value = Pop();
if (is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(rax);
__ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
value.Unuse();
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&value, &receiver, rax, rdx);
}
__ Move(rcx, name);
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
Result VirtualFrame::CallKeyedStoreIC() {
// Value, key, and receiver are on the top of the frame. The IC
// expects value in rax, key in rcx, and receiver in rdx.
Result value = Pop();
Result key = Pop();
Result receiver = Pop();
PrepareForCall(0, 0); PrepareForCall(0, 0);
// If one of the three registers is free, or a value is already
// in the correct register, move the remaining two values using
// MoveResultsToRegisters().
if (!cgen()->allocator()->is_used(rax) || if (!cgen()->allocator()->is_used(rax) ||
(value->is_register() && value->reg().is(rax))) { (value.is_register() && value.reg().is(rax))) {
if (!cgen()->allocator()->is_used(rax)) { if (!cgen()->allocator()->is_used(rax)) {
value->ToRegister(rax); value.ToRegister(rax);
} }
MoveResultsToRegisters(key, receiver, rcx, rdx); MoveResultsToRegisters(&key, &receiver, rcx, rdx);
value->Unuse(); value.Unuse();
} else if (!cgen()->allocator()->is_used(rcx) || } else if (!cgen()->allocator()->is_used(rcx) ||
(key->is_register() && key->reg().is(rcx))) { (key.is_register() && key.reg().is(rcx))) {
if (!cgen()->allocator()->is_used(rcx)) { if (!cgen()->allocator()->is_used(rcx)) {
key->ToRegister(rcx); key.ToRegister(rcx);
} }
MoveResultsToRegisters(value, receiver, rax, rdx); MoveResultsToRegisters(&value, &receiver, rax, rdx);
key->Unuse(); key.Unuse();
} else if (!cgen()->allocator()->is_used(rdx) || } else if (!cgen()->allocator()->is_used(rdx) ||
(receiver->is_register() && receiver->reg().is(rdx))) { (receiver.is_register() && receiver.reg().is(rdx))) {
if (!cgen()->allocator()->is_used(rdx)) { if (!cgen()->allocator()->is_used(rdx)) {
receiver->ToRegister(rdx); receiver.ToRegister(rdx);
} }
MoveResultsToRegisters(key, value, rcx, rax); MoveResultsToRegisters(&key, &value, rcx, rax);
receiver->Unuse(); receiver.Unuse();
} else { } else {
// Otherwise, no register is free, and no value is in the correct place. // All three registers are used, and no value is in the correct place.
// We have one of the two circular permutations of eax, ecx, edx. // We have one of the two circular permutations of rax, rcx, rdx.
ASSERT(value->is_register()); ASSERT(value.is_register());
if (value->reg().is(rcx)) { if (value.reg().is(rcx)) {
__ xchg(rax, rdx); __ xchg(rax, rdx);
__ xchg(rax, rcx); __ xchg(rax, rcx);
} else { } else {
__ xchg(rax, rcx); __ xchg(rax, rcx);
__ xchg(rax, rdx); __ xchg(rax, rdx);
} }
value->Unuse();
key->Unuse();
receiver->Unuse();
}
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in rcx, value in rax, and receiver in rdx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result value = Pop();
if (is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(rax);
__ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
value.Unuse(); value.Unuse();
} else { key.Unuse();
Result receiver = Pop(); receiver.Unuse();
PrepareForCall(0, 0);
MoveResultsToRegisters(&value, &receiver, rax, rdx);
} }
__ Move(rcx, name);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET); return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
} }

33
deps/v8/src/x64/virtual-frame-x64.h

@ -329,50 +329,27 @@ class VirtualFrame : public ZoneObject {
int arg_count); int arg_count);
// Call load IC. Name and receiver are found on top of the frame. // Call load IC. Name and receiver are found on top of the frame.
// Receiver is not dropped. // Both are dropped.
Result CallLoadIC(RelocInfo::Mode mode); Result CallLoadIC(RelocInfo::Mode mode);
// Call keyed load IC. Key and receiver are found on top of the // Call keyed load IC. Key and receiver are found on top of the
// frame. They are not dropped. // frame. Both are dropped.
Result CallKeyedLoadIC(RelocInfo::Mode mode); Result CallKeyedLoadIC(RelocInfo::Mode mode);
// Calling a store IC and a keyed store IC differ only by which ic is called
// and by the order of the three arguments on the frame.
Result CallCommonStoreIC(Handle<Code> ic,
Result* value,
Result* key,
Result* receiver);
// Call store IC. Name, value, and receiver are found on top
// of the frame. All are dropped.
Result CallStoreIC() {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result name = Pop();
Result value = Pop();
Result receiver = Pop();
return CallCommonStoreIC(ic, &value, &name, &receiver);
}
// Call store IC. If the load is contextual, value is found on top of the // Call store IC. If the load is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are dropped. // frame. If not, value and receiver are on the frame. Both are dropped.
Result CallStoreIC(Handle<String> name, bool is_contextual); Result CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed store IC. Value, key, and receiver are found on top // Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All are dropped. // of the frame. All three are dropped.
Result CallKeyedStoreIC() { Result CallKeyedStoreIC();
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Result value = Pop();
Result key = Pop();
Result receiver = Pop();
return CallCommonStoreIC(ic, &value, &key, &receiver);
}
// Call call IC. Function name, arguments, and receiver are found on top // Call call IC. Function name, arguments, and receiver are found on top
// of the frame and dropped by the call. // of the frame and dropped by the call.
// The argument count does not include the receiver. // The argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting); Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Call keyed call IC. Same calling convention as CallCallIC.
Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting); Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments, // Allocate and call JS function as constructor. Arguments,

23
deps/v8/test/mjsunit/debug-setbreakpoint.js

@ -192,3 +192,26 @@ Debug.setListener(breakListener);
sourceUrlFunc(); sourceUrlFunc();
assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL"); assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL");
// Breakpoint in a script with no statements test case. If breakpoint is set
// to the script body, its actual position is taken from the nearest statement
// below or like in this case is reset to the very end of the script.
// Unless some precautions made, this position becomes out-of-range and
// we get an exception.
// Gets a script of 'i1' function and sets the breakpoint at line #4 which
// should be empty.
function SetBreakpointInI1Script() {
var i_script = Debug.findScript(i1);
assertTrue(!!i_script, "invalid script for i1");
Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
i_script.id, 4);
}
// Creates the eval script and tries to set the breakpoint.
// The tricky part is that the script function must be strongly reachable at the
// moment. Since there's no way of simply getting the pointer to the function,
// we run this code while the script function is being activated on stack.
eval('SetBreakpointInI1Script()\nfunction i1(){}\n\n\n\nfunction i2(){}\n');

184
deps/v8/test/mjsunit/function-bind.js

@ -0,0 +1,184 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Tests the Function.prototype.bind (ES 15.3.4.5) method.
// Simple tests.
function foo(x, y, z) {
return x + y + z;
}
var f = foo.bind(foo);
assertEquals(3, f(1, 1, 1));
assertEquals(3, f.length);
f = foo.bind(foo, 2);
assertEquals(4, f(1, 1));
assertEquals(2, f.length);
f = foo.bind(foo, 2, 2);
assertEquals(5, f(1));
assertEquals(1, f.length);
f = foo.bind(foo, 2, 2, 2);
assertEquals(6, f());
assertEquals(0, f.length);
// Test that length works correctly even if more than the actual number
// of arguments are given when binding.
f = foo.bind(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9);
assertEquals(6, f());
assertEquals(0, f.length);
// Use a different bound object.
var obj = {x: 42, y: 43};
// Values that would normally be in "this" when calling f_bound_this.
var x = 42;
var y = 44;
function f_bound_this(z) {
return z + this.y - this.x;
}
assertEquals(3, f_bound_this(1))
f = f_bound_this.bind(obj);
assertEquals(2, f(1));
assertEquals(1, f.length);
f = f_bound_this.bind(obj, 2);
assertEquals(3, f());
assertEquals(0, f.length);
// Test chained binds.
// When only giving the thisArg, any number of binds should have
// the same effect.
f = foo.bind(foo);
assertEquals(3, f(1, 1, 1));
f = foo.bind(foo).bind(foo).bind(foo).bind(foo);
assertEquals(3, f(1, 1, 1));
assertEquals(3, f.length);
// Giving bound parameters should work at any place in the chain.
f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo);
assertEquals(3, f(1, 1));
assertEquals(2, f.length);
f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo);
assertEquals(3, f(1, 1));
assertEquals(2, f.length);
f = foo.bind(foo).bind(foo).bind(foo,1 ).bind(foo);
assertEquals(3, f(1, 1));
assertEquals(2, f.length);
f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1);
assertEquals(3, f(1, 1));
assertEquals(2, f.length);
// Several parameters can be given, and given in different bind invokations.
f = foo.bind(foo, 1, 1).bind(foo).bind(foo).bind(foo);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo).bind(foo).bind(foo, 1, 1).bind(foo);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1, 1);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo, 1).bind(foo, 1).bind(foo).bind(foo);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo, 1).bind(foo).bind(foo, 1).bind(foo);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo, 1);
assertEquals(3, f(1));
assertEquals(1, f.length);
f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo, 1);
assertEquals(3, f(1));
assertEquals(1, f.length);
// Test constructor calls.
function bar(x, y, z) {
this.x = x;
this.y = y;
this.z = z;
}
f = bar.bind(bar);
var obj2 = new f(1,2,3);
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
f = bar.bind(bar, 1);
obj2 = new f(2,3);
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
f = bar.bind(bar, 1, 2);
obj2 = new f(3);
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
f = bar.bind(bar, 1, 2, 3);
obj2 = new f();
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
// Test bind chains when used as a constructor.
f = bar.bind(bar, 1).bind(bar, 2).bind(bar, 3);
obj2 = new f();
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
// Test instanceof obj2 is bar, not f.
assertTrue(obj2 instanceof bar);
assertFalse(obj2 instanceof f);

4
deps/v8/tools/js2c.py

@ -275,8 +275,8 @@ def JS2C(source, target, env):
debugger_ids = [] debugger_ids = []
modules = [] modules = []
# Locate the macros file name. # Locate the macros file name.
consts = {} consts = []
macros = {} macros = []
for s in source: for s in source:
if 'macros.py' == (os.path.split(str(s))[1]): if 'macros.py' == (os.path.split(str(s))[1]):
(consts, macros) = ReadMacros(ReadLines(str(s))) (consts, macros) = ReadMacros(ReadLines(str(s)))

Loading…
Cancel
Save