Upgrade V8 to 2.2.20

v0.7.4-release
Ryan Dahl, 15 years ago
commit 5a25338ac0
  1. deps/v8/ChangeLog (12)
  2. deps/v8/src/api.cc (4)
  3. deps/v8/src/arm/assembler-arm.h (1)
  4. deps/v8/src/arm/codegen-arm.cc (371)
  5. deps/v8/src/arm/codegen-arm.h (4)
  6. deps/v8/src/arm/ic-arm.cc (298)
  7. deps/v8/src/arm/jump-target-arm.cc (16)
  8. deps/v8/src/arm/macro-assembler-arm.cc (13)
  9. deps/v8/src/arm/virtual-frame-arm.cc (44)
  10. deps/v8/src/arm/virtual-frame-arm.h (13)
  11. deps/v8/src/array.js (4)
  12. deps/v8/src/ast-inl.h (4)
  13. deps/v8/src/builtins.cc (1)
  14. deps/v8/src/factory.cc (11)
  15. deps/v8/src/factory.h (4)
  16. deps/v8/src/heap.cc (59)
  17. deps/v8/src/heap.h (50)
  18. deps/v8/src/ia32/codegen-ia32.cc (9)
  19. deps/v8/src/ia32/full-codegen-ia32.cc (2)
  20. deps/v8/src/ia32/ic-ia32.cc (302)
  21. deps/v8/src/ic.cc (10)
  22. deps/v8/src/ic.h (4)
  23. deps/v8/src/json.js (2)
  24. deps/v8/src/jump-target-heavy.h (2)
  25. deps/v8/src/jump-target-light-inl.h (4)
  26. deps/v8/src/jump-target-light.h (4)
  27. deps/v8/src/log.cc (10)
  28. deps/v8/src/objects-debug.cc (3)
  29. deps/v8/src/objects-inl.h (35)
  30. deps/v8/src/objects.cc (67)
  31. deps/v8/src/objects.h (27)
  32. deps/v8/src/regexp.js (5)
  33. deps/v8/src/runtime.cc (19)
  34. deps/v8/src/utils.h (2)
  35. deps/v8/src/v8-counters.h (4)
  36. deps/v8/src/v8natives.js (17)
  37. deps/v8/src/version.cc (2)
  38. deps/v8/src/x64/assembler-x64.cc (122)
  39. deps/v8/src/x64/assembler-x64.h (27)
  40. deps/v8/src/x64/codegen-x64.cc (92)
  41. deps/v8/src/x64/disasm-x64.cc (30)
  42. deps/v8/src/x64/full-codegen-x64.cc (7)
  43. deps/v8/src/x64/ic-x64.cc (404)
  44. deps/v8/src/x64/macro-assembler-x64.cc (7)
  45. deps/v8/src/x64/macro-assembler-x64.h (3)
  46. deps/v8/src/x64/virtual-frame-x64.cc (34)
  47. deps/v8/src/x64/virtual-frame-x64.h (2)
  48. deps/v8/test/cctest/test-api.cc (25)
  49. deps/v8/test/cctest/test-profile-generator.cc (4)
  50. deps/v8/test/mjsunit/for-in.js (35)
  51. deps/v8/test/mjsunit/regress/regress-45469.js (46)
  52. deps/v8/test/mjsunit/regress/regress-752.js (36)
  53. deps/v8/test/mjsunit/regress/regress-754.js (39)
  54. deps/v8/test/mjsunit/smi-ops.js (5)

deps/v8/ChangeLog (12)

@@ -1,3 +1,15 @@
2010-06-28: Version 2.2.20

        Fix bug with for-in on x64 platform (issue 748).
        Fix crash bug on x64 platform (issue 756).
        Fix bug in Object.getOwnPropertyNames. (chromium issue 41243).
        Fix a bug on ARM that caused the result of 1 << x to be
        miscalculated for some inputs.
        Performance improvements on all platforms.

2010-06-23: Version 2.2.19

        Fix bug that causes the build to break when profillingsupport=off
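
A note on the ARM shift entry above: in JavaScript both operands of "1 << x" go through ToInt32 and the shift count is masked to its low five bits, so the results below must hold on every platform; the ARM fix restores that behaviour. The sketch is illustrative only, not one of the mjsunit regression tests added by this commit, and assertEquals is defined locally so it runs standalone.

function assertEquals(expected, found) {
  if (expected !== found) throw new Error("expected " + expected + ", found " + found);
}

assertEquals(1, 1 << 0);
assertEquals(32, 1 << 5);
assertEquals(-2147483648, 1 << 31);   // shifting into the sign bit
assertEquals(1, 1 << 32);             // shift count is masked: 32 & 31 == 0
assertEquals(2, 1 << 33);             // 33 & 31 == 1
assertEquals(1 << 3, 1 << (32 + 3));  // equal shift counts modulo 32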

deps/v8/src/api.cc (4)

@@ -2606,6 +2606,8 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
     return;
   }
   i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
+  self->set_map(
+      *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
   self->set_elements(*pixels);
 }
@@ -2659,6 +2661,8 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
   }
   i::Handle<i::ExternalArray> array =
       i::Factory::NewExternalArray(length, array_type, data);
+  self->set_map(
+      *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
   self->set_elements(*array);
 }

deps/v8/src/arm/assembler-arm.h (1)

@@ -1110,6 +1110,7 @@ class Assembler : public Malloced {
   void EndBlockConstPool() {
     const_pool_blocked_nesting_--;
   }
+  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }

  private:
   // Code buffer:

deps/v8/src/arm/codegen-arm.cc (371)

@ -157,6 +157,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
state_(NULL), state_(NULL),
loop_nesting_(0), loop_nesting_(0),
type_info_(NULL), type_info_(NULL),
function_return_(JumpTarget::BIDIRECTIONAL),
function_return_is_shadowed_(false) { function_return_is_shadowed_(false) {
} }
@ -218,7 +219,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// for stack overflow. // for stack overflow.
frame_->AllocateStackSlots(); frame_->AllocateStackSlots();
VirtualFrame::SpilledScope spilled_scope(frame_); frame_->AssertIsSpilled();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) { if (heap_slots > 0) {
// Allocate local context. // Allocate local context.
@ -257,6 +258,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// order: such a parameter is copied repeatedly into the same // order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside // context location and thus the last value is what is seen inside
// the function. // the function.
frame_->AssertIsSpilled();
for (int i = 0; i < scope()->num_parameters(); i++) { for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i); Variable* par = scope()->parameter(i);
Slot* slot = par->slot(); Slot* slot = par->slot();
@ -282,8 +284,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize ThisFunction reference if present. // Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) { if (scope()->is_function_scope() && scope()->function() != NULL) {
__ mov(ip, Operand(Factory::the_hole_value())); frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
frame_->EmitPush(ip);
StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT); StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
} }
} else { } else {
@ -510,7 +511,6 @@ void CodeGenerator::LoadCondition(Expression* x,
has_valid_frame() && has_valid_frame() &&
!has_cc() && !has_cc() &&
frame_->height() == original_height) { frame_->height() == original_height) {
frame_->SpillAll();
true_target->Jump(); true_target->Jump();
} }
} }
@ -535,22 +535,18 @@ void CodeGenerator::Load(Expression* expr) {
if (has_cc()) { if (has_cc()) {
// Convert cc_reg_ into a boolean value. // Convert cc_reg_ into a boolean value.
VirtualFrame::SpilledScope scope(frame_);
JumpTarget loaded; JumpTarget loaded;
JumpTarget materialize_true; JumpTarget materialize_true;
materialize_true.Branch(cc_reg_); materialize_true.Branch(cc_reg_);
__ LoadRoot(r0, Heap::kFalseValueRootIndex); frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
frame_->EmitPush(r0);
loaded.Jump(); loaded.Jump();
materialize_true.Bind(); materialize_true.Bind();
__ LoadRoot(r0, Heap::kTrueValueRootIndex); frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
frame_->EmitPush(r0);
loaded.Bind(); loaded.Bind();
cc_reg_ = al; cc_reg_ = al;
} }
if (true_target.is_linked() || false_target.is_linked()) { if (true_target.is_linked() || false_target.is_linked()) {
VirtualFrame::SpilledScope scope(frame_);
// We have at least one condition value that has been "translated" // We have at least one condition value that has been "translated"
// into a branch, thus it needs to be loaded explicitly. // into a branch, thus it needs to be loaded explicitly.
JumpTarget loaded; JumpTarget loaded;
@ -561,8 +557,7 @@ void CodeGenerator::Load(Expression* expr) {
// Load "true" if necessary. // Load "true" if necessary.
if (true_target.is_linked()) { if (true_target.is_linked()) {
true_target.Bind(); true_target.Bind();
__ LoadRoot(r0, Heap::kTrueValueRootIndex); frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
frame_->EmitPush(r0);
} }
// If both "true" and "false" need to be loaded jump across the code for // If both "true" and "false" need to be loaded jump across the code for
// "false". // "false".
@ -572,8 +567,7 @@ void CodeGenerator::Load(Expression* expr) {
// Load "false" if necessary. // Load "false" if necessary.
if (false_target.is_linked()) { if (false_target.is_linked()) {
false_target.Bind(); false_target.Bind();
__ LoadRoot(r0, Heap::kFalseValueRootIndex); frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
frame_->EmitPush(r0);
} }
// A value is loaded on all paths reaching this point. // A value is loaded on all paths reaching this point.
loaded.Bind(); loaded.Bind();
@ -592,11 +586,11 @@ void CodeGenerator::LoadGlobal() {
void CodeGenerator::LoadGlobalReceiver(Register scratch) { void CodeGenerator::LoadGlobalReceiver(Register scratch) {
VirtualFrame::SpilledScope spilled_scope(frame_); Register reg = frame_->GetTOSRegister();
__ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX)); __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(scratch, __ ldr(reg,
FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset)); FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
frame_->EmitPush(scratch); frame_->EmitPush(reg);
} }
@ -613,8 +607,6 @@ ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
void CodeGenerator::StoreArgumentsObject(bool initial) { void CodeGenerator::StoreArgumentsObject(bool initial) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ArgumentsAllocationMode mode = ArgumentsMode(); ArgumentsAllocationMode mode = ArgumentsMode();
ASSERT(mode != NO_ARGUMENTS_ALLOCATION); ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
@ -623,9 +615,9 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
// When using lazy arguments allocation, we store the hole value // When using lazy arguments allocation, we store the hole value
// as a sentinel indicating that the arguments object hasn't been // as a sentinel indicating that the arguments object hasn't been
// allocated yet. // allocated yet.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
frame_->EmitPush(ip);
} else { } else {
frame_->SpillAll();
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ ldr(r2, frame_->Function()); __ ldr(r2, frame_->Function());
// The receiver is below the arguments, the return address, and the // The receiver is below the arguments, the return address, and the
@ -649,9 +641,9 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
// already been written to. This can happen if the a function // already been written to. This can happen if the a function
// has a local variable named 'arguments'. // has a local variable named 'arguments'.
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
frame_->EmitPop(r0); Register arguments = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip); __ cmp(arguments, ip);
done.Branch(ne); done.Branch(ne);
} }
StoreToSlot(arguments->slot(), NOT_CONST_INIT); StoreToSlot(arguments->slot(), NOT_CONST_INIT);
@ -754,36 +746,35 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// may jump to 'false_target' in case the register converts to 'false'. // may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target, void CodeGenerator::ToBoolean(JumpTarget* true_target,
JumpTarget* false_target) { JumpTarget* false_target) {
VirtualFrame::SpilledScope spilled_scope(frame_);
// Note: The generated code snippet does not change stack variables. // Note: The generated code snippet does not change stack variables.
// Only the condition code should be set. // Only the condition code should be set.
frame_->EmitPop(r0); Register tos = frame_->PopToRegister();
// Fast case checks // Fast case checks
// Check if the value is 'false'. // Check if the value is 'false'.
__ LoadRoot(ip, Heap::kFalseValueRootIndex); __ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(r0, ip); __ cmp(tos, ip);
false_target->Branch(eq); false_target->Branch(eq);
// Check if the value is 'true'. // Check if the value is 'true'.
__ LoadRoot(ip, Heap::kTrueValueRootIndex); __ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip); __ cmp(tos, ip);
true_target->Branch(eq); true_target->Branch(eq);
// Check if the value is 'undefined'. // Check if the value is 'undefined'.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip); __ cmp(tos, ip);
false_target->Branch(eq); false_target->Branch(eq);
// Check if the value is a smi. // Check if the value is a smi.
__ cmp(r0, Operand(Smi::FromInt(0))); __ cmp(tos, Operand(Smi::FromInt(0)));
false_target->Branch(eq); false_target->Branch(eq);
__ tst(r0, Operand(kSmiTagMask)); __ tst(tos, Operand(kSmiTagMask));
true_target->Branch(eq); true_target->Branch(eq);
// Slow case: call the runtime. // Slow case: call the runtime.
frame_->EmitPush(r0); frame_->EmitPush(tos);
frame_->CallRuntime(Runtime::kToBool, 1); frame_->CallRuntime(Runtime::kToBool, 1);
// Convert the result (r0) to a condition code. // Convert the result (r0) to a condition code.
__ LoadRoot(ip, Heap::kFalseValueRootIndex); __ LoadRoot(ip, Heap::kFalseValueRootIndex);
@ -935,7 +926,15 @@ class DeferredInlineSmiOperation: public DeferredCode {
}; };
// On entry the non-constant side of the binary operation is in tos_register_
// and the constant smi side is nowhere. The tos_register_ is not used by the
// virtual frame. On exit the answer is in the tos_register_ and the virtual
// frame is unchanged.
void DeferredInlineSmiOperation::Generate() { void DeferredInlineSmiOperation::Generate() {
VirtualFrame copied_frame(*frame_state()->frame());
copied_frame.SpillAll();
Register lhs = r1; Register lhs = r1;
Register rhs = r0; Register rhs = r0;
switch (op_) { switch (op_) {
@ -969,45 +968,20 @@ void DeferredInlineSmiOperation::Generate() {
case Token::MOD: case Token::MOD:
case Token::BIT_OR: case Token::BIT_OR:
case Token::BIT_XOR: case Token::BIT_XOR:
case Token::BIT_AND: { case Token::BIT_AND:
if (reversed_) {
if (tos_register_.is(r0)) {
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
ASSERT(tos_register_.is(r1));
__ mov(r0, Operand(Smi::FromInt(value_)));
lhs = r0;
rhs = r1;
}
} else {
if (tos_register_.is(r1)) {
__ mov(r0, Operand(Smi::FromInt(value_)));
} else {
ASSERT(tos_register_.is(r0));
__ mov(r1, Operand(Smi::FromInt(value_)));
lhs = r0;
rhs = r1;
}
}
break;
}
case Token::SHL: case Token::SHL:
case Token::SHR: case Token::SHR:
case Token::SAR: { case Token::SAR: {
if (!reversed_) {
if (tos_register_.is(r1)) { if (tos_register_.is(r1)) {
__ mov(r0, Operand(Smi::FromInt(value_))); __ mov(r0, Operand(Smi::FromInt(value_)));
} else { } else {
ASSERT(tos_register_.is(r0)); ASSERT(tos_register_.is(r0));
__ mov(r1, Operand(Smi::FromInt(value_))); __ mov(r1, Operand(Smi::FromInt(value_)));
}
if (reversed_ == tos_register_.is(r1)) {
lhs = r0; lhs = r0;
rhs = r1; rhs = r1;
} }
} else {
ASSERT(op_ == Token::SHL);
__ mov(r1, Operand(Smi::FromInt(value_)));
}
break; break;
} }
@ -1019,11 +993,17 @@ void DeferredInlineSmiOperation::Generate() {
GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_); GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
__ CallStub(&stub); __ CallStub(&stub);
// The generic stub returns its value in r0, but that's not // The generic stub returns its value in r0, but that's not
// necessarily what we want. We want whatever the inlined code // necessarily what we want. We want whatever the inlined code
// expected, which is that the answer is in the same register as // expected, which is that the answer is in the same register as
// the operand was. // the operand was.
__ Move(tos_register_, r0); __ Move(tos_register_, r0);
// The tos register was not in use for the virtual frame that we
// came into this function with, so we can merge back to that frame
// without trashing it.
copied_frame.MergeTo(frame_state()->frame());
} }
@ -1124,12 +1104,6 @@ void CodeGenerator::SmiOperation(Token::Value op,
// We move the top of stack to a register (normally no move is invoved). // We move the top of stack to a register (normally no move is invoved).
Register tos = frame_->PopToRegister(); Register tos = frame_->PopToRegister();
// All other registers are spilled. The deferred code expects one argument
// in a register and all other values are flushed to the stack. The
// answer is returned in the same register that the top of stack argument was
// in.
frame_->SpillAll();
switch (op) { switch (op) {
case Token::ADD: { case Token::ADD: {
DeferredCode* deferred = DeferredCode* deferred =
@ -1448,8 +1422,6 @@ void CodeGenerator::Comparison(Condition cc,
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CallFunctionFlags flags, CallFunctionFlags flags,
int position) { int position) {
frame_->AssertIsSpilled();
// Push the arguments ("left-to-right") on the stack. // Push the arguments ("left-to-right") on the stack.
int arg_count = args->length(); int arg_count = args->length();
for (int i = 0; i < arg_count; i++) { for (int i = 0; i < arg_count; i++) {
@ -1482,7 +1454,6 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// stack, as receiver and arguments, and calls x. // stack, as receiver and arguments, and calls x.
// In the implementation comments, we call x the applicand // In the implementation comments, we call x the applicand
// and y the receiver. // and y the receiver.
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION); ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments()); ASSERT(arguments->IsArguments());
@ -1500,6 +1471,15 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
Load(receiver); Load(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// At this point the top two stack elements are probably in registers
// since they were just loaded. Ensure they are in regs and get the
// regs.
Register receiver_reg = frame_->Peek2();
Register arguments_reg = frame_->Peek();
// From now on the frame is spilled.
frame_->SpillAll();
// Emit the source position information after having loaded the // Emit the source position information after having loaded the
// receiver and the arguments. // receiver and the arguments.
CodeForSourcePosition(position); CodeForSourcePosition(position);
@ -1513,32 +1493,30 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// already. If so, just use that instead of copying the arguments // already. If so, just use that instead of copying the arguments
// from the stack. This also deals with cases where a local variable // from the stack. This also deals with cases where a local variable
// named 'arguments' has been introduced. // named 'arguments' has been introduced.
__ ldr(r0, MemOperand(sp, 0)); JumpTarget slow;
Label done;
Label slow, done;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, r0); __ cmp(ip, arguments_reg);
__ b(ne, &slow); slow.Branch(ne);
Label build_args; Label build_args;
// Get rid of the arguments object probe. // Get rid of the arguments object probe.
frame_->Drop(); frame_->Drop();
// Stack now has 3 elements on it. // Stack now has 3 elements on it.
// Contents of stack at this point: // Contents of stack at this point:
// sp[0]: receiver // sp[0]: receiver - in the receiver_reg register.
// sp[1]: applicand.apply // sp[1]: applicand.apply
// sp[2]: applicand. // sp[2]: applicand.
// Check that the receiver really is a JavaScript object. // Check that the receiver really is a JavaScript object.
__ ldr(r0, MemOperand(sp, 0)); __ BranchOnSmi(receiver_reg, &build_args);
__ BranchOnSmi(r0, &build_args);
// We allow all JSObjects including JSFunctions. As long as // We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right // JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound. // bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE); __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &build_args); __ b(lt, &build_args);
// Check that applicand.apply is Function.prototype.apply. // Check that applicand.apply is Function.prototype.apply.
@ -1627,7 +1605,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
StoreArgumentsObject(false); StoreArgumentsObject(false);
// Stack and frame now have 4 elements. // Stack and frame now have 4 elements.
__ bind(&slow); slow.Bind();
// Generic computation of x.apply(y, args) with no special optimization. // Generic computation of x.apply(y, args) with no special optimization.
// Flip applicand.apply and applicand on the stack, so // Flip applicand.apply and applicand on the stack, so
@ -1652,7 +1630,6 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) { void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(has_cc()); ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_); Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc); target->Branch(cc);
@ -1661,7 +1638,7 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
void CodeGenerator::CheckStack() { void CodeGenerator::CheckStack() {
VirtualFrame::SpilledScope spilled_scope(frame_); frame_->SpillAll();
Comment cmnt(masm_, "[ check stack"); Comment cmnt(masm_, "[ check stack");
__ LoadRoot(ip, Heap::kStackLimitRootIndex); __ LoadRoot(ip, Heap::kStackLimitRootIndex);
// Put the lr setup instruction in the delay slot. kInstrSize is added to // Put the lr setup instruction in the delay slot. kInstrSize is added to
@ -1683,7 +1660,6 @@ void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
for (int i = 0; frame_ != NULL && i < statements->length(); i++) { for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
Visit(statements->at(i)); Visit(statements->at(i));
} }
@ -1695,7 +1671,6 @@ void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Block"); Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node); CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight(); node->break_target()->SetExpectedHeight();
@ -1713,7 +1688,6 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
frame_->EmitPush(Operand(pairs)); frame_->EmitPush(Operand(pairs));
frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0))); frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareGlobals, 3); frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// The result is discarded. // The result is discarded.
} }
@ -1754,7 +1728,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
frame_->EmitPush(Operand(0)); frame_->EmitPush(Operand(0));
} }
VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareContextSlot, 4); frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements). // Ignore the return value (declarations are statements).
@ -1899,7 +1872,6 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ContinueStatement"); Comment cmnt(masm_, "[ ContinueStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
node->target()->continue_target()->Jump(); node->target()->continue_target()->Jump();
@ -1907,7 +1879,6 @@ void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
void CodeGenerator::VisitBreakStatement(BreakStatement* node) { void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ BreakStatement"); Comment cmnt(masm_, "[ BreakStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
node->target()->break_target()->Jump(); node->target()->break_target()->Jump();
@ -1915,7 +1886,7 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_); frame_->SpillAll();
Comment cmnt(masm_, "[ ReturnStatement"); Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
@ -1926,7 +1897,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
} else { } else {
// Pop the result from the frame and prepare the frame for // Pop the result from the frame and prepare the frame for
// returning thus making it easier to merge. // returning thus making it easier to merge.
frame_->EmitPop(r0); frame_->PopToR0();
frame_->PrepareForReturn(); frame_->PrepareForReturn();
if (function_return_.is_bound()) { if (function_return_.is_bound()) {
// If the function return label is already bound we reuse the // If the function return label is already bound we reuse the
@ -1986,7 +1957,6 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithEnterStatement"); Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
Load(node->expression()); Load(node->expression());
@ -2012,7 +1982,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithExitStatement"); Comment cmnt(masm_, "[ WithExitStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
// Pop context. // Pop context.
@ -2027,7 +1996,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SwitchStatement"); Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight(); node->break_target()->SetExpectedHeight();
@ -2055,8 +2023,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
next_test.Bind(); next_test.Bind();
next_test.Unuse(); next_test.Unuse();
// Duplicate TOS. // Duplicate TOS.
__ ldr(r0, frame_->Top()); frame_->Dup();
frame_->EmitPush(r0);
Comparison(eq, NULL, clause->label(), true); Comparison(eq, NULL, clause->label(), true);
Branch(false, &next_test); Branch(false, &next_test);
@ -2094,7 +2061,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
default_entry.Bind(); default_entry.Bind();
VisitStatements(default_clause->statements()); VisitStatements(default_clause->statements());
// If control flow can fall out of the default and there is a case after // If control flow can fall out of the default and there is a case after
// it, jup to that case's body. // it, jump to that case's body.
if (frame_ != NULL && default_exit.is_bound()) { if (frame_ != NULL && default_exit.is_bound()) {
default_exit.Jump(); default_exit.Jump();
} }
@ -2116,7 +2083,6 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DoWhileStatement"); Comment cmnt(masm_, "[ DoWhileStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight(); node->break_target()->SetExpectedHeight();
@ -2191,7 +2157,6 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WhileStatement"); Comment cmnt(masm_, "[ WhileStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
@ -2209,7 +2174,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
node->continue_target()->Bind(); node->continue_target()->Bind();
if (info == DONT_KNOW) { if (info == DONT_KNOW) {
JumpTarget body; JumpTarget body(JumpTarget::BIDIRECTIONAL);
LoadCondition(node->cond(), &body, node->break_target(), true); LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) { if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the // A NULL frame indicates that control did not fall out of the
@ -2242,7 +2207,6 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ForStatement"); Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
if (node->init() != NULL) { if (node->init() != NULL) {
@ -2931,7 +2895,6 @@ void CodeGenerator::VisitConditional(Conditional* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Conditional"); Comment cmnt(masm_, "[ Conditional");
JumpTarget then; JumpTarget then;
JumpTarget else_; JumpTarget else_;
@ -2972,10 +2935,8 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
&done); &done);
slow.Bind(); slow.Bind();
VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->EmitPush(cp); frame_->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name())); frame_->EmitPush(Operand(slot->var()->name()));
frame_->EmitPush(r0);
if (typeof_state == INSIDE_TYPEOF) { if (typeof_state == INSIDE_TYPEOF) {
frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
@ -2990,16 +2951,17 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
Register scratch = VirtualFrame::scratch0(); Register scratch = VirtualFrame::scratch0();
TypeInfo info = type_info(slot); TypeInfo info = type_info(slot);
frame_->EmitPush(SlotOperand(slot, scratch), info); frame_->EmitPush(SlotOperand(slot, scratch), info);
if (slot->var()->mode() == Variable::CONST) { if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been // Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined' // initialized yet) which needs to be converted into the 'undefined'
// value. // value.
Comment cmnt(masm_, "[ Unhole const"); Comment cmnt(masm_, "[ Unhole const");
frame_->EmitPop(scratch); Register tos = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip); __ cmp(tos, ip);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq); __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
frame_->EmitPush(scratch); frame_->EmitPush(tos);
} }
} }
} }
@ -3007,6 +2969,7 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
TypeofState state) { TypeofState state) {
VirtualFrame::RegisterAllocationScope scope(this);
LoadFromSlot(slot, state); LoadFromSlot(slot, state);
// Bail out quickly if we're not using lazy arguments allocation. // Bail out quickly if we're not using lazy arguments allocation.
@ -3015,17 +2978,15 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// ... or if the slot isn't a non-parameter arguments slot. // ... or if the slot isn't a non-parameter arguments slot.
if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
VirtualFrame::SpilledScope spilled_scope(frame_); // Load the loaded value from the stack into a register but leave it on the
// Load the loaded value from the stack into r0 but leave it on the
// stack. // stack.
__ ldr(r0, MemOperand(sp, 0)); Register tos = frame_->Peek();
// If the loaded value is the sentinel that indicates that we // If the loaded value is the sentinel that indicates that we
// haven't loaded the arguments object yet, we need to do it now. // haven't loaded the arguments object yet, we need to do it now.
JumpTarget exit; JumpTarget exit;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip); __ cmp(tos, ip);
exit.Branch(ne); exit.Branch(ne);
frame_->Drop(); frame_->Drop();
StoreArgumentsObject(false); StoreArgumentsObject(false);
@ -3035,14 +2996,13 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
ASSERT(slot != NULL); ASSERT(slot != NULL);
VirtualFrame::RegisterAllocationScope scope(this);
if (slot->type() == Slot::LOOKUP) { if (slot->type() == Slot::LOOKUP) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(slot->var()->is_dynamic()); ASSERT(slot->var()->is_dynamic());
// For now, just do a runtime call. // For now, just do a runtime call.
frame_->EmitPush(cp); frame_->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name())); frame_->EmitPush(Operand(slot->var()->name()));
frame_->EmitPush(r0);
if (init_state == CONST_INIT) { if (init_state == CONST_INIT) {
// Same as the case for a normal store, but ignores attribute // Same as the case for a normal store, but ignores attribute
@ -3071,7 +3031,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
} else { } else {
ASSERT(!slot->var()->is_dynamic()); ASSERT(!slot->var()->is_dynamic());
Register scratch = VirtualFrame::scratch0(); Register scratch = VirtualFrame::scratch0();
VirtualFrame::RegisterAllocationScope scope(this); Register scratch2 = VirtualFrame::scratch1();
// The frame must be spilled when branching to this target. // The frame must be spilled when branching to this target.
JumpTarget exit; JumpTarget exit;
@ -3085,7 +3045,6 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
__ ldr(scratch, SlotOperand(slot, scratch)); __ ldr(scratch, SlotOperand(slot, scratch));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip); __ cmp(scratch, ip);
frame_->SpillAll();
exit.Branch(ne); exit.Branch(ne);
} }
@ -3104,18 +3063,18 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// Skip write barrier if the written value is a smi. // Skip write barrier if the written value is a smi.
__ tst(tos, Operand(kSmiTagMask)); __ tst(tos, Operand(kSmiTagMask));
// We don't use tos any more after here. // We don't use tos any more after here.
VirtualFrame::SpilledScope spilled_scope(frame_);
exit.Branch(eq); exit.Branch(eq);
// scratch is loaded with context when calling SlotOperand above. // scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
// r1 could be identical with tos, but that doesn't matter. // We need an extra register. Until we have a way to do that in the
__ RecordWrite(scratch, Operand(offset), r3, r1); // virtual frame we will cheat and ask for a free TOS register.
Register scratch3 = frame_->GetTOSRegister();
__ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
} }
// If we definitely did not jump over the assignment, we do not need // If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole // to bind the exit label. Doing so can defeat peephole
// optimization. // optimization.
if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) { if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
frame_->SpillAll();
exit.Bind(); exit.Bind();
} }
} }
@ -3289,42 +3248,51 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ RexExp Literal"); Comment cmnt(masm_, "[ RexExp Literal");
Register tmp = VirtualFrame::scratch0();
// Free up a TOS register that can be used to push the literal.
Register literal = frame_->GetTOSRegister();
// Retrieve the literal array and check the allocated entry. // Retrieve the literal array and check the allocated entry.
// Load the function of this activation. // Load the function of this activation.
__ ldr(r1, frame_->Function()); __ ldr(tmp, frame_->Function());
// Load the literals array of the function. // Load the literals array of the function.
__ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index. // Load the literal at the ast saved index.
int literal_offset = int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize; FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ ldr(r2, FieldMemOperand(r1, literal_offset)); __ ldr(literal, FieldMemOperand(tmp, literal_offset));
JumpTarget done; JumpTarget done;
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r2, ip); __ cmp(literal, ip);
// This branch locks the virtual frame at the done label to match the
// one we have here, where the literal register is not on the stack and
// nothing is spilled.
done.Branch(ne); done.Branch(ne);
// If the entry is undefined we call the runtime system to computed // If the entry is undefined we call the runtime system to compute
// the literal. // the literal.
frame_->EmitPush(r1); // literal array (0) // literal array (0)
__ mov(r0, Operand(Smi::FromInt(node->literal_index()))); frame_->EmitPush(tmp);
frame_->EmitPush(r0); // literal index (1) // literal index (1)
__ mov(r0, Operand(node->pattern())); // RegExp pattern (2) frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
frame_->EmitPush(r0); // RegExp pattern (2)
__ mov(r0, Operand(node->flags())); // RegExp flags (3) frame_->EmitPush(Operand(node->pattern()));
frame_->EmitPush(r0); // RegExp flags (3)
frame_->EmitPush(Operand(node->flags()));
frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ mov(r2, Operand(r0)); __ Move(literal, r0);
// This call to bind will get us back to the virtual frame we had before
// where things are not spilled and the literal register is not on the stack.
done.Bind(); done.Bind();
// Push the literal. // Push the literal.
frame_->EmitPush(r2); frame_->EmitPush(literal);
ASSERT_EQ(original_height + 1, frame_->height()); ASSERT_EQ(original_height + 1, frame_->height());
} }
@ -3333,20 +3301,20 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ObjectLiteral"); Comment cmnt(masm_, "[ ObjectLiteral");
Register literal = frame_->GetTOSRegister();
// Load the function of this activation. // Load the function of this activation.
__ ldr(r3, frame_->Function()); __ ldr(literal, frame_->Function());
// Literal array. // Literal array.
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset)); __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
frame_->EmitPush(literal);
// Literal index. // Literal index.
__ mov(r2, Operand(Smi::FromInt(node->literal_index()))); frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
// Constant properties. // Constant properties.
__ mov(r1, Operand(node->constant_properties())); frame_->EmitPush(Operand(node->constant_properties()));
// Should the object literal have fast elements? // Should the object literal have fast elements?
__ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0))); frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
if (node->depth() > 1) { if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4); frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else { } else {
@ -3369,37 +3337,33 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
if (key->handle()->IsSymbol()) { if (key->handle()->IsSymbol()) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Load(value); Load(value);
frame_->EmitPop(r0); frame_->PopToR0();
// Fetch the object literal.
frame_->SpillAllButCopyTOSToR1();
__ mov(r2, Operand(key->handle())); __ mov(r2, Operand(key->handle()));
__ ldr(r1, frame_->Top()); // Load the receiver.
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
break; break;
} }
// else fall through // else fall through
case ObjectLiteral::Property::PROTOTYPE: { case ObjectLiteral::Property::PROTOTYPE: {
__ ldr(r0, frame_->Top()); frame_->Dup();
frame_->EmitPush(r0); // dup the result
Load(key); Load(key);
Load(value); Load(value);
frame_->CallRuntime(Runtime::kSetProperty, 3); frame_->CallRuntime(Runtime::kSetProperty, 3);
break; break;
} }
case ObjectLiteral::Property::SETTER: { case ObjectLiteral::Property::SETTER: {
__ ldr(r0, frame_->Top()); frame_->Dup();
frame_->EmitPush(r0);
Load(key); Load(key);
__ mov(r0, Operand(Smi::FromInt(1))); frame_->EmitPush(Operand(Smi::FromInt(1)));
frame_->EmitPush(r0);
Load(value); Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4); frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break; break;
} }
case ObjectLiteral::Property::GETTER: { case ObjectLiteral::Property::GETTER: {
__ ldr(r0, frame_->Top()); frame_->Dup();
frame_->EmitPush(r0);
Load(key); Load(key);
__ mov(r0, Operand(Smi::FromInt(0))); frame_->EmitPush(Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
Load(value); Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4); frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break; break;
@ -3414,16 +3378,16 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ArrayLiteral"); Comment cmnt(masm_, "[ ArrayLiteral");
Register tos = frame_->GetTOSRegister();
// Load the function of this activation. // Load the function of this activation.
__ ldr(r2, frame_->Function()); __ ldr(tos, frame_->Function());
// Load the literals array of the function. // Load the literals array of the function.
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset)); __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
__ mov(r1, Operand(Smi::FromInt(node->literal_index()))); frame_->EmitPush(tos);
__ mov(r0, Operand(node->constant_elements())); frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit()); frame_->EmitPush(Operand(node->constant_elements()));
int length = node->values()->length(); int length = node->values()->length();
if (node->depth() > 1) { if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3); frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
@ -3450,10 +3414,10 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// The property must be set by generated code. // The property must be set by generated code.
Load(value); Load(value);
frame_->EmitPop(r0); frame_->PopToR0();
// Fetch the object literal. // Fetch the object literal.
__ ldr(r1, frame_->Top()); frame_->SpillAllButCopyTOSToR1();
// Get the elements array. // Get the elements array.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
@ -3863,7 +3827,6 @@ void CodeGenerator::VisitCall(Call* node) {
// ------------------------------------------------------------------------ // ------------------------------------------------------------------------
if (var != NULL && var->is_possibly_eval()) { if (var != NULL && var->is_possibly_eval()) {
VirtualFrame::SpilledScope spilled_scope(frame_);
// ---------------------------------- // ----------------------------------
// JavaScript example: 'eval(arg)' // eval is not known to be shadowed // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
// ---------------------------------- // ----------------------------------
@ -3877,8 +3840,7 @@ void CodeGenerator::VisitCall(Call* node) {
Load(function); Load(function);
// Allocate a frame slot for the receiver. // Allocate a frame slot for the receiver.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex); frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r2);
// Load the arguments. // Load the arguments.
int arg_count = args->length(); int arg_count = args->length();
@ -3886,6 +3848,8 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i)); Load(args->at(i));
} }
VirtualFrame::SpilledScope spilled_scope(frame_);
// If we know that eval can only be shadowed by eval-introduced // If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly // variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a // in generated code. If we succeed, there is no need to perform a
@ -5201,7 +5165,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ UnaryOperation"); Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op(); Token::Value op = node->op();
@ -5273,8 +5236,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break; break;
case Token::SUB: { case Token::SUB: {
VirtualFrame::SpilledScope spilled(frame_); frame_->PopToR0();
frame_->EmitPop(r0);
GenericUnaryOpStub stub(Token::SUB, overwrite); GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0); frame_->CallStub(&stub, 0);
frame_->EmitPush(r0); // r0 has result frame_->EmitPush(r0); // r0 has result
@ -5282,23 +5244,28 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} }
case Token::BIT_NOT: { case Token::BIT_NOT: {
// smi check Register tos = frame_->PopToRegister();
VirtualFrame::SpilledScope spilled(frame_); JumpTarget not_smi_label;
frame_->EmitPop(r0);
JumpTarget smi_label;
JumpTarget continue_label; JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask)); // Smi check.
smi_label.Branch(eq); __ tst(tos, Operand(kSmiTagMask));
not_smi_label.Branch(ne);
__ mvn(tos, Operand(tos));
__ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag.
frame_->EmitPush(tos);
// The fast case is the first to jump to the continue label, so it gets
// to decide the virtual frame layout.
continue_label.Jump();
not_smi_label.Bind();
frame_->SpillAll();
__ Move(r0, tos);
GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
frame_->CallStub(&stub, 0); frame_->CallStub(&stub, 0);
continue_label.Jump(); frame_->EmitPush(r0);
smi_label.Bind();
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
continue_label.Bind(); continue_label.Bind();
frame_->EmitPush(r0); // r0 has result
break; break;
} }
@ -5308,16 +5275,16 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break; break;
case Token::ADD: { case Token::ADD: {
VirtualFrame::SpilledScope spilled(frame_); Register tos = frame_->Peek();
frame_->EmitPop(r0);
// Smi check. // Smi check.
JumpTarget continue_label; JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask)); __ tst(tos, Operand(kSmiTagMask));
continue_label.Branch(eq); continue_label.Branch(eq);
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1); frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
frame_->EmitPush(r0);
continue_label.Bind(); continue_label.Bind();
frame_->EmitPush(r0); // r0 has result
break; break;
} }
default: default:
@ -5335,6 +5302,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
Comment cmnt(masm_, "[ CountOperation"); Comment cmnt(masm_, "[ CountOperation");
VirtualFrame::RegisterAllocationScope scope(this);
bool is_postfix = node->is_postfix(); bool is_postfix = node->is_postfix();
bool is_increment = node->op() == Token::INC; bool is_increment = node->op() == Token::INC;
@ -5478,7 +5446,6 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// after evaluating the left hand side (due to the shortcut // after evaluating the left hand side (due to the shortcut
// semantics), but the compiler must (statically) know if the result // semantics), but the compiler must (statically) know if the result
// of compiling the binary operation is materialized or not. // of compiling the binary operation is materialized or not.
VirtualFrame::SpilledScope spilled_scope(frame_);
if (node->op() == Token::AND) { if (node->op() == Token::AND) {
JumpTarget is_true; JumpTarget is_true;
LoadCondition(node->left(), &is_true, false_target(), false); LoadCondition(node->left(), &is_true, false_target(), false);
@ -5663,8 +5630,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
if (left_is_null || right_is_null) { if (left_is_null || right_is_null) {
Load(left_is_null ? right : left); Load(left_is_null ? right : left);
Register tos = frame_->PopToRegister(); Register tos = frame_->PopToRegister();
// JumpTargets can't cope with register allocation yet.
frame_->SpillAll();
__ LoadRoot(ip, Heap::kNullValueRootIndex); __ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos, ip); __ cmp(tos, ip);
@ -5707,9 +5672,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
LoadTypeofExpression(operation->expression()); LoadTypeofExpression(operation->expression());
Register tos = frame_->PopToRegister(); Register tos = frame_->PopToRegister();
// JumpTargets can't cope with register allocation yet.
frame_->SpillAll();
Register scratch = VirtualFrame::scratch0(); Register scratch = VirtualFrame::scratch0();
if (check->Equals(Heap::number_symbol())) { if (check->Equals(Heap::number_symbol())) {
@ -5830,7 +5792,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
break; break;
case Token::IN: { case Token::IN: {
VirtualFrame::SpilledScope scope(frame_);
Load(left); Load(left);
Load(right); Load(right);
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2); frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
@ -5839,7 +5800,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
} }
case Token::INSTANCEOF: { case Token::INSTANCEOF: {
VirtualFrame::SpilledScope scope(frame_);
Load(left); Load(left);
Load(right); Load(right);
InstanceofStub stub; InstanceofStub stub;
@ -5937,10 +5897,15 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
}; };
// Takes key and register in r0 and r1 or vice versa. Returns result
// in r0.
void DeferredReferenceGetKeyedValue::Generate() { void DeferredReferenceGetKeyedValue::Generate() {
ASSERT((key_.is(r0) && receiver_.is(r1)) || ASSERT((key_.is(r0) && receiver_.is(r1)) ||
(key_.is(r1) && receiver_.is(r0))); (key_.is(r1) && receiver_.is(r0)));
VirtualFrame copied_frame(*frame_state()->frame());
copied_frame.SpillAll();
Register scratch1 = VirtualFrame::scratch0(); Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1(); Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2); __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
@ -5961,6 +5926,13 @@ void DeferredReferenceGetKeyedValue::Generate() {
// keyed load has been inlined. // keyed load has been inlined.
__ nop(PROPERTY_ACCESS_INLINED); __ nop(PROPERTY_ACCESS_INLINED);
// Now go back to the frame that we entered with. This will not overwrite
// the receiver or key registers since they were not in use when we came
// in. The instructions emitted by this merge are skipped over by the
// inline load patching mechanism when looking for the branch instruction
// that tells it where the code to patch is.
copied_frame.MergeTo(frame_state()->frame());
// Block the constant pool for one more instruction after leaving this // Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the // constant pool block scope to include the branch instruction ending the
// deferred code. // deferred code.
@ -6114,7 +6086,6 @@ void CodeGenerator::EmitKeyedLoad() {
bool key_is_known_smi = frame_->KnownSmiAt(0); bool key_is_known_smi = frame_->KnownSmiAt(0);
Register key = frame_->PopToRegister(); Register key = frame_->PopToRegister();
Register receiver = frame_->PopToRegister(key); Register receiver = frame_->PopToRegister(key);
VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects key and receiver in registers. // The deferred code expects key and receiver in registers.
DeferredReferenceGetKeyedValue* deferred = DeferredReferenceGetKeyedValue* deferred =
@ -6152,10 +6123,12 @@ void CodeGenerator::EmitKeyedLoad() {
// Get the elements array from the receiver and check that it // Get the elements array from the receiver and check that it
// is not a dictionary. // is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
if (FLAG_debug_code) {
__ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch2, ip); __ cmp(scratch2, ip);
deferred->Branch(ne); __ Assert(eq, "JSObject with fast elements map has slow elements");
}
// Check that key is within bounds. Use unsigned comparison to handle // Check that key is within bounds. Use unsigned comparison to handle
// negative keys. // negative keys.
@ -6176,7 +6149,7 @@ void CodeGenerator::EmitKeyedLoad() {
__ mov(r0, scratch1); __ mov(r0, scratch1);
// Make sure that the expected number of instructions are generated. // Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch, ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
masm_->InstructionsGeneratedSince(&check_inlined_codesize)); masm_->InstructionsGeneratedSince(&check_inlined_codesize));
} }
@ -6204,9 +6177,9 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// Load the value, key and receiver from the stack. // Load the value, key and receiver from the stack.
Register value = frame_->PopToRegister(); Register value = frame_->PopToRegister();
Register key = frame_->PopToRegister(value); Register key = frame_->PopToRegister(value);
VirtualFrame::SpilledScope spilled(frame_);
Register receiver = r2; Register receiver = r2;
frame_->EmitPop(receiver); frame_->EmitPop(receiver);
VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects value, key and receiver in registers. // The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred = DeferredReferenceSetKeyedValue* deferred =

4
deps/v8/src/arm/codegen-arm.h

@ -276,7 +276,9 @@ class CodeGenerator: public AstVisitor {
static int InlineRuntimeCallArgumentsCount(Handle<String> name); static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store. // Constants related to patching of inlined load/store.
static const int kInlinedKeyedLoadInstructionsAfterPatch = 17; static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 27 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5; static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
private: private:
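Editor's note: the inline keyed-load patcher (see ic-arm.cc below) finds the map-check ldr by stepping back a fixed number of instructions from the end of the inlined sequence, so this constant has to match exactly what the code generator emitted; with FLAG_debug_code the extra elements-map Assert (and its constant-size Abort) raises the count from 13 to 27, which is why the constant becomes a function. A minimal sketch of the address arithmetic, not V8 code, with a hypothetical end address; only the 4-byte instruction size and the 13/27 counts come from this change:

#include <cstdint>
#include <iostream>

int main() {
  const int kInstrSize = 4;                      // ARM instructions are 4 bytes
  const bool debug_code = false;                 // stands in for FLAG_debug_code
  const int instructions_after_patch = debug_code ? 27 : 13;  // values from this change

  uintptr_t inline_end_address = 0x4000;         // hypothetical end of the inlined load
  uintptr_t ldr_map_instr_address =
      inline_end_address - instructions_after_patch * kInstrSize;

  // The patcher rewrites the map constant loaded at this address.
  std::cout << std::hex << ldr_map_instr_address << std::endl;  // prints 3fcc
  return 0;
}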

298
deps/v8/src/arm/ic-arm.cc

@ -47,71 +47,97 @@ namespace internal {
#define __ ACCESS_MASM(masm) #define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, global_object);
__ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(eq, global_object);
__ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, global_object);
}
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register elements,
Register t0,
Register t1,
Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// elements: holds the property dictionary on fall through.
// Scratch registers:
// t0: used to hold the receiver map.
// t1: used to hold the receiver instance type, receiver bit mask and
// elements map.
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
// Check that the receiver is a valid JS object.
__ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
__ b(lt, miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
GenerateGlobalInstanceTypeCheck(masm, t1, miss);
// Check that the receiver does not require access checks and does not
// have a named interceptor.
__ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
__ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor)));
__ b(nz, miss);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(t1, ip);
__ b(nz, miss);
}
// Helper function used from LoadIC/CallIC GenerateNormal. // Helper function used from LoadIC/CallIC GenerateNormal.
// receiver: Receiver. It is not clobbered if a jump to the miss label is //
// done // elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
// name: Property name. It is not clobbered if a jump to the miss label is // name: Property name. It is not clobbered if a jump to the miss label is
// done // done
// result: Register for the result. It is only updated if a jump to the miss // result: Register for the result. It is only updated if a jump to the miss
// label is not done. Can be the same as receiver or name clobbering // label is not done. Can be the same as elements or name clobbering
// one of these in the case of not jumping to the miss label. // one of these in the case of not jumping to the miss label.
// The three scratch registers need to be different from the receiver, name and // The two scratch registers need to be different from elements, name and
// result. // result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss, Label* miss,
Register receiver, Register elements,
Register name, Register name,
Register result, Register result,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2) {
Register scratch3,
DictionaryCheck check_dictionary) {
// Main use of the scratch registers. // Main use of the scratch registers.
// scratch1: Used to hold the property dictionary. // scratch1: Used as temporary and to hold the capacity of the property
// scratch2: Used as temporary and to hold the capacity of the property
// dictionary. // dictionary.
// scratch3: Used as temporary. // scratch2: Used as temporary.
Label done; Label done;
// Check for the absence of an interceptor.
// Load the map into scratch1.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));
// Bail out if the receiver has a named interceptor.
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
__ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
__ b(nz, miss);
// Bail out if we have a JS global proxy object.
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, miss);
// Possible work-around for http://crbug.com/16276.
// See also: http://codereview.chromium.org/155418.
__ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, miss);
__ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(eq, miss);
// Load the properties array.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
if (check_dictionary == CHECK_DICTIONARY) {
__ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(scratch2, ip);
__ b(ne, miss);
}
// Compute the capacity mask. // Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize + const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize; StringDictionary::kCapacityIndex * kPointerSize;
__ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset)); __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
__ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize)); // convert smi to int __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
__ sub(scratch2, scratch2, Operand(1)); __ sub(scratch1, scratch1, Operand(1));
const int kElementsStartOffset = StringDictionary::kHeaderSize + const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize; StringDictionary::kElementsStartIndex * kPointerSize;
@ -122,26 +148,26 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
static const int kProbes = 4; static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) { for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask. // Compute the masked index: (hash + i + i * i) & mask.
__ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset)); __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
if (i > 0) { if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting // Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right // the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction. // shifted in the following and instruction.
ASSERT(StringDictionary::GetProbeOffset(i) < ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset)); 1 << (32 - String::kHashFieldOffset));
__ add(scratch3, scratch3, Operand( __ add(scratch2, scratch2, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift)); StringDictionary::GetProbeOffset(i) << String::kHashShift));
} }
__ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift)); __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
// Scale the index by multiplying by the element size. // Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3); ASSERT(StringDictionary::kEntrySize == 3);
// scratch3 = scratch3 * 3. // scratch2 = scratch2 * 3.
__ add(scratch3, scratch3, Operand(scratch3, LSL, 1)); __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name. // Check if the key is identical to the name.
__ add(scratch3, scratch1, Operand(scratch3, LSL, 2)); __ add(scratch2, elements, Operand(scratch2, LSL, 2));
__ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset)); __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
__ cmp(name, Operand(ip)); __ cmp(name, Operand(ip));
if (i != kProbes - 1) { if (i != kProbes - 1) {
__ b(eq, &done); __ b(eq, &done);
@ -151,15 +177,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
} }
// Check that the value is a normal property. // Check that the value is a normal property.
__ bind(&done); // scratch3 == scratch1 + 4 * index __ bind(&done); // scratch2 == elements + 4 * index
__ ldr(scratch2, __ ldr(scratch1,
FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize)); FieldMemOperand(scratch2, kElementsStartOffset + 2 * kPointerSize));
__ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize)); __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss); __ b(ne, miss);
// Get the value at the masked, scaled index and return. // Get the value at the masked, scaled index and return.
__ ldr(result, __ ldr(result,
FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize)); FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
} }
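Editor's note: the inlined probe above is a bounded quadratic probe over the StringDictionary: the capacity mask comes from the dictionary header, probe i adds i + i*i to the name's hash, entries are three words wide (key, value, details), and only four probes are attempted before jumping to the miss label. A rough stand-alone model of the index arithmetic, not V8 code; the types, hashing and the details encoding here are invented for illustration:

#include <cstdint>
#include <string>
#include <vector>
#include <functional>
#include <iostream>

struct Entry { std::string key; int value; int details; };

// table.size() is assumed to be a power of two, mirroring the capacity mask.
int Probe(const std::vector<Entry>& table, const std::string& name) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  const uint32_t hash = static_cast<uint32_t>(std::hash<std::string>()(name));
  const int kProbes = 4;                       // same unrolled probe count as the stub
  for (int i = 0; i < kProbes; ++i) {
    uint32_t index = (hash + i + i * i) & mask;
    const Entry& e = table[index];
    if (e.key == name && e.details == 0) return e.value;  // a NORMAL property
  }
  return -1;                                   // give up: fall back to the miss label
}

int main() {
  std::vector<Entry> table(8);
  table[std::hash<std::string>()("x") & 7] = Entry{"x", 42, 0};
  std::cout << Probe(table, "x") << std::endl; // 42
  return 0;
}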
@ -310,6 +336,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register receiver,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
int interceptor_bit,
Label* slow) { Label* slow) {
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ BranchOnSmi(receiver, slow); __ BranchOnSmi(receiver, slow);
@ -317,8 +344,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field. // Check bit field.
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
__ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); __ tst(scratch2,
__ b(ne, slow); Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ b(nz, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type. // Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object, // In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string // we enter the runtime system to make sure that indexing into string
@ -502,13 +530,11 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
} }
static void GenerateNormalHelper(MacroAssembler* masm, static void GenerateFunctionTailCall(MacroAssembler* masm,
int argc, int argc,
bool is_global_object,
Label* miss, Label* miss,
Register scratch) { Register scratch) {
// Search dictionary - put result in register r1. // r1: function
GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);
// Check that the value isn't a smi. // Check that the value isn't a smi.
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
@ -518,13 +544,6 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE); __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
__ b(ne, miss); __ b(ne, miss);
// Patch the receiver with the global proxy if necessary.
if (is_global_object) {
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
__ str(r0, MemOperand(sp, argc * kPointerSize));
}
// Invoke the function. // Invoke the function.
ParameterCount actual(argc); ParameterCount actual(argc);
__ InvokeFunction(r1, actual, JUMP_FUNCTION); __ InvokeFunction(r1, actual, JUMP_FUNCTION);
@ -536,53 +555,18 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// -- r2 : name // -- r2 : name
// -- lr : return address // -- lr : return address
// ----------------------------------- // -----------------------------------
Label miss, global_object, non_global_object; Label miss;
// Get the receiver of the function from the stack into r1. // Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize)); __ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi. GenerateDictionaryLoadReceiverCheck(masm, r1, r0, r3, r4, &miss);
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the receiver is a valid JS object. Put the map in r3. // r0: elements
__ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE); // Search the dictionary - put result in register r1.
__ b(lt, &miss); GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
// Check for access to global object. GenerateFunctionTailCall(masm, argc, &miss, r4);
__ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, &global_object);
__ cmp(r0, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(ne, &non_global_object);
// Accessing global object: Load and invoke.
__ bind(&global_object);
// Check that the global object does not require access checks.
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &miss);
GenerateNormalHelper(masm, argc, true, &miss, r4);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
__ bind(&non_global_object);
__ cmp(r0, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, &global_proxy);
// Check that the non-global, non-global-proxy object does not
// require access checks.
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &miss);
__ bind(&invoke);
GenerateNormalHelper(masm, argc, false, &miss, r4);
// Global object access: Check access rights.
__ bind(&global_proxy);
__ CheckAccessGlobalProxy(r1, r0, &miss);
__ b(&invoke);
__ bind(&miss); __ bind(&miss);
} }
@ -594,6 +578,12 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// -- lr : return address // -- lr : return address
// ----------------------------------- // -----------------------------------
if (id == IC::kCallIC_Miss) {
__ IncrementCounter(&Counters::call_miss, 1, r3, r4);
} else {
__ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4);
}
// Get the receiver of the function from the stack. // Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize)); __ ldr(r3, MemOperand(sp, argc * kPointerSize));
@ -614,6 +604,8 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
__ LeaveInternalFrame(); __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort. // Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global; Label invoke, global;
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
@ -627,10 +619,11 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
__ bind(&global); __ bind(&global);
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ str(r2, MemOperand(sp, argc * kPointerSize)); __ str(r2, MemOperand(sp, argc * kPointerSize));
__ bind(&invoke);
}
// Invoke the function. // Invoke the function.
ParameterCount actual(argc); ParameterCount actual(argc);
__ bind(&invoke);
__ InvokeFunction(r1, actual, JUMP_FUNCTION); __ InvokeFunction(r1, actual, JUMP_FUNCTION);
} }
@ -698,7 +691,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Now the key is known to be a smi. This place is also jumped to from below // Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi. // where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &slow_call); GenerateKeyedLoadReceiverCheck(
masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad( GenerateFastArrayLoad(
masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load); masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
@ -708,14 +702,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// receiver in r1 is not used after this point. // receiver in r1 is not used after this point.
// r2: key // r2: key
// r1: function // r1: function
GenerateFunctionTailCall(masm, argc, &slow_call, r0);
// Check that the value in r1 is a JSFunction.
__ BranchOnSmi(r1, &slow_call);
__ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
__ b(ne, &slow_call);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(r1, actual, JUMP_FUNCTION);
__ bind(&check_number_dictionary); __ bind(&check_number_dictionary);
// r2: key // r2: key
@ -751,16 +738,16 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// If the receiver is a regular JS object with slow properties then do // If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary. // a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe. // Otherwise do the monomorphic cache probe.
GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &lookup_monomorphic_cache); GenerateKeyedLoadReceiverCheck(
masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset)); __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip); __ cmp(r3, ip);
__ b(ne, &lookup_monomorphic_cache); __ b(ne, &lookup_monomorphic_cache);
GenerateDictionaryLoad( GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
masm, &slow_load, r1, r2, r1, r0, r3, r4, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3); __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
__ jmp(&do_call); __ jmp(&do_call);
@ -826,36 +813,14 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- r0 : receiver // -- r0 : receiver
// -- sp[0] : receiver // -- sp[0] : receiver
// ----------------------------------- // -----------------------------------
Label miss, probe, global; Label miss;
// Check that the receiver isn't a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the receiver is a valid JS object. Put the map in r3.
__ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
__ b(lt, &miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
// Check for access to global object (unlikely).
__ cmp(r1, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, &global);
// Check for non-global object that requires access check. GenerateDictionaryLoadReceiverCheck(masm, r0, r1, r3, r4, &miss);
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &miss);
__ bind(&probe); // r1: elements
GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY); GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
__ Ret(); __ Ret();
// Global object access: Check access rights.
__ bind(&global);
__ CheckAccessGlobalProxy(r0, r1, &miss);
__ b(&probe);
// Cache miss: Jump to runtime. // Cache miss: Jump to runtime.
__ bind(&miss); __ bind(&miss);
GenerateMiss(masm); GenerateMiss(masm);
@ -870,6 +835,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- sp[0] : receiver // -- sp[0] : receiver
// ----------------------------------- // -----------------------------------
__ IncrementCounter(&Counters::load_miss, 1, r3, r4);
__ mov(r3, r0); __ mov(r3, r0);
__ Push(r3, r2); __ Push(r3, r2);
@ -963,7 +930,7 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
// Patch the map check. // Patch the map check.
Address ldr_map_instr_address = Address ldr_map_instr_address =
inline_end_address - inline_end_address -
(CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch * (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
Assembler::kInstrSize); Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address, Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map)); reinterpret_cast<Address>(map));
@ -1013,6 +980,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// -- r1 : receiver // -- r1 : receiver
// ----------------------------------- // -----------------------------------
__ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4);
__ Push(r1, r0); __ Push(r1, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss)); ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
@ -1045,14 +1014,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Register key = r0; Register key = r0;
Register receiver = r1; Register receiver = r1;
GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r3, &slow);
// Check that the key is a smi. // Check that the key is a smi.
__ BranchOnNotSmi(key, &check_string); __ BranchOnNotSmi(key, &check_string);
__ bind(&index_smi); __ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below // Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi. // where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
GenerateFastArrayLoad( GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow); masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3); __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
@ -1095,12 +1065,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_string); __ bind(&check_string);
GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow); GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup // If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary. // cache. Otherwise probe the dictionary.
__ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset)); __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip); __ cmp(r4, ip);
__ b(eq, &probe_dictionary); __ b(eq, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash // Load the map of the receiver, compute the keyed lookup cache hash
@ -1148,9 +1121,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Do a quick inline probe of the receiver's dictionary, if it // Do a quick inline probe of the receiver's dictionary, if it
// exists. // exists.
__ bind(&probe_dictionary); __ bind(&probe_dictionary);
// r1: receiver
// r0: key
// r3: elements
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0. // Load the property to r0.
GenerateDictionaryLoad( GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3); __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
__ Ret(); __ Ret();
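Editor's note: the reshuffled generic keyed load now checks the receiver per key kind (the indexed-interceptor bit for smi keys, the named-interceptor bit for string keys), and the inline dictionary probe is only taken for non-global receivers with dictionary properties, since GenerateDictionaryLoad no longer re-checks those conditions. A tiny model of the resulting dispatch, not V8 code; the names and boolean fields are invented, where the real code tests map bits and instance types:

#include <string>
#include <iostream>

struct Receiver {
  bool access_check = false, indexed_interceptor = false, named_interceptor = false;
  bool dictionary_properties = false, global = false;
};

std::string KeyedLoadPath(const Receiver& r, bool key_is_smi) {
  if (key_is_smi) {
    if (r.access_check || r.indexed_interceptor) return "runtime (slow)";
    return "fast / pixel / number-dictionary elements";
  }
  if (r.access_check || r.named_interceptor) return "runtime (slow)";
  if (r.dictionary_properties) {
    return r.global ? "runtime (slow)" : "inline StringDictionary probe";
  }
  return "keyed lookup cache, then runtime";
}

int main() {
  Receiver r; r.dictionary_properties = true;
  std::cout << KeyedLoadPath(r, true) << "\n" << KeyedLoadPath(r, false) << "\n";
  return 0;
}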

16
deps/v8/src/arm/jump-target-arm.cc

@ -61,9 +61,17 @@ void JumpTarget::DoJump() {
} else { } else {
// Clone the current frame to use as the expected one at the target. // Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame()); set_entry_frame(cgen()->frame());
// Zap the fall-through frame since the jump was unconditional.
RegisterFile empty; RegisterFile empty;
cgen()->SetFrame(NULL, &empty); cgen()->SetFrame(NULL, &empty);
} }
if (entry_label_.is_bound()) {
// You can't jump backwards to an already bound label unless you admitted
// up front that this was a bidirectional jump target. Bidirectional jump
// targets will zap their type info when bound in case some later virtual
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
__ jmp(&entry_label_); __ jmp(&entry_label_);
} }
@ -83,6 +91,13 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// Clone the current frame to use as the expected one at the target. // Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame()); set_entry_frame(cgen()->frame());
} }
if (entry_label_.is_bound()) {
// You can't branch backwards to an already bound label unless you admitted
// up front that this was a bidirectional jump target. Bidirectional jump
// targets will zap their type info when bound in case some later virtual
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
__ b(cc, &entry_label_); __ b(cc, &entry_label_);
if (cc == al) { if (cc == al) {
cgen()->DeleteFrame(); cgen()->DeleteFrame();
@ -121,6 +136,7 @@ void JumpTarget::DoBind() {
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters()); ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (cgen()->has_valid_frame()) { if (cgen()->has_valid_frame()) {
if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
// If there is a current frame we can use it on the fall through. // If there is a current frame we can use it on the fall through.
if (!entry_frame_set_) { if (!entry_frame_set_) {
entry_frame_ = *cgen()->frame(); entry_frame_ = *cgen()->frame();
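Editor's note: the new asserts encode the rule that only BIDIRECTIONAL jump targets may be reached by backward jumps or branches, because DoBind() wipes the frame's known-smi bits for such targets so that later, less precise frames can still merge in safely. A toy illustration of that invariant, not V8 code; the type below is invented:

#include <cstdint>
#include <cassert>

struct VirtualFrameModel {
  uint32_t tos_known_smi_map = 0;
  void ForgetTypeInfo() { tos_known_smi_map = 0; }
};

int main() {
  VirtualFrameModel fall_through;
  fall_through.tos_known_smi_map = 1;   // top of stack proven to be a smi here
  // Binding a BIDIRECTIONAL target: a backward branch may arrive later with a
  // frame that cannot prove this, so the assumption is dropped at bind time.
  fall_through.ForgetTypeInfo();
  assert(fall_through.tos_known_smi_map == 0);
  return 0;
}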

13
deps/v8/src/arm/macro-assembler-arm.cc

@ -1548,6 +1548,8 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
void MacroAssembler::Abort(const char* msg) { void MacroAssembler::Abort(const char* msg) {
Label abort_start;
bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC // We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned // problems, however msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is // properly. Instead, we pass an aligned pointer that is
@ -1571,6 +1573,17 @@ void MacroAssembler::Abort(const char* msg) {
push(r0); push(r0);
CallRuntime(Runtime::kAbort, 2); CallRuntime(Runtime::kAbort, 2);
// will not return here // will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
}
} }
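Editor's note: the nop padding keeps Abort at a fixed ten instructions whenever the constant pool is blocked, which is what lets debug builds keep a predictable instruction count for the patched inline loads above. A small stand-alone sketch of the padding idea, not V8 code; only the count of 10 is taken from kExpectedAbortInstructions in this change:

#include <vector>
#include <cassert>

enum Instr { kNop, kBody };

void EmitFixedSizeAbort(std::vector<Instr>* code, int body_instructions) {
  const int kExpectedAbortInstructions = 10;
  assert(body_instructions <= kExpectedAbortInstructions);
  for (int i = 0; i < body_instructions; ++i) code->push_back(kBody);
  while (body_instructions++ < kExpectedAbortInstructions)
    code->push_back(kNop);                 // pad up to the expected size
}

int main() {
  std::vector<Instr> code;
  EmitFixedSizeAbort(&code, 7);
  assert(code.size() == 10);
  return 0;
}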

44
deps/v8/src/arm/virtual-frame-arm.cc

@ -482,6 +482,32 @@ void VirtualFrame::SpillAllButCopyTOSToR0() {
} }
void VirtualFrame::SpillAllButCopyTOSToR1() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r1, MemOperand(sp, 0));
break;
case R0_TOS:
__ push(r0);
__ mov(r1, r0);
break;
case R1_TOS:
__ push(r1);
break;
case R0_R1_TOS:
__ Push(r1, r0);
__ mov(r1, r0);
break;
case R1_R0_TOS:
__ Push(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::SpillAllButCopyTOSToR1R0() { void VirtualFrame::SpillAllButCopyTOSToR1R0() {
switch (top_of_stack_state_) { switch (top_of_stack_state_) {
case NO_TOS_REGISTERS: case NO_TOS_REGISTERS:
@ -524,6 +550,24 @@ Register VirtualFrame::Peek() {
} }
Register VirtualFrame::Peek2() {
AssertIsNotSpilled();
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
case R0_TOS:
case R0_R1_TOS:
MergeTOSTo(R0_R1_TOS);
return r1;
case R1_TOS:
case R1_R0_TOS:
MergeTOSTo(R1_R0_TOS);
return r0;
}
UNREACHABLE();
return no_reg;
}
void VirtualFrame::Dup() { void VirtualFrame::Dup() {
if (SpilledScope::is_spilled()) { if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, 0)); __ ldr(ip, MemOperand(sp, 0));

13
deps/v8/src/arm/virtual-frame-arm.h

@ -189,12 +189,15 @@ class VirtualFrame : public ZoneObject {
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0; return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
} }
inline void ForgetTypeInfo() {
tos_known_smi_map_ = 0;
}
// Detach a frame from its code generator, perhaps temporarily. This // Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal // tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this // registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump. // one to NULL by an unconditional jump.
void DetachFromCodeGenerator() { void DetachFromCodeGenerator() {
AssertIsSpilled();
} }
// (Re)attach a frame to its code generator. This informs the register // (Re)attach a frame to its code generator. This informs the register
@ -202,7 +205,6 @@ class VirtualFrame : public ZoneObject {
// Used when a code generator's frame is switched from NULL to this one by // Used when a code generator's frame is switched from NULL to this one by
// binding a label. // binding a label.
void AttachToCodeGenerator() { void AttachToCodeGenerator() {
AssertIsSpilled();
} }
// Emit code for the physical JS entry and exit frame sequences. After // Emit code for the physical JS entry and exit frame sequences. After
@ -330,6 +332,10 @@ class VirtualFrame : public ZoneObject {
// must be copied to a scratch register before modification. // must be copied to a scratch register before modification.
Register Peek(); Register Peek();
// Look at the value beneath the top of the stack. The register returned is
// aliased and must be copied to a scratch register before modification.
Register Peek2();
// Duplicate the top of stack. // Duplicate the top of stack.
void Dup(); void Dup();
@ -339,6 +345,9 @@ class VirtualFrame : public ZoneObject {
// Flushes all registers, but it puts a copy of the top-of-stack in r0. // Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0(); void SpillAllButCopyTOSToR0();
// Flushes all registers, but it puts a copy of the top-of-stack in r1.
void SpillAllButCopyTOSToR1();
// Flushes all registers, but it puts a copy of the top-of-stack in r1 // Flushes all registers, but it puts a copy of the top-of-stack in r1
// and the next value on the stack in r0. // and the next value on the stack in r0.
void SpillAllButCopyTOSToR1R0(); void SpillAllButCopyTOSToR1R0();

4
deps/v8/src/array.js

@ -954,7 +954,7 @@ function ArrayMap(f, receiver) {
function ArrayIndexOf(element, index) { function ArrayIndexOf(element, index) {
var length = this.length; var length = this.length;
if (index == null) { if (IS_UNDEFINED(index)) {
index = 0; index = 0;
} else { } else {
index = TO_INTEGER(index); index = TO_INTEGER(index);
@ -981,7 +981,7 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) { function ArrayLastIndexOf(element, index) {
var length = this.length; var length = this.length;
if (index == null) { if (%_ArgumentsLength() < 2) {
index = length - 1; index = length - 1;
} else { } else {
index = TO_INTEGER(index); index = TO_INTEGER(index);

4
deps/v8/src/ast-inl.h

@ -45,7 +45,9 @@ SwitchStatement::SwitchStatement(ZoneStringList* labels)
IterationStatement::IterationStatement(ZoneStringList* labels) IterationStatement::IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
body_(NULL),
continue_target_(JumpTarget::BIDIRECTIONAL) {
} }

1
deps/v8/src/builtins.cc

@ -195,6 +195,7 @@ BUILTIN(ArrayCodeGeneric) {
} }
// 'array' now contains the JSArray we should initialize. // 'array' now contains the JSArray we should initialize.
ASSERT(array->HasFastElements());
// Optimize the case where there is one argument and the argument is a // Optimize the case where there is one argument and the argument is a
// small smi. // small smi.

11
deps/v8/src/factory.cc

@ -274,11 +274,22 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
return copy; return copy;
} }
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) { Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map); CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map);
} }
Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->GetFastElementsMap(), Map);
}
Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->GetSlowElementsMap(), Map);
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) { Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(array->Copy(), FixedArray); CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
} }

4
deps/v8/src/factory.h

@ -180,6 +180,10 @@ class Factory : public AllStatic {
static Handle<Map> CopyMapDropTransitions(Handle<Map> map); static Handle<Map> CopyMapDropTransitions(Handle<Map> map);
static Handle<Map> GetFastElementsMap(Handle<Map> map);
static Handle<Map> GetSlowElementsMap(Handle<Map> map);
static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array); static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// Numbers (eg, literals) are pretenured by the parser. // Numbers (eg, literals) are pretenured by the parser.
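Editor's note: GetFastElementsMap/GetSlowElementsMap back the new map-level fast-elements bit (set by default in Heap::AllocateMap below): switching an object to pixel, external or dictionary elements now also switches its map, which is what allows the inlined keyed loads to assert on the elements map instead of branching on it. A rough illustration of a per-map flag changing together with the backing store, not V8 code; the types are invented and the bit position is arbitrary here:

#include <cstdint>
#include <cassert>

struct MapModel {
  static constexpr int kHasFastElements = 2;   // placeholder bit position
  uint8_t bit_field2 = 1 << kHasFastElements;
  bool has_fast_elements() const { return bit_field2 & (1 << kHasFastElements); }
};

MapModel GetSlowElementsMap(MapModel m) {
  m.bit_field2 &= static_cast<uint8_t>(~(1 << MapModel::kHasFastElements));
  return m;
}

int main() {
  MapModel fast;                               // new objects start out fast
  MapModel slow = GetSlowElementsMap(fast);    // e.g. after installing pixel data
  assert(fast.has_fast_elements() && !slow.has_fast_elements());
  return 0;
}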

59
deps/v8/src/heap.cc

@ -126,6 +126,12 @@ int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0; int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0; int Heap::contexts_disposed_ = 0;
int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
#ifdef DEBUG #ifdef DEBUG
bool Heap::allocation_allowed_ = true; bool Heap::allocation_allowed_ = true;
@ -582,6 +588,29 @@ static void VerifyPageWatermarkValidity(PagedSpace* space,
} }
#endif #endif
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
double survival_rate =
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
start_new_space_size;
if (survival_rate > kYoungSurvivalRateThreshold) {
high_survival_rate_period_length_++;
} else {
high_survival_rate_period_length_ = 0;
}
double survival_rate_diff = survival_rate_ - survival_rate;
if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
set_survival_rate_trend(DECREASING);
} else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
set_survival_rate_trend(INCREASING);
} else {
set_survival_rate_trend(STABLE);
}
survival_rate_ = survival_rate;
}
void Heap::PerformGarbageCollection(AllocationSpace space, void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector, GarbageCollector collector,
@ -604,6 +633,8 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
EnsureFromSpaceIsCommitted(); EnsureFromSpaceIsCommitted();
int start_new_space_size = Heap::new_space()->Size();
if (collector == MARK_COMPACTOR) { if (collector == MARK_COMPACTOR) {
if (FLAG_flush_code) { if (FLAG_flush_code) {
// Flush all potentially unused code. // Flush all potentially unused code.
@ -613,16 +644,36 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
// Perform mark-sweep with optional compaction. // Perform mark-sweep with optional compaction.
MarkCompact(tracer); MarkCompact(tracer);
bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
IsStableOrIncreasingSurvivalTrend();
UpdateSurvivalRateTrend(start_new_space_size);
int old_gen_size = PromotedSpaceSize(); int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ = old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ = old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
// Stable high survival rates of young objects both during partial and
// full collection indicate that the mutator is either building or modifying
// a structure with a long lifetime.
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for mutation speed.
old_gen_promotion_limit_ *= 2;
old_gen_allocation_limit_ *= 2;
}
old_gen_exhausted_ = false; old_gen_exhausted_ = false;
} else { } else {
tracer_ = tracer; tracer_ = tracer;
Scavenge(); Scavenge();
tracer_ = NULL; tracer_ = NULL;
UpdateSurvivalRateTrend(start_new_space_size);
} }
Counters::objs_since_last_young.Set(0); Counters::objs_since_last_young.Set(0);
@ -1217,7 +1268,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_code_cache(empty_fixed_array()); map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0); map->set_unused_property_fields(0);
map->set_bit_field(0); map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible); map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
// If the map object is aligned fill the padding area with Smi 0 objects. // If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) { if (Map::kPadStart < Map::kSize) {
@ -2545,6 +2596,7 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_inobject_properties(in_object_properties); map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties); map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype); map->set_prototype(prototype);
ASSERT(map->has_fast_elements());
// If the function has only simple this property assignments add // If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object // field descriptors for these to the initial map as the object
@ -2598,8 +2650,8 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// properly initialized. // properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE); ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
// Both types of globla objects should be allocated using // Both types of global objects should be allocated using
// AllocateGloblaObject to be properly initialized. // AllocateGlobalObject to be properly initialized.
ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
@ -2623,6 +2675,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj), InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties), FixedArray::cast(properties),
map); map);
ASSERT(JSObject::cast(obj)->HasFastElements());
return obj; return obj;
} }
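Editor's note: the new heuristic classifies each scavenge's survival rate (survivors * 100 / new-space size before the GC) against a 90% threshold and a 15-point deviation band; when a mark-compact sees a high rate with a stable-or-increasing trend, both old-generation limits are doubled so long-lived structures under construction don't trigger back-to-back full collections. A compact numeric sketch of the bookkeeping, not V8 code; the real code also consults the previous trend to filter out fluctuation, which this sketch omits:

#include <iostream>

enum Trend { INCREASING, STABLE, DECREASING };

struct SurvivalTracker {
  double survival_rate = 0;
  int high_period_length = 0;
  Trend trend = STABLE;

  void Update(int survivors, int start_new_space_size) {
    double rate = static_cast<double>(survivors) * 100 / start_new_space_size;
    high_period_length = (rate > 90) ? high_period_length + 1 : 0;
    double diff = survival_rate - rate;
    trend = diff > 15 ? DECREASING : (diff < -15 ? INCREASING : STABLE);
    survival_rate = rate;
  }
  bool ShouldBoostOldGenLimits() const {
    return high_period_length > 0 && trend != DECREASING;
  }
};

int main() {
  SurvivalTracker t;
  t.Update(950000, 1000000);   // 95% of new space survived the scavenge
  std::cout << std::boolalpha << t.ShouldBoostOldGenLimits() << std::endl;  // true
  return 0;
}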

50
deps/v8/src/heap.h

@ -1005,6 +1005,7 @@ class Heap : public AllStatic {
static void CheckNewSpaceExpansionCriteria(); static void CheckNewSpaceExpansionCriteria();
static inline void IncrementYoungSurvivorsCounter(int survived) { static inline void IncrementYoungSurvivorsCounter(int survived) {
young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived; survived_since_last_expansion_ += survived;
} }
@ -1272,6 +1273,55 @@ class Heap : public AllStatic {
// be replaced with a lazy compilable version. // be replaced with a lazy compilable version.
static void FlushCode(); static void FlushCode();
static void UpdateSurvivalRateTrend(int start_new_space_size);
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
static const int kYoungSurvivalRateThreshold = 90;
static const int kYoungSurvivalRateAllowedDeviation = 15;
static int young_survivors_after_last_gc_;
static int high_survival_rate_period_length_;
static double survival_rate_;
static SurvivalRateTrend previous_survival_rate_trend_;
static SurvivalRateTrend survival_rate_trend_;
static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
ASSERT(survival_rate_trend != FLUCTUATING);
previous_survival_rate_trend_ = survival_rate_trend_;
survival_rate_trend_ = survival_rate_trend;
}
static SurvivalRateTrend survival_rate_trend() {
if (survival_rate_trend_ == STABLE) {
return STABLE;
} else if (previous_survival_rate_trend_ == STABLE) {
return survival_rate_trend_;
} else if (survival_rate_trend_ != previous_survival_rate_trend_) {
return FLUCTUATING;
} else {
return survival_rate_trend_;
}
}
static bool IsStableOrIncreasingSurvivalTrend() {
switch (survival_rate_trend()) {
case STABLE:
case INCREASING:
return true;
default:
return false;
}
}
static bool IsIncreasingSurvivalTrend() {
return survival_rate_trend() == INCREASING;
}
static bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
static const int kInitialSymbolTableSize = 2048; static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64; static const int kInitialEvalCacheSize = 64;

9
deps/v8/src/ia32/codegen-ia32.cc

@ -8868,9 +8868,11 @@ Result CodeGenerator::EmitKeyedLoad() {
// is not a dictionary. // is not a dictionary.
__ mov(elements.reg(), __ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset)); FieldOperand(receiver.reg(), JSObject::kElementsOffset));
if (FLAG_debug_code) {
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map())); Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal); __ Assert(equal, "JSObject with fast elements map has slow elements");
}
// Check that the key is within bounds. // Check that the key is within bounds.
__ cmp(key.reg(), __ cmp(key.reg(),
@ -13293,6 +13295,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime); __ j(not_zero, &runtime);
__ sub(ecx, Operand(edx)); __ sub(ecx, Operand(edx));
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
__ j(equal, &return_eax);
// Special handling of sub-strings of length 1 and 2. One character strings // Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character // are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache. // cache). Two character strings are looked for in the symbol cache.
@ -13397,6 +13402,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// esi: character of sub string start // esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false); StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
__ mov(esi, edx); // Restore esi. __ mov(esi, edx); // Restore esi.
__ bind(&return_eax);
__ IncrementCounter(&Counters::sub_string_native, 1); __ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(3 * kPointerSize); __ ret(3 * kPointerSize);
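Editor's note: the added compare gives SubStringStub an early exit: when the requested slice is as long as the source string, the stub returns the original string object (already in eax) instead of allocating and copying. A rough equivalent in ordinary code, not the stub itself; it assumes from/to have already been validated as indices with from <= to <= length:

#include <memory>
#include <string>
#include <cassert>

std::shared_ptr<const std::string> SubString(
    const std::shared_ptr<const std::string>& s, size_t from, size_t to) {
  if (to - from == s->size()) return s;    // whole string: hand back the original
  return std::make_shared<const std::string>(s->substr(from, to - from));
}

int main() {
  auto s = std::make_shared<const std::string>("hello");
  assert(SubString(s, 0, 5) == s);         // same object, no copy
  assert(*SubString(s, 1, 3) == "el");
  return 0;
}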

2
deps/v8/src/ia32/full-codegen-ia32.cc

@ -2175,7 +2175,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// LAST_JS_OBJECT_TYPE. // LAST_JS_OBJECT_TYPE.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ cmp(ebx, JS_FUNCTION_TYPE); __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
__ j(equal, &function); __ j(equal, &function);
// Check if the constructor in the map is a function. // Check if the constructor in the map is a function.

302
deps/v8/src/ia32/ic-ia32.cc

@ -45,72 +45,96 @@ namespace internal {
#define __ ACCESS_MASM(masm) #define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, global_object, not_taken);
__ cmp(type, JS_BUILTINS_OBJECT_TYPE);
__ j(equal, global_object, not_taken);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
__ j(equal, global_object, not_taken);
}
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register r0,
Register r1,
Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
// Holds the property dictionary on fall through.
// r1: used to hold the receiver's map.
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r0, FIRST_JS_OBJECT_TYPE);
__ j(below, miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
// Bail out if the object requires an access check or has a named interceptor.
__ test_b(FieldOperand(r1, Map::kBitFieldOffset),
(1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor));
__ j(not_zero, miss, not_taken);
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CheckMap(r0, Factory::hash_table_map(), miss, true);
}
// Helper function used to load a property from a dictionary backing storage. // Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label // This function may return false negatives, so miss_label
// must always call a backup property load that is complete. // must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties, // This function is safe to call if name is not a symbol, and will jump to
// or if name is not a symbol, and will jump to the miss_label in that case. // the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label, Label* miss_label,
Register receiver, Register elements,
Register name, Register name,
Register r0, Register r0,
Register r1, Register r1,
Register r2, Register result) {
Register result,
DictionaryCheck check_dictionary) {
// Register use: // Register use:
// //
// name - holds the name of the property and is unchanged. // elements - holds the property dictionary on entry and is unchanged.
// receiver - holds the receiver and is unchanged. //
// name - holds the name of the property on entry and is unchanged.
//
// Scratch registers: // Scratch registers:
// r0 - used to hold the property dictionary.
// //
// r1 - used for the index into the property dictionary // r0 - used for the index into the property dictionary
// //
// r2 - used to hold the capacity of the property dictionary. // r1 - used to hold the capacity of the property dictionary.
// //
// result - holds the result on exit. // result - holds the result on exit.
Label done; Label done;
// Check for the absence of an interceptor.
// Load the map into r0.
__ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
// Bail out if the receiver has a named interceptor.
__ test(FieldOperand(r0, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNamedInterceptor));
__ j(not_zero, miss_label, not_taken);
// Bail out if we have a JS global proxy object.
__ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
__ cmp(r0, JS_GLOBAL_PROXY_TYPE);
__ j(equal, miss_label, not_taken);
// Possible work-around for http://crbug.com/16276.
__ cmp(r0, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
__ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
// Load properties array.
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
if (check_dictionary == CHECK_DICTIONARY) {
__ cmp(FieldOperand(r0, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(not_equal, miss_label);
}
// Compute the capacity mask. // Compute the capacity mask.
const int kCapacityOffset = const int kCapacityOffset =
StringDictionary::kHeaderSize + StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize; StringDictionary::kCapacityIndex * kPointerSize;
__ mov(r2, FieldOperand(r0, kCapacityOffset)); __ mov(r1, FieldOperand(elements, kCapacityOffset));
__ shr(r2, kSmiTagSize); // convert smi to int __ shr(r1, kSmiTagSize); // convert smi to int
__ dec(r2); __ dec(r1);
// Generate an unrolled loop that performs a few probes before // Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes // giving up. Measurements done on Gmail indicate that 2 probes
@ -121,20 +145,20 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
StringDictionary::kElementsStartIndex * kPointerSize; StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) { for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask. // Compute the masked index: (hash + i + i * i) & mask.
__ mov(r1, FieldOperand(name, String::kHashFieldOffset)); __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
__ shr(r1, String::kHashShift); __ shr(r0, String::kHashShift);
if (i > 0) { if (i > 0) {
__ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i))); __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
} }
__ and_(r1, Operand(r2)); __ and_(r0, Operand(r1));
// Scale the index by multiplying by the entry size. // Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3); ASSERT(StringDictionary::kEntrySize == 3);
__ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3 __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
// Check if the key is identical to the name. // Check if the key is identical to the name.
__ cmp(name, __ cmp(name, Operand(elements, r0, times_4,
Operand(r0, r1, times_4, kElementsStartOffset - kHeapObjectTag)); kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) { if (i != kProbes - 1) {
__ j(equal, &done, taken); __ j(equal, &done, taken);
} else { } else {
@ -145,13 +169,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Check that the value is a normal property. // Check that the value is a normal property.
__ bind(&done); __ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ test(Operand(r0, r1, times_4, kDetailsOffset - kHeapObjectTag), __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize)); Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ j(not_zero, miss_label, not_taken); __ j(not_zero, miss_label, not_taken);
// Get the value at the masked, scaled index. // Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize; const int kValueOffset = kElementsStartOffset + kPointerSize;
__ mov(result, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag)); __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
} }
@ -307,6 +331,7 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register receiver,
Register map, Register map,
int interceptor_bit,
Label* slow) { Label* slow) {
// Register use: // Register use:
// receiver - holds the receiver and is unchanged. // receiver - holds the receiver and is unchanged.
@ -322,7 +347,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Check bit field. // Check bit field.
__ test_b(FieldOperand(map, Map::kBitFieldOffset), __ test_b(FieldOperand(map, Map::kBitFieldOffset),
KeyedLoadIC::kSlowCaseBitFieldMask); (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
__ j(not_zero, slow, not_taken); __ j(not_zero, slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type. // Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object, // In the case that the object is a value-wrapper object,
@ -432,8 +457,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label slow, check_string, index_smi, index_string; Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary; Label check_pixel_array, probe_dictionary, check_number_dictionary;
GenerateKeyedLoadReceiverCheck(masm, edx, ecx, &slow);
// Check that the key is a smi. // Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask)); __ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken); __ j(not_zero, &check_string, not_taken);
@ -441,6 +464,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from // Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi. // where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(
masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
GenerateFastArrayLoad(masm, GenerateFastArrayLoad(masm,
edx, edx,
eax, eax,
@ -503,6 +529,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_string); __ bind(&check_string);
GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow); GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
GenerateKeyedLoadReceiverCheck(
masm, edx, ecx, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup // If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary. // cache. Otherwise probe the dictionary.
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset)); __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
@ -555,15 +584,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Do a quick inline probe of the receiver's dictionary, if it // Do a quick inline probe of the receiver's dictionary, if it
// exists. // exists.
__ bind(&probe_dictionary); __ bind(&probe_dictionary);
GenerateDictionaryLoad(masm,
&slow, __ mov(ecx, FieldOperand(edx, JSObject::kMapOffset));
edx, __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
eax, GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
ebx,
ecx, GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
edi,
eax,
DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1); __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0); __ ret(0);
@ -1173,24 +1199,18 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
} }
static void GenerateNormalHelper(MacroAssembler* masm, static void GenerateFunctionTailCall(MacroAssembler* masm,
int argc, int argc,
bool is_global_object,
Label* miss) { Label* miss) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- ecx : name // -- ecx : name
// -- edx : receiver // -- edi : function
// -- esp[0] : return address // -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based) // -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ... // -- ...
// -- esp[(argc + 1) * 4] : receiver // -- esp[(argc + 1) * 4] : receiver
// ----------------------------------- // -----------------------------------
// Search dictionary - put result in register edi.
__ mov(edi, edx);
GenerateDictionaryLoad(
masm, miss, edx, ecx, eax, edi, ebx, edi, CHECK_DICTIONARY);
// Check that the result is not a smi. // Check that the result is not a smi.
__ test(edi, Immediate(kSmiTagMask)); __ test(edi, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken); __ j(zero, miss, not_taken);
@ -1199,12 +1219,6 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax); __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
__ j(not_equal, miss, not_taken); __ j(not_equal, miss, not_taken);
// Patch the receiver on stack with the global proxy if necessary.
if (is_global_object) {
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
// Invoke the function. // Invoke the function.
ParameterCount actual(argc); ParameterCount actual(argc);
__ InvokeFunction(edi, actual, JUMP_FUNCTION); __ InvokeFunction(edi, actual, JUMP_FUNCTION);
@ -1219,55 +1233,17 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// -- ... // -- ...
// -- esp[(argc + 1) * 4] : receiver // -- esp[(argc + 1) * 4] : receiver
// ----------------------------------- // -----------------------------------
Label miss, global_object, non_global_object; Label miss;
// Get the receiver of the function from the stack; 1 ~ return address. // Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi. GenerateDictionaryLoadReceiverCheck(masm, edx, eax, ebx, &miss);
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_JS_OBJECT_TYPE);
__ j(below, &miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
// Check for access to global object.
__ cmp(eax, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, &global_object);
__ cmp(eax, JS_BUILTINS_OBJECT_TYPE);
__ j(not_equal, &non_global_object);
// Accessing global object: Load and invoke.
__ bind(&global_object);
// Check that the global object does not require access checks.
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_equal, &miss, not_taken);
GenerateNormalHelper(masm, argc, true, &miss);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
__ bind(&non_global_object);
__ cmp(eax, JS_GLOBAL_PROXY_TYPE);
__ j(equal, &global_proxy, not_taken);
// Check that the non-global, non-global-proxy object does not
// require access checks.
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_equal, &miss, not_taken);
__ bind(&invoke);
GenerateNormalHelper(masm, argc, false, &miss);
// Global object proxy access: Check access rights. // eax: elements
__ bind(&global_proxy); // Search the dictionary placing the result in edi.
__ CheckAccessGlobalProxy(edx, eax, &miss); GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
__ jmp(&invoke); GenerateFunctionTailCall(masm, argc, &miss);
__ bind(&miss); __ bind(&miss);
} }
@ -1282,6 +1258,12 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// -- esp[(argc + 1) * 4] : receiver // -- esp[(argc + 1) * 4] : receiver
// ----------------------------------- // -----------------------------------
if (id == IC::kCallIC_Miss) {
__ IncrementCounter(&Counters::call_miss, 1);
} else {
__ IncrementCounter(&Counters::keyed_call_miss, 1);
}
// Get the receiver of the function from the stack; 1 ~ return address. // Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@ -1303,6 +1285,8 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
__ LeaveInternalFrame(); __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort. // Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global; Label invoke, global;
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
@ -1318,10 +1302,11 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
__ bind(&global); __ bind(&global);
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset)); __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx); __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
__ bind(&invoke);
}
// Invoke the function. // Invoke the function.
ParameterCount actual(argc); ParameterCount actual(argc);
__ bind(&invoke);
__ InvokeFunction(edi, actual, JUMP_FUNCTION); __ InvokeFunction(edi, actual, JUMP_FUNCTION);
} }
@ -1393,7 +1378,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Now the key is known to be a smi. This place is also jumped to from // Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi. // where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, edx, eax, &slow_call); GenerateKeyedLoadReceiverCheck(
masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad( GenerateFastArrayLoad(
masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load); masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
@ -1403,15 +1389,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// receiver in edx is not used after this point. // receiver in edx is not used after this point.
// ecx: key // ecx: key
// edi: function // edi: function
GenerateFunctionTailCall(masm, argc, &slow_call);
// Check that the value in edi is a JavaScript function.
__ test(edi, Immediate(kSmiTagMask));
__ j(zero, &slow_call, not_taken);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
__ j(not_equal, &slow_call, not_taken);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
__ bind(&check_number_dictionary); __ bind(&check_number_dictionary);
// eax: elements // eax: elements
@ -1451,15 +1429,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// If the receiver is a regular JS object with slow properties then do // If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary. // a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe. // Otherwise do the monomorphic cache probe.
GenerateKeyedLoadReceiverCheck(masm, edx, eax, &lookup_monomorphic_cache); GenerateKeyedLoadReceiverCheck(
masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset)); __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset), __ CheckMap(ebx, Factory::hash_table_map(), &lookup_monomorphic_cache, true);
Immediate(Factory::hash_table_map()));
__ j(not_equal, &lookup_monomorphic_cache, not_taken);
GenerateDictionaryLoad( GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
masm, &slow_load, edx, ecx, ebx, eax, edi, edi, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1); __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
__ jmp(&do_call); __ jmp(&do_call);
@ -1539,49 +1515,15 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- ecx : name // -- ecx : name
// -- esp[0] : return address // -- esp[0] : return address
// ----------------------------------- // -----------------------------------
Label miss, probe, global; Label miss;
// Check that the receiver isn't a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(edx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(edx, FIRST_JS_OBJECT_TYPE);
__ j(less, &miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
// Check for access to global object (unlikely). GenerateDictionaryLoadReceiverCheck(masm, eax, edx, ebx, &miss);
__ cmp(edx, JS_GLOBAL_PROXY_TYPE);
__ j(equal, &global, not_taken);
// Check for non-global object that requires access check.
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &miss, not_taken);
// edx: elements
// Search the dictionary placing the result in eax. // Search the dictionary placing the result in eax.
__ bind(&probe); GenerateDictionaryLoad(masm, &miss, edx, ecx, edi, ebx, eax);
GenerateDictionaryLoad(masm,
&miss,
eax,
ecx,
edx,
edi,
ebx,
edi,
CHECK_DICTIONARY);
__ mov(eax, edi);
__ ret(0); __ ret(0);
// Global object access: Check access rights.
__ bind(&global);
__ CheckAccessGlobalProxy(eax, edx, &miss);
__ jmp(&probe);
// Cache miss: Jump to runtime. // Cache miss: Jump to runtime.
__ bind(&miss); __ bind(&miss);
GenerateMiss(masm); GenerateMiss(masm);
@ -1595,6 +1537,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- esp[0] : return address // -- esp[0] : return address
// ----------------------------------- // -----------------------------------
__ IncrementCounter(&Counters::load_miss, 1);
__ pop(ebx); __ pop(ebx);
__ push(eax); // receiver __ push(eax); // receiver
__ push(ecx); // name __ push(ecx); // name
@ -1711,6 +1655,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// -- esp[0] : return address // -- esp[0] : return address
// ----------------------------------- // -----------------------------------
__ IncrementCounter(&Counters::keyed_load_miss, 1);
__ pop(ebx); __ pop(ebx);
__ push(edx); // receiver __ push(edx); // receiver
__ push(eax); // name __ push(eax); // name
10
deps/v8/src/ic.cc
@ -992,12 +992,14 @@ Object* KeyedLoadIC::Load(State state,
} }
} }
set_target(stub); set_target(stub);
// For JSObjects that are not value wrappers and that do not have // For JSObjects with fast elements that are not value wrappers
// indexed interceptors, we initialize the inlined fast case (if // and that do not have indexed interceptors, we initialize the
// present) by patching the inlined map check. // inlined fast case (if present) by patching the inlined map
// check.
if (object->IsJSObject() && if (object->IsJSObject() &&
!object->IsJSValue() && !object->IsJSValue() &&
!JSObject::cast(*object)->HasIndexedInterceptor()) { !JSObject::cast(*object)->HasIndexedInterceptor() &&
JSObject::cast(*object)->HasFastElements()) {
Map* map = JSObject::cast(*object)->map(); Map* map = JSObject::cast(*object)->map();
PatchInlinedLoad(address(), map); PatchInlinedLoad(address(), map);
} }
4
deps/v8/src/ic.h
@ -33,10 +33,6 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// Flag indicating whether an IC stub needs to check that a backing
// store is in dictionary case.
enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
// IC_UTIL_LIST defines all utility functions called from generated // IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name. // inline caching code. The argument for the macro, ICU, is the function name.
2
deps/v8/src/json.js
@ -207,7 +207,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
} else if (IS_STRING_WRAPPER(value)) { } else if (IS_STRING_WRAPPER(value)) {
value = $String(value); value = $String(value);
} else if (IS_BOOLEAN_WRAPPER(value)) { } else if (IS_BOOLEAN_WRAPPER(value)) {
value = $Boolean(value); value = %_ValueOf(value);
} }
} }
switch (typeof value) { switch (typeof value) {
2
deps/v8/src/jump-target-heavy.h
@ -196,6 +196,8 @@ class BreakTarget : public JumpTarget {
public: public:
// Construct a break target. // Construct a break target.
BreakTarget() {} BreakTarget() {}
explicit BreakTarget(JumpTarget::Directionality direction)
: JumpTarget(direction) { }
virtual ~BreakTarget() {} virtual ~BreakTarget() {}
4
deps/v8/src/jump-target-light-inl.h
@ -36,16 +36,20 @@ namespace internal {
// Construct a jump target. // Construct a jump target.
JumpTarget::JumpTarget(Directionality direction) JumpTarget::JumpTarget(Directionality direction)
: entry_frame_set_(false), : entry_frame_set_(false),
direction_(direction),
entry_frame_(kInvalidVirtualFrameInitializer) { entry_frame_(kInvalidVirtualFrameInitializer) {
} }
JumpTarget::JumpTarget() JumpTarget::JumpTarget()
: entry_frame_set_(false), : entry_frame_set_(false),
direction_(FORWARD_ONLY),
entry_frame_(kInvalidVirtualFrameInitializer) { entry_frame_(kInvalidVirtualFrameInitializer) {
} }
BreakTarget::BreakTarget() { } BreakTarget::BreakTarget() { }
BreakTarget::BreakTarget(JumpTarget::Directionality direction)
: JumpTarget(direction) { }
} } // namespace v8::internal } } // namespace v8::internal
4
deps/v8/src/jump-target-light.h
@ -120,6 +120,9 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// Has an entry frame been found? // Has an entry frame been found?
bool entry_frame_set_; bool entry_frame_set_;
// Can we branch backwards to this label?
Directionality direction_;
// The frame used on entry to the block and expected at backward // The frame used on entry to the block and expected at backward
// jumps to the block. Set the first time something branches to this // jumps to the block. Set the first time something branches to this
// jump target. // jump target.
@ -150,6 +153,7 @@ class BreakTarget : public JumpTarget {
public: public:
// Construct a break target. // Construct a break target.
inline BreakTarget(); inline BreakTarget();
inline BreakTarget(JumpTarget::Directionality direction);
virtual ~BreakTarget() {} virtual ~BreakTarget() {}
10
deps/v8/src/log.cc
@ -309,10 +309,10 @@ void Profiler::Disengage() {
void Profiler::Run() { void Profiler::Run() {
TickSample sample; TickSample sample;
bool overflow = Logger::profiler_->Remove(&sample); bool overflow = Remove(&sample);
while (running_) { while (running_) {
LOG(TickEvent(&sample, overflow)); LOG(TickEvent(&sample, overflow));
overflow = Logger::profiler_->Remove(&sample); overflow = Remove(&sample);
} }
} }
@ -1150,7 +1150,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
int Logger::GetActiveProfilerModules() { int Logger::GetActiveProfilerModules() {
int result = PROFILER_MODULE_NONE; int result = PROFILER_MODULE_NONE;
if (!profiler_->paused()) { if (profiler_ != NULL && !profiler_->paused()) {
result |= PROFILER_MODULE_CPU; result |= PROFILER_MODULE_CPU;
} }
if (FLAG_log_gc) { if (FLAG_log_gc) {
@ -1162,7 +1162,7 @@ int Logger::GetActiveProfilerModules() {
void Logger::PauseProfiler(int flags, int tag) { void Logger::PauseProfiler(int flags, int tag) {
if (!Log::IsEnabled()) return; if (!Log::IsEnabled()) return;
if (flags & PROFILER_MODULE_CPU) { if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
// It is OK to have negative nesting. // It is OK to have negative nesting.
if (--cpu_profiler_nesting_ == 0) { if (--cpu_profiler_nesting_ == 0) {
profiler_->pause(); profiler_->pause();
@ -1193,7 +1193,7 @@ void Logger::ResumeProfiler(int flags, int tag) {
if (tag != 0) { if (tag != 0) {
UncheckedIntEvent("open-tag", tag); UncheckedIntEvent("open-tag", tag);
} }
if (flags & PROFILER_MODULE_CPU) { if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
if (cpu_profiler_nesting_++ == 0) { if (cpu_profiler_nesting_++ == 0) {
++logging_nesting_; ++logging_nesting_;
if (FLAG_prof_lazy) { if (FLAG_prof_lazy) {
3
deps/v8/src/objects-debug.cc
@ -539,6 +539,9 @@ void JSObject::JSObjectVerify() {
(map()->inobject_properties() + properties()->length() - (map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex())); map()->NextFreePropertyIndex()));
} }
ASSERT(map()->has_fast_elements() ==
(elements()->map() == Heap::fixed_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
} }
35
deps/v8/src/objects-inl.h
@ -1166,6 +1166,8 @@ HeapObject* JSObject::elements() {
void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) { void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
ASSERT(map()->has_fast_elements() ==
(value->map() == Heap::fixed_array_map()));
// In the assert below Dictionary is covered under FixedArray. // In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() || ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray()); value->IsExternalArray());
@ -1181,11 +1183,21 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() { void JSObject::initialize_elements() {
ASSERT(map()->has_fast_elements());
ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array())); ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array()); WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array());
} }
Object* JSObject::ResetElements() {
Object* obj = map()->GetFastElementsMap();
if (obj->IsFailure()) return obj;
set_map(Map::cast(obj));
initialize_elements();
return this;
}
ACCESSORS(Oddball, to_string, String, kToStringOffset) ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset) ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@ -2335,6 +2347,26 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
} }
Object* Map::GetFastElementsMap() {
if (has_fast_elements()) return this;
Object* obj = CopyDropTransitions();
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(true);
return new_map;
}
Object* Map::GetSlowElementsMap() {
if (!has_fast_elements()) return this;
Object* obj = CopyDropTransitions();
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(false);
return new_map;
}
ACCESSORS(Map, instance_descriptors, DescriptorArray, ACCESSORS(Map, instance_descriptors, DescriptorArray,
kInstanceDescriptorsOffset) kInstanceDescriptorsOffset)
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset) ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
@ -2838,11 +2870,14 @@ JSObject::ElementsKind JSObject::GetElementsKind() {
if (array->IsFixedArray()) { if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray. // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) { if (array->map() == Heap::fixed_array_map()) {
ASSERT(map()->has_fast_elements());
return FAST_ELEMENTS; return FAST_ELEMENTS;
} }
ASSERT(array->IsDictionary()); ASSERT(array->IsDictionary());
ASSERT(!map()->has_fast_elements());
return DICTIONARY_ELEMENTS; return DICTIONARY_ELEMENTS;
} }
ASSERT(!map()->has_fast_elements());
if (array->IsExternalArray()) { if (array->IsExternalArray()) {
switch (array->map()->instance_type()) { switch (array->map()->instance_type()) {
case EXTERNAL_BYTE_ARRAY_TYPE: case EXTERNAL_BYTE_ARRAY_TYPE:
67
deps/v8/src/objects.cc
@ -2222,6 +2222,11 @@ Object* JSObject::TransformToFastProperties(int unused_property_fields) {
Object* JSObject::NormalizeElements() { Object* JSObject::NormalizeElements() {
ASSERT(!HasPixelElements() && !HasExternalArrayElements()); ASSERT(!HasPixelElements() && !HasExternalArrayElements());
if (HasDictionaryElements()) return this; if (HasDictionaryElements()) return this;
ASSERT(map()->has_fast_elements());
Object* obj = map()->GetSlowElementsMap();
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
// Get number of entries. // Get number of entries.
FixedArray* array = FixedArray::cast(elements()); FixedArray* array = FixedArray::cast(elements());
@ -2230,7 +2235,7 @@ Object* JSObject::NormalizeElements() {
int length = IsJSArray() ? int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() : Smi::cast(JSArray::cast(this)->length())->value() :
array->length(); array->length();
Object* obj = NumberDictionary::Allocate(length); obj = NumberDictionary::Allocate(length);
if (obj->IsFailure()) return obj; if (obj->IsFailure()) return obj;
NumberDictionary* dictionary = NumberDictionary::cast(obj); NumberDictionary* dictionary = NumberDictionary::cast(obj);
// Copy entries. // Copy entries.
@ -2243,7 +2248,10 @@ Object* JSObject::NormalizeElements() {
dictionary = NumberDictionary::cast(result); dictionary = NumberDictionary::cast(result);
} }
} }
// Switch to using the dictionary as the backing storage for elements. // Switch to using the dictionary as the backing storage for
// elements. Set the new map first to satify the elements type
// assert in set_elements().
set_map(new_map);
set_elements(dictionary); set_elements(dictionary);
Counters::elements_to_dictionary.Increment(); Counters::elements_to_dictionary.Increment();
@ -5473,14 +5481,18 @@ void Code::Disassemble(const char* name) {
#endif // ENABLE_DISASSEMBLER #endif // ENABLE_DISASSEMBLER
void JSObject::SetFastElements(FixedArray* elems) { Object* JSObject::SetFastElementsCapacityAndLength(int capacity, int length) {
// We should never end in here with a pixel or external array. // We should never end in here with a pixel or external array.
ASSERT(!HasPixelElements() && !HasExternalArrayElements()); ASSERT(!HasPixelElements() && !HasExternalArrayElements());
#ifdef DEBUG
// Check the provided array is filled with the_hole. Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
uint32_t len = static_cast<uint32_t>(elems->length()); if (obj->IsFailure()) return obj;
for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole()); FixedArray* elems = FixedArray::cast(obj);
#endif
obj = map()->GetFastElementsMap();
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc); WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
switch (GetElementsKind()) { switch (GetElementsKind()) {
@ -5508,7 +5520,15 @@ void JSObject::SetFastElements(FixedArray* elems) {
UNREACHABLE(); UNREACHABLE();
break; break;
} }
set_map(new_map);
set_elements(elems); set_elements(elems);
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(length));
}
return this;
} }
@ -5595,7 +5615,7 @@ Object* JSObject::SetElementsLength(Object* len) {
Object* smi_length = len->ToSmi(); Object* smi_length = len->ToSmi();
if (smi_length->IsSmi()) { if (smi_length->IsSmi()) {
int value = Smi::cast(smi_length)->value(); const int value = Smi::cast(smi_length)->value();
if (value < 0) return ArrayLengthRangeError(); if (value < 0) return ArrayLengthRangeError();
switch (GetElementsKind()) { switch (GetElementsKind()) {
case FAST_ELEMENTS: { case FAST_ELEMENTS: {
@ -5617,12 +5637,8 @@ Object* JSObject::SetElementsLength(Object* len) {
int new_capacity = value > min ? value : min; int new_capacity = value > min ? value : min;
if (new_capacity <= kMaxFastElementsLength || if (new_capacity <= kMaxFastElementsLength ||
!ShouldConvertToSlowElements(new_capacity)) { !ShouldConvertToSlowElements(new_capacity)) {
Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity); Object* obj = SetFastElementsCapacityAndLength(new_capacity, value);
if (obj->IsFailure()) return obj; if (obj->IsFailure()) return obj;
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::cast(smi_length));
}
SetFastElements(FixedArray::cast(obj));
return this; return this;
} }
break; break;
@ -5633,7 +5649,8 @@ Object* JSObject::SetElementsLength(Object* len) {
// If the length of a slow array is reset to zero, we clear // If the length of a slow array is reset to zero, we clear
// the array and flush backing storage. This has the added // the array and flush backing storage. This has the added
// benefit that the array returns to fast mode. // benefit that the array returns to fast mode.
initialize_elements(); Object* obj = ResetElements();
if (obj->IsFailure()) return obj;
} else { } else {
// Remove deleted elements. // Remove deleted elements.
uint32_t old_length = uint32_t old_length =
@ -6092,12 +6109,8 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
if (new_capacity <= kMaxFastElementsLength || if (new_capacity <= kMaxFastElementsLength ||
!ShouldConvertToSlowElements(new_capacity)) { !ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index); ASSERT(static_cast<uint32_t>(new_capacity) > index);
Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity); Object* obj = SetFastElementsCapacityAndLength(new_capacity, index + 1);
if (obj->IsFailure()) return obj; if (obj->IsFailure()) return obj;
SetFastElements(FixedArray::cast(obj));
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
FixedArray::cast(elements())->set(index, value); FixedArray::cast(elements())->set(index, value);
return value; return value;
} }
@ -6216,13 +6229,11 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
uint32_t new_length = 0; uint32_t new_length = 0;
if (IsJSArray()) { if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length)); CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else { } else {
new_length = NumberDictionary::cast(elements())->max_number_key() + 1; new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
} }
Object* obj = Heap::AllocateFixedArrayWithHoles(new_length); Object* obj = SetFastElementsCapacityAndLength(new_length, new_length);
if (obj->IsFailure()) return obj; if (obj->IsFailure()) return obj;
SetFastElements(FixedArray::cast(obj));
#ifdef DEBUG #ifdef DEBUG
if (FLAG_trace_normalization) { if (FLAG_trace_normalization) {
PrintF("Object elements are fast case again:\n"); PrintF("Object elements are fast case again:\n");
@ -7526,14 +7537,18 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
} }
// Convert to fast elements. // Convert to fast elements.
Object* obj = map()->GetFastElementsMap();
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED: TENURED; PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED: TENURED;
Object* new_array = Object* new_array =
Heap::AllocateFixedArray(dict->NumberOfElements(), tenure); Heap::AllocateFixedArray(dict->NumberOfElements(), tenure);
if (new_array->IsFailure()) { if (new_array->IsFailure()) return new_array;
return new_array;
}
FixedArray* fast_elements = FixedArray::cast(new_array); FixedArray* fast_elements = FixedArray::cast(new_array);
dict->CopyValuesTo(fast_elements); dict->CopyValuesTo(fast_elements);
set_map(new_map);
set_elements(fast_elements); set_elements(fast_elements);
} }
ASSERT(HasFastElements()); ASSERT(HasFastElements());
27
deps/v8/src/objects.h
@ -1191,6 +1191,7 @@ class JSObject: public HeapObject {
// case, and a PixelArray or ExternalArray in special cases. // case, and a PixelArray or ExternalArray in special cases.
DECL_ACCESSORS(elements, HeapObject) DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements(); inline void initialize_elements();
inline Object* ResetElements();
inline ElementsKind GetElementsKind(); inline ElementsKind GetElementsKind();
inline bool HasFastElements(); inline bool HasFastElements();
inline bool HasDictionaryElements(); inline bool HasDictionaryElements();
@ -1367,7 +1368,7 @@ class JSObject: public HeapObject {
// The undefined object if index is out of bounds. // The undefined object if index is out of bounds.
Object* GetElementWithReceiver(JSObject* receiver, uint32_t index); Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
void SetFastElements(FixedArray* elements); Object* SetFastElementsCapacityAndLength(int capacity, int length);
Object* SetSlowElements(Object* length); Object* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host // Lookup interceptors are used for handling properties controlled by host
@ -2987,6 +2988,19 @@ class Map: public HeapObject {
return ((1 << kIsExtensible) & bit_field2()) != 0; return ((1 << kIsExtensible) & bit_field2()) != 0;
} }
// Tells whether the instance has fast elements.
void set_has_fast_elements(bool value) {
if (value) {
set_bit_field2(bit_field2() | (1 << kHasFastElements));
} else {
set_bit_field2(bit_field2() & ~(1 << kHasFastElements));
}
}
bool has_fast_elements() {
return ((1 << kHasFastElements) & bit_field2()) != 0;
}
// Tells whether the instance needs security checks when accessing its // Tells whether the instance needs security checks when accessing its
// properties. // properties.
inline void set_is_access_check_needed(bool access_check_needed); inline void set_is_access_check_needed(bool access_check_needed);
@ -3010,6 +3024,16 @@ class Map: public HeapObject {
// instance descriptors. // instance descriptors.
Object* CopyDropTransitions(); Object* CopyDropTransitions();
// Returns this map if it has the fast elements bit set, otherwise
// returns a copy of the map, with all transitions dropped from the
// descriptors and the fast elements bit set.
inline Object* GetFastElementsMap();
// Returns this map if it has the fast elements bit cleared,
// otherwise returns a copy of the map, with all transitions dropped
// from the descriptors and the fast elements bit cleared.
inline Object* GetSlowElementsMap();
// Returns the property index for name (only valid for FAST MODE). // Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name); int PropertyIndexFor(String* name);
@ -3111,6 +3135,7 @@ class Map: public HeapObject {
// Bit positions for bit field 2 // Bit positions for bit field 2
static const int kIsExtensible = 0; static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1; static const int kFunctionWithPrototype = 1;
static const int kHasFastElements = 2;
// Layout of the default cache. It holds alternating name and code objects. // Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2; static const int kCodeCacheEntrySize = 2;
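Note: the new kHasFastElements flag added above follows the same bit_field2 pattern as the existing kIsExtensible and kFunctionWithPrototype bits: OR the bit in to set it, AND with the complement to clear it, and test it with a mask. A minimal standalone C++ sketch of that accessor pattern outside the real Map class (the wrapper type and field width here are illustrative):

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-in for Map's bit_field2 flag accessors.  Bit 2 mirrors
    // kHasFastElements from the diff; bits 0 and 1 are taken by other flags.
    static const int kHasFastElements = 2;

    class FlagHolder {
     public:
      FlagHolder() : bit_field2_(0) {}

      // Set or clear one flag bit without disturbing the neighbouring bits.
      void set_has_fast_elements(bool value) {
        if (value) {
          bit_field2_ = static_cast<uint8_t>(bit_field2_ | (1 << kHasFastElements));
        } else {
          bit_field2_ = static_cast<uint8_t>(bit_field2_ & ~(1 << kHasFastElements));
        }
      }

      // Test the flag with a mask, exactly like Map::has_fast_elements().
      bool has_fast_elements() const {
        return ((1 << kHasFastElements) & bit_field2_) != 0;
      }

     private:
      uint8_t bit_field2_;
    };

    int main() {
      FlagHolder map;
      assert(!map.has_fast_elements());
      map.set_has_fast_elements(true);
      assert(map.has_fast_elements());
      map.set_has_fast_elements(false);
      assert(!map.has_fast_elements());
      return 0;
    }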
5
deps/v8/src/regexp.js
@ -230,7 +230,10 @@ function RegExpExec(string) {
var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo); var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) { if (matchIndices == null) {
if (this.global) this.lastIndex = 0; if (this.global) {
this.lastIndex = 0;
if (lastIndex != 0) return matchIndices;
}
cache.lastIndex = lastIndex; cache.lastIndex = lastIndex;
cache.regExp = this; cache.regExp = this;
cache.subject = s; cache.subject = s;
19
deps/v8/src/runtime.cc
@ -7449,7 +7449,7 @@ class ArrayConcatVisitor {
uint32_t index_limit_; uint32_t index_limit_;
// Index after last seen index. Always less than or equal to index_limit_. // Index after last seen index. Always less than or equal to index_limit_.
uint32_t index_offset_; uint32_t index_offset_;
bool fast_elements_; const bool fast_elements_;
}; };
@ -7766,13 +7766,14 @@ static Object* Runtime_ArrayConcat(Arguments args) {
// The backing storage array must have non-existing elements to // The backing storage array must have non-existing elements to
// preserve holes across concat operations. // preserve holes across concat operations.
storage = Factory::NewFixedArrayWithHoles(result_length); storage = Factory::NewFixedArrayWithHoles(result_length);
result->set_map(*Factory::GetFastElementsMap(Handle<Map>(result->map())));
} else { } else {
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for = estimate_nof_elements + uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2); (estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast( storage = Handle<FixedArray>::cast(
Factory::NewNumberDictionary(at_least_space_for)); Factory::NewNumberDictionary(at_least_space_for));
result->set_map(*Factory::GetSlowElementsMap(Handle<Map>(result->map())));
} }
Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length)); Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@ -7822,9 +7823,19 @@ static Object* Runtime_MoveArrayContents(Arguments args) {
ASSERT(args.length() == 2); ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, from, args[0]); CONVERT_CHECKED(JSArray, from, args[0]);
CONVERT_CHECKED(JSArray, to, args[1]); CONVERT_CHECKED(JSArray, to, args[1]);
to->SetContent(FixedArray::cast(from->elements())); HeapObject* new_elements = from->elements();
Object* new_map;
if (new_elements->map() == Heap::fixed_array_map()) {
new_map = to->map()->GetFastElementsMap();
} else {
new_map = to->map()->GetSlowElementsMap();
}
if (new_map->IsFailure()) return new_map;
to->set_map(Map::cast(new_map));
to->set_elements(new_elements);
to->set_length(from->length()); to->set_length(from->length());
from->SetContent(Heap::empty_fixed_array()); Object* obj = from->ResetElements();
if (obj->IsFailure()) return obj;
from->set_length(Smi::FromInt(0)); from->set_length(Smi::FromInt(0));
return to; return to;
} }
2
deps/v8/src/utils.h
@ -587,7 +587,7 @@ static inline void MemCopy(void* dest, const void* src, size_t size) {
// Limit below which the extra overhead of the MemCopy function is likely // Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying. // to outweigh the benefits of faster copying.
// TODO(lrn): Try to find a more precise value. // TODO(lrn): Try to find a more precise value.
static const int kMinComplexMemCopy = 256; static const int kMinComplexMemCopy = 64;
#else // V8_TARGET_ARCH_IA32 #else // V8_TARGET_ARCH_IA32
4
deps/v8/src/v8-counters.h
@ -153,6 +153,10 @@ namespace internal {
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \ SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \ SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \ SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
SC(call_miss, V8.CallMiss) \
SC(keyed_call_miss, V8.KeyedCallMiss) \
SC(load_miss, V8.LoadMiss) \
SC(keyed_load_miss, V8.KeyedLoadMiss) \
SC(call_const, V8.CallConst) \ SC(call_const, V8.CallConst) \
SC(call_const_fast_api, V8.CallConstFastApi) \ SC(call_const_fast_api, V8.CallConstFastApi) \
SC(call_const_interceptor, V8.CallConstInterceptor) \ SC(call_const_interceptor, V8.CallConstInterceptor) \
17
deps/v8/src/v8natives.js
@ -677,9 +677,20 @@ function ObjectGetOwnPropertyNames(obj) {
} }
} }
// Property names are expected to be strings. // Property names are expected to be unique strings.
for (var i = 0; i < propertyNames.length; ++i) var propertySet = {};
propertyNames[i] = ToString(propertyNames[i]); var j = 0;
for (var i = 0; i < propertyNames.length; ++i) {
var name = ToString(propertyNames[i]);
// We need to check for the exact property value since for intrinsic
// properties like toString if(propertySet["toString"]) will always
// succeed.
if (propertySet[name] === true)
continue;
propertySet[name] = true;
propertyNames[j++] = name;
}
propertyNames.length = j;
return propertyNames; return propertyNames;
} }
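Note: the rewritten ObjectGetOwnPropertyNames keeps the first occurrence of each name and compacts the array in place with a separate write index j; the === true comparison is what keeps inherited names such as "toString" on the lookup object's prototype from being mistaken for entries already seen. A rough C++ analogue of the same stable in-place de-duplication (a real hash set has no prototype chain, so only the compaction pattern carries over; the names below are mine):

    #include <cassert>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Keep the first occurrence of each name, preserving order, by compacting
    // the vector in place with a separate write index (the "j" in the JS code).
    void DeduplicateInPlace(std::vector<std::string>* names) {
      std::unordered_set<std::string> seen;
      size_t j = 0;
      for (size_t i = 0; i < names->size(); ++i) {
        const std::string& name = (*names)[i];
        if (seen.count(name) != 0) continue;  // duplicate: skip it
        seen.insert(name);
        (*names)[j++] = name;                 // compact towards the front
      }
      names->resize(j);                       // drop the leftovers at the tail
    }

    int main() {
      std::vector<std::string> names = {"a", "toString", "a", "b", "toString"};
      DeduplicateInPlace(&names);
      assert((names == std::vector<std::string>{"a", "toString", "b"}));
      return 0;
    }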
2
deps/v8/src/version.cc
@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script. // cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2 #define MAJOR_VERSION 2
#define MINOR_VERSION 2 #define MINOR_VERSION 2
#define BUILD_NUMBER 19 #define BUILD_NUMBER 20
#define PATCH_LEVEL 0 #define PATCH_LEVEL 0
#define CANDIDATE_VERSION false #define CANDIDATE_VERSION false
122
deps/v8/src/x64/assembler-x64.cc
@ -376,8 +376,13 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) { void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m)); ASSERT(IsPowerOf2(m));
while ((pc_offset() & (m - 1)) != 0) { int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
nop(); while (delta >= 9) {
nop(9);
delta -= 9;
}
if (delta > 0) {
nop(delta);
} }
} }
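Note: the rewritten Assembler::Align computes the needed padding once, as (m - (pc_offset() & (m - 1))) & (m - 1), and then emits it as a run of 9-byte nops plus at most one shorter nop, instead of looping on single-byte nops. A minimal standalone sketch of that padding arithmetic, with the emit calls replaced by counting (the helper names are mine, not V8's):

    #include <cassert>

    // How many bytes are needed to advance `offset` to the next multiple of the
    // power-of-two `m`?  The outer mask makes the already-aligned case yield 0.
    int PaddingFor(int offset, int m) {
      return (m - (offset & (m - 1))) & (m - 1);
    }

    // How many nop instructions the new Align emits for `delta` bytes of
    // padding: long 9-byte nops first, then one shorter nop for the remainder.
    int CountNops(int delta) {
      int nops = 0;
      while (delta >= 9) {
        ++nops;
        delta -= 9;
      }
      if (delta > 0) ++nops;
      return nops;
    }

    int main() {
      assert(PaddingFor(16, 16) == 0);   // already aligned: nothing emitted
      assert(PaddingFor(17, 16) == 15);  // one 9-byte nop plus one 6-byte nop
      assert(CountNops(15) == 2);
      assert(PaddingFor(30, 8) == 2);    // a single 2-byte nop
      assert(CountNops(0) == 0);
      return 0;
    }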
@ -837,9 +842,7 @@ void Assembler::call(Register adr) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
// Opcode: FF /2 r64. // Opcode: FF /2 r64.
if (adr.high_bit()) { emit_optional_rex_32(adr);
emit_rex_64(adr);
}
emit(0xFF); emit(0xFF);
emit_modrm(0x2, adr); emit_modrm(0x2, adr);
} }
@ -849,9 +852,9 @@ void Assembler::call(const Operand& op) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
// Opcode: FF /2 m64. // Opcode: FF /2 m64.
emit_rex_64(op); emit_optional_rex_32(op);
emit(0xFF); emit(0xFF);
emit_operand(2, op); emit_operand(0x2, op);
} }
@ -1270,9 +1273,7 @@ void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
// Opcode FF/4 r64. // Opcode FF/4 r64.
if (target.high_bit()) { emit_optional_rex_32(target);
emit_rex_64(target);
}
emit(0xFF); emit(0xFF);
emit_modrm(0x4, target); emit_modrm(0x4, target);
} }
@ -1831,9 +1832,7 @@ void Assembler::nop(int n) {
void Assembler::pop(Register dst) { void Assembler::pop(Register dst) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
if (dst.high_bit()) { emit_optional_rex_32(dst);
emit_rex_64(dst);
}
emit(0x58 | dst.low_bits()); emit(0x58 | dst.low_bits());
} }
@ -1841,7 +1840,7 @@ void Assembler::pop(Register dst) {
void Assembler::pop(const Operand& dst) { void Assembler::pop(const Operand& dst) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
emit_rex_64(dst); // Could be omitted in some cases. emit_optional_rex_32(dst);
emit(0x8F); emit(0x8F);
emit_operand(0, dst); emit_operand(0, dst);
} }
@ -1857,9 +1856,7 @@ void Assembler::popfq() {
void Assembler::push(Register src) { void Assembler::push(Register src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
if (src.high_bit()) { emit_optional_rex_32(src);
emit_rex_64(src);
}
emit(0x50 | src.low_bits()); emit(0x50 | src.low_bits());
} }
@ -1867,7 +1864,7 @@ void Assembler::push(Register src) {
void Assembler::push(const Operand& src) { void Assembler::push(const Operand& src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
emit_rex_64(src); // Could be omitted in some cases. emit_optional_rex_32(src);
emit(0xFF); emit(0xFF);
emit_operand(6, src); emit_operand(6, src);
} }
@ -2609,6 +2606,28 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
} }
void Assembler::movss(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF3); // single
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x10); // load
emit_sse_operand(dst, src);
}
void Assembler::movss(const Operand& src, XMMRegister dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF3); // single
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x11); // store
emit_sse_operand(dst, src);
}
void Assembler::cvttss2si(Register dst, const Operand& src) { void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -2664,6 +2683,17 @@ void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
} }
void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2A);
emit_sse_operand(dst, src);
}
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) { void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -2686,6 +2716,50 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
} }
void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x5A);
emit_sse_operand(dst, src);
}
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x5A);
emit_sse_operand(dst, src);
}
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2D);
emit_sse_operand(dst, src);
}
void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
emit(0x2D);
emit_sse_operand(dst, src);
}
void Assembler::addsd(XMMRegister dst, XMMRegister src) { void Assembler::addsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -2763,6 +2837,18 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
} }
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
emit(0x2e);
emit_sse_operand(dst, src);
}
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) { void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() }; Register ireg = { reg.code() };
emit_operand(ireg, adr); emit_operand(ireg, adr);
27
deps/v8/src/x64/assembler-x64.h
@ -46,23 +46,23 @@ namespace internal {
// Test whether a 64-bit value is in a specific range. // Test whether a 64-bit value is in a specific range.
static inline bool is_uint32(int64_t x) { static inline bool is_uint32(int64_t x) {
static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff); static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
return x == (x & kUInt32Mask); return static_cast<uint64_t>(x) <= kMaxUInt32;
} }
static inline bool is_int32(int64_t x) { static inline bool is_int32(int64_t x) {
static const int64_t kMinIntValue = V8_INT64_C(-0x80000000); static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
return is_uint32(x - kMinIntValue); return is_uint32(x - kMinInt32);
} }
static inline bool uint_is_int32(uint64_t x) { static inline bool uint_is_int32(uint64_t x) {
static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000); static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
return x < kMaxIntValue; return x <= kMaxInt32;
} }
static inline bool is_uint32(uint64_t x) { static inline bool is_uint32(uint64_t x) {
static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000); static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
return x < kMaxUIntValue; return x <= kMaxUInt32;
} }
// CPU Registers. // CPU Registers.
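Note: the rewritten range predicates express the same checks as inclusive comparisons against explicit 32-bit maxima instead of masks and exclusive upper bounds. A self-contained sketch of the new versions with a few boundary-value checks; the V8_INT64_C/V8_UINT64_C macros are replaced here by standard literal suffixes so the snippet compiles on its own:

    #include <cassert>
    #include <cstdint>

    // Does the signed 64-bit value fit in an unsigned 32-bit range?
    static inline bool is_uint32(int64_t x) {
      static const uint64_t kMaxUInt32 = 0xffffffffULL;
      return static_cast<uint64_t>(x) <= kMaxUInt32;
    }

    // Does the signed 64-bit value fit in a signed 32-bit range?  Shifting the
    // range [kMinInt32, kMaxInt32] up by -kMinInt32 maps it onto [0, 0xffffffff],
    // so one unsigned comparison covers both bounds.
    static inline bool is_int32(int64_t x) {
      static const int64_t kMinInt32 = -0x80000000LL;
      return is_uint32(x - kMinInt32);
    }

    // Does the unsigned 64-bit value fit in a signed 32-bit range?
    static inline bool uint_is_int32(uint64_t x) {
      static const uint64_t kMaxInt32 = 0x7fffffffULL;
      return x <= kMaxInt32;
    }

    int main() {
      assert(is_uint32(0) && is_uint32(0xffffffffLL) && !is_uint32(-1));
      assert(!is_uint32(0x100000000LL));
      assert(is_int32(-0x80000000LL) && is_int32(0x7fffffffLL));
      assert(!is_int32(0x80000000LL) && !is_int32(-0x80000001LL));
      assert(uint_is_int32(0x7fffffffULL) && !uint_is_int32(0x80000000ULL));
      return 0;
    }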
@ -1110,6 +1110,9 @@ class Assembler : public Malloced {
void movsd(XMMRegister dst, XMMRegister src); void movsd(XMMRegister dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src); void movsd(XMMRegister dst, const Operand& src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void cvttss2si(Register dst, const Operand& src); void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src); void cvttsd2si(Register dst, const Operand& src);
void cvttsd2siq(Register dst, XMMRegister src); void cvttsd2siq(Register dst, XMMRegister src);
@ -1119,7 +1122,14 @@ class Assembler : public Malloced {
void cvtqsi2sd(XMMRegister dst, const Operand& src); void cvtqsi2sd(XMMRegister dst, const Operand& src);
void cvtqsi2sd(XMMRegister dst, Register src); void cvtqsi2sd(XMMRegister dst, Register src);
void cvtlsi2ss(XMMRegister dst, Register src);
void cvtss2sd(XMMRegister dst, XMMRegister src); void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, const Operand& src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src); void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src); void subsd(XMMRegister dst, XMMRegister src);
@ -1130,6 +1140,7 @@ class Assembler : public Malloced {
void sqrtsd(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
// The first argument is the reg field, the second argument is the r/m field. // The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src); void emit_sse_operand(XMMRegister dst, XMMRegister src);
92
deps/v8/src/x64/codegen-x64.cc
@ -2641,7 +2641,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Generate code to set the elements in the array that are not // Generate code to set the elements in the array that are not
// literals. // literals.
for (int i = 0; i < node->values()->length(); i++) { for (int i = 0; i < length; i++) {
Expression* value = node->values()->at(i); Expression* value = node->values()->at(i);
// If value is a literal the property value is already set in the // If value is a literal the property value is already set in the
@ -3855,8 +3855,17 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
default: default:
UNREACHABLE(); UNREACHABLE();
} }
if (left->IsTrivial()) {
Load(right);
Result right_result = frame_->Pop();
frame_->Push(left);
frame_->Push(&right_result);
} else {
Load(left); Load(left);
Load(right); Load(right);
}
Comparison(node, cc, strict, destination()); Comparison(node, cc, strict, destination());
} }
@ -5336,9 +5345,8 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
dest->false_target()->Branch(equal); dest->false_target()->Branch(equal);
Condition is_smi = masm_->CheckSmi(value.reg()); Condition is_smi = masm_->CheckSmi(value.reg());
dest->true_target()->Branch(is_smi); dest->true_target()->Branch(is_smi);
__ fldz(); __ xorpd(xmm0, xmm0);
__ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset)); __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
__ FCmp();
value.Unuse(); value.Unuse();
dest->Split(not_zero); dest->Split(not_zero);
} else { } else {
@ -6511,7 +6519,7 @@ class DeferredInlineBinaryOperation: public DeferredCode {
void DeferredInlineBinaryOperation::Generate() { void DeferredInlineBinaryOperation::Generate() {
Label done; Label done;
if ((op_ == Token::ADD) if ((op_ == Token::ADD)
|| (op_ ==Token::SUB) || (op_ == Token::SUB)
|| (op_ == Token::MUL) || (op_ == Token::MUL)
|| (op_ == Token::DIV)) { || (op_ == Token::DIV)) {
Label call_runtime; Label call_runtime;
@ -7530,9 +7538,11 @@ Result CodeGenerator::EmitKeyedLoad() {
// is not a dictionary. // is not a dictionary.
__ movq(elements.reg(), __ movq(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset)); FieldOperand(receiver.reg(), JSObject::kElementsOffset));
if (FLAG_debug_code) {
__ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Factory::fixed_array_map()); Factory::fixed_array_map());
deferred->Branch(not_equal); __ Assert(equal, "JSObject with fast elements map has slow elements");
}
// Check that key is within bounds. // Check that key is within bounds.
__ SmiCompare(key.reg(), __ SmiCompare(key.reg(),
@ -8000,14 +8010,12 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ jmp(&true_result); __ jmp(&true_result);
__ bind(&not_string); __ bind(&not_string);
// HeapNumber => false iff +0, -0, or NaN.
// These three cases set C3 when compared to zero in the FPU.
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &true_result); __ j(not_equal, &true_result);
__ fldz(); // Load zero onto fp stack // HeapNumber => false iff +0, -0, or NaN.
// Load heap-number double value onto fp stack // These three cases set the zero flag when compared to zero using ucomisd.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); __ xorpd(xmm0, xmm0);
__ FCmp(); __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ j(zero, &false_result); __ j(zero, &false_result);
// Fall through to |true_result|. // Fall through to |true_result|.
@ -8951,48 +8959,31 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(), // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves. // so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used. // Note: if cc_ != equal, never_nan_nan_ is not used.
if (never_nan_nan_ && (cc_ == equal)) {
__ Set(rax, EQUAL); __ Set(rax, EQUAL);
if (never_nan_nan_ && (cc_ == equal)) {
__ ret(0); __ ret(0);
} else { } else {
Label return_equal;
Label heap_number; Label heap_number;
// If it's not a heap number, then return equal. // If it's not a heap number, then return equal.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Factory::heap_number_map()); Factory::heap_number_map());
__ j(equal, &heap_number); __ j(equal, &heap_number);
__ bind(&return_equal);
__ Set(rax, EQUAL);
__ ret(0); __ ret(0);
__ bind(&heap_number); __ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if // It is a heap number, so return equal if it's not NaN.
// it's not NaN. // For NaN, return 1 for every condition except greater and
// The representation of NaN values has all exponent bits (52..62) set, // greater-equal. Return -1 for them, so the comparison yields
// and not all mantissa bits (0..51) clear. // false for all conditions except not-equal.
// We only allow QNaNs, which have bit 51 set (which also rules out
// the value being Infinity). __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ ucomisd(xmm0, xmm0);
// Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., __ setcc(parity_even, rax);
// all bits in the mask are set. We only need to check the word // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
// that contains the exponent and high bit of the mantissa. if (cc_ == greater_equal || cc_ == greater) {
ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u); __ neg(rax);
__ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
__ xorl(rax, rax);
__ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
__ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
if (cc_ == equal) {
__ setcc(above_equal, rax);
__ ret(0);
} else {
Label nan;
__ j(above_equal, &nan);
__ Set(rax, EQUAL);
__ ret(0);
__ bind(&nan);
__ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
} }
__ ret(0);
} }
__ bind(&not_identical); __ bind(&not_identical);
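Note: the new identical-operand path in CompareStub leans on two facts: ucomisd of a NaN against itself sets the parity flag, and answering +1 (or -1 when the condition is greater or greater-equal) makes every relational comparison on a NaN come out false while not-equal stays true. A C++ model of that result encoding; the enum and helpers below are placeholders for illustration, not the stub's actual conditions or ABI:

    #include <cassert>
    #include <cmath>
    #include <initializer_list>
    #include <limits>

    enum Condition { kLess, kLessEqual, kEqual, kNotEqual, kGreaterEqual, kGreater };

    // Result of comparing a heap number against itself: 0 means equal; for NaN
    // pick +1, or -1 for greater / greater-equal (mirroring the neg(rax) above),
    // so that the later test of the result against 0 fails.
    int IdenticalCompareResult(double value, Condition cc) {
      if (!std::isnan(value)) return 0;  // EQUAL
      return (cc == kGreater || cc == kGreaterEqual) ? -1 : 1;
    }

    // What the caller effectively does with the value the stub returns.
    bool ApplyCondition(int result, Condition cc) {
      switch (cc) {
        case kLess:         return result < 0;
        case kLessEqual:    return result <= 0;
        case kEqual:        return result == 0;
        case kNotEqual:     return result != 0;
        case kGreaterEqual: return result >= 0;
        case kGreater:      return result > 0;
      }
      return false;
    }

    int main() {
      const double qnan = std::numeric_limits<double>::quiet_NaN();
      // x <op> x for a NaN x: every condition except not-equal must be false.
      for (Condition cc : {kLess, kLessEqual, kEqual, kGreaterEqual, kGreater}) {
        assert(!ApplyCondition(IdenticalCompareResult(qnan, cc), cc));
      }
      assert(ApplyCondition(IdenticalCompareResult(qnan, kNotEqual), kNotEqual));
      // For an ordinary number, x <op> x behaves like comparing 0 with 0.
      assert(ApplyCondition(IdenticalCompareResult(1.5, kEqual), kEqual));
      assert(!ApplyCondition(IdenticalCompareResult(1.5, kLess), kLess));
      return 0;
    }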
@ -10040,20 +10031,15 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
// Input: rdx, rax are the left and right objects of a bit op. // Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op. // Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Both arguments can not be smis. That case is handled by smi-only code.
Label ok;
__ JumpIfNotBothSmi(rax, rdx, &ok);
__ Abort("Both arguments smi but not handled by smi-code.");
__ bind(&ok);
}
// Check float operands. // Check float operands.
Label done; Label done;
Label rax_is_smi;
Label rax_is_object; Label rax_is_object;
Label rdx_is_object; Label rdx_is_object;
__ JumpIfNotSmi(rdx, &rdx_is_object); __ JumpIfNotSmi(rdx, &rdx_is_object);
__ SmiToInteger32(rdx, rdx); __ SmiToInteger32(rdx, rdx);
__ JumpIfSmi(rax, &rax_is_smi);
__ bind(&rax_is_object); __ bind(&rax_is_object);
IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
@ -10062,6 +10048,7 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
__ bind(&rdx_is_object); __ bind(&rdx_is_object);
IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
__ JumpIfNotSmi(rax, &rax_is_object); __ JumpIfNotSmi(rax, &rax_is_object);
__ bind(&rax_is_smi);
__ SmiToInteger32(rcx, rax); __ SmiToInteger32(rcx, rax);
__ bind(&done); __ bind(&done);
@ -10446,7 +10433,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_floats; Label not_floats;
// rax: y // rax: y
// rdx: x // rdx: x
ASSERT(!static_operands_type_.IsSmi());
if (static_operands_type_.IsNumber()) { if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) { if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers. // Assert at runtime that inputs are only numbers.
@ -11583,7 +11569,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime); __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen. __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
__ j(negative, &runtime); __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
Label return_rax;
__ j(equal, &return_rax);
// Special handling of sub-strings of length 1 and 2. One character strings // Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character // are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache. // cache). Two character strings are looked for in the symbol cache.
@ -11686,6 +11674,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rsi: character of sub string start // rsi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false); StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
__ movq(rsi, rdx); // Restore esi. __ movq(rsi, rdx); // Restore esi.
__ bind(&return_rax);
__ IncrementCounter(&Counters::sub_string_native, 1); __ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(kArgumentsSize); __ ret(kArgumentsSize);

30
deps/v8/src/x64/disasm-x64.cc

@ -1028,9 +1028,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
if (opcode == 0x57) { if (opcode == 0x57) {
mnemonic = "xorpd"; mnemonic = "xorpd";
} else if (opcode == 0x2E) { } else if (opcode == 0x2E) {
mnemonic = "comisd";
} else if (opcode == 0x2F) {
mnemonic = "ucomisd"; mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
} else { } else {
UnimplementedInstruction(); UnimplementedInstruction();
} }
@ -1057,7 +1057,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// CVTSI2SD: integer to XMM double conversion. // CVTSI2SD: integer to XMM double conversion.
int mod, regop, rm; int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm); get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop)); AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightOperand(current); current += PrintRightOperand(current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) { } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function. // XMM arithmetic. Mnemonic was retrieved at the start of this function.
@ -1070,7 +1070,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} }
} else if (group_1_prefix_ == 0xF3) { } else if (group_1_prefix_ == 0xF3) {
// Instructions with prefix 0xF3. // Instructions with prefix 0xF3.
if (opcode == 0x2C) { if (opcode == 0x11 || opcode == 0x10) {
// MOVSS: Move scalar single-precision fp to/from/between XMM registers.
AppendToBuffer("movss ");
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
if (opcode == 0x11) {
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
AppendToBuffer("%s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
}
} else if (opcode == 0x2A) {
// CVTSI2SS: integer to XMM single conversion.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0x2C) {
// CVTTSS2SI: Convert scalar single-precision FP to dword integer. // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
// Assert that mod is not 3, so source is memory, not an XMM register. // Assert that mod is not 3, so source is memory, not an XMM register.
ASSERT_NE(0xC0, *current & 0xC0); ASSERT_NE(0xC0, *current & 0xC0);
@ -1146,8 +1164,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
switch (opcode) { switch (opcode) {
case 0x1F: case 0x1F:
return "nop"; return "nop";
case 0x2A: // F2 prefix. case 0x2A: // F2/F3 prefix.
return "cvtsi2sd"; return "cvtsi2s";
case 0x31: case 0x31:
return "rdtsc"; return "rdtsc";
case 0x51: // F2 prefix. case 0x51: // F2 prefix.

7
deps/v8/src/x64/full-codegen-x64.cc

@ -1518,12 +1518,13 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case KEYED_PROPERTY: { case KEYED_PROPERTY: {
__ push(rax); // Preserve value. __ push(rax); // Preserve value.
VisitForValue(prop->obj(), kStack); VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kStack); VisitForValue(prop->key(), kAccumulator);
__ movq(rax, Operand(rsp, 2 * kPointerSize)); __ movq(rcx, rax);
__ pop(rdx);
__ pop(rax);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET); __ call(ic, RelocInfo::CODE_TARGET);
__ nop(); // Signal no inlined code. __ nop(); // Signal no inlined code.
__ Drop(3); // Receiver, key, and extra copy of value.
break; break;
} }
} }

404
deps/v8/src/x64/ic-x64.cc

@ -45,71 +45,93 @@ namespace internal {
#define __ ACCESS_MASM(masm) #define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
__ j(equal, global_object);
__ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
__ j(equal, global_object);
__ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
__ j(equal, global_object);
}
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register r0,
Register r1,
Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
// Holds the property dictionary on fall through.
//   r1: used to hold the receiver's map.
__ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
__ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
// Check for non-global object that requires access check.
__ testb(FieldOperand(r1, Map::kBitFieldOffset),
Immediate((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor)));
__ j(not_zero, miss);
__ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss);
}
// Helper function used to load a property from a dictionary backing storage. // Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label // This function may return false negatives, so miss_label
// must always call a backup property load that is complete. // must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties, // This function is safe to call if name is not a symbol, and will jump to
// or if name is not a symbol, and will jump to the miss_label in that case. // the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label, Label* miss_label,
Register elements,
Register name,
Register r0, Register r0,
Register r1, Register r1,
Register r2, Register result) {
Register name,
Register r4,
Register result,
DictionaryCheck check_dictionary) {
// Register use: // Register use:
// //
// r0 - used to hold the property dictionary and is unchanged. // elements - holds the property dictionary on entry and is unchanged.
//
// r1 - used to hold the receiver and is unchanged.
// //
// r2 - used to hold the capacity of the property dictionary. // name - holds the name of the property on entry and is unchanged.
// //
// name - holds the name of the property and is unchanged. // r0 - used to hold the capacity of the property dictionary.
// //
// r4 - used to hold the index into the property dictionary. // r1 - used to hold the index into the property dictionary.
// //
// result - holds the result on exit if the load succeeded. // result - holds the result on exit if the load succeeded.
Label done; Label done;
// Check for the absence of an interceptor.
// Load the map into r0.
__ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
// Bail out if the receiver has a named interceptor.
__ testl(FieldOperand(r0, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNamedInterceptor));
__ j(not_zero, miss_label);
// Bail out if we have a JS global proxy object.
__ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
__ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
__ j(equal, miss_label);
// Possible work-around for http://crbug.com/16276.
__ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
__ j(equal, miss_label);
__ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
__ j(equal, miss_label);
// Load properties array.
__ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
if (check_dictionary == CHECK_DICTIONARY) {
// Check that the properties array is a dictionary.
__ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
__ j(not_equal, miss_label);
}
// Compute the capacity mask. // Compute the capacity mask.
const int kCapacityOffset = const int kCapacityOffset =
StringDictionary::kHeaderSize + StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize; StringDictionary::kCapacityIndex * kPointerSize;
__ SmiToInteger32(r2, FieldOperand(r0, kCapacityOffset)); __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
__ decl(r2); __ decl(r0);
// Generate an unrolled loop that performs a few probes before // Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes // giving up. Measurements done on Gmail indicate that 2 probes
@ -120,19 +142,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
StringDictionary::kElementsStartIndex * kPointerSize; StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) { for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask. // Compute the masked index: (hash + i + i * i) & mask.
__ movl(r4, FieldOperand(name, String::kHashFieldOffset)); __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
__ shrl(r4, Immediate(String::kHashShift)); __ shrl(r1, Immediate(String::kHashShift));
if (i > 0) { if (i > 0) {
__ addl(r4, Immediate(StringDictionary::GetProbeOffset(i))); __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
} }
__ and_(r4, r2); __ and_(r1, r0);
// Scale the index by multiplying by the entry size. // Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3); ASSERT(StringDictionary::kEntrySize == 3);
__ lea(r4, Operand(r4, r4, times_2, 0)); // r4 = r4 * 3 __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name. // Check if the key is identical to the name.
__ cmpq(name, Operand(r0, r4, times_pointer_size, __ cmpq(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag)); kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) { if (i != kProbes - 1) {
__ j(equal, &done); __ j(equal, &done);
@ -144,14 +166,16 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Check that the value is a normal property. // Check that the value is a normal property.
__ bind(&done); __ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ Test(Operand(r0, r4, times_pointer_size, kDetailsOffset - kHeapObjectTag), __ Test(Operand(elements, r1, times_pointer_size,
kDetailsOffset - kHeapObjectTag),
Smi::FromInt(PropertyDetails::TypeField::mask())); Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss_label); __ j(not_zero, miss_label);
// Get the value at the masked, scaled index. // Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize; const int kValueOffset = kElementsStartOffset + kPointerSize;
__ movq(result, __ movq(result,
Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag)); Operand(elements, r1, times_pointer_size,
kValueOffset - kHeapObjectTag));
} }
@ -327,6 +351,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
__ IncrementCounter(&Counters::keyed_load_miss, 1);
__ pop(rbx); __ pop(rbx);
__ push(rdx); // receiver __ push(rdx); // receiver
__ push(rax); // name __ push(rax); // name
@ -360,6 +386,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register receiver,
Register map, Register map,
int interceptor_bit,
Label* slow) { Label* slow) {
// Register use: // Register use:
// receiver - holds the receiver and is unchanged. // receiver - holds the receiver and is unchanged.
@ -379,7 +406,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Check bit field. // Check bit field.
__ testb(FieldOperand(map, Map::kBitFieldOffset), __ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(KeyedLoadIC::kSlowCaseBitFieldMask)); Immediate((1 << Map::kIsAccessCheckNeeded) |
(1 << interceptor_bit)));
__ j(not_zero, slow); __ j(not_zero, slow);
} }
@ -500,14 +528,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label slow, check_string, index_smi, index_string; Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary; Label check_pixel_array, probe_dictionary, check_number_dictionary;
GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, &slow);
// Check that the key is a smi. // Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string); __ JumpIfNotSmi(rax, &check_string);
__ bind(&index_smi); __ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below // Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi. // where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
GenerateFastArrayLoad(masm, GenerateFastArrayLoad(masm,
rdx, rdx,
rax, rax,
@ -557,6 +586,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_string); __ bind(&check_string);
GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow); GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup // If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx. // cache. Otherwise probe the dictionary leaving result in rcx.
__ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset)); __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
@ -608,15 +640,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&probe_dictionary); __ bind(&probe_dictionary);
// rdx: receiver // rdx: receiver
// rax: key // rax: key
GenerateDictionaryLoad(masm, // rbx: elements
&slow,
rbx, __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
rdx, __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
rcx, GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
rax,
rdi, GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
rax,
DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1); __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0); __ ret(0);
@ -672,7 +702,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// -- rdx : receiver // -- rdx : receiver
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
Label slow, failed_allocation; Label slow;
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow); __ JumpIfSmi(rdx, &slow);
@ -731,7 +761,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ movl(rcx, Operand(rbx, rcx, times_4, 0)); __ movl(rcx, Operand(rbx, rcx, times_4, 0));
break; break;
case kExternalFloatArray: case kExternalFloatArray:
__ fld_s(Operand(rbx, rcx, times_4, 0)); __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -743,20 +773,16 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// For integer array types: // For integer array types:
// rcx: value // rcx: value
// For floating-point array type: // For floating-point array type:
// FP(0): value // xmm0: value as double.
if (array_type == kExternalIntArray || ASSERT(kSmiValueSize == 32);
array_type == kExternalUnsignedIntArray) { if (array_type == kExternalUnsignedIntArray) {
// For the Int and UnsignedInt array types, we need to see whether // For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert // the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber. // it to a HeapNumber.
Label box_int; Label box_int;
if (array_type == kExternalIntArray) {
__ JumpIfNotValidSmiValue(rcx, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
__ JumpIfUIntNotValidSmiValue(rcx, &box_int); __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
}
__ Integer32ToSmi(rax, rcx); __ Integer32ToSmi(rax, rcx);
__ ret(0); __ ret(0);
@ -765,42 +791,28 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the int and perform int-to-double // Allocate a HeapNumber for the int and perform int-to-double
// conversion. // conversion.
__ push(rcx); // The value is zero-extended since we loaded the value from memory
if (array_type == kExternalIntArray) { // with movl.
__ fild_s(Operand(rsp, 0)); __ cvtqsi2sd(xmm0, rcx);
} else {
ASSERT(array_type == kExternalUnsignedIntArray); __ AllocateHeapNumber(rcx, rbx, &slow);
// The value is zero-extended on the stack, because all pushes are
// 64-bit and we loaded the value from memory with movl.
__ fild_d(Operand(rsp, 0));
}
__ pop(rcx);
// FP(0): value
__ AllocateHeapNumber(rcx, rbx, &failed_allocation);
// Set the value. // Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx); __ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0); __ ret(0);
} else if (array_type == kExternalFloatArray) { } else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a // For the floating-point array type, we need to always allocate a
// HeapNumber. // HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &failed_allocation); __ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value. // Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx); __ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0); __ ret(0);
} else { } else {
__ Integer32ToSmi(rax, rcx); __ Integer32ToSmi(rax, rcx);
__ ret(0); __ ret(0);
} }
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
__ ffree();
__ fincstp();
// Fall through to slow case.
// Slow case: Jump to runtime. // Slow case: Jump to runtime.
__ bind(&slow); __ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1); __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
@ -1086,10 +1098,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
break; break;
case kExternalFloatArray: case kExternalFloatArray:
// Need to perform int-to-float conversion. // Need to perform int-to-float conversion.
__ push(rdx); __ cvtlsi2ss(xmm0, rdx);
__ fild_s(Operand(rsp, 0)); __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
__ pop(rdx);
__ fstp_s(Operand(rbx, rdi, times_4, 0));
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -1110,53 +1120,41 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// The WebGL specification leaves the behavior of storing NaN and // The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more // +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero. // reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rdi: untagged index // rdi: untagged index
// rbx: base pointer of external storage // rbx: base pointer of external storage
// top of FPU stack: value // top of FPU stack: value
if (array_type == kExternalFloatArray) { if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(rbx, rdi, times_4, 0)); __ cvtsd2ss(xmm0, xmm0);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
__ ret(0); __ ret(0);
} else { } else {
// Need to perform float-to-int conversion. // Need to perform float-to-int conversion.
// Test the top of the FP stack for NaN. // Test the value for NaN.
Label is_nan;
__ fucomi(0); // Convert to int32 and store the low byte/word.
__ j(parity_even, &is_nan); // If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
__ push(rdx); // Make room on the stack. Receiver is no longer needed.
__ fistp_d(Operand(rsp, 0));
__ pop(rdx);
// rdx: value (converted to an untagged integer) // rdx: value (converted to an untagged integer)
// rdi: untagged index // rdi: untagged index
// rbx: base pointer of external storage // rbx: base pointer of external storage
switch (array_type) { switch (array_type) {
case kExternalByteArray: case kExternalByteArray:
case kExternalUnsignedByteArray: case kExternalUnsignedByteArray:
__ cvtsd2si(rdx, xmm0);
__ movb(Operand(rbx, rdi, times_1, 0), rdx); __ movb(Operand(rbx, rdi, times_1, 0), rdx);
break; break;
case kExternalShortArray: case kExternalShortArray:
case kExternalUnsignedShortArray: case kExternalUnsignedShortArray:
__ cvtsd2si(rdx, xmm0);
__ movw(Operand(rbx, rdi, times_2, 0), rdx); __ movw(Operand(rbx, rdi, times_2, 0), rdx);
break; break;
case kExternalIntArray: case kExternalIntArray:
case kExternalUnsignedIntArray: { case kExternalUnsignedIntArray: {
// We also need to explicitly check for +/-Infinity. These are // Convert to int64, so that NaN and infinities become
// converted to MIN_INT, but we need to be careful not to // 0x8000000000000000, which is zero mod 2^32.
// confuse with legal uses of MIN_INT. Since MIN_INT truncated __ cvtsd2siq(rdx, xmm0);
// to 8 or 16 bits is zero, we only perform this test when storing
// 32-bit ints.
Label not_infinity;
// This test would apparently detect both NaN and Infinity,
// but we've already checked for NaN using the FPU hardware
// above.
__ movzxwq(rcx, FieldOperand(rax, HeapNumber::kValueOffset + 6));
__ and_(rcx, Immediate(0x7FF0));
__ cmpw(rcx, Immediate(0x7FF0));
__ j(not_equal, &not_infinity);
__ movq(rdx, Immediate(0));
__ bind(&not_infinity);
__ movl(Operand(rbx, rdi, times_4, 0), rdx); __ movl(Operand(rbx, rdi, times_4, 0), rdx);
break; break;
} }
@ -1165,31 +1163,6 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
break; break;
} }
__ ret(0); __ ret(0);
__ bind(&is_nan);
// rdi: untagged index
// rbx: base pointer of external storage
__ ffree();
__ fincstp();
__ movq(rdx, Immediate(0));
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
default:
UNREACHABLE();
break;
}
__ ret(0);
} }
// Slow case: call runtime. // Slow case: call runtime.
@ -1212,6 +1185,13 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// rsp[argc * 8] : argument 1 // rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver // rsp[(argc + 1) * 8] : argument 0 = receiver
// ----------------------------------- // -----------------------------------
if (id == IC::kCallIC_Miss) {
__ IncrementCounter(&Counters::call_miss, 1);
} else {
__ IncrementCounter(&Counters::keyed_call_miss, 1);
}
// Get the receiver of the function from the stack; 1 ~ return address. // Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@ -1233,6 +1213,8 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
__ LeaveInternalFrame(); __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort. // Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global; Label invoke, global;
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
__ JumpIfSmi(rdx, &invoke); __ JumpIfSmi(rdx, &invoke);
@ -1245,10 +1227,11 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
__ bind(&global); __ bind(&global);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset)); __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx); __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
__ bind(&invoke);
}
// Invoke the function. // Invoke the function.
ParameterCount actual(argc); ParameterCount actual(argc);
__ bind(&invoke);
__ InvokeFunction(rdi, actual, JUMP_FUNCTION); __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
} }
@ -1309,13 +1292,12 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
} }
static void GenerateNormalHelper(MacroAssembler* masm, static void GenerateFunctionTailCall(MacroAssembler* masm,
int argc, int argc,
bool is_global_object,
Label* miss) { Label* miss) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// rcx : function name // rcx : function name
// rdx : receiver // rdi : function
// rsp[0] : return address // rsp[0] : return address
// rsp[8] : argument argc // rsp[8] : argument argc
// rsp[16] : argument argc - 1 // rsp[16] : argument argc - 1
@ -1323,21 +1305,11 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// rsp[argc * 8] : argument 1 // rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver // rsp[(argc + 1) * 8] : argument 0 = receiver
// ----------------------------------- // -----------------------------------
// Search dictionary - put result in register rdx.
GenerateDictionaryLoad(
masm, miss, rax, rdx, rbx, rcx, rdi, rdi, CHECK_DICTIONARY);
__ JumpIfSmi(rdi, miss); __ JumpIfSmi(rdi, miss);
// Check that the value is a JavaScript function. // Check that the value is a JavaScript function.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx); __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
__ j(not_equal, miss); __ j(not_equal, miss);
// Patch the receiver with the global proxy if necessary.
if (is_global_object) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
// Invoke the function. // Invoke the function.
ParameterCount actual(argc); ParameterCount actual(argc);
__ InvokeFunction(rdi, actual, JUMP_FUNCTION); __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
@ -1355,56 +1327,18 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// rsp[argc * 8] : argument 1 // rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver // rsp[(argc + 1) * 8] : argument 0 = receiver
// ----------------------------------- // -----------------------------------
Label miss, global_object, non_global_object; Label miss;
// Get the receiver of the function from the stack. // Get the receiver of the function from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi. GenerateDictionaryLoadReceiverCheck(masm, rdx, rax, rbx, &miss);
__ JumpIfSmi(rdx, &miss);
// Check that the receiver is a valid JS object.
// Because there are so many map checks and type checks, do not
// use CmpObjectType, but load map and type into registers.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movb(rax, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ cmpb(rax, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, &miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
// Check for access to global object. // rax: elements
__ cmpb(rax, Immediate(JS_GLOBAL_OBJECT_TYPE)); // Search the dictionary placing the result in rdi.
__ j(equal, &global_object); GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
__ cmpb(rax, Immediate(JS_BUILTINS_OBJECT_TYPE));
__ j(not_equal, &non_global_object);
// Accessing global object: Load and invoke.
__ bind(&global_object);
// Check that the global object does not require access checks.
__ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
__ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_equal, &miss);
GenerateNormalHelper(masm, argc, true, &miss);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
__ bind(&non_global_object);
__ cmpb(rax, Immediate(JS_GLOBAL_PROXY_TYPE));
__ j(equal, &global_proxy);
// Check that the non-global, non-global-proxy object does not
// require access checks.
__ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
__ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_equal, &miss);
__ bind(&invoke);
GenerateNormalHelper(masm, argc, false, &miss);
// Global object proxy access: Check access rights. GenerateFunctionTailCall(masm, argc, &miss);
__ bind(&global_proxy);
__ CheckAccessGlobalProxy(rdx, rax, &miss);
__ jmp(&invoke);
__ bind(&miss); __ bind(&miss);
} }
@ -1498,7 +1432,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Now the key is known to be a smi. This place is also jumped to from below // Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi. // where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &slow_call); GenerateKeyedLoadReceiverCheck(
masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad( GenerateFastArrayLoad(
masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load); masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
@ -1508,14 +1443,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// receiver in rdx is not used after this point. // receiver in rdx is not used after this point.
// rcx: key // rcx: key
// rdi: function // rdi: function
GenerateFunctionTailCall(masm, argc, &slow_call);
// Check that the value in edi is a JavaScript function.
__ JumpIfSmi(rdi, &slow_call);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
__ j(not_equal, &slow_call);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(rdi, actual, JUMP_FUNCTION);
__ bind(&check_number_dictionary); __ bind(&check_number_dictionary);
// eax: elements // eax: elements
@ -1523,6 +1451,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check whether the elements is a number dictionary. // Check whether the elements is a number dictionary.
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex); Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow_load);
__ SmiToInteger32(rbx, rcx); __ SmiToInteger32(rbx, rcx);
// ebx: untagged index // ebx: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi); GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
@ -1550,15 +1479,15 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// If the receiver is a regular JS object with slow properties then do // If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary. // a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe. // Otherwise do the monomorphic cache probe.
GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &lookup_monomorphic_cache); GenerateKeyedLoadReceiverCheck(
masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset)); __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex); Heap::kHashTableMapRootIndex);
__ j(not_equal, &lookup_monomorphic_cache); __ j(not_equal, &lookup_monomorphic_cache);
GenerateDictionaryLoad( GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
masm, &slow_load, rbx, rdx, rax, rcx, rdi, rdi, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1); __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
__ jmp(&do_call); __ jmp(&do_call);
@ -1620,6 +1549,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
__ IncrementCounter(&Counters::load_miss, 1);
__ pop(rbx); __ pop(rbx);
__ push(rax); // receiver __ push(rax); // receiver
__ push(rcx); // name __ push(rcx); // name
@ -1683,38 +1614,15 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- rcx : name // -- rcx : name
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
Label miss, probe, global; Label miss;
// Check that the receiver isn't a smi.
__ JumpIfSmi(rax, &miss);
// Check that the receiver is a valid JS object.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
__ j(below, &miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
// Check for access to global object (unlikely).
__ CmpInstanceType(rbx, JS_GLOBAL_PROXY_TYPE);
__ j(equal, &global);
// Check for non-global object that requires access check. GenerateDictionaryLoadReceiverCheck(masm, rax, rdx, rbx, &miss);
__ testl(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss);
// rdx: elements
// Search the dictionary placing the result in rax. // Search the dictionary placing the result in rax.
__ bind(&probe); GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
rcx, rdi, rax, CHECK_DICTIONARY);
__ ret(0); __ ret(0);
// Global object access: Check access rights.
__ bind(&global);
__ CheckAccessGlobalProxy(rax, rdx, &miss);
__ jmp(&probe);
// Cache miss: Jump to runtime. // Cache miss: Jump to runtime.
__ bind(&miss); __ bind(&miss);
GenerateMiss(masm); GenerateMiss(masm);

7
deps/v8/src/x64/macro-assembler-x64.cc

@ -652,8 +652,8 @@ Condition MacroAssembler::CheckBothPositiveSmi(Register first,
if (first.is(second)) { if (first.is(second)) {
return CheckPositiveSmi(first); return CheckPositiveSmi(first);
} }
movl(kScratchRegister, first); movq(kScratchRegister, first);
orl(kScratchRegister, second); or_(kScratchRegister, second);
rol(kScratchRegister, Immediate(1)); rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(0x03)); testl(kScratchRegister, Immediate(0x03));
return zero; return zero;
@ -1678,8 +1678,7 @@ void MacroAssembler::Ret() {
void MacroAssembler::FCmp() { void MacroAssembler::FCmp() {
fucomip(); fucomip();
ffree(0); fstp(0);
fincstp();
} }

3
deps/v8/src/x64/macro-assembler-x64.h

@ -546,7 +546,8 @@ class MacroAssembler: public Assembler {
Register map, Register map,
Register instance_type); Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned // FCmp compares and pops the two values on top of the FPU stack.
// The flag results are similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz). // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp(); void FCmp();

34
deps/v8/src/x64/virtual-frame-x64.cc

@ -115,25 +115,45 @@ void VirtualFrame::AllocateStackSlots() {
Handle<Object> undefined = Factory::undefined_value(); Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value = FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED); FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
if (count == 1) { if (count < kLocalVarBound) {
__ Push(undefined); // For fewer locals the unrolled loop is more compact.
} else if (count < kLocalVarBound) {
// For less locals the unrolled loop is more compact. // Hope for one of the first eight registers, where the push operation
__ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT); // takes only one byte (kScratchRegister needs the REX.W bit).
Result tmp = cgen()->allocator()->Allocate();
ASSERT(tmp.is_valid());
__ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
for (int i = 0; i < count; i++) { for (int i = 0; i < count; i++) {
__ push(kScratchRegister); __ push(tmp.reg());
} }
} else { } else {
// For more locals a loop in generated code is more compact. // For more locals a loop in generated code is more compact.
Label alloc_locals_loop; Label alloc_locals_loop;
Result cnt = cgen()->allocator()->Allocate(); Result cnt = cgen()->allocator()->Allocate();
ASSERT(cnt.is_valid()); ASSERT(cnt.is_valid());
__ movq(cnt.reg(), Immediate(count));
__ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT); __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
#ifdef DEBUG
Label loop_size;
__ bind(&loop_size);
#endif
if (is_uint8(count)) {
// Loading imm8 is shorter than loading imm32.
// Loading only partial byte register, and using decb below.
__ movb(cnt.reg(), Immediate(count));
} else {
__ movl(cnt.reg(), Immediate(count));
}
__ bind(&alloc_locals_loop); __ bind(&alloc_locals_loop);
__ push(kScratchRegister); __ push(kScratchRegister);
if (is_uint8(count)) {
__ decb(cnt.reg());
} else {
__ decl(cnt.reg()); __ decl(cnt.reg());
}
__ j(not_zero, &alloc_locals_loop); __ j(not_zero, &alloc_locals_loop);
#ifdef DEBUG
CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
#endif
} }
for (int i = 0; i < count; i++) { for (int i = 0; i < count; i++) {
elements_.Add(initial_value); elements_.Add(initial_value);

2
deps/v8/src/x64/virtual-frame-x64.h

@ -200,7 +200,7 @@ class VirtualFrame : public ZoneObject {
inline void PrepareForReturn(); inline void PrepareForReturn();
// Number of local variables after which we use a loop for allocating. // Number of local variables after which we use a loop for allocating.
static const int kLocalVarBound = 7; static const int kLocalVarBound = 14;
// Allocate and initialize the frame-allocated locals. // Allocate and initialize the frame-allocated locals.
void AllocateStackSlots(); void AllocateStackSlots();

25
deps/v8/test/cctest/test-api.cc

@ -5035,6 +5035,31 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
} }
static v8::Handle<v8::Array> NamedPropertyEnumerator(const AccessorInfo& info) {
v8::Handle<v8::Array> result = v8::Array::New(1);
result->Set(0, v8_str("x"));
return result;
}
THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
v8::HandleScope handle_scope;
v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
obj_template->Set(v8_str("x"), v8::Integer::New(42));
obj_template->SetNamedPropertyHandler(NULL, NULL, NULL, NULL,
NamedPropertyEnumerator);
LocalContext context;
v8::Handle<v8::Object> global = context->Global();
global->Set(v8_str("object"), obj_template->NewInstance());
v8::Handle<Value> value =
CompileRun("Object.getOwnPropertyNames(object).join(',')");
CHECK_EQ(v8_str("x"), value);
}
static v8::Handle<Value> ConstTenGetter(Local<String> name, static v8::Handle<Value> ConstTenGetter(Local<String> name,
const AccessorInfo& info) { const AccessorInfo& info) {
return v8_num(10); return v8_num(10);

4
deps/v8/test/cctest/test-profile-generator.cc

@ -653,7 +653,7 @@ TEST(SampleRateCalculator) {
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 0.75; time += SampleRateCalculator::kWallTimeQueryIntervalMs * 0.75;
calc2.UpdateMeasurements(time); calc2.UpdateMeasurements(time);
// (1.0 + 2.0 + 2.0) / 3 // (1.0 + 2.0 + 2.0) / 3
CHECK_EQ(kSamplingIntervalMs * 1.66666, calc2.ticks_per_ms()); CHECK_EQ(kSamplingIntervalMs * 5.0, floor(calc2.ticks_per_ms() * 3.0 + 0.5));
SampleRateCalculator calc3; SampleRateCalculator calc3;
time = 0.0; time = 0.0;
@ -667,7 +667,7 @@ TEST(SampleRateCalculator) {
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 1.5; time += SampleRateCalculator::kWallTimeQueryIntervalMs * 1.5;
calc3.UpdateMeasurements(time); calc3.UpdateMeasurements(time);
// (1.0 + 0.5 + 0.5) / 3 // (1.0 + 0.5 + 0.5) / 3
CHECK_EQ(kSamplingIntervalMs * 0.66666, calc3.ticks_per_ms()); CHECK_EQ(kSamplingIntervalMs * 2.0, floor(calc3.ticks_per_ms() * 3.0 + 0.5));
} }

35
deps/v8/test/mjsunit/for-in.js

@ -84,3 +84,38 @@ var result = '';
for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; } for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
assertEquals('ab', result, "abgetset"); assertEquals('ab', result, "abgetset");
// Test that for-in in the global scope works with a keyed property as "each".
// Test outside a loop and in a loop for multiple iterations.
a = [1,2,3,4];
x = {foo:5, bar:6, zip:7, glep:9, 10:11};
delete x.bar;
y = {}
for (a[2] in x) {
y[a[2]] = x[a[2]];
}
assertEquals(5, y.foo, "y.foo");
assertEquals("undefined", typeof y.bar, "y.bar");
assertEquals(7, y.zip, "y.zip");
assertEquals(9, y.glep, "y.glep");
assertEquals(11, y[10], "y[10]");
assertEquals("undefined", typeof y[2], "y[2]");
assertEquals("undefined", typeof y[0], "y[0]");
for (i=0 ; i < 3; ++i) {
y = {}
for (a[2] in x) {
y[a[2]] = x[a[2]];
}
assertEquals(5, y.foo, "y.foo");
assertEquals("undefined", typeof y.bar, "y.bar");
assertEquals(7, y.zip, "y.zip");
assertEquals(9, y.glep, "y.glep");
assertEquals(11, y[10], "y[10]");
assertEquals("undefined", typeof y[2], "y[2]");
assertEquals("undefined", typeof y[0], "y[0]");
}
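
The new test exercises a for-in loop whose target is a keyed property rather than a plain variable; a minimal standalone sketch of that pattern (illustrative only, not taken from the patch):

// A for-in target may be any assignable expression; each enumerated key is
// written into a[2] instead of introducing a loop variable.
var a = [0, 0, 0];
var seen = {};
for (a[2] in {p: 1, q: 2}) { seen[a[2]] = true; }
// seen.p and seen.q are both true, and a[2] holds the last enumerated key.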

46
deps/v8/test/mjsunit/regress/regress-45469.js

@ -0,0 +1,46 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test that global regexps capture and fail in the correct cyclic way.
var re = /x/g;
for (var i = 0; i < 15; i++) {
assertEquals(i % 3, re.lastIndex, "preindex" + i);
var res = re.exec("xx");
assertEquals(i % 3 == 2 ? null : ["x"], res, "res" + i);
}
re = /x/g;
for (var i = 0; i < 15; i++) {
assertEquals(i % 3, re.lastIndex, "testpreindex" + i);
var res = re.test("xx");
assertEquals(i % 3 != 2, res, "testres" + i);
}
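
The cycle of length three that the assertions rely on comes from how a global regexp updates lastIndex; a minimal sketch of one cycle (illustrative, not part of the patch):

var re = /x/g;     // the global flag makes exec() stateful via lastIndex
re.exec("xx");     // matches at index 0, lastIndex becomes 1
re.exec("xx");     // matches at index 1, lastIndex becomes 2
re.exec("xx");     // no match from index 2: returns null, lastIndex resets to 0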

36
deps/v8/test/mjsunit/regress/regress-752.js

@ -0,0 +1,36 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test that JSON.stringify correctly unwraps Boolean objects.
// See: http://code.google.com/p/v8/issues/detail?id=752
function replacer(key, value) {
return value === 42 ? new Boolean(false) : value;
}
assertEquals(JSON.stringify([42], replacer), "[false]");
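
The expected output follows from JSON.stringify unwrapping Boolean (and Number/String) wrapper objects to their primitive values before serializing; a minimal illustration (not taken from the patch):

// A Boolean object serializes as its primitive value, so the replacer's
// new Boolean(false) ends up as the JSON literal false.
JSON.stringify(new Boolean(false));    // "false"
JSON.stringify([new Boolean(false)]);  // "[false]"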

39
deps/v8/test/mjsunit/regress/regress-754.js

@ -0,0 +1,39 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test that Array.prototype.lastIndexOf correctly handles null and undefined
// as fromIndex argument.
// See: http://code.google.com/p/v8/issues/detail?id=754
var a = new Array(1,2,1);
assertEquals(1, a.lastIndexOf(2));
assertEquals(2, a.lastIndexOf(1));
assertEquals(0, a.lastIndexOf(1, undefined));
assertEquals(0, a.lastIndexOf(1, null));
assertEquals(-1, a.lastIndexOf(2, undefined));
assertEquals(-1, a.lastIndexOf(2, null));
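
The expectations follow from the fromIndex argument being coerced with ToInteger, so both null and undefined become 0 and only index 0 is examined; a brief sketch (illustrative, assuming standard ES5 coercion):

// ToInteger(null) and ToInteger(undefined) are both 0, so the search starts
// and ends at index 0: the 1 stored there is found, the 2 at index 1 is not.
[1, 2, 1].lastIndexOf(1, null);       // 0
[1, 2, 1].lastIndexOf(2, undefined);  // -1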

5
deps/v8/test/mjsunit/smi-ops.js

@ -685,3 +685,8 @@ assertEquals(24, LeftShiftThreeBy(3));
assertEquals(24, LeftShiftThreeBy(35)); assertEquals(24, LeftShiftThreeBy(35));
assertEquals(24, LeftShiftThreeBy(67)); assertEquals(24, LeftShiftThreeBy(67));
assertEquals(24, LeftShiftThreeBy(-29)); assertEquals(24, LeftShiftThreeBy(-29));
// Regression test for a bug in the ARM code generator. For some register
// allocations we got the Smi overflow case wrong.
function f(x, y) { return y + ( 1 << (x & 31)); }
assertEquals(-2147483647, f(31, 1));
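
The expected result follows from 32-bit shift semantics; a short sketch of the arithmetic (illustrative, independent of the test harness):

// x & 31 is 31, and 1 << 31 leaves the Smi range and wraps to the int32
// value -2147483648; adding y (1) gives -2147483647.
var x = 31, y = 1;
1 << (x & 31);        // -2147483648
y + (1 << (x & 31));  // -2147483647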
