Upgrade V8 to 2.2.15

v0.7.4-release
Ryan Dahl, 15 years ago
parent commit 0bb47b6c97
  1. 19   deps/v8/ChangeLog
  2. 108  deps/v8/include/v8.h
  3. 37   deps/v8/src/api.cc
  4. 51   deps/v8/src/arm/assembler-arm.cc
  5. 4    deps/v8/src/arm/assembler-arm.h
  6. 544  deps/v8/src/arm/codegen-arm.cc
  7. 83   deps/v8/src/arm/codegen-arm.h
  8. 231  deps/v8/src/arm/ic-arm.cc
  9. 13   deps/v8/src/arm/jump-target-arm.cc
  10. 50   deps/v8/src/arm/stub-cache-arm.cc
  11. 6    deps/v8/src/arm/virtual-frame-arm-inl.h
  12. 44   deps/v8/src/arm/virtual-frame-arm.cc
  13. 43   deps/v8/src/arm/virtual-frame-arm.h
  14. 20   deps/v8/src/codegen.cc
  15. 42   deps/v8/src/cpu-profiler.cc
  16. 11   deps/v8/src/cpu-profiler.h
  17. 3    deps/v8/src/data-flow.cc
  18. 19   deps/v8/src/debug.cc
  19. 2    deps/v8/src/disassembler.cc
  20. 1    deps/v8/src/full-codegen.h
  21. 4    deps/v8/src/globals.h
  22. 9    deps/v8/src/heap-inl.h
  23. 4    deps/v8/src/heap.cc
  24. 8    deps/v8/src/heap.h
  25. 44   deps/v8/src/ia32/assembler-ia32.cc
  26. 10   deps/v8/src/ia32/assembler-ia32.h
  27. 20   deps/v8/src/ia32/builtins-ia32.cc
  28. 452  deps/v8/src/ia32/codegen-ia32.cc
  29. 2    deps/v8/src/ia32/codegen-ia32.h
  30. 23   deps/v8/src/ia32/disasm-ia32.cc
  31. 57   deps/v8/src/ia32/full-codegen-ia32.cc
  32. 213  deps/v8/src/ia32/ic-ia32.cc
  33. 19   deps/v8/src/ia32/macro-assembler-ia32.cc
  34. 12   deps/v8/src/ia32/macro-assembler-ia32.h
  35. 102  deps/v8/src/ia32/stub-cache-ia32.cc
  36. 18   deps/v8/src/ia32/virtual-frame-ia32.cc
  37. 3    deps/v8/src/ia32/virtual-frame-ia32.h
  38. 148  deps/v8/src/ic.cc
  39. 53   deps/v8/src/ic.h
  40. 4    deps/v8/src/liveedit.cc
  41. 4    deps/v8/src/log.cc
  42. 12   deps/v8/src/log.h
  43. 12   deps/v8/src/objects-inl.h
  44. 31   deps/v8/src/objects.cc
  45. 37   deps/v8/src/objects.h
  46. 2    deps/v8/src/profile-generator.h
  47. 24   deps/v8/src/regexp.js
  48. 104  deps/v8/src/runtime.cc
  49. 2    deps/v8/src/runtime.h
  50. 1    deps/v8/src/spaces.cc
  51. 155  deps/v8/src/stub-cache.cc
  52. 59   deps/v8/src/stub-cache.h
  53. 2    deps/v8/src/type-info.h
  54. 8    deps/v8/src/unbound-queue-inl.h
  55. 1    deps/v8/src/unbound-queue.h
  56. 46   deps/v8/src/utils.h
  57. 4    deps/v8/src/v8-counters.h
  58. 2    deps/v8/src/v8threads.cc
  59. 2    deps/v8/src/v8threads.h
  60. 2    deps/v8/src/version.cc
  61. 10   deps/v8/src/virtual-frame-light-inl.h
  62. 2    deps/v8/src/virtual-frame-light.cc
  63. 7    deps/v8/src/x64/builtins-x64.cc
  64. 123  deps/v8/src/x64/codegen-x64.cc
  65. 2    deps/v8/src/x64/codegen-x64.h
  66. 8    deps/v8/src/x64/debug-x64.cc
  67. 42   deps/v8/src/x64/full-codegen-x64.cc
  68. 309  deps/v8/src/x64/ic-x64.cc
  69. 143  deps/v8/src/x64/stub-cache-x64.cc
  70. 17   deps/v8/src/x64/virtual-frame-x64.cc
  71. 238  deps/v8/test/cctest/test-api.cc
  72. 7    deps/v8/test/cctest/test-debug.cc
  73. 68   deps/v8/test/cctest/test-decls.cc
  74. 107  deps/v8/test/cctest/test-profile-generator.cc
  75. 8    deps/v8/test/cctest/test-strings.cc
  76. 52   deps/v8/test/cctest/test-utils.cc
  77. 27   deps/v8/test/mjsunit/delete.js
  78. 2    deps/v8/test/mjsunit/eval.js
  79. 205  deps/v8/test/mjsunit/keyed-call-ic.js
  80. 42   deps/v8/test/mjsunit/regress/regress-728.js
  81. 46   deps/v8/test/mjsunit/regress/regress-732.js
  82. 91   deps/v8/test/mjsunit/string-charat.js
  83. 4    deps/v8/test/mjsunit/string-charcodeat.js

19  deps/v8/ChangeLog

@@ -1,3 +1,22 @@
2010-06-07: Version 2.2.15

        Add an API to control the disposal of external string resources.

        Add missing initialization of a couple of variables which makes
        some compilers complain when compiling with -Werror.

        Improve performance on all platforms.

2010-06-02: Version 2.2.14

        Fixed a crash in code generated for String.charCodeAt.

        Fixed a compilation issue with some GCC versions (issue 727).

        Performance optimizations on x64 and ARM platforms.

2010-05-31: Version 2.2.13

        Implement Object.getOwnPropertyDescriptor for element indices and

108  deps/v8/include/v8.h

@@ -1122,11 +1122,13 @@ class V8EXPORT String : public Primitive {
/**
* Creates a new external string using the data defined in the given
* resource. The resource is deleted when the external string is no
* longer live on V8's heap. The caller of this function should not
* delete or modify the resource. Neither should the underlying buffer be
* deallocated or modified except through the destructor of the
* external string resource.
* resource. When the external string is no longer live on V8's heap the
* resource will be disposed. If a disposal callback has been set using
* SetExternalStringDiposeCallback this callback will be called to dispose
* the resource. Otherwise, V8 will dispose the resource using the C++ delete
* operator. The caller of this function should not otherwise delete or
* modify the resource. Neither should the underlying buffer be deallocated
* or modified except through the destructor of the external string resource.
*/
static Local<String> NewExternal(ExternalStringResource* resource);
@@ -1136,17 +1138,20 @@ class V8EXPORT String : public Primitive {
* will use the external string resource. The external string resource's
* character contents needs to be equivalent to this string.
* Returns true if the string has been changed to be an external string.
* The string is not modified if the operation fails.
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
*/
bool MakeExternal(ExternalStringResource* resource);
/**
* Creates a new external string using the ascii data defined in the given
* resource. The resource is deleted when the external string is no
* longer live on V8's heap. The caller of this function should not
* delete or modify the resource. Neither should the underlying buffer be
* deallocated or modified except through the destructor of the
* external string resource.
* resource. When the external string is no longer live on V8's heap the
* resource will be disposed. If a disposal callback has been set using
* SetExternalStringDiposeCallback this callback will be called to dispose
* the resource. Otherwise, V8 will dispose the resource using the C++ delete
* operator. The caller of this function should not otherwise delete or
* modify the resource. Neither should the underlying buffer be deallocated
* or modified except through the destructor of the external string resource.
*/
static Local<String> NewExternal(ExternalAsciiStringResource* resource);
@@ -1156,7 +1161,8 @@ class V8EXPORT String : public Primitive {
* will use the external string resource. The external string resource's
* character contents needs to be equivalent to this string.
* Returns true if the string has been changed to be an external string.
* The string is not modified if the operation fails.
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
*/
bool MakeExternal(ExternalAsciiStringResource* resource);
@@ -1245,6 +1251,10 @@ class V8EXPORT String : public Primitive {
};
typedef void (*ExternalStringDiposeCallback)
(String::ExternalStringResourceBase* resource);
/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
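For readers following the external-string change: below is a minimal sketch of the embedder side of this API. Only String::ExternalStringResource, String::ExternalStringResourceBase, and the ExternalStringDiposeCallback typedef above come from V8; the PooledUtf16Resource class and ReturnBufferToPool() are hypothetical stand-ins for an embedder that allocates string buffers from its own pool, where a plain C++ delete would be wrong.

    // Hypothetical resource whose buffer belongs to an embedder-owned pool.
    class PooledUtf16Resource : public v8::String::ExternalStringResource {
     public:
      PooledUtf16Resource(const uint16_t* data, size_t length)
          : data_(data), length_(length) {}
      virtual const uint16_t* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const uint16_t* data_;  // pool-allocated, not new[]-allocated
      size_t length_;
    };

    // Matches the ExternalStringDiposeCallback typedef added above.
    static void DisposePooledResource(
        v8::String::ExternalStringResourceBase* resource) {
      PooledUtf16Resource* r = static_cast<PooledUtf16Resource*>(resource);
      ReturnBufferToPool(r->data());  // hypothetical pool hook
      delete r;
    }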
@@ -1726,13 +1736,22 @@ typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
/**
* Returns a non-empty handle if the interceptor intercepts the request.
* The result is true if the property exists and false otherwise.
* The result is either boolean (true if property exists and false
* otherwise) or an integer encoding property attributes.
*/
#ifdef USE_NEW_QUERY_CALLBACKS
typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
const AccessorInfo& info);
#else
typedef Handle<Boolean> (*NamedPropertyQuery)(Local<String> property,
const AccessorInfo& info);
#endif
typedef Handle<Value> (*NamedPropertyQueryImpl)(Local<String> property,
const AccessorInfo& info);
/**
@@ -1984,7 +2003,16 @@ class V8EXPORT FunctionTemplate : public Template {
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data);
Handle<Value> data) {
NamedPropertyQueryImpl casted =
reinterpret_cast<NamedPropertyQueryImpl>(query);
SetNamedInstancePropertyHandlerImpl(getter,
setter,
casted,
remover,
enumerator,
data);
}
void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
IndexedPropertySetter setter,
IndexedPropertyQuery query,
@@ -1996,6 +2024,13 @@ class V8EXPORT FunctionTemplate : public Template {
friend class Context;
friend class ObjectTemplate;
private:
void SetNamedInstancePropertyHandlerImpl(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQueryImpl query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data);
};
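To make the query-callback transition concrete, here is a hedged sketch of a callback written against the new integer-returning signature that USE_NEW_QUERY_CALLBACKS selects above. The property check (HasLockedProperty) is a hypothetical embedder lookup; the returned bits are v8::PropertyAttribute values, which is the "integer encoding property attributes" the comment describes.

    // New-style query interceptor: a non-empty result means "intercepted",
    // and the integer payload carries the property attributes.
    static v8::Handle<v8::Integer> QueryNamed(v8::Local<v8::String> property,
                                              const v8::AccessorInfo& info) {
      if (HasLockedProperty(property)) {  // hypothetical embedder lookup
        return v8::Integer::New(v8::ReadOnly | v8::DontDelete);
      }
      return v8::Handle<v8::Integer>();  // empty handle: not intercepted
    }

Under the old signature the same interceptor could only answer Handle<Boolean>, i.e. "exists or not"; the reinterpret_cast shim in the inline wrappers above lets code compiled against either signature reach the same Impl entry point.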
@@ -2053,7 +2088,7 @@ class V8EXPORT ObjectTemplate : public Template {
*
* \param getter The callback to invoke when getting a property.
* \param setter The callback to invoke when setting a property.
* \param query The callback to invoke to check is an object has a property.
* \param query The callback to invoke to check if an object has a property.
* \param deleter The callback to invoke when deleting a property.
* \param enumerator The callback to invoke to enumerate all the named
* properties of an object.
@@ -2065,7 +2100,26 @@ class V8EXPORT ObjectTemplate : public Template {
NamedPropertyQuery query = 0,
NamedPropertyDeleter deleter = 0,
NamedPropertyEnumerator enumerator = 0,
Handle<Value> data = Handle<Value>());
Handle<Value> data = Handle<Value>()) {
NamedPropertyQueryImpl casted =
reinterpret_cast<NamedPropertyQueryImpl>(query);
SetNamedPropertyHandlerImpl(getter,
setter,
casted,
deleter,
enumerator,
data);
}
private:
void SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQueryImpl query,
NamedPropertyDeleter deleter,
NamedPropertyEnumerator enumerator,
Handle<Value> data);
public:
/**
* Sets an indexed property handler on the object template.
@@ -2335,15 +2389,6 @@ typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCCallback)();
// --- C o n t e x t G e n e r a t o r ---
/**
* Applications must provide a callback function which is called to generate
* a context if a context was not deserialized from the snapshot.
*/
typedef Persistent<Context> (*ContextGenerator)();
/**
* Profiler modules.
*
@@ -2426,6 +2471,15 @@ class V8EXPORT V8 {
*/
static void RemoveMessageListeners(MessageCallback that);
/**
* Set a callback to be called when an external string is no longer live on
* V8's heap. The resource will no longer be needed by V8 and the embedder
* can dispose of it. If this callback is not set V8 will free the resource
* using the C++ delete operator.
*/
static void SetExternalStringDiposeCallback(
ExternalStringDiposeCallback that);
/**
* Sets V8 flags from a string.
*/
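And the registration side, under the same assumptions (buffer and length are assumed to come from the embedder's pool; note the callback is process-wide, as the comment above says, so it must handle every external resource the embedder creates):

    // One-time, process-wide registration of the dispose hook.
    v8::V8::SetExternalStringDiposeCallback(&DisposePooledResource);

    // Later: expose a pooled buffer to JavaScript without copying it.
    v8::HandleScope scope;
    PooledUtf16Resource* res = new PooledUtf16Resource(buffer, length);
    v8::Local<v8::String> str = v8::String::NewExternal(res);
    // Once str is no longer live on V8's heap, V8 calls
    // DisposePooledResource(res) instead of plain delete.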
@@ -3177,7 +3231,7 @@ class Internals {
static const int kProxyProxyOffset = sizeof(void*);
static const int kJSObjectHeaderSize = 3 * sizeof(void*);
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x03;
static const int kExternalTwoByteRepresentationTag = 0x02;
// These constants are compiler dependent so their values must be
// defined within the implementation.

37  deps/v8/src/api.cc

@@ -853,10 +853,10 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
}
void FunctionTemplate::SetNamedInstancePropertyHandler(
void FunctionTemplate::SetNamedInstancePropertyHandlerImpl(
NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQuery query,
NamedPropertyQueryImpl query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
@@ -987,12 +987,13 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
}
void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
void ObjectTemplate::SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQueryImpl query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator
enumerator,
Handle<Value> data) {
if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
ENTER_V8;
HandleScope scope;
@@ -1000,12 +1001,12 @@ void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
setter,
query,
remover,
enumerator,
data);
Utils::ToLocal(cons)->SetNamedInstancePropertyHandlerImpl(getter,
setter,
query,
remover,
enumerator,
data);
}
@@ -3691,6 +3692,14 @@ void V8::RemoveMessageListeners(MessageCallback that) {
}
void V8::SetExternalStringDiposeCallback(
ExternalStringDiposeCallback callback) {
if (IsDeadCheck("v8::V8::SetExternalStringDiposeCallback()"))
return;
i::Heap::SetExternalStringDiposeCallback(callback);
}
void V8::SetCounterFunction(CounterLookupCallback callback) {
if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
i::StatsTable::SetCounterFunction(callback);

51  deps/v8/src/arm/assembler-arm.cc

@@ -1213,31 +1213,32 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
// Both instructions can be eliminated if ry = rx.
// If ry != rx, a register copy from ry to rx is inserted
// after eliminating the push and the pop instructions.
Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (can_peephole_optimize(2) &&
IsPush(push_instr) &&
IsPop(pop_instr)) {
if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
Register reg_pushed, reg_popped;
reg_pushed = GetRd(push_instr);
reg_popped = GetRd(pop_instr);
pc_ -= 2 * kInstrSize;
// Insert a mov instruction, which is better than a pair of push & pop
mov(reg_popped, reg_pushed);
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
}
} else {
// For consecutive push and pop on the same register,
// both the push and the pop can be deleted.
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
if (can_peephole_optimize(2)) {
Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(push_instr) && IsPop(pop_instr)) {
if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
Register reg_pushed, reg_popped;
reg_pushed = GetRd(push_instr);
reg_popped = GetRd(pop_instr);
pc_ -= 2 * kInstrSize;
// Insert a mov instruction, which is better than a pair of push & pop
mov(reg_popped, reg_pushed);
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (diff reg) replaced by a reg move\n",
pc_offset());
}
} else {
// For consecutive push and pop on the same register,
// both the push and the pop can be deleted.
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
}
}
}
}

4  deps/v8/src/arm/assembler-arm.h

@@ -690,6 +690,10 @@ class Assembler : public Malloced {
void add(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void add(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al) {
add(dst, src1, Operand(src2), s, cond);
}
void adc(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);

544  deps/v8/src/arm/codegen-arm.cc

@@ -109,21 +109,28 @@ void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
owner_->set_state(this);
previous_(owner->state()) {
owner->set_state(this);
}
CodeGenState::CodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target)
: owner_(owner),
ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target)
: CodeGenState(owner),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
owner_->set_state(this);
false_target_(false_target) {
owner->set_state(this);
}
TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
Slot* slot,
TypeInfo type_info)
: CodeGenState(owner),
slot_(slot) {
owner->set_state(this);
old_type_info_ = owner->set_type_info(slot, type_info);
}
@@ -133,6 +140,10 @@ CodeGenState::~CodeGenState() {
}
TypeInfoCodeGenState::~TypeInfoCodeGenState() {
owner()->set_type_info(slot_, old_type_info_);
}
// -------------------------------------------------------------------------
// CodeGenerator implementation
@@ -145,6 +156,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
cc_reg_(al),
state_(NULL),
loop_nesting_(0),
type_info_(NULL),
function_return_is_shadowed_(false) {
}
@@ -162,6 +174,11 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize state.
info_ = info;
int slots = scope()->num_parameters() + scope()->num_stack_slots();
ScopedVector<TypeInfo> type_info_array(slots);
type_info_ = &type_info_array;
ASSERT(allocator_ == NULL);
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
@@ -393,6 +410,21 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
allocator_ = NULL;
type_info_ = NULL;
}
int CodeGenerator::NumberOfSlot(Slot* slot) {
if (slot == NULL) return kInvalidSlotNumber;
switch (slot->type()) {
case Slot::PARAMETER:
return slot->index();
case Slot::LOCAL:
return slot->index() + scope()->num_parameters();
default:
break;
}
return kInvalidSlotNumber;
}
@@ -490,7 +522,7 @@ void CodeGenerator::LoadCondition(Expression* x,
ASSERT(!has_cc());
int original_height = frame_->height();
{ CodeGenState new_state(this, true_target, false_target);
{ ConditionCodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -791,63 +823,92 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
GenerateInlineSmi inline_smi,
int constant_rhs) {
VirtualFrame::SpilledScope spilled_scope(frame_);
// sp[0] : y
// sp[1] : x
// result : r0
// top of virtual frame: y
// 2nd elt. on virtual frame : x
// result : top of virtual frame
// Stub is entered with a call: 'return address' is in lr.
switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
if (inline_smi) {
JumpTarget done;
Register rhs = frame_->PopToRegister();
Register lhs = frame_->PopToRegister(rhs);
Register scratch = VirtualFrame::scratch0();
__ orr(scratch, rhs, Operand(lhs));
// Check they are both small and positive.
__ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
ASSERT_EQ(0, kSmiTag);
if (op == Token::ADD) {
__ add(r0, lhs, Operand(rhs), LeaveCC, eq);
} else {
__ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
}
done.Branch(eq);
GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
frame_->SpillAll();
frame_->CallStub(&stub, 0);
done.Bind();
frame_->EmitPush(r0);
break;
} else {
// Fall through!
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
case Token::SAR: {
frame_->EmitPop(r0); // r0 : y
frame_->EmitPop(r1); // r1 : x
GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
frame_->CallStub(&stub, 0);
break;
}
case Token::COMMA:
frame_->EmitPop(r0);
// Simply discard left value.
frame_->Drop();
break;
default:
// Other cases should have been handled before this point.
UNREACHABLE();
break;
}
}
void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int constant_rhs) {
// top of virtual frame: y
// 2nd elt. on virtual frame : x
// result : top of virtual frame
// Stub is entered with a call: 'return address' is in lr.
switch (op) {
case Token::ADD: // fall through.
case Token::SUB: // fall through.
if (inline_smi) {
bool rhs_is_smi = frame_->KnownSmiAt(0);
bool lhs_is_smi = frame_->KnownSmiAt(1);
Register rhs = frame_->PopToRegister();
Register lhs = frame_->PopToRegister(rhs);
Register smi_test_reg;
Condition cond;
if (!rhs_is_smi || !lhs_is_smi) {
if (rhs_is_smi) {
smi_test_reg = lhs;
} else if (lhs_is_smi) {
smi_test_reg = rhs;
} else {
smi_test_reg = VirtualFrame::scratch0();
__ orr(smi_test_reg, rhs, Operand(lhs));
}
// Check they are both Smis.
__ tst(smi_test_reg, Operand(kSmiTagMask));
cond = eq;
} else {
cond = al;
}
ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
if (op == Token::BIT_OR) {
__ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
} else if (op == Token::BIT_AND) {
__ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
} else {
ASSERT(op == Token::BIT_XOR);
ASSERT_EQ(0, kSmiTag);
__ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
}
if (cond != al) {
JumpTarget done;
done.Branch(cond);
GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
frame_->SpillAll();
frame_->CallStub(&stub, 0);
done.Bind();
}
frame_->EmitPush(r0);
break;
} else {
// Fall through!
}
case Token::MUL:
case Token::DIV:
case Token::MOD:
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
case Token::SAR: {
@@ -972,7 +1033,8 @@ void DeferredInlineSmiOperation::Generate() {
rhs = r1;
}
} else {
UNREACHABLE(); // Should have been handled in SmiOperation.
ASSERT(op_ == Token::SHL);
__ mov(r1, Operand(Smi::FromInt(value_)));
}
break;
}
@@ -1020,6 +1082,8 @@ void CodeGenerator::SmiOperation(Token::Value op,
OverwriteMode mode) {
int int_value = Smi::cast(*value)->value();
bool both_sides_are_smi = frame_->KnownSmiAt(0);
bool something_to_inline;
switch (op) {
case Token::ADD:
@@ -1030,7 +1094,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
something_to_inline = true;
break;
}
case Token::SHL:
case Token::SHL: {
something_to_inline = (both_sides_are_smi || !reversed);
break;
}
case Token::SHR:
case Token::SAR: {
if (reversed) {
@@ -1067,17 +1134,18 @@ void CodeGenerator::SmiOperation(Token::Value op,
// Push the rhs onto the virtual frame by putting it in a TOS register.
Register rhs = frame_->GetTOSRegister();
__ mov(rhs, Operand(value));
frame_->EmitPush(rhs);
VirtualFrameBinaryOperation(op, mode, int_value);
frame_->EmitPush(rhs, TypeInfo::Smi());
GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
} else {
// Pop the rhs, then push lhs and rhs in the right order. Only performs
// at most one pop, the rest takes place in TOS registers.
Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
__ mov(lhs, Operand(value));
frame_->EmitPush(lhs);
frame_->EmitPush(rhs);
VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
frame_->EmitPush(lhs, TypeInfo::Smi());
TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
frame_->EmitPush(rhs, t);
GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
}
return;
}
@@ -1097,8 +1165,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ add(tos, tos, Operand(value), SetCC);
deferred->Branch(vs);
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
if (!both_sides_are_smi) {
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
}
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -1114,8 +1184,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ sub(tos, tos, Operand(value), SetCC);
}
deferred->Branch(vs);
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
if (!both_sides_are_smi) {
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
}
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -1125,25 +1197,65 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
default: UNREACHABLE();
if (both_sides_are_smi) {
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
frame_->EmitPush(tos, TypeInfo::Smi());
} else {
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
deferred->BindExit();
TypeInfo result_type =
(op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
frame_->EmitPush(tos, result_type);
}
deferred->BindExit();
frame_->EmitPush(tos);
break;
}
case Token::SHL:
if (reversed) {
ASSERT(both_sides_are_smi);
int max_shift = 0;
int max_result = int_value == 0 ? 1 : int_value;
while (Smi::IsValid(max_result << 1)) {
max_shift++;
max_result <<= 1;
}
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
// Mask off the last 5 bits of the shift operand (rhs). This is part
// of the definition of shift in JS and we know we have a Smi so we
// can safely do this. The masked version gets passed to the
// deferred code, but that makes no difference.
__ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
__ cmp(tos, Operand(Smi::FromInt(max_shift)));
deferred->Branch(ge);
Register scratch = VirtualFrame::scratch0();
__ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
__ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
__ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
deferred->BindExit();
TypeInfo result = TypeInfo::Integer32();
frame_->EmitPush(tos, result);
break;
}
// Fall through!
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
TypeInfo result = TypeInfo::Integer32();
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
int shift_value = int_value & 0x1f; // least significant 5 bits
@@ -1151,9 +1263,15 @@ void CodeGenerator::SmiOperation(Token::Value op,
new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
uint32_t problematic_mask = kSmiTagMask;
// For unsigned shift by zero all negative smis are problematic.
if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
__ tst(tos, Operand(problematic_mask));
deferred->Branch(ne); // Go slow for problematic input.
bool skip_smi_test = both_sides_are_smi;
if (shift_value == 0 && op == Token::SHR) {
problematic_mask |= 0x80000000;
skip_smi_test = false;
}
if (!skip_smi_test) {
__ tst(tos, Operand(problematic_mask));
deferred->Branch(ne); // Go slow for problematic input.
}
switch (op) {
case Token::SHL: {
if (shift_value != 0) {
@@ -1188,6 +1306,9 @@ void CodeGenerator::SmiOperation(Token::Value op,
// by 0 or 1 when handed a valid smi
__ tst(scratch, Operand(0xc0000000));
deferred->Branch(ne);
} else {
ASSERT(shift_value >= 2);
result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi.
}
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
}
@@ -1204,13 +1325,15 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
// Put tag back.
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
// SAR by at least 1 gives a Smi.
result = TypeInfo::Smi();
}
break;
}
default: UNREACHABLE();
}
deferred->BindExit();
frame_->EmitPush(tos);
frame_->EmitPush(tos, result);
break;
}
@@ -1219,21 +1342,24 @@ void CodeGenerator::SmiOperation(Token::Value op,
ASSERT(int_value >= 2);
ASSERT(IsPowerOf2(int_value));
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
unsigned mask = (0x80000000u | kSmiTagMask);
__ tst(tos, Operand(mask));
deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
mask = (int_value << kSmiTagSize) - 1;
__ and_(tos, tos, Operand(mask));
deferred->BindExit();
frame_->EmitPush(tos);
// Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
frame_->EmitPush(
tos,
both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
break;
}
case Token::MUL: {
ASSERT(IsEasyToMultiplyBy(int_value));
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
max_smi_that_wont_overflow <<= kSmiTagSize;
unsigned mask = 0x80000000u;
@@ -1279,45 +1405,66 @@ void CodeGenerator::Comparison(Condition cc,
Register lhs;
Register rhs;
bool lhs_is_smi;
bool rhs_is_smi;
// We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
lhs_is_smi = frame_->KnownSmiAt(0);
rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister();
rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
} else {
rhs_is_smi = frame_->KnownSmiAt(0);
lhs_is_smi = frame_->KnownSmiAt(1);
rhs = frame_->PopToRegister();
lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
}
bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
ASSERT(rhs.is(r0) || rhs.is(r1));
ASSERT(lhs.is(r0) || lhs.is(r1));
// Now we have the two sides in r0 and r1. We flush any other registers
// because the stub doesn't know about register allocation.
frame_->SpillAll();
Register scratch = VirtualFrame::scratch0();
__ orr(scratch, lhs, Operand(rhs));
__ tst(scratch, Operand(kSmiTagMask));
JumpTarget smi;
smi.Branch(eq);
JumpTarget exit;
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
if (!rhs.is(r0)) {
__ Swap(rhs, lhs, ip);
}
if (!both_sides_are_smi) {
// Now we have the two sides in r0 and r1. We flush any other registers
// because the stub doesn't know about register allocation.
frame_->SpillAll();
Register scratch = VirtualFrame::scratch0();
Register smi_test_reg;
if (lhs_is_smi) {
smi_test_reg = rhs;
} else if (rhs_is_smi) {
smi_test_reg = lhs;
} else {
__ orr(scratch, lhs, Operand(rhs));
smi_test_reg = scratch;
}
__ tst(smi_test_reg, Operand(kSmiTagMask));
JumpTarget smi;
smi.Branch(eq);
CompareStub stub(cc, strict);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0));
JumpTarget exit;
exit.Jump();
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
if (!rhs.is(r0)) {
__ Swap(rhs, lhs, ip);
}
CompareStub stub(cc, strict);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0));
exit.Jump();
smi.Bind();
}
// Do smi comparisons by pointer comparison.
smi.Bind();
__ cmp(lhs, Operand(rhs));
exit.Bind();
@@ -2090,6 +2237,17 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
node->break_target()->SetExpectedHeight();
IncrementLoopNesting();
// We know that the loop index is a smi if it is not modified in the
// loop body and it is checked against a constant limit in the loop
// condition. In this case, we reset the static type information of the
// loop index to smi before compiling the body, the update expression, and
// the bottom check of the loop condition.
TypeInfoCodeGenState type_info_scope(this,
node->is_fast_smi_loop() ?
node->loop_variable()->slot() :
NULL,
TypeInfo::Smi());
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
JumpTarget loop(JumpTarget::BIDIRECTIONAL);
@@ -2810,7 +2968,8 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
} else {
Register scratch = VirtualFrame::scratch0();
frame_->EmitPush(SlotOperand(slot, scratch));
TypeInfo info = type_info(slot);
frame_->EmitPush(SlotOperand(slot, scratch), info);
if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
@@ -3100,8 +3259,9 @@ void CodeGenerator::VisitLiteral(Literal* node) {
#endif
Comment cmnt(masm_, "[ Literal");
Register reg = frame_->GetTOSRegister();
bool is_smi = node->handle()->IsSmi();
__ mov(reg, Operand(node->handle()));
frame_->EmitPush(reg);
frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3332,9 +3492,16 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
GenerateInlineSmi inline_smi =
loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
if (literal != NULL) {
ASSERT(!literal->handle()->IsSmi());
inline_smi = DONT_GENERATE_INLINE_SMI;
}
Load(node->value());
VirtualFrameBinaryOperation(
node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
GenericBinaryOperation(node->binary_op(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
inline_smi);
}
} else {
Load(node->value());
@@ -3425,9 +3592,16 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
GenerateInlineSmi inline_smi =
loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
if (literal != NULL) {
ASSERT(!literal->handle()->IsSmi());
inline_smi = DONT_GENERATE_INLINE_SMI;
}
Load(node->value());
VirtualFrameBinaryOperation(
node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
GenericBinaryOperation(node->binary_op(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
inline_smi);
}
} else {
// For non-compound assignment just load the right-hand side.
@@ -3532,9 +3706,16 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
GenerateInlineSmi inline_smi =
loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
if (literal != NULL) {
ASSERT(!literal->handle()->IsSmi());
inline_smi = DONT_GENERATE_INLINE_SMI;
}
Load(node->value());
VirtualFrameBinaryOperation(
node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
GenericBinaryOperation(node->binary_op(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
inline_smi);
}
} else {
// For non-compound assignment just load the right-hand side.
@@ -3673,16 +3854,54 @@ void CodeGenerator::VisitCall(Call* node) {
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
// Prepare stack for call to resolved function.
Load(function);
// Allocate a frame slot for the receiver.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r2); // Slot for receiver
frame_->EmitPush(r2);
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
}
// Prepare stack for call to ResolvePossiblyDirectEval.
// If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
JumpTarget done;
if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
ASSERT(var->slot()->type() == Slot::LOOKUP);
JumpTarget slow;
// Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the
// receiver.
LoadFromGlobalSlotCheckExtensions(var->slot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->EmitPush(r0);
if (arg_count > 0) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
frame_->EmitPush(r1);
} else {
frame_->EmitPush(r2);
}
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
done.Jump();
slow.Bind();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval by
// pushing the loaded function, the first argument to the eval
// call and the receiver.
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
frame_->EmitPush(r1);
if (arg_count > 0) {
@@ -3691,14 +3910,16 @@ void CodeGenerator::VisitCall(Call* node) {
} else {
frame_->EmitPush(r2);
}
// Push the receiver.
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
// Resolve the call.
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
if (done.is_linked()) done.Bind();
// Touch up stack with the right values for the function and the receiver.
__ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ str(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -5086,9 +5307,36 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST);
bool is_slot = (var != NULL && var->mode() == Variable::VAR);
if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
// The type info declares that this variable is always a Smi. That
// means it is a Smi both before and after the increment/decrement.
// Lets make use of that to make a very minimal count.
Reference target(this, node->expression(), !is_const);
ASSERT(!target.is_illegal());
target.GetValue(); // Pushes the value.
Register value = frame_->PopToRegister();
if (is_postfix) frame_->EmitPush(value);
if (is_increment) {
__ add(value, value, Operand(Smi::FromInt(1)));
} else {
__ sub(value, value, Operand(Smi::FromInt(1)));
}
frame_->EmitPush(value);
target.SetValue(NOT_CONST_INIT);
if (is_postfix) frame_->Pop();
ASSERT_EQ(original_height + 1, frame_->height());
return;
}
if (is_postfix) {
// If it's a postfix expression and its result is not ignored and the
// reference is non-trivial, then push a placeholder on the stack now
// to hold the result of the expression.
bool placeholder_pushed = false;
if (!is_slot && is_postfix) {
frame_->EmitPush(Operand(Smi::FromInt(0)));
placeholder_pushed = true;
}
// A constant reference is not saved to, so a constant reference is not a
@@ -5097,12 +5345,11 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
if (!is_postfix) {
frame_->EmitPush(Operand(Smi::FromInt(0)));
}
if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
ASSERT_EQ(original_height + 1, frame_->height());
return;
}
// This pushes 0, 1 or 2 words on the object to be used later when updating
// the target. It also pushes the current value of the target.
target.GetValue();
@@ -5110,16 +5357,21 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
JumpTarget slow;
JumpTarget exit;
// Check for smi operand.
Register value = frame_->PopToRegister();
__ tst(value, Operand(kSmiTagMask));
slow.Branch(ne);
// Postfix: Store the old value as the result.
if (is_postfix) {
if (placeholder_pushed) {
frame_->SetElementAt(value, target.size());
} else if (is_postfix) {
frame_->EmitPush(value);
__ mov(VirtualFrame::scratch0(), value);
value = VirtualFrame::scratch0();
}
// Check for smi operand.
__ tst(value, Operand(kSmiTagMask));
slow.Branch(ne);
// Perform optimistic increment/decrement.
if (is_increment) {
__ add(value, value, Operand(Smi::FromInt(1)), SetCC);
@@ -5300,18 +5552,30 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->left());
if (frame_->KnownSmiAt(0)) overwrite_left = false;
SmiOperation(node->op(),
rliteral->handle(),
false,
overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->right());
if (frame_->KnownSmiAt(0)) overwrite_right = false;
SmiOperation(node->op(),
lliteral->handle(),
true,
overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
GenerateInlineSmi inline_smi =
loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
if (lliteral != NULL) {
ASSERT(!lliteral->handle()->IsSmi());
inline_smi = DONT_GENERATE_INLINE_SMI;
}
if (rliteral != NULL) {
ASSERT(!rliteral->handle()->IsSmi());
inline_smi = DONT_GENERATE_INLINE_SMI;
}
VirtualFrame::RegisterAllocationScope scope(this);
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (overwrite_left) {
@@ -5321,7 +5585,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
Load(node->left());
Load(node->right());
VirtualFrameBinaryOperation(node->op(), overwrite_mode);
GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
}
}
ASSERT(!has_valid_frame() ||
@@ -5813,6 +6077,7 @@ void CodeGenerator::EmitKeyedLoad() {
frame_->scratch0(), frame_->scratch1());
// Load the key and receiver from the stack.
bool key_is_known_smi = frame_->KnownSmiAt(0);
Register key = frame_->PopToRegister();
Register receiver = frame_->PopToRegister(key);
VirtualFrame::SpilledScope spilled(frame_);
@@ -5835,18 +6100,21 @@ void CodeGenerator::EmitKeyedLoad() {
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the key is a smi.
if (!key_is_known_smi) {
__ tst(key, Operand(kSmiTagMask));
deferred->Branch(ne);
}
#ifdef DEBUG
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
__ mov(scratch2, Operand(Factory::null_value()));
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
// Check that the key is a smi.
__ tst(key, Operand(kSmiTagMask));
deferred->Branch(ne);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -9283,7 +9551,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ CheckMap(index_, scratch_,
Factory::heap_number_map(), index_not_number_, true);
call_helper.BeforeCall(masm);
__ Push(object_, index_, result_);
__ Push(object_, index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -9297,9 +9565,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
// have a chance to overwrite it.
__ mov(scratch_, r0);
}
__ pop(result_);
__ pop(index_);
__ pop(object_);
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ BranchOnNotSmi(scratch_, index_out_of_range_);
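Several hunks above (the orr/tst kSmiTagMask sequences, the direct add/sub on tagged operands, and the "very minimal count" path in VisitCountOperation) lean on V8's smi encoding for 32-bit targets: tag bit 0 is 0 for small integers, so the payload sits in the upper 31 bits. A standalone C++ sketch of why those instruction sequences are sound:

    #include <stdint.h>

    static const int kSmiTagSize = 1;
    static const intptr_t kSmiTagMask = 1;  // bit 0: 0 = smi, 1 = heap object

    intptr_t SmiFromInt(int32_t v) { return static_cast<intptr_t>(v) << kSmiTagSize; }
    int32_t IntFromSmi(intptr_t s) { return static_cast<int32_t>(s >> kSmiTagSize); }

    // The orr/tst pair: OR-ing two words leaves bit 0 clear only when it
    // is clear in both, so a single test checks that both values are smis.
    bool BothSmis(intptr_t a, intptr_t b) { return ((a | b) & kSmiTagMask) == 0; }

    // Tagged add/sub need no untagging because the zero tag bits cancel;
    // only overflow must be caught (the SetCC/vs branches above).
    intptr_t SmiAdd(intptr_t a, intptr_t b) { return a + b; }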

83  deps/v8/src/arm/codegen-arm.h

@@ -43,6 +43,7 @@ class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
// -------------------------------------------------------------------------
@@ -129,24 +130,55 @@ class CodeGenState BASE_EMBEDDED {
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state has its own pair of branch labels.
CodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
virtual ~CodeGenState();
virtual JumpTarget* true_target() const { return NULL; }
virtual JumpTarget* false_target() const { return NULL; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
protected:
inline CodeGenerator* owner() { return owner_; }
inline CodeGenState* previous() const { return previous_; }
private:
CodeGenerator* owner_;
CodeGenState* previous_;
};
class ConditionCodeGenState : public CodeGenState {
public:
// Create a code generator state based on a code generator's current
// state. The new state has its own pair of branch labels.
ConditionCodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
virtual JumpTarget* true_target() const { return true_target_; }
virtual JumpTarget* false_target() const { return false_target_; }
private:
JumpTarget* true_target_;
JumpTarget* false_target_;
CodeGenState* previous_;
};
class TypeInfoCodeGenState : public CodeGenState {
public:
TypeInfoCodeGenState(CodeGenerator* owner,
Slot* slot_number,
TypeInfo info);
~TypeInfoCodeGenState();
virtual JumpTarget* true_target() const { return previous()->true_target(); }
virtual JumpTarget* false_target() const {
return previous()->false_target();
}
private:
Slot* slot_;
TypeInfo old_type_info_;
};
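The hierarchy above is an RAII stack: each derived state saves the owner's current state via the base constructor, installs itself only after its own members are initialized (hence the set_state(this) calls in the derived constructors rather than in the base), and the virtual base destructor pops back to the previous state. A stripped-down sketch of the mechanism, with CodeGen and plain int targets standing in for CodeGenerator and JumpTarget*:

    class State;

    class CodeGen {  // stand-in for CodeGenerator
     public:
      CodeGen() : state_(0) {}
      State* state() const { return state_; }
      void set_state(State* s) { state_ = s; }
     private:
      State* state_;
    };

    // Base: remembers the previous state; restores it on destruction.
    class State {
     public:
      explicit State(CodeGen* owner)
          : owner_(owner), previous_(owner->state()) {}
      virtual ~State() { owner_->set_state(previous_); }
      State* previous() const { return previous_; }
     private:
      CodeGen* owner_;
      State* previous_;
    };

    class ConditionState : public State {
     public:
      ConditionState(CodeGen* owner, int true_target, int false_target)
          : State(owner), true_(true_target), false_(false_target) {
        owner->set_state(this);  // install only after members are ready
      }
     private:
      int true_;
      int false_;
    };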
@@ -216,6 +248,23 @@ class CodeGenerator: public AstVisitor {
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
TypeInfo type_info(Slot* slot) {
int index = NumberOfSlot(slot);
if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
return (*type_info_)[index];
}
TypeInfo set_type_info(Slot* slot, TypeInfo info) {
int index = NumberOfSlot(slot);
ASSERT(index >= kInvalidSlotNumber);
if (index != kInvalidSlotNumber) {
TypeInfo previous_value = (*type_info_)[index];
(*type_info_)[index] = info;
return previous_value;
}
return TypeInfo::Unknown();
}
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
static const int kUnknownIntValue = -1;
@@ -225,7 +274,7 @@ class CodeGenerator: public AstVisitor {
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
private:
@@ -239,6 +288,10 @@ class CodeGenerator: public AstVisitor {
// Generating deferred code.
void ProcessDeferred();
static const int kInvalidSlotNumber = -1;
int NumberOfSlot(Slot* slot);
// State
bool has_cc() const { return cc_reg_ != al; }
JumpTarget* true_target() const { return state_->true_target(); }
@@ -351,10 +404,8 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
GenerateInlineSmi inline_smi,
int known_rhs = kUnknownIntValue);
void VirtualFrameBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int known_rhs = kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
@@ -398,6 +449,8 @@ class CodeGenerator: public AstVisitor {
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
@@ -511,6 +564,8 @@ class CodeGenerator: public AstVisitor {
CodeGenState* state_;
int loop_nesting_;
Vector<TypeInfo>* type_info_;
// Jump targets
BreakTarget function_return_;

231  deps/v8/src/arm/ic-arm.cc

@@ -48,60 +48,70 @@ namespace internal {
#define __ ACCESS_MASM(masm)
// Helper function used from LoadIC/CallIC GenerateNormal.
// receiver: Receiver. It is not clobbered if a jump to the miss label is
// done
// name: Property name. It is not clobbered if a jump to the miss label is
// done
// result: Register for the result. It is only updated if a jump to the miss
// label is not done. Can be the same as receiver or name clobbering
// one of these in the case of not jumping to the miss label.
// The three scratch registers need to be different from the receiver, name and
// result.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register t0,
Register t1) {
// Register use:
//
// t0 - used to hold the property dictionary.
//
// t1 - initially the receiver
// - used for the index into the property dictionary
// - holds the result on exit.
//
// r3 - used as temporary and to hold the capacity of the property
// dictionary.
//
// r2 - holds the name of the property and is unchanged.
// r4 - used as temporary.
Register receiver,
Register name,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
DictionaryCheck check_dictionary) {
// Main use of the scratch registers.
// scratch1: Used to hold the property dictionary.
// scratch2: Used as temporary and to hold the capacity of the property
// dictionary.
// scratch3: Used as temporary.
Label done;
// Check for the absence of an interceptor.
// Load the map into t0.
__ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
// Load the map into scratch1.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));
// Bail out if the receiver has a named interceptor.
__ ldrb(r3, FieldMemOperand(t0, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kHasNamedInterceptor));
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
__ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
__ b(nz, miss);
// Bail out if we have a JS global proxy object.
__ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, miss);
// Possible work-around for http://crbug.com/16276.
// See also: http://codereview.chromium.org/155418.
__ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
__ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, miss);
__ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
__ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(eq, miss);
// Load the properties array.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, miss);
if (check_dictionary == CHECK_DICTIONARY) {
__ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(scratch2, ip);
__ b(ne, miss);
}
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
__ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
__ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int
__ sub(r3, r3, Operand(1));
__ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset));
__ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize)); // convert smi to int
__ sub(scratch2, scratch2, Operand(1));
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
@@ -112,26 +122,27 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ ldr(r4, FieldMemOperand(r2, String::kHashFieldOffset));
__ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset));
__ add(r4, r4, Operand(
__ add(scratch3, scratch3, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
__ and_(r4, r3, Operand(r4, LSR, String::kHashShift));
__ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
__ add(r4, r4, Operand(r4, LSL, 1)); // r4 = r4 * 3
// scratch3 = scratch3 * 3.
__ add(scratch3, scratch3, Operand(scratch3, LSL, 1));
// Check if the key is identical to the name.
__ add(r4, t0, Operand(r4, LSL, 2));
__ ldr(ip, FieldMemOperand(r4, kElementsStartOffset));
__ cmp(r2, Operand(ip));
__ add(scratch3, scratch1, Operand(scratch3, LSL, 2));
__ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset));
__ cmp(name, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
} else {
@@ -140,13 +151,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Check that the value is a normal property.
__ bind(&done); // r4 == t0 + 4*index
__ ldr(r3, FieldMemOperand(r4, kElementsStartOffset + 2 * kPointerSize));
__ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ bind(&done); // scratch3 == scratch1 + 4 * index
__ ldr(scratch2,
FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize));
__ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
__ ldr(t1, FieldMemOperand(r4, kElementsStartOffset + 1 * kPointerSize));
__ ldr(result,
FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize));
}
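The generated probe loop above is quadratic probing over the StringDictionary: probe i inspects index (hash + i + i*i) & mask, each entry occupies three words (key, value, details), and a symbol key can be compared by pointer. A hedged C++ sketch of the same lookup over a flat array (the Entry layout and four-probe limit mirror kEntrySize and kProbes above; the names are illustrative):

    #include <stdint.h>
    #include <cstddef>

    struct Entry { const void* key; void* value; int details; };  // kEntrySize == 3

    void* DictionaryLookup(Entry* elements, uint32_t capacity,
                           uint32_t hash, const void* name) {
      uint32_t mask = capacity - 1;         // capacity is a power of two
      for (uint32_t i = 0; i < 4; i++) {    // kProbes == 4 above
        uint32_t index = (hash + i + i * i) & mask;
        if (elements[index].key == name) {  // symbols compare by pointer
          return elements[index].value;
        }
      }
      return NULL;  // the generated stub branches to the miss label here
    }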
@@ -354,7 +367,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
Label* miss,
Register scratch) {
// Search dictionary - put result in register r1.
GenerateDictionaryLoad(masm, miss, r0, r1);
GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);
// Check that the value isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
@@ -483,6 +496,21 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNREACHABLE();
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNREACHABLE();
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
UNREACHABLE();
}
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
@@ -534,7 +562,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ b(ne, &miss);
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, r1, r0);
GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY);
__ Ret();
// Global object access: Check access rights.
@@ -542,7 +570,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ CheckAccessGlobalProxy(r0, r1, &miss);
__ b(&probe);
// Cache miss: Restore receiver from stack and jump to runtime.
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
}
@@ -715,7 +743,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ Push(r1, r0);
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -725,7 +753,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label slow, fast, check_pixel_array, check_number_dictionary;
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary;
Register key = r0;
Register receiver = r1;
@@ -748,8 +777,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ b(lt, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Get the elements array of the object.
__ BranchOnNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
@ -771,6 +802,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ mov(r0, r2);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
// Check whether the elements is a pixel array.
@ -806,6 +838,107 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
// r0: key
// r1: receiver
__ CompareObjectType(r0, r2, r3, FIRST_NONSTRING_TYPE);
__ b(ge, &slow);
// Is the string an array index, with cached numeric value?
__ ldr(r3, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r3, Operand(String::kContainsCachedArrayIndexMask));
__ b(eq, &index_string);
// Is the string a symbol?
// r2: key map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
ASSERT(kSymbolTag != 0);
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
__ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(eq, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
__ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys();
__ mov(r4, Operand(cache_keys));
__ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
__ cmp(r2, r5);
__ b(ne, &slow);
__ ldr(r5, MemOperand(r4));
__ cmp(r0, r5);
__ b(ne, &slow);
// Get field offset and check that it is an in-object property.
// r0 : key
// r1 : receiver
// r2 : receiver's map
// r3 : lookup cache index
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ mov(r4, Operand(cache_field_offsets));
__ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
__ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
__ cmp(r5, r6);
__ b(ge, &slow);
// Load in-object property.
__ sub(r5, r5, r6); // Index from end of object.
__ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ add(r6, r6, r5); // Index from start of object.
__ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
__ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
__ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
__ Ret();
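The cache index computed above xors a shifted map pointer with the string's hash field and masks to the cache capacity; the keys table stores (map, symbol) pairs two words apart, which is why the index is scaled by kPointerSizeLog2 + 1. A standalone sketch, with made-up shift and capacity values:

#include <cstdint>

const int kMapHashShift = 2;            // illustrative
const int kStringHashShift = 2;         // illustrative
const unsigned kCapacityMask = 64 - 1;  // illustrative power-of-two capacity

unsigned CacheIndex(std::uintptr_t map_ptr, std::uint32_t hash_field) {
  return static_cast<unsigned>(
      (map_ptr >> kMapHashShift) ^ (hash_field >> kStringHashShift)) &
      kCapacityMask;
}
// keys[2 * index] holds the map and keys[2 * index + 1] the symbol;
// field_offsets[index] holds the property offset, which the stub then
// range-checks against the receiver's in-object property count.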
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// Load the property to r0.
GenerateDictionaryLoad(
masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
__ Ret();
__ b(&slow);
// If the hash field contains an array index, pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it do not
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// r0: key (string)
// r1: receiver
// r3: hash field
// We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(r3, r3, Operand(String::kArrayIndexValueMask));
// Here we actually clobber the key (r0), which will be used if we call into
// the runtime later. However, as the new key is the numeric value of the
// string key, either key can be used.
__ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
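The index_string tail relies on a layout trick: masking the hash field isolates the cached array index, and shifting right by kHashShift - kSmiTagSize lands it already smi-tagged. A sketch with illustrative constants:

#include <cstdint>

const int kHashShift = 2;      // illustrative
const int kSmiTagSize = 1;
const std::uint32_t kArrayIndexValueMask = 0x3FFFFFFCu;  // illustrative

std::int32_t SmiFromHashField(std::uint32_t hash_field) {
  // Shifting by kHashShift - kSmiTagSize instead of kHashShift leaves the
  // index shifted left by one bit, i.e. already tagged as a smi.
  return static_cast<std::int32_t>(
      (hash_field & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize));
}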

13
deps/v8/src/arm/jump-target-arm.cc

@ -50,6 +50,11 @@ void JumpTarget::DoJump() {
ASSERT(cgen()->HasValidEntryRegisters());
if (entry_frame_set_) {
if (entry_label_.is_bound()) {
// If we already bound and generated code at the destination then it
// is too late to ask for less optimistic type assumptions.
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// There is already a frame expectation at the target.
cgen()->frame()->MergeTo(&entry_frame_);
cgen()->DeleteFrame();
@ -67,8 +72,12 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
// Backward branch. We have an expected frame to merge to on the
// backward edge.
if (entry_label_.is_bound()) {
// If we already bound and generated code at the destination then it
// is too late to ask for less optimistic type assumptions.
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// We have an expected frame to merge to on the backward edge.
cgen()->frame()->MergeTo(&entry_frame_, cc);
} else {
// Clone the current frame to use as the expected one at the target.

50
deps/v8/src/arm/stub-cache-arm.cc

@ -152,6 +152,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype) {
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
__ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
@ -1008,6 +1019,12 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ Jump(ic, RelocInfo::CODE_TARGET);
}
Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
@ -1034,8 +1051,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@ -1084,8 +1100,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1134,8 +1149,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1238,9 +1252,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(hs, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
r0);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
@ -1259,9 +1272,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ b(ne, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
r0);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
@ -1283,9 +1295,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ b(ne, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
r0);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
@ -1309,8 +1320,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
__ bind(&miss_in_smi_check);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1356,8 +1366,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@ -1439,8 +1448,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);

6
deps/v8/src/arm/virtual-frame-arm-inl.h

@ -48,6 +48,12 @@ MemOperand VirtualFrame::Receiver() {
return ParameterAt(-1);
}
void VirtualFrame::Forget(int count) {
SpillAll();
LowerHeight(count);
}
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_ARM_INL_H_

44
deps/v8/src/arm/virtual-frame-arm.cc

@ -43,7 +43,7 @@ void VirtualFrame::PopToR1R0() {
// Shuffle things around so the top of stack is in r0 and r1.
MergeTOSTo(R0_R1_TOS);
// Pop the two registers off the stack so they are detached from the frame.
element_count_ -= 2;
LowerHeight(2);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
@ -52,7 +52,7 @@ void VirtualFrame::PopToR1() {
// Shuffle things around so the top of stack is only in r1.
MergeTOSTo(R1_TOS);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
@ -61,13 +61,22 @@ void VirtualFrame::PopToR0() {
// Shuffle things around so the top of stack only in r0.
MergeTOSTo(R0_TOS);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
ASSERT(expected->IsCompatibleWith(this));
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
expected->tos_known_smi_map_ &= tos_known_smi_map_;
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
@ -420,7 +429,7 @@ void VirtualFrame::Drop(int count) {
}
if (count == 0) return;
__ add(sp, sp, Operand(count * kPointerSize));
element_count_ -= count;
LowerHeight(count);
}
@ -430,7 +439,7 @@ void VirtualFrame::Pop() {
} else {
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
element_count_--;
LowerHeight(1);
}
@ -442,7 +451,7 @@ void VirtualFrame::EmitPop(Register reg) {
__ mov(reg, kTopRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
element_count_--;
LowerHeight(1);
}
@ -550,7 +559,7 @@ void VirtualFrame::Dup() {
UNREACHABLE();
}
}
element_count_++;
RaiseHeight(1, tos_known_smi_map_ & 1);
}
@ -589,7 +598,7 @@ void VirtualFrame::Dup2() {
UNREACHABLE();
}
}
element_count_ += 2;
RaiseHeight(2, tos_known_smi_map_ & 3);
}
@ -597,7 +606,7 @@ Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
element_count_--;
LowerHeight(1);
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
__ pop(r1);
@ -625,8 +634,8 @@ void VirtualFrame::EnsureOneFreeTOSRegister() {
}
void VirtualFrame::EmitPush(Register reg) {
element_count_++;
void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (reg.is(cp)) {
// If we are pushing cp then we are about to make a call and things have to
// be pushed to the physical stack. There's nothing to be gained by moving
@ -659,6 +668,9 @@ void VirtualFrame::EmitPush(Register reg) {
void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
if (this_far_down < kTOSKnownSmiMapSize) {
tos_known_smi_map_ &= ~(1 << this_far_down);
}
if (this_far_down == 0) {
Pop();
Register dest = GetTOSRegister();
@ -699,8 +711,8 @@ Register VirtualFrame::GetTOSRegister() {
}
void VirtualFrame::EmitPush(Operand operand) {
element_count_++;
void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ mov(r0, operand);
__ push(r0);
@ -712,8 +724,8 @@ void VirtualFrame::EmitPush(Operand operand) {
}
void VirtualFrame::EmitPush(MemOperand operand) {
element_count_++;
void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ ldr(r0, operand);
__ push(r0);
@ -726,7 +738,7 @@ void VirtualFrame::EmitPush(MemOperand operand) {
void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
element_count_++;
RaiseHeight(1, 0);
if (SpilledScope::is_spilled()) {
__ LoadRoot(r0, index);
__ push(r0);

43
deps/v8/src/arm/virtual-frame-arm.h

@ -154,10 +154,7 @@ class VirtualFrame : public ZoneObject {
// Forget elements from the top of the frame to match an actual frame (e.g.,
// the frame after a runtime call). No code is emitted except to bring the
// frame to a spilled state.
void Forget(int count) {
SpillAll();
element_count_ -= count;
}
void Forget(int count);
// Spill all values from the frame to memory.
void SpillAll();
@ -184,8 +181,14 @@ class VirtualFrame : public ZoneObject {
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected, Condition cond = al);
void MergeTo(const VirtualFrame* expected, Condition cond = al);
// Checks whether this frame can be branched to by the other frame.
bool IsCompatibleWith(const VirtualFrame* other) const {
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
}
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
@ -234,6 +237,11 @@ class VirtualFrame : public ZoneObject {
return MemOperand(sp, adjusted_index * kPointerSize);
}
bool KnownSmiAt(int index) {
if (index >= kTOSKnownSmiMapSize) return false;
return (tos_known_smi_map_ & (1 << index)) != 0;
}
// A frame-allocated local as an assembly operand.
inline MemOperand LocalAt(int index);
@ -352,9 +360,9 @@ class VirtualFrame : public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
void EmitPush(Operand operand);
void EmitPush(MemOperand operand);
void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPushRoot(Heap::RootListIndex index);
// Overwrite the nth thing on the stack. If the nth position is in a
@ -419,6 +427,8 @@ class VirtualFrame : public ZoneObject {
int element_count_;
TopOfStack top_of_stack_state_:3;
int register_allocation_map_:kNumberOfAllocatedRegisters;
static const int kTOSKnownSmiMapSize = 4;
unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
// The index of the element that is at the processor's stack pointer
// (the sp register). For now since everything is in memory it is given
@ -473,6 +483,25 @@ class VirtualFrame : public ZoneObject {
inline bool Equals(const VirtualFrame* other);
inline void LowerHeight(int count) {
element_count_ -= count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = 0;
} else {
tos_known_smi_map_ >>= count;
}
}
inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
ASSERT(known_smi_map < (1u << count));
element_count_ += count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = known_smi_map;
} else {
tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
}
}
friend class JumpTarget;
};
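The new bookkeeping is easiest to see in isolation. Below is a self-contained sketch (not V8 code) of the four-bit smi map — bit i set means the element i slots down from the top is a known smi — together with the subset test that IsCompatibleWith performs:

#include <cassert>

struct SmiMapSketch {
  static const int kSize = 4;
  unsigned bits;
  SmiMapSketch() : bits(0) {}
  void Lower(int count) {  // popping shifts known bits toward the top
    bits = count >= kSize ? 0 : bits >> count;
  }
  void Raise(int count, unsigned known) {  // pushing splices in new bits
    assert(known < (1u << count));
    bits = count >= kSize ? known : ((bits << count) | known);
  }
  bool KnownSmiAt(int i) const { return i < kSize && ((bits >> i) & 1); }
  bool IsCompatibleWith(const SmiMapSketch& other) const {
    // A source frame `other` may branch here only if every smi fact this
    // target frame assumes is also established in `other`.
    return (bits & ~other.bits) == 0;
  }
};

int main() {
  SmiMapSketch m;
  m.Raise(1, 1);  // push a known smi
  m.Raise(1, 0);  // push an unknown value on top of it
  assert(!m.KnownSmiAt(0) && m.KnownSmiAt(1));
  m.Lower(1);     // pop the unknown value
  assert(m.KnownSmiAt(0));
  return 0;
}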

20
deps/v8/src/codegen.cc

@ -254,10 +254,28 @@ Handle<Code> CodeGenerator::ComputeCallInitialize(
// that it needs, so we need to ensure it is generated already.
ComputeCallInitialize(argc, NOT_IN_LOOP);
}
CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc, in_loop), Code);
CALL_HEAP_FUNCTION(
StubCache::ComputeCallInitialize(argc, in_loop, Code::CALL_IC),
Code);
}
Handle<Code> CodeGenerator::ComputeKeyedCallInitialize(
int argc,
InLoopFlag in_loop) {
if (in_loop == IN_LOOP) {
// Force the creation of the corresponding stub outside loops,
// because it may be used when clearing the ICs later - it is
// possible for a series of IC transitions to lose the in-loop
// information, and the IC clearing code can't generate a stub
// that it needs, so we need to ensure it is generated already.
ComputeKeyedCallInitialize(argc, NOT_IN_LOOP);
}
CALL_HEAP_FUNCTION(
StubCache::ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC),
Code);
}
void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
int length = declarations->length();
int globals = 0;

42
deps/v8/src/cpu-profiler.cc

@ -31,6 +31,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "frames-inl.h"
#include "log-inl.h"
#include "../include/v8-profiler.h"
@ -49,7 +50,8 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
enqueue_order_(0) { }
enqueue_order_(0) {
}
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
@ -181,6 +183,24 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
}
void ProfilerEventsProcessor::AddCurrentStack() {
TickSampleEventRecord record;
TickSample* sample = &record.sample;
sample->state = VMState::current_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
sample->frames_count = 0;
for (StackTraceFrameIterator it;
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
JavaScriptFrame* frame = it.frame();
sample->stack[sample->frames_count++] =
reinterpret_cast<Address>(frame->function());
}
record.order = enqueue_order_;
ticks_from_vm_buffer_.Enqueue(record);
}
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
if (!events_buffer_.IsEmpty()) {
CodeEventsContainer record;
@ -205,9 +225,16 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
while (true) {
if (!ticks_from_vm_buffer_.IsEmpty()
&& ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
}
const TickSampleEventRecord* rec =
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
if (rec == NULL) return false;
if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
if (rec->order == dequeue_order) {
generator_->RecordTickSample(rec->sample);
ticks_buffer_.FinishDequeue();
@ -416,13 +443,12 @@ void CpuProfiler::StartCollectingProfile(const char* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
processor_->AddCurrentStack();
}
void CpuProfiler::StartCollectingProfile(String* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
StartCollectingProfile(profiles_->GetName(title));
}
@ -434,10 +460,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
processor_->Start();
// Enable stack sampling.
// It is important to have it started prior to logging, see issue 683:
// http://code.google.com/p/v8/issues/detail?id=683
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
Logger::LogCodeObjects();
@ -445,6 +467,8 @@ void CpuProfiler::StartProcessorIfNotStarted() {
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
// Enable stack sampling.
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
}
}
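The ordering contract in ProcessTicks can be stated compactly: VM-injected samples carry the enqueue order stamped by AddCurrentStack and must be recorded before circular-buffer samples of a later order, and an empty tick buffer no longer means "done" if VM samples remain. A simplified sketch with illustrative containers:

#include <deque>

struct SampleSketch { unsigned order; };

void RecordSketch(const SampleSketch&) { /* generator_->RecordTickSample */ }

bool ProcessTicksSketch(std::deque<SampleSketch>* from_vm,
                        std::deque<SampleSketch>* ticks,
                        unsigned dequeue_order) {
  while (true) {
    // VM-injected samples stamped with the current order go first.
    if (!from_vm->empty() && from_vm->front().order == dequeue_order) {
      RecordSketch(from_vm->front());
      from_vm->pop_front();
    }
    if (ticks->empty()) return !from_vm->empty();
    if (ticks->front().order != dequeue_order) return true;
    RecordSketch(ticks->front());
    ticks->pop_front();
  }
}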

11
deps/v8/src/cpu-profiler.h

@ -105,6 +105,11 @@ class CodeAliasEventRecord : public CodeEventRecord {
class TickSampleEventRecord BASE_EMBEDDED {
public:
TickSampleEventRecord()
: filler(1) {
ASSERT(filler != SamplingCircularQueue::kClear);
}
// The first machine word of a TickSampleEventRecord must not ever
// become equal to SamplingCircularQueue::kClear. As both order and
// TickSample's first field are not reliable in this sense (order
@ -119,9 +124,6 @@ class TickSampleEventRecord BASE_EMBEDDED {
}
INLINE(static TickSampleEventRecord* init(void* value));
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
};
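The filler field enforces the invariant stated in the comment above. A standalone sketch of why it matters, with kClear assumed to be 0: the circular queue distinguishes free from occupied slots by the first machine word alone.

#include <cstdint>

const std::uintptr_t kClearSketch = 0;  // assumed sentinel for a free slot

struct RecordSketch {
  std::uintptr_t filler;  // pinned to 1, so never reads as kClearSketch
  unsigned order;
  RecordSketch() : filler(1), order(0) {}
};

bool SlotOccupied(const void* slot) {
  // The queue only inspects the first word; the filler guarantees an
  // enqueued record is never mistaken for a cleared slot.
  return *static_cast<const std::uintptr_t*>(slot) != kClearSketch;
}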
@ -159,6 +161,8 @@ class ProfilerEventsProcessor : public Thread {
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@ -184,6 +188,7 @@ class ProfilerEventsProcessor : public Thread {
bool running_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};

3
deps/v8/src/data-flow.cc

@ -318,6 +318,9 @@ Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
// Don't try to get clever with const or dynamic variables.
if (loop_var->mode() != Variable::VAR) return NULL;
// The initial value has to be a smi.
Literal* init_lit = init->value()->AsLiteral();
if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;

19
deps/v8/src/debug.cc

@ -62,13 +62,14 @@ static void PrintLn(v8::Local<v8::Value> value) {
}
static Handle<Code> ComputeCallDebugBreak(int argc) {
CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc), Code);
static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc, kind), Code);
}
static Handle<Code> ComputeCallDebugPrepareStepIn(int argc) {
CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugPrepareStepIn(argc), Code);
static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
CALL_HEAP_FUNCTION(
StubCache::ComputeCallDebugPrepareStepIn(argc, kind), Code);
}
@ -360,13 +361,14 @@ void BreakLocationIterator::PrepareStepIn() {
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
if (code->is_call_stub()) {
if (code->is_call_stub() || code->is_keyed_call_stub()) {
// Stepping in through an IC call is handled by the runtime system, so make
// sure that any current IC is cleared and the runtime system is called. If
// the executing code has a debug break at the location, change the call in
// the original code, as that is the code that will be executed in place of
// the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count());
Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@ -1187,7 +1189,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
Address target = it.rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
if (code->is_call_stub()) {
if (code->is_call_stub() || code->is_keyed_call_stub()) {
is_call_target = true;
}
if (code->is_inline_cache_stub()) {
@ -1373,7 +1375,8 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
if (code->is_inline_cache_stub()) {
switch (code->kind()) {
case Code::CALL_IC:
return ComputeCallDebugBreak(code->arguments_count());
case Code::KEYED_CALL_IC:
return ComputeCallDebugBreak(code->arguments_count(), code->kind());
case Code::LOAD_IC:
return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));

2
deps/v8/src/disassembler.cc

@ -246,7 +246,7 @@ static int DecodeIt(FILE* f,
if (code->ic_in_loop() == IN_LOOP) {
out.AddFormatted(", in_loop");
}
if (kind == Code::CALL_IC) {
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}
} else if (kind == Code::STUB) {

1
deps/v8/src/full-codegen.h

@ -369,6 +369,7 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code sequences for calls
void EmitCallWithStub(Call* expr);
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
// Platform-specific code for inline runtime calls.

4
deps/v8/src/globals.h

@ -647,7 +647,9 @@ F FUNCTION_CAST(Address addr) {
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
enum CpuFeature { SSE3 = 32, // x86
// On X86/X64, values below 32 are bits in EDX, values 32 and above are bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19, // x86
SSE3 = 32 + 0, // x86
SSE2 = 26, // x86
CMOV = 15, // x86
RDTSC = 4, // x86
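A sketch (not the V8 API) of how such a split encoding is typically consumed, assuming the feature word is laid out with EDX in bits 0-31 and ECX in bits 32-63:

#include <cstdint>

enum CpuFeatureSketch {
  kRDTSC = 4,         // EDX bit 4
  kCMOV = 15,         // EDX bit 15
  kSSE2 = 26,         // EDX bit 26
  kSSE3 = 32 + 0,     // ECX bit 0
  kSSE4_1 = 32 + 19   // ECX bit 19
};

bool IsSupported(std::uint64_t features, CpuFeatureSketch f) {
  return ((features >> f) & 1) != 0;
}
// `features` would presumably be assembled as (uint64_t(ecx) << 32) | edx.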

9
deps/v8/src/heap-inl.h

@ -117,7 +117,14 @@ void Heap::FinalizeExternalString(String* string) {
reinterpret_cast<byte*>(string) +
ExternalString::kResourceOffset -
kHeapObjectTag);
delete *resource_addr;
// Dispose of the C++ object.
if (external_string_dispose_callback_ != NULL) {
external_string_dispose_callback_(*resource_addr);
} else {
delete *resource_addr;
}
// Clear the resource pointer in the string.
*resource_addr = NULL;
}
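A minimal analogue of the dispose-callback pattern introduced here, with hypothetical types (the real ExternalStringDiposeCallback signature is declared elsewhere in the tree): if the embedder registered a callback, route the resource to it; otherwise fall back to delete, preserving the old behavior.

struct Resource { /* embedder-defined; hypothetical */ };
typedef void (*DisposeCallback)(Resource*);

static DisposeCallback dispose_callback = 0;

void SetDisposeCallback(DisposeCallback cb) { dispose_callback = cb; }

void FinalizeSketch(Resource* resource) {
  if (dispose_callback != 0) {
    dispose_callback(resource);  // embedder-managed deallocation
  } else {
    delete resource;             // default path, unchanged behavior
  }
}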

4
deps/v8/src/heap.cc

@ -98,6 +98,8 @@ size_t Heap::code_range_size_ = 0;
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
ExternalStringDiposeCallback Heap::external_string_dispose_callback_ = NULL;
List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
@ -560,7 +562,7 @@ class ClearThreadJSFunctionResultCachesVisitor: public ThreadVisitor {
void Heap::ClearJSFunctionResultCaches() {
if (Bootstrapper::IsActive()) return;
ClearThreadJSFunctionResultCachesVisitor visitor;
ThreadManager::IterateThreads(&visitor);
ThreadManager::IterateArchivedThreads(&visitor);
}

8
deps/v8/src/heap.h

@ -690,6 +690,11 @@ class Heap : public AllStatic {
static bool GarbageCollectionGreedyCheck();
#endif
static void SetExternalStringDiposeCallback(
ExternalStringDiposeCallback callback) {
external_string_dispose_callback_ = callback;
}
static void AddGCPrologueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
static void RemoveGCPrologueCallback(GCEpilogueCallback callback);
@ -1138,6 +1143,9 @@ class Heap : public AllStatic {
// any string when looked up in properties.
static String* hidden_symbol_;
static ExternalStringDiposeCallback
external_string_dispose_callback_;
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
struct GCPrologueCallbackPair {

44
deps/v8/src/ia32/assembler-ia32.cc

@ -1328,6 +1328,15 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
void Assembler::test_b(const Operand& op, uint8_t imm8) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF6);
emit_operand(eax, op);
EMIT(imm8);
}
void Assembler::xor_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -2221,6 +2230,40 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
}
void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x38);
EMIT(0x2A);
emit_sse_operand(dst, src);
}
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xE7);
emit_sse_operand(src, dst);
}
void Assembler::prefetch(const Operand& src, int level) {
ASSERT(is_uint2(level));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
EMIT(0x18);
XMMRegister code = { level }; // Emit hint number in Reg position of ModR/M.
emit_sse_operand(code, src);
}
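The byte sequences emitted above correspond to standard SSE instructions; as a point of reference, the same pair is reachable from C++ via intrinsics (compile with SSE4.1 enabled; both pointers must be 16-byte aligned). This illustrates the semantics only and is not code from this tree:

#include <emmintrin.h>  // SSE2: _mm_stream_si128 (movntdq)
#include <smmintrin.h>  // SSE4.1: _mm_stream_load_si128 (movntdqa)

void StreamCopy16(void* dst, const void* src) {
  // Non-temporal load and store: data bypasses the cache hierarchy.
  __m128i v = _mm_stream_load_si128(
      reinterpret_cast<__m128i*>(const_cast<void*>(src)));
  _mm_stream_si128(reinterpret_cast<__m128i*>(dst), v);
}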
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -2300,7 +2343,6 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);

10
deps/v8/src/ia32/assembler-ia32.h

@ -637,6 +637,7 @@ class Assembler : public Malloced {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, const Operand& src);
@ -790,6 +791,15 @@ class Assembler : public Malloced {
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
// Parallel XMM operations.
void movntdqa(XMMRegister dst, const Operand& src);
void movntdq(const Operand& dst, XMMRegister src);
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal prefetch (fetch without polluting the cache hierarchy).
void prefetch(const Operand& src, int level);
// TODO(lrn): Need SFENCE for movnt?
// Debugging
void Print();

20
deps/v8/src/ia32/builtins-ia32.cc

@ -331,10 +331,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(greater_equal, &exit, not_taken);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(above_equal, &exit, not_taken);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@ -469,11 +467,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ cmp(ebx, Factory::undefined_value());
__ j(equal, &use_global_receiver);
// We don't use IsObjectJSObjectType here because we jump on success.
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, &convert_to_object);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
__ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
__ j(below_equal, &shift_arguments);
__ bind(&convert_to_object);
@ -617,12 +615,12 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
// We don't use IsObjectJSObjectType here because we jump on success.
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(less, &call_to_object);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(less_equal, &push_receiver);
__ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
__ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
__ j(below_equal, &push_receiver);
// Convert the receiver to an object.
__ bind(&call_to_object);

452
deps/v8/src/ia32/codegen-ia32.cc

@ -2624,9 +2624,8 @@ void CodeGenerator::Comparison(AstNode* node,
ASSERT(temp.is_valid());
__ mov(temp.reg(),
FieldOperand(operand.reg(), HeapObject::kMapOffset));
__ movzx_b(temp.reg(),
FieldOperand(temp.reg(), Map::kBitFieldOffset));
__ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
__ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
temp.Unuse();
operand.Unuse();
dest->Split(not_zero);
@ -2720,11 +2719,9 @@ void CodeGenerator::Comparison(AstNode* node,
// left_side is a sequential ASCII string.
left_side = Result(left_reg);
right_side = Result(right_val);
Result temp2 = allocator_->Allocate();
ASSERT(temp2.is_valid());
// Test string equality and comparison.
Label comparison_done;
if (cc == equal) {
Label comparison_done;
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(not_equal, &comparison_done);
@ -2732,34 +2729,25 @@ void CodeGenerator::Comparison(AstNode* node,
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
__ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
char_value);
__ bind(&comparison_done);
} else {
__ mov(temp2.reg(),
FieldOperand(left_side.reg(), String::kLengthOffset));
__ SmiUntag(temp2.reg());
__ sub(Operand(temp2.reg()), Immediate(1));
Label comparison;
// If the length is 0 then the subtraction gave -1 which compares less
// than any character.
__ j(negative, &comparison);
// Otherwise load the first character.
__ movzx_b(temp2.reg(),
FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
__ bind(&comparison);
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
Immediate(Smi::FromInt(1)));
// If the length is 0 then the jump is taken and the flags
// correctly represent being less than the one-character string.
__ j(below, &comparison_done);
// Compare the first character of the string with the
// constant 1-character string.
uint8_t char_value =
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
__ cmp(Operand(temp2.reg()), Immediate(char_value));
Label characters_were_different;
__ j(not_equal, &characters_were_different);
__ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
char_value);
__ j(not_equal, &comparison_done);
// If the first character is the same then the long string sorts after
// the short one.
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ bind(&characters_were_different);
}
temp2.Unuse();
__ bind(&comparison_done);
left_side.Unuse();
right_side.Unuse();
dest->Split(cc);
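The rewritten sequence drops the scratch register by observing that comparing against a one-character constant needs only the length and the first byte. A scalar sketch of the same decision procedure:

#include <string>

int CompareToOneCharSketch(const std::string& s, char c) {
  if (s.empty()) return -1;  // the empty string sorts before any character
  unsigned char first = static_cast<unsigned char>(s[0]);
  unsigned char ch = static_cast<unsigned char>(c);
  if (first != ch) return first < ch ? -1 : 1;  // decided by the first byte
  return s.size() == 1 ? 0 : 1;  // same first byte: longer sorts after
}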
@ -4148,9 +4136,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// eax: value to be iterated over
__ test(eax, Immediate(kSmiTagMask));
primitive.Branch(zero);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
jsobject.Branch(above_equal);
primitive.Bind();
@ -5762,26 +5748,66 @@ void CodeGenerator::VisitCall(Call* node) {
// Allocate a frame slot for the receiver.
frame_->Push(Factory::undefined_value());
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval.
// Result to hold the result of the function resolution and the
// final result of the eval call.
Result result;
// If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
JumpTarget done;
if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
ASSERT(var->slot()->type() == Slot::LOOKUP);
JumpTarget slow;
// Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the
// receiver.
Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->Push(&fun);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
frame_->Push(Factory::undefined_value());
}
frame_->PushParameterAt(-1);
// Resolve the call.
result =
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
done.Jump(&result);
slow.Bind();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval by
// pushing the loaded function, the first argument to the eval
// call and the receiver.
frame_->PushElementAt(arg_count + 1);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
frame_->Push(Factory::undefined_value());
}
// Push the receiver.
frame_->PushParameterAt(-1);
// Resolve the call.
Result result =
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
// If we generated fast-case code, bind the jump target where the fast
// and slow cases merge.
if (done.is_linked()) done.Bind(&result);
// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
@ -5949,18 +5975,31 @@ void CodeGenerator::VisitCall(Call* node) {
ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
// Call the function.
CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
} else {
// Push the receiver onto the frame.
Load(property->obj());
frame()->Dup();
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Load the name of the function.
Load(property->key());
Result function = EmitKeyedLoad();
Result receiver = frame_->Pop();
frame_->Push(&function);
frame_->Push(&receiver);
}
// Call the function.
CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result =
frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting());
frame_->RestoreContextRegister();
frame_->Push(&result);
}
}
} else {
@ -6317,14 +6356,15 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
ASSERT(map.is_valid());
__ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
__ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
__ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
__ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
// Do a range test for JSObject type. We can't use
// MacroAssembler::IsInstanceJSObjectType, because we are using a
// ControlDestination, so we copy its implementation here.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(below);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
__ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
obj.Unuse();
map.Unuse();
destination()->Split(below_equal);
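The sub/cmp/below_equal rewrite is the classic unsigned range check, folding two comparisons into one: lo <= x <= hi holds exactly when unsigned(x - lo) <= hi - lo, because values below lo wrap around to large unsigned numbers.

#include <cassert>

bool InRange(unsigned x, unsigned lo, unsigned hi) {
  return (x - lo) <= (hi - lo);  // wraps for x < lo, so the test fails
}

int main() {
  assert(InRange(5, 3, 9));
  assert(!InRange(2, 3, 9));  // 2 - 3 wraps far above 9 - 3
  assert(!InRange(10, 3, 9));
  return 0;
}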
@ -6360,9 +6400,8 @@ void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(temp.is_valid());
__ mov(temp.reg(),
FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(temp.reg(),
FieldOperand(temp.reg(), Map::kBitFieldOffset));
__ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
__ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
obj.Unuse();
temp.Unuse();
destination()->Split(not_zero);
@ -6436,20 +6475,16 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
{ Result tmp = allocator()->Allocate();
__ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
__ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
null.Branch(below);
__ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
null.Branch(below);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
// LAST_JS_OBJECT_TYPE.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ cmp(tmp.reg(), JS_FUNCTION_TYPE);
function.Branch(equal);
}
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
// LAST_JS_OBJECT_TYPE.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
function.Branch(equal);
// Check if the constructor in the map is a function.
{ Result tmp = allocator()->Allocate();
@ -7030,8 +7065,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// has no indexed interceptor.
__ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
deferred->Branch(below);
__ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
__ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
__ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
KeyedLoadIC::kSlowCaseBitFieldMask);
deferred->Branch(not_zero);
// Check the object's elements are in fast case.
@ -8285,10 +8320,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
__ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
__ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
__ CmpObjectType(answer.reg(), FIRST_NONSTRING_TYPE, temp.reg());
__ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
temp.Unuse();
answer.Unuse();
destination()->Split(below);
@ -8310,9 +8345,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
// It can be an undetectable object.
frame_->Spill(answer.reg());
__ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ movzx_b(answer.reg(),
FieldOperand(answer.reg(), Map::kBitFieldOffset));
__ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
__ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
answer.Unuse();
destination()->Split(not_zero);
@ -8339,14 +8373,15 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->false_target()->Branch(equal);
// It can be an undetectable object.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
__ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
__ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
__ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
// Do a range test for JSObject type. We can't use
// MacroAssembler::IsInstanceJSObjectType, because we are using a
// ControlDestination, so we copy its implementation here.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(below);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
__ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
answer.Unuse();
map.Unuse();
destination()->Split(below_equal);
@ -8766,6 +8801,9 @@ Result CodeGenerator::EmitKeyedLoad() {
key.ToRegister();
receiver.ToRegister();
// If key and receiver are shared registers on the frame, their values will
// be automatically saved and restored when going to deferred code.
// The result is in elements, which is guaranteed non-shared.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
@ -9270,20 +9308,19 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
// Undetectable => false.
__ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
__ and_(ebx, 1 << Map::kIsUndetectable);
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(not_zero, &false_result);
// JavaScript object => true.
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
__ j(above_equal, &true_result);
// String value => false iff empty.
__ cmp(ecx, FIRST_NONSTRING_TYPE);
__ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string);
__ mov(edx, FieldOperand(eax, String::kLengthOffset));
ASSERT(kSmiTag == 0);
__ test(edx, Operand(edx));
__ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result);
__ jmp(&true_result);
@ -11739,13 +11776,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// There is no test for undetectability in strict equality.
// Get the type of the first operand.
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// If the first object is a JS object, we have done pointer comparison.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object);
// Return non-zero (eax is not zero)
@ -11756,17 +11790,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
__ cmp(ecx, ODDBALL_TYPE);
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
__ cmp(ecx, ODDBALL_TYPE);
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
// Fall through to the general case.
@ -12408,12 +12439,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &slow, not_taken);
// Check that the left hand is a JS object.
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
__ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(above, &slow, not_taken);
__ IsObjectJSObjectType(eax, eax, edx, &slow);
// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
@ -12438,12 +12464,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Check that the function prototype is a JS object.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(above, &slow, not_taken);
__ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
// Register mapping:
// eax is object map.
@ -12638,7 +12659,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
__ push(result_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@ -12652,9 +12672,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
// have a chance to overwrite it.
__ mov(scratch_, eax);
}
__ pop(result_);
__ pop(index_);
__ pop(object_);
// Reload the instance type.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
ASSERT(kSmiTag == 0);
@ -12877,14 +12899,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ebx: length of resulting flat string as a smi
// edx: second string
Label non_ascii_string_add_flat_result;
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ test(ecx, Immediate(kAsciiStringTag));
__ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &string_add_runtime);
__ bind(&make_flat_ascii_string);
@ -12925,8 +12945,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// edx: second string
__ bind(&non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ and_(ecx, kAsciiStringTag);
__ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
@ -13492,6 +13511,211 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
#undef __
#define __ masm.
MemCopyFunction CreateMemCopyFunction() {
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
CHECK(buffer);
HandleScope handles;
MacroAssembler masm(buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
// The 32-bit C calling convention passes arguments on the stack.
// Stack layout:
// esp[12]: Third argument, size.
// esp[8]: Second argument, source pointer.
// esp[4]: First argument, destination pointer.
// esp[0]: return address
const int kDestinationOffset = 1 * kPointerSize;
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
int stack_offset = 0; // Update if we change the stack height.
if (FLAG_debug_code) {
__ cmp(Operand(esp, kSizeOffset + stack_offset),
Immediate(kMinComplexMemCopy));
Label ok;
__ j(greater_equal, &ok);
__ int3();
__ bind(&ok);
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope enable(SSE2);
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(Operand(edx), Immediate(16));
__ add(dst, Operand(edx));
__ add(src, Operand(edx));
__ sub(Operand(count), edx);
// edi is now aligned. Check if esi is also aligned.
Label unaligned_source;
__ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
__ IncrementCounter(&Counters::memcopy_aligned, 1);
// Copy loop for aligned source and destination.
__ mov(edx, count);
Register loop_count = ecx;
Register count = edx;
__ shr(loop_count, 5);
{
// Main copy loop.
Label loop;
__ bind(&loop);
__ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10));
__ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(Operand(dst), Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
}
// At most 31 bytes to copy.
Label move_less_16;
__ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0));
__ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ and_(count, 0xF);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ pop(esi);
__ pop(edi);
__ ret(0);
}
__ Align(16);
{
// Copy loop for unaligned source and aligned destination.
// If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
__ IncrementCounter(&Counters::memcopy_unaligned, 1);
__ mov(edx, ecx);
Register loop_count = ecx;
Register count = edx;
__ shr(loop_count, 5);
{
// Main copy loop
Label loop;
__ bind(&loop);
__ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(Operand(dst), Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
}
// At most 31 bytes to copy.
Label move_less_16;
__ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0));
__ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ and_(count, 0x0F);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ pop(esi);
__ pop(edi);
__ ret(0);
}
} else {
__ IncrementCounter(&Counters::memcopy_noxmm, 1);
// SSE2 not supported. Unlikely to happen in practice.
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
__ cld();
Register dst = edi;
Register src = esi;
Register count = ecx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
// Copy the first word.
__ mov(eax, Operand(src, 0));
__ mov(Operand(dst, 0), eax);
// Increment src and dst so that dst is aligned.
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
__ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
__ add(dst, Operand(edx));
__ add(src, Operand(edx));
__ sub(Operand(count), edx);
// edi is now aligned, ecx holds number of remaining bytes to copy.
__ mov(edx, count);
count = edx;
__ shr(ecx, 2); // Make word count instead of byte count.
__ rep_movs();
// At most 3 bytes left to copy. Copy 4 bytes at end of string.
__ and_(count, 3);
__ mov(eax, Operand(src, count, times_1, -4));
__ mov(Operand(dst, count, times_1, -4), eax);
__ pop(esi);
__ pop(edi);
__ ret(0);
}
CodeDesc desc;
masm.GetCode(&desc);
// Call the function from C++.
return FUNCTION_CAST<MemCopyFunction>(buffer);
}
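A C-level sketch of the copy strategy the SSE2 path generates (illustrative, not the emitted code, and assuming count >= kMinComplexMemCopy as the debug check enforces): one unaligned 16-byte head, advance dst to a 16-byte boundary, run the main 32-byte loop, then finish with a possibly overlapping 16-byte tail.

#include <cstddef>
#include <cstdint>
#include <cstring>

void MemCopySketch(std::uint8_t* dst, const std::uint8_t* src,
                   std::size_t count) {
  std::memcpy(dst, src, 16);  // unaligned head, like the first movdqu pair
  std::size_t adjust = 16 - (reinterpret_cast<std::uintptr_t>(dst) & 0xF);
  dst += adjust; src += adjust; count -= adjust;  // dst now 16-byte aligned
  for (std::size_t n = count / 32; n > 0; n--) {  // main 32-byte loop
    std::memcpy(dst, src, 32);
    dst += 32; src += 32;
  }
  if (count & 16) { std::memcpy(dst, src, 16); dst += 16; src += 16; }
  count &= 15;
  // Copy the final 16 bytes ending exactly at the end; this may re-copy a
  // few bytes, which is harmless (the same trick as the trailing movdqu).
  std::memcpy(dst + count - 16, src + count - 16, 16);
}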
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

2
deps/v8/src/ia32/codegen-ia32.h

@ -594,6 +594,8 @@ class CodeGenerator: public AstVisitor {
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);

23
deps/v8/src/ia32/disasm-ia32.cc

@ -817,6 +817,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
// Returns NULL if the instruction is not handled here.
static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
case 0x18: return "prefetch";
case 0xA2: return "cpuid";
case 0x31: return "rdtsc";
case 0xBE: return "movsx_b";
@ -942,7 +943,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x0F:
{ byte f0byte = *(data+1);
const char* f0mnem = F0Mnem(f0byte);
if (f0byte == 0xA2 || f0byte == 0x31) {
if (f0byte == 0x18) {
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
const char* suffix[] = {"nta", "1", "2", "3"};
AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
data += PrintRightOperand(data);
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
} else if ((f0byte & 0xF0) == 0x80) {
@ -1070,6 +1077,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x2A) {
// movntdqa
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
} else {
UnimplementedInstruction();
}
@ -1122,6 +1136,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xE7) {
AppendToBuffer("movntdq ");
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;

57
deps/v8/src/ia32/full-codegen-ia32.cc

@ -1726,6 +1726,29 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForValue(args->at(i), kStack);
}
VisitForValue(key, kAccumulator);
__ mov(ecx, eax);
// Record source position of the IC call.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(
arg_count, in_loop);
__ call(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
Apply(context_, eax);
}
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@ -1815,37 +1838,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed CallIC.
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kAccumulator);
// Record source code position for IC call.
SetSourcePosition(prop->position());
if (prop->is_synthetic()) {
VisitForValue(prop->key(), kAccumulator);
// Record source code position for IC call.
SetSourcePosition(prop->position());
__ pop(edx); // We do not need to keep the receiver.
} else {
__ mov(edx, Operand(esp, 0)); // Keep receiver, to call function on.
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// By emitting a nop we make sure that we do not have a "test eax,..."
// instruction after the call it is treated specially by the LoadIC code.
__ nop();
if (prop->is_synthetic()) {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// By emitting a nop we make sure that we do not have a "test eax,..."
// instruction after the call as it is treated specially
// by the LoadIC code.
__ nop();
// Push result (function).
__ push(eax);
// Push Global receiver.
__ mov(ecx, CodeGenerator::GlobalObject());
__ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
EmitCallWithStub(expr);
} else {
// Pop receiver.
__ pop(ebx);
// Push result (function).
__ push(eax);
__ push(ebx);
EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous

213
deps/v8/src/ia32/ic-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -305,8 +305,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
Label check_pixel_array, probe_dictionary, check_number_dictionary;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@ -316,8 +315,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check bit field.
__ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
__ test(ebx, Immediate(kSlowCaseBitFieldMask));
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset), kSlowCaseBitFieldMask);
__ j(not_zero, &slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@ -329,8 +327,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
// Get the elements array of the object.
__ bind(&index_smi);
// Now the key is known to be a smi. This point is also jumped to from below,
// where a numeric string key is converted to a smi.
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
@ -405,13 +404,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
__ test(ebx, Immediate(String::kContainsCachedArrayIndexMask));
__ j(zero, &index_string, not_taken);
// Is the string a symbol?
__ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// ecx: key map.
ASSERT(kSymbolTag != 0);
__ test(ebx, Immediate(kIsSymbolMask));
__ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kIsSymbolMask);
__ j(zero, &slow, not_taken);
// If the receiver is a fast-case object, check the keyed lookup
@ -453,14 +452,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ cmp(edi, Operand(ecx));
__ sub(edi, Operand(ecx));
__ j(above_equal, &slow);
// Load in-object property.
__ sub(edi, Operand(ecx));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@ -487,10 +486,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&index_string);
// We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
// eax: key (string).
// ebx: hash field.
// edx: receiver.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(ebx, String::kArrayIndexValueMask);
__ shr(ebx, String::kHashShift - kSmiTagSize);
// Here we actually clobber the key (eax), which would be used if we called
// into the runtime later. However, as the new key is the numeric value of
// the string key, either key works.
__ mov(eax, ebx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
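The shift above folds smi tagging into the extraction: kHashShift >= kSmiTagSize (asserted above) and kSmiTag is zero, so shifting the masked field right by kHashShift - kSmiTagSize yields the index already smi-tagged. The same computation as a C++ sketch, in terms of the constants declared in objects.h:

// Sketch: turn a hash field with a cached array index into a smi-tagged key.
static int32_t SmiFromCachedArrayIndex(uint32_t field) {
  // kArrayIndexValueMask selects the index bits, which start kHashShift bits
  // up; shifting down by kHashShift - kSmiTagSize leaves one zero tag bit.
  return static_cast<int32_t>((field & String::kArrayIndexValueMask) >>
                              (String::kHashShift - kSmiTagSize));
}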
@ -556,8 +562,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
__ CmpInstanceType(ecx, JS_OBJECT_TYPE);
@ -681,7 +687,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ fincstp();
// Fall through to slow case.
// Slow case: Load key and receiver from stack and jump to runtime.
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
@ -746,8 +752,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
@ -864,8 +870,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
@ -1038,22 +1044,21 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -- edx : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
Label number, non_number, non_string, boolean, probe;
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
@ -1073,7 +1078,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check for string.
__ bind(&non_number);
__ cmp(ebx, FIRST_NONSTRING_TYPE);
__ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &non_string, taken);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::STRING_FUNCTION_INDEX, edx);
@ -1084,7 +1089,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ cmp(edx, Factory::true_value());
__ j(equal, &boolean, not_taken);
__ cmp(edx, Factory::false_value());
__ j(not_equal, &miss, taken);
__ j(not_equal, miss, taken);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@ -1092,10 +1097,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache for the value object.
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
}
@ -1135,8 +1136,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// The generated code never falls through.
static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@ -1144,20 +1145,20 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss, global_object, non_global_object;
Label global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
__ j(zero, miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_JS_OBJECT_TYPE);
__ j(below, &miss, not_taken);
__ j(below, miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@ -1171,10 +1172,10 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Accessing global object: Load and invoke.
__ bind(&global_object);
// Check that the global object does not require access checks.
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_equal, &miss, not_taken);
GenerateNormalHelper(masm, argc, true, &miss);
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_equal, miss, not_taken);
GenerateNormalHelper(masm, argc, true, miss);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
@ -1183,24 +1184,20 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ j(equal, &global_proxy, not_taken);
// Check that the non-global, non-global-proxy object does not
// require access checks.
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_equal, &miss, not_taken);
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_equal, miss, not_taken);
__ bind(&invoke);
GenerateNormalHelper(masm, argc, false, &miss);
GenerateNormalHelper(masm, argc, false, miss);
// Global object proxy access: Check access rights.
__ bind(&global_proxy);
__ CheckAccessGlobalProxy(edx, eax, &miss);
__ CheckAccessGlobalProxy(edx, eax, miss);
__ jmp(&invoke);
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@ -1222,7 +1219,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
__ mov(ebx, Immediate(ExternalReference(IC_Utility(kCallIC_Miss))));
__ mov(ebx, Immediate(ExternalReference(IC_Utility(id))));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
@ -1253,6 +1250,106 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, &miss);
__ bind(&miss);
GenerateMiss(masm, argc);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
Label miss;
GenerateCallNormal(masm, argc, &miss);
__ bind(&miss);
GenerateMiss(masm, argc);
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
Label miss, skip_probe;
// Do not probe the monomorphic cache if the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(equal, &skip_probe, taken);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC, &skip_probe);
__ bind(&skip_probe);
__ mov(eax, ecx);
__ EnterInternalFrame();
__ push(ecx);
__ call(Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_Generic)),
RelocInfo::CODE_TARGET);
__ pop(ecx);
__ LeaveInternalFrame();
__ mov(edi, eax);
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check that the receiver is a valid JS object.
__ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, eax);
__ j(below, &miss, not_taken);
// Check that the value is a JavaScript function.
__ test(edi, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
__ j(not_equal, &miss, not_taken);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
__ bind(&miss);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
Label miss;
GenerateCallNormal(masm, argc, &miss);
__ bind(&miss);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
@ -1300,8 +1397,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ j(equal, &global, not_taken);
// Check for non-global object that requires access check.
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &miss, not_taken);
// Search the dictionary placing the result in eax.
@ -1322,7 +1419,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ CheckAccessGlobalProxy(eax, edx, &miss);
__ jmp(&probe);
// Cache miss: Restore receiver from stack and jump to runtime.
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
}

19
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -296,6 +296,25 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
}
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
Label* fail) {
mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
IsInstanceJSObjectType(map, scratch, fail);
}
void MacroAssembler::IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail) {
movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
j(above, fail);
}
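IsInstanceJSObjectType is the standard unsigned range check: subtract the lower bound, then one unsigned comparison rejects values on both sides. The equivalent plain C++, sketched over the same instance-type constants:

// (type - FIRST) <= (LAST - FIRST), unsigned: if type is below FIRST the
// subtraction wraps to a huge value, so a single branch covers both bounds.
static bool IsInJSObjectRange(uint32_t instance_type) {
  return instance_type - FIRST_JS_OBJECT_TYPE <=
         static_cast<uint32_t>(LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
}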
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();

12
deps/v8/src/ia32/macro-assembler-ia32.h

@ -188,6 +188,18 @@ class MacroAssembler: public Assembler {
Register map,
Register instance_type);
// Check if a heap object's type is in the JSObject range, not including
// JSFunction. The object's map will be loaded in the map register.
// Any or all of the three registers may be the same.
// The contents of the scratch register will always be overwritten.
void IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
Label* fail);
// The contents of the scratch register will be overwritten.
void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();

102
deps/v8/src/ia32/stub-cache-ia32.cc

@ -172,6 +172,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype) {
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
@ -1029,6 +1040,20 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
__ j(not_equal, miss, not_taken);
}
}
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ jmp(ic, RelocInfo::CODE_TARGET);
}
Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
@ -1042,6 +1067,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@ -1073,8 +1100,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@ -1102,6 +1128,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@ -1219,8 +1247,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1248,6 +1275,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss, return_undefined, call_builtin;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@ -1301,8 +1330,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1326,16 +1354,17 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
Label miss;
Label index_out_of_range;
GenerateNameCheck(name, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
Register receiver = ebx;
Register index = ecx;
Register index = edi;
Register scratch = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
@ -1364,11 +1393,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
__ ret((argc + 1) * kPointerSize);
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
Handle<Code> ic = ComputeCallMiss(argc);
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1393,15 +1419,17 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
Label miss;
Label index_out_of_range;
GenerateNameCheck(name, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
Register receiver = eax;
Register index = ecx;
Register index = edi;
Register scratch1 = ebx;
Register scratch2 = edx;
Register result = eax;
@ -1433,10 +1461,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
Handle<Code> ic = ComputeCallMiss(argc);
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1469,6 +1495,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
GenerateNameCheck(name, &miss_in_smi_check);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@ -1520,14 +1548,11 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ jmp(&miss);
} else {
// Check that the object is a string or a symbol.
__ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_NONSTRING_TYPE);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
__ j(above_equal, &miss, not_taken);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@ -1546,9 +1571,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
eax);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@ -1568,9 +1592,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
eax);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@ -1593,8 +1616,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
FreeSpaceForFastApiCall(masm(), eax);
}
__ bind(&miss_in_smi_check);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1613,6 +1635,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
@ -1655,8 +1679,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(argc);
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@ -1677,6 +1700,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
@ -1739,8 +1764,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);

18
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -1119,6 +1119,24 @@ Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
}
Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
int arg_count,
int loop_nesting) {
// Function name, arguments, and receiver are on top of the frame.
// The IC expects the name in ecx and the rest on the stack and
// drops them all.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = cgen()->ComputeKeyedCallInitialize(arg_count, in_loop);
// Spill args, receiver, and function. The call will drop args and
// receiver.
Result name = Pop();
PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
name.ToRegister(ecx);
name.Unuse();
return RawCallCodeObject(ic, mode);
}
Result VirtualFrame::CallConstructor(int arg_count) {
// Arguments, receiver, and function are on top of the frame. The
// IC expects arg count in eax, function in edi, and the arguments

3
deps/v8/src/ia32/virtual-frame-ia32.h

@ -360,6 +360,9 @@ class VirtualFrame: public ZoneObject {
// include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Call keyed call IC. Same calling convention as CallCallIC.
Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments,
// receiver (global object), and function are found on top of the
// frame. Function is not dropped. The argument count does not

148
deps/v8/src/ic.cc

@ -152,11 +152,13 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// to prototype check failure.
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
// For keyed load/store, the most likely cause of cache failure is
// For keyed load/store/call, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
// prototype and non-prototype failures for keyed access.
Code::Kind kind = target->kind();
if (kind == Code::KEYED_LOAD_IC || kind == Code::KEYED_STORE_IC) {
if (kind == Code::KEYED_LOAD_IC ||
kind == Code::KEYED_STORE_IC ||
kind == Code::KEYED_CALL_IC) {
return MONOMORPHIC;
}
@ -196,9 +198,9 @@ RelocInfo::Mode IC::ComputeMode() {
Failure* IC::TypeError(const char* type,
Handle<Object> object,
Handle<String> name) {
Handle<Object> key) {
HandleScope scope;
Handle<Object> args[2] = { name, object };
Handle<Object> args[2] = { key, object };
Handle<Object> error = Factory::NewTypeError(type, HandleVector(args, 2));
return Top::Throw(*error);
}
@ -224,6 +226,7 @@ void IC::Clear(Address address) {
case Code::STORE_IC: return StoreIC::Clear(address, target);
case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
case Code::BINARY_OP_IC: return; // Clearing these is tricky and does not
// make any performance difference.
default: UNREACHABLE();
@ -231,12 +234,13 @@ void IC::Clear(Address address) {
}
void CallIC::Clear(Address address, Code* target) {
void CallICBase::Clear(Address address, Code* target) {
State state = target->ic_state();
InLoopFlag in_loop = target->ic_in_loop();
if (state == UNINITIALIZED) return;
Code* code =
StubCache::FindCallInitialize(target->arguments_count(), in_loop);
StubCache::FindCallInitialize(target->arguments_count(),
target->ic_in_loop(),
target->kind());
SetTargetAtAddress(address, code);
}
@ -364,7 +368,7 @@ static void LookupForRead(Object* object,
}
Object* CallIC::TryCallAsFunction(Object* object) {
Object* CallICBase::TryCallAsFunction(Object* object) {
HandleScope scope;
Handle<Object> target(object);
Handle<Object> delegate = Execution::GetFunctionDelegate(target);
@ -383,7 +387,7 @@ Object* CallIC::TryCallAsFunction(Object* object) {
return *delegate;
}
void CallIC::ReceiverToObject(Handle<Object> object) {
void CallICBase::ReceiverToObject(Handle<Object> object) {
HandleScope scope;
Handle<Object> receiver(object);
@ -396,9 +400,9 @@ void CallIC::ReceiverToObject(Handle<Object> object) {
}
Object* CallIC::LoadFunction(State state,
Handle<Object> object,
Handle<String> name) {
Object* CallICBase::LoadFunction(State state,
Handle<Object> object,
Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@ -481,7 +485,7 @@ Object* CallIC::LoadFunction(State state,
}
void CallIC::UpdateCaches(LookupResult* lookup,
void CallICBase::UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
Handle<String> name) {
@ -497,16 +501,21 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
code = StubCache::ComputeCallPreMonomorphic(argc, in_loop);
code = StubCache::ComputeCallPreMonomorphic(argc, in_loop, kind_);
} else if (state == MONOMORPHIC) {
code = StubCache::ComputeCallMegamorphic(argc, in_loop);
code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
} else {
// Compute monomorphic stub.
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
code = StubCache::ComputeCallField(argc, in_loop, *name, *object,
lookup->holder(), index);
code = StubCache::ComputeCallField(argc,
in_loop,
kind_,
*name,
*object,
lookup->holder(),
index);
break;
}
case CONSTANT_FUNCTION: {
@ -514,8 +523,13 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
JSFunction* function = lookup->GetConstantFunction();
code = StubCache::ComputeCallConstant(argc, in_loop, *name, *object,
lookup->holder(), function);
code = StubCache::ComputeCallConstant(argc,
in_loop,
kind_,
*name,
*object,
lookup->holder(),
function);
break;
}
case NORMAL: {
@ -530,6 +544,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
JSFunction* function = JSFunction::cast(cell->value());
code = StubCache::ComputeCallGlobal(argc,
in_loop,
kind_,
*name,
*receiver,
global,
@ -541,13 +556,20 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
code = StubCache::ComputeCallNormal(argc,
in_loop,
kind_,
*name,
*receiver);
}
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
code = StubCache::ComputeCallInterceptor(argc, *name, *object,
code = StubCache::ComputeCallInterceptor(argc,
kind_,
*name,
*object,
lookup->holder());
break;
}
@ -569,11 +591,44 @@ void CallIC::UpdateCaches(LookupResult* lookup,
}
#ifdef DEBUG
TraceIC("CallIC", name, state, target(), in_loop ? " (in-loop)" : "");
TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
name, state, target(), in_loop ? " (in-loop)" : "");
#endif
}
Object* KeyedCallIC::LoadFunction(State state,
Handle<Object> object,
Handle<Object> key) {
if (key->IsSymbol()) {
return CallICBase::LoadFunction(state, object, Handle<String>::cast(key));
}
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
ReceiverToObject(object);
} else {
if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
Object* code = StubCache::ComputeCallMegamorphic(
argc, in_loop, Code::KEYED_CALL_IC);
if (!code->IsFailure()) {
set_target(Code::cast(code));
}
}
}
Object* result = Runtime::GetObjectProperty(object, key);
if (result->IsJSFunction()) return result;
result = TryCallAsFunction(result);
return result->IsJSFunction() ?
result : TypeError("property_not_function", object, key);
}
Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@ -1293,7 +1348,22 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
// Static IC stub generators.
//
// Used from ic_<arch>.cc.
static Object* CompileFunction(Object* result,
Handle<Object> object,
InLoopFlag in_loop) {
// Compile now with optimization.
HandleScope scope;
Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
if (in_loop == IN_LOOP) {
CompileLazyInLoop(function, object, CLEAR_EXCEPTION);
} else {
CompileLazy(function, object, CLEAR_EXCEPTION);
}
return *function;
}
// Used from ic-<arch>.cc.
Object* CallIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@ -1312,21 +1382,27 @@ Object* CallIC_Miss(Arguments args) {
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
}
// Compile now with optimization.
HandleScope scope;
Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
InLoopFlag in_loop = ic.target()->ic_in_loop();
if (in_loop == IN_LOOP) {
CompileLazyInLoop(function, args.at<Object>(0), CLEAR_EXCEPTION);
} else {
CompileLazy(function, args.at<Object>(0), CLEAR_EXCEPTION);
// Used from ic-<arch>.cc.
Object* KeyedCallIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
KeyedCallIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Object* result =
ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
return *function;
return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
}
// Used from ic_<arch>.cc.
// Used from ic-<arch>.cc.
Object* LoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@ -1336,7 +1412,7 @@ Object* LoadIC_Miss(Arguments args) {
}
// Used from ic_<arch>.cc
// Used from ic-<arch>.cc
Object* KeyedLoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@ -1346,7 +1422,7 @@ Object* KeyedLoadIC_Miss(Arguments args) {
}
// Used from ic_<arch>.cc.
// Used from ic-<arch>.cc.
Object* StoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
@ -1404,7 +1480,7 @@ Object* SharedStoreIC_ExtendStorage(Arguments args) {
}
// Used from ic_<arch>.cc.
// Used from ic-<arch>.cc.
Object* KeyedStoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);

53
deps/v8/src/ic.h

@ -44,6 +44,7 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
ICU(LoadIC_Miss) \
ICU(KeyedLoadIC_Miss) \
ICU(CallIC_Miss) \
ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
ICU(SharedStoreIC_ExtendStorage) \
@ -147,7 +148,7 @@ class IC {
static Failure* TypeError(const char* type,
Handle<Object> object,
Handle<String> name);
Handle<Object> key);
static Failure* ReferenceError(const char* type, Handle<String> name);
// Access the target code for the given IC address.
@ -184,22 +185,16 @@ class IC_Utility {
};
class CallIC: public IC {
public:
CallIC() : IC(EXTRA_CALL_FRAME) { ASSERT(target()->is_call_stub()); }
class CallICBase: public IC {
protected:
explicit CallICBase(Code::Kind kind) : IC(EXTRA_CALL_FRAME), kind_(kind) {}
public:
Object* LoadFunction(State state, Handle<Object> object, Handle<String> name);
protected:
Code::Kind kind_;
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
}
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
private:
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
@ -219,6 +214,38 @@ class CallIC: public IC {
};
class CallIC: public CallICBase {
public:
CallIC() : CallICBase(Code::CALL_IC) { ASSERT(target()->is_call_stub()); }
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
}
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
};
class KeyedCallIC: public CallICBase {
public:
KeyedCallIC() : CallICBase(Code::KEYED_CALL_IC) {
ASSERT(target()->is_keyed_call_stub());
}
Object* LoadFunction(State state, Handle<Object> object, Handle<Object> key);
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
}
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
};
class LoadIC: public IC {
public:
LoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_load_stub()); }

4
deps/v8/src/liveedit.cc

@ -794,7 +794,7 @@ class FrameUncookingThreadVisitor : public ThreadVisitor {
static void IterateAllThreads(ThreadVisitor* visitor) {
Top::IterateThread(visitor);
ThreadManager::IterateThreads(visitor);
ThreadManager::IterateArchivedThreads(visitor);
}
// Finds all references to original and replaces them with substitution.
@ -1386,7 +1386,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
ThreadManager::IterateThreads(&inactive_threads_checker);
ThreadManager::IterateArchivedThreads(&inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
}

4
deps/v8/src/log.cc

@ -1295,6 +1295,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A call IC from the snapshot";
tag = Logger::CALL_IC_TAG;
break;
case Code::KEYED_CALL_IC:
description = "A keyed call IC from the snapshot";
tag = Logger::KEYED_CALL_IC_TAG;
break;
}
PROFILE(CodeCreateEvent(tag, code_object, description));
}

12
deps/v8/src/log.h

@ -106,6 +106,18 @@ class CompressionHelper;
V(CALL_MISS_TAG, "CallMiss", "cm") \
V(CALL_NORMAL_TAG, "CallNormal", "cn") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak", "kcdb") \
V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
"KeyedCallDebugPrepareStepIn", \
"kcdbsi") \
V(KEYED_CALL_IC_TAG, "KeyedCallIC", "kcic") \
V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize", "kci") \
V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic", "kcmm") \
V(KEYED_CALL_MISS_TAG, "KeyedCallMiss", "kcm") \
V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal", "kcn") \
V(KEYED_CALL_PRE_MONOMORPHIC_TAG, \
"KeyedCallPreMonomorphic", \
"kcpm") \
V(CALLBACK_TAG, "Callback", "cb") \
V(EVAL_TAG, "Eval", "e") \
V(FUNCTION_TAG, "Function", "f") \

12
deps/v8/src/objects-inl.h

@ -2196,7 +2196,8 @@ Code::Flags Code::flags() {
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= (kFlagsKindMask >> kFlagsKindShift)+1);
// Make sure that all call stubs have an arguments count.
ASSERT(ExtractKindFromFlags(flags) != CALL_IC ||
ASSERT((ExtractKindFromFlags(flags) != CALL_IC &&
ExtractKindFromFlags(flags) != KEYED_CALL_IC) ||
ExtractArgumentsCountFromFlags(flags) >= 0);
WRITE_INT_FIELD(this, kFlagsOffset, flags);
}
@ -2232,7 +2233,7 @@ PropertyType Code::type() {
int Code::arguments_count() {
ASSERT(is_call_stub() || kind() == STUB);
ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB);
return ExtractArgumentsCountFromFlags(flags());
}
@ -2986,8 +2987,7 @@ StringHasher::StringHasher(int length)
: length_(length),
raw_running_hash_(0),
array_index_(0),
is_array_index_(0 < length_ &&
length_ <= String::kMaxCachedArrayIndexLength),
is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
is_first_char_(true),
is_valid_(true) { }
@ -3050,7 +3050,9 @@ uint32_t StringHasher::GetHash() {
bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
if (IsHashFieldComputed(field) && !(field & kIsArrayIndexMask)) return false;
if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
return false;
}
return SlowAsArrayIndex(index);
}

31
deps/v8/src/objects.cc

@ -2013,19 +2013,25 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
CustomArguments args(interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
v8::NamedPropertyQueryImpl query =
v8::ToCData<v8::NamedPropertyQueryImpl>(interceptor->query());
LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
v8::Handle<v8::Boolean> result;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
result = query(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) {
// Convert the boolean result to a property attribute
// specification.
return result->IsTrue() ? NONE : ABSENT;
// Temporary complicated logic; to be removed soon.
if (result->IsBoolean()) {
// Convert the boolean result to a property attribute
// specification.
return result->IsTrue() ? NONE : ABSENT;
} else {
ASSERT(result->IsInt32());
return static_cast<PropertyAttributes>(result->Int32Value());
}
}
} else if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
@ -2700,7 +2706,7 @@ Object* JSObject::DefineGetterSetter(String* name,
return Heap::undefined_value();
}
uint32_t index;
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element && IsJSArray()) return Heap::undefined_value();
@ -2958,7 +2964,7 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
// Make the lookup and include prototypes.
int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
uint32_t index;
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
obj != Heap::null_value();
@ -4844,7 +4850,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
if (length() <= kMaxCachedArrayIndexLength) {
Hash(); // force computation of hash code
uint32_t field = hash_field();
if ((field & kIsArrayIndexMask) == 0) return false;
if ((field & kIsNotArrayIndexMask) != 0) return false;
// Isolate the array index from the full hash field.
*index = (kArrayIndexHashMask & field) >> kHashShift;
return true;
@ -4863,10 +4869,14 @@ static inline uint32_t HashField(uint32_t hash,
// For array indexes mix the length into the hash as an array index could
// be zero.
ASSERT(length > 0);
ASSERT(length <= String::kMaxArrayIndexSize);
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
result |= String::kIsArrayIndexMask;
ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits));
result &= ~String::kIsNotArrayIndexMask;
result |= length << String::kArrayIndexHashLengthShift;
} else {
result |= String::kIsNotArrayIndexMask;
}
return result;
}
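For a concrete instance of the encoding: the two-character array-index string "42" gets the index in the value bits and the length in the top bits, with both flag bits clear. A sketch of the array-index branch of HashField, using the objects.h constants (kHashShift == 2 and kArrayIndexHashLengthShift == 26 in this layout):

// Sketch of the array-index branch of HashField above.
static uint32_t ArrayIndexHashField(uint32_t index, uint32_t length) {
  uint32_t result = index << String::kHashShift;  // index in the value bits
  // Bit 0 (kHashNotComputedMask) and bit 1 (kIsNotArrayIndexMask) stay
  // clear; the length lands above the 24 index value bits.
  result |= length << String::kArrayIndexHashLengthShift;
  return result;
}
// ArrayIndexHashField(42, 2) == (42 << 2) | (2 << 26)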
@ -5396,6 +5406,7 @@ const char* Code::Kind2String(Kind kind) {
case STORE_IC: return "STORE_IC";
case KEYED_STORE_IC: return "KEYED_STORE_IC";
case CALL_IC: return "CALL_IC";
case KEYED_CALL_IC: return "KEYED_CALL_IC";
case BINARY_OP_IC: return "BINARY_OP_IC";
}
UNREACHABLE();

37
deps/v8/src/objects.h

@ -408,7 +408,7 @@ const uint32_t kStringRepresentationMask = 0x03;
enum StringRepresentationTag {
kSeqStringTag = 0x0,
kConsStringTag = 0x1,
kExternalStringTag = 0x3
kExternalStringTag = 0x2
};
const uint32_t kIsConsStringMask = 0x1;
@ -2669,6 +2669,7 @@ class Code: public HeapObject {
LOAD_IC,
KEYED_LOAD_IC,
CALL_IC,
KEYED_CALL_IC,
STORE_IC,
KEYED_STORE_IC,
BINARY_OP_IC,
@ -2723,6 +2724,7 @@ class Code: public HeapObject {
inline bool is_store_stub() { return kind() == STORE_IC; }
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline CodeStub::Major major_key();
@ -4192,11 +4194,11 @@ class String: public HeapObject {
// computed the 2nd bit tells whether the string can be used as an
// array index.
static const int kHashNotComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
static const int kNofLengthBitFields = 2;
static const int kIsNotArrayIndexMask = 1 << 1;
static const int kNofHashBitFields = 2;
// Shift constant retrieving hash code from hash field.
static const int kHashShift = kNofLengthBitFields;
static const int kHashShift = kNofHashBitFields;
// Array index strings this short can keep their index in the hash
// field.
@ -4205,18 +4207,35 @@ class String: public HeapObject {
// For strings which are array indexes the hash value has the string length
// mixed into the hash, mainly to avoid a hash value of zero which would be
// the case for the string '0'. 24 bits are used for the array index value.
static const int kArrayIndexHashLengthShift = 24 + kNofLengthBitFields;
static const int kArrayIndexValueBits = 24;
static const int kArrayIndexLengthBits =
kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
STATIC_CHECK((kArrayIndexLengthBits > 0));
static const int kArrayIndexHashLengthShift =
kArrayIndexValueBits + kNofHashBitFields;
static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
static const int kArrayIndexValueBits =
kArrayIndexHashLengthShift - kHashShift;
static const int kArrayIndexValueMask =
((1 << kArrayIndexValueBits) - 1) << kHashShift;
// Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
// can use a mask to test whether the length of a string is less than
// or equal to kMaxCachedArrayIndexLength.
STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
static const int kContainsCachedArrayIndexMask =
(~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
kIsNotArrayIndexMask;
// Value of empty hash field indicating that the hash is not computed.
static const int kEmptyHashField = kHashNotComputedMask;
static const int kEmptyHashField =
kIsNotArrayIndexMask | kHashNotComputedMask;
// Value of hash field containing computed hash equal to zero.
static const int kZeroHash = 0;
static const int kZeroHash = kIsNotArrayIndexMask;
// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
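The inverted flag is what makes kContainsCachedArrayIndexMask a one-instruction predicate: a hash field holds a directly usable index exactly when the not-array-index bit is clear and the length bits do not exceed kMaxCachedArrayIndexLength, and the mask tests both with a single AND. A sketch of the layout and the test, following the constants above:

// 32-bit hash field layout, low bit to high:
//   bit 0       kHashNotComputedMask
//   bit 1       kIsNotArrayIndexMask (set when the string is NOT an index)
//   bits 2-25   array index value or hash bits (kArrayIndexValueBits == 24)
//   bits 26-31  array index length (kArrayIndexLengthBits)
static bool ContainsCachedArrayIndex(uint32_t field) {
  // Zero iff bit 1 is clear and the length fits in kMaxCachedArrayIndexLength
  // (max + 1 is a power of two, so ~max is zero exactly in the low bits that
  // cached lengths may occupy).
  return (field & String::kContainsCachedArrayIndexMask) == 0;
}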

2
deps/v8/src/profile-generator.h

@ -260,6 +260,7 @@ class CpuProfilesCollection {
CpuProfile* GetProfile(int security_token_id, unsigned uid);
inline bool is_last_profile();
const char* GetName(String* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
@ -274,7 +275,6 @@ class CpuProfilesCollection {
private:
INLINE(const char* GetFunctionName(String* name));
INLINE(const char* GetFunctionName(const char* name));
const char* GetName(String* name);
const char* GetName(int args_count);
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);

24
deps/v8/src/regexp.js

@ -257,6 +257,10 @@ function RegExpExec(string) {
}
// One-element cache for the simplified test regexp.
var regexp_key;
var regexp_val;
// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
// that test is defined in terms of String.prototype.exec. However, it probably
// means the original value of String.prototype.exec, which is what everybody
@ -281,9 +285,7 @@ function RegExpTest(string) {
}
var lastIndex = this.lastIndex;
var cache = regExpCache;
if (%_ObjectEquals(cache.type, 'test') &&
%_ObjectEquals(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string) &&
@ -291,6 +293,22 @@ function RegExpTest(string) {
return cache.answer;
}
// Remove an irrelevant preceding '.*' in a test regexp. The expression
// checks whether this.source starts with '.*' and that the third
// char is not a '?'.
if (%_StringCharCodeAt(this.source,0) == 46 && // '.'
%_StringCharCodeAt(this.source,1) == 42 && // '*'
%_StringCharCodeAt(this.source,2) != 63) { // '?'
if (!%_ObjectEquals(regexp_key, this)) {
regexp_key = this;
regexp_val = new $RegExp(this.source.substring(2, this.source.length),
(this.global ? 'g' : '')
+ (this.ignoreCase ? 'i' : '')
+ (this.multiline ? 'm' : ''));
}
if (!regexp_val.test(s)) return false;
}
var length = s.length;
var i = this.global ? TO_INTEGER(lastIndex) : 0;
@ -299,7 +317,7 @@ function RegExpTest(string) {
cache.subject = s;
cache.lastIndex = i;
if (i < 0 || i > s.length) {
if (i < 0 || i > length) {
this.lastIndex = 0;
cache.answer = false;
return false;

104
deps/v8/src/runtime.cc

@ -1638,22 +1638,6 @@ static Object* Runtime_SetCode(Arguments args) {
}
static Object* CharCodeAt(String* subject, Object* index) {
uint32_t i = 0;
if (!index->ToArrayIndex(&i)) return Heap::nan_value();
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
Object* flat = subject->TryFlatten();
if (flat->IsFailure()) return flat;
subject = String::cast(flat);
if (i >= static_cast<uint32_t>(subject->length())) {
return Heap::nan_value();
}
return Smi::FromInt(subject->Get(i));
}
static Object* CharFromCode(Object* char_code) {
uint32_t code;
if (char_code->ToArrayIndex(&code)) {
@ -1671,21 +1655,31 @@ static Object* Runtime_StringCharCodeAt(Arguments args) {
CONVERT_CHECKED(String, subject, args[0]);
Object* index = args[1];
return CharCodeAt(subject, index);
}
RUNTIME_ASSERT(index->IsNumber());
uint32_t i = 0;
if (index->IsSmi()) {
int value = Smi::cast(index)->value();
if (value < 0) return Heap::nan_value();
i = value;
} else {
ASSERT(index->IsHeapNumber());
double value = HeapNumber::cast(index)->value();
i = static_cast<uint32_t>(DoubleToInteger(value));
}
static Object* Runtime_StringCharAt(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
Object* flat = subject->TryFlatten();
if (flat->IsFailure()) return flat;
subject = String::cast(flat);
CONVERT_CHECKED(String, subject, args[0]);
Object* index = args[1];
Object* code = CharCodeAt(subject, index);
if (code == Heap::nan_value()) {
return Heap::undefined_value();
if (i >= static_cast<uint32_t>(subject->length())) {
return Heap::nan_value();
}
return CharFromCode(code);
return Smi::FromInt(subject->Get(i));
}
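The rewritten fast path normalizes the index to uint32 before a single bounds check: negative smis return NaN immediately, and heap numbers go through truncation and an unsigned cast so that anything out of range ends up >= length. A condensed standalone sketch of that normalization (a hypothetical helper; trunc stands in for V8's DoubleToInteger, which truncates toward zero and maps NaN to zero):

#include <math.h>
#include <stdint.h>

// Returns true and stores the index when 'value' names a character within
// 'length'; returns false when the caller should produce NaN.
static bool NormalizeCharCodeIndex(double value, uint32_t length,
                                   uint32_t* index) {
  double integer = (value != value) ? 0.0 : trunc(value);  // NaN -> 0
  // The stub gets the same effect more cheaply by letting the uint32 cast
  // wrap negatives to huge values and testing i >= length once.
  if (integer < 0.0 || integer >= static_cast<double>(length)) return false;
  *index = static_cast<uint32_t>(integer);
  return true;
}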
@ -5344,6 +5338,9 @@ static Object* Runtime_NumberToInteger(Arguments args) {
}
static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@ -7248,6 +7245,24 @@ static Object* Runtime_CompileString(Arguments args) {
}
static ObjectPair CompileGlobalEval(Handle<String> source,
Handle<Object> receiver) {
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
Handle<Context>(Top::context()),
Top::context()->IsGlobalContext(),
Compiler::DONT_VALIDATE_JSON);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
shared,
Handle<Context>(Top::context()),
NOT_TENURED);
return MakePair(*compiled, *receiver);
}
static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
ASSERT(args.length() == 3);
if (!args[0]->IsJSFunction()) {
@ -7313,20 +7328,27 @@ static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
return MakePair(*callee, Top::context()->global()->global_receiver());
}
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
Handle<String> source = args.at<String>(1);
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
Handle<Context>(Top::context()),
Top::context()->IsGlobalContext(),
Compiler::DONT_VALIDATE_JSON);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
callee = Factory::NewFunctionFromSharedFunctionInfo(
shared,
Handle<Context>(Top::context()),
NOT_TENURED);
return MakePair(*callee, args[2]);
return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
}
static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
ASSERT(args.length() == 3);
if (!args[0]->IsJSFunction()) {
return MakePair(Top::ThrowIllegalOperation(), NULL);
}
HandleScope scope;
Handle<JSFunction> callee = args.at<JSFunction>(0);
// 'eval' is bound in the global context, but it may have been overwritten.
// Compare it to the builtin 'GlobalEval' function to make sure.
if (*callee != Top::global_context()->global_eval_fun() ||
!args[1]->IsString()) {
return MakePair(*callee, Top::context()->global()->global_receiver());
}
return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
}
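
The net effect of this hunk is easier to see side by side: both resolvers now funnel compilation through the new CompileGlobalEval helper, so the two entry points differ only in how the callee is validated. A condensed sketch (not verbatim; the elided checks are exactly the ones shown in the hunks above):

static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
  // ... full context lookup and shadowing checks elided ...
  return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
}

static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
  // ... only the cheap identity check against global_eval_fun() ...
  return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
}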

2
deps/v8/src/runtime.h

@ -162,7 +162,6 @@ namespace internal {
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
F(StringCharAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
@ -223,6 +222,7 @@ namespace internal {
/* Eval */ \
F(GlobalReceiver, 1, 1) \
F(ResolvePossiblyDirectEval, 3, 2) \
F(ResolvePossiblyDirectEvalNoLookup, 3, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
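
For readers new to this table: each F(name, nargs, result_size) entry registers a runtime function, and a result_size of 2 means the function returns an ObjectPair (two values), as both eval resolvers in runtime.cc above do. The touched entries, annotated (added/removed status is inferred from the hunk line counts, 7->6 and 6->7):

F(StringCharAt, 2, 1)                       // removed, together with Runtime_StringCharAt
F(ResolvePossiblyDirectEvalNoLookup, 3, 2)  // added: 3 args, ObjectPair result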

1
deps/v8/src/spaces.cc

@ -1457,6 +1457,7 @@ static void ReportCodeKindStatistics() {
CASE(STORE_IC);
CASE(KEYED_STORE_IC);
CASE(CALL_IC);
CASE(KEYED_CALL_IC);
CASE(BINARY_OP_IC);
}
}

155
deps/v8/src/stub-cache.cc

@ -441,9 +441,12 @@ Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
return code;
}
#define CALL_LOGGER_TAG(kind, type) \
(kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
Object* StubCache::ComputeCallConstant(int argc,
InLoopFlag in_loop,
Code::Kind kind,
String* name,
Object* object,
JSObject* holder,
@ -462,7 +465,7 @@ Object* StubCache::ComputeCallConstant(int argc,
}
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::CALL_IC,
Code::ComputeMonomorphicFlags(kind,
CONSTANT_FUNCTION,
in_loop,
argc);
@ -474,11 +477,12 @@ Object* StubCache::ComputeCallConstant(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
CallStubCompiler compiler(argc, in_loop);
CallStubCompiler compiler(argc, in_loop, kind);
code = compiler.CompileCallConstant(object, holder, function, name, check);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@ -488,6 +492,7 @@ Object* StubCache::ComputeCallConstant(int argc,
Object* StubCache::ComputeCallField(int argc,
InLoopFlag in_loop,
Code::Kind kind,
String* name,
Object* object,
JSObject* holder,
@ -502,20 +507,21 @@ Object* StubCache::ComputeCallField(int argc,
object = holder;
}
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, in_loop);
CallStubCompiler compiler(argc, in_loop, kind);
code = compiler.CompileCallField(JSObject::cast(object),
holder,
index,
name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@ -524,6 +530,7 @@ Object* StubCache::ComputeCallField(int argc,
Object* StubCache::ComputeCallInterceptor(int argc,
Code::Kind kind,
String* name,
Object* object,
JSObject* holder) {
@ -539,19 +546,20 @@ Object* StubCache::ComputeCallInterceptor(int argc,
}
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::CALL_IC,
Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
NOT_IN_LOOP,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, NOT_IN_LOOP);
CallStubCompiler compiler(argc, NOT_IN_LOOP, kind);
code = compiler.CompileCallInterceptor(JSObject::cast(object),
holder,
name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@ -561,9 +569,10 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Object* StubCache::ComputeCallNormal(int argc,
InLoopFlag in_loop,
Code::Kind kind,
String* name,
JSObject* receiver) {
Object* code = ComputeCallNormal(argc, in_loop);
Object* code = ComputeCallNormal(argc, in_loop, kind);
if (code->IsFailure()) return code;
return Set(name, receiver->map(), Code::cast(code));
}
@ -571,13 +580,17 @@ Object* StubCache::ComputeCallNormal(int argc,
Object* StubCache::ComputeCallGlobal(int argc,
InLoopFlag in_loop,
Code::Kind kind,
String* name,
JSObject* receiver,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::CALL_IC, NORMAL, in_loop, argc);
Code::ComputeMonomorphicFlags(kind,
NORMAL,
in_loop,
argc);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
@ -585,11 +598,12 @@ Object* StubCache::ComputeCallGlobal(int argc,
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
CallStubCompiler compiler(argc, in_loop);
CallStubCompiler compiler(argc, in_loop, kind);
code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@ -637,9 +651,11 @@ static Object* FillCache(Object* code) {
}
Code* StubCache::FindCallInitialize(int argc, InLoopFlag in_loop) {
Code* StubCache::FindCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
Object* result = ProbeCache(flags);
ASSERT(!result->IsUndefined());
// This might be called during the marking phase of the collector
@ -648,9 +664,11 @@ Code* StubCache::FindCallInitialize(int argc, InLoopFlag in_loop) {
}
Object* StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
Object* StubCache::ComputeCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@ -658,9 +676,11 @@ Object* StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
}
Object* StubCache::ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop) {
Object* StubCache::ComputeCallPreMonomorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, in_loop, PREMONOMORPHIC, NORMAL, argc);
Code::ComputeFlags(kind, in_loop, PREMONOMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@ -668,9 +688,11 @@ Object* StubCache::ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop) {
}
Object* StubCache::ComputeCallNormal(int argc, InLoopFlag in_loop) {
Object* StubCache::ComputeCallNormal(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, in_loop, MONOMORPHIC, NORMAL, argc);
Code::ComputeFlags(kind, in_loop, MONOMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@ -678,9 +700,11 @@ Object* StubCache::ComputeCallNormal(int argc, InLoopFlag in_loop) {
}
Object* StubCache::ComputeCallMegamorphic(int argc, InLoopFlag in_loop) {
Object* StubCache::ComputeCallMegamorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, in_loop, MEGAMORPHIC, NORMAL, argc);
Code::ComputeFlags(kind, in_loop, MEGAMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@ -688,9 +712,11 @@ Object* StubCache::ComputeCallMegamorphic(int argc, InLoopFlag in_loop) {
}
Object* StubCache::ComputeCallMiss(int argc) {
Code::Flags flags =
Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, MEGAMORPHIC, NORMAL, argc);
Object* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
// MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
// and monomorphic stubs are not mixed up together in the stub cache.
Code::Flags flags = Code::ComputeFlags(
kind, NOT_IN_LOOP, MONOMORPHIC_PROTOTYPE_FAILURE, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@ -699,9 +725,9 @@ Object* StubCache::ComputeCallMiss(int argc) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Object* StubCache::ComputeCallDebugBreak(int argc) {
Object* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
Code::ComputeFlags(kind, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@ -709,9 +735,9 @@ Object* StubCache::ComputeCallDebugBreak(int argc) {
}
Object* StubCache::ComputeCallDebugPrepareStepIn(int argc) {
Object* StubCache::ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC,
Code::ComputeFlags(kind,
NOT_IN_LOOP,
DEBUG_PREPARE_STEP_IN,
NORMAL,
@ -758,8 +784,8 @@ void StubCache::Clear() {
// Support function for computing call IC miss stubs.
Handle<Code> ComputeCallMiss(int argc) {
CALL_HEAP_FUNCTION(StubCache::ComputeCallMiss(argc), Code);
Handle<Code> ComputeCallMiss(int argc, Code::Kind kind) {
CALL_HEAP_FUNCTION(StubCache::ComputeCallMiss(argc, kind), Code);
}
@ -966,13 +992,18 @@ Object* KeyedLoadPropertyWithInterceptor(Arguments args) {
Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
CallIC::GenerateInitialize(masm(), argc);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateInitialize(masm(), argc);
} else {
KeyedCallIC::GenerateInitialize(masm(), argc);
}
Object* result = GetCodeWithFlags(flags, "CompileCallInitialize");
if (!result->IsFailure()) {
Counters::call_initialize_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
code, code->arguments_count()));
}
return result;
@ -984,13 +1015,18 @@ Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
// The code of the PreMonomorphic stub is the same as the code
// of the Initialized stub. They just differ on the code object flags.
CallIC::GenerateInitialize(masm(), argc);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateInitialize(masm(), argc);
} else {
KeyedCallIC::GenerateInitialize(masm(), argc);
}
Object* result = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
if (!result->IsFailure()) {
Counters::call_premonomorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
code, code->arguments_count()));
}
return result;
@ -1000,13 +1036,18 @@ Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
CallIC::GenerateNormal(masm(), argc);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateNormal(masm(), argc);
} else {
KeyedCallIC::GenerateNormal(masm(), argc);
}
Object* result = GetCodeWithFlags(flags, "CompileCallNormal");
if (!result->IsFailure()) {
Counters::call_normal_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
code, code->arguments_count()));
}
return result;
@ -1016,13 +1057,19 @@ Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
CallIC::GenerateMegamorphic(masm(), argc);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateMegamorphic(masm(), argc);
} else {
KeyedCallIC::GenerateMegamorphic(masm(), argc);
}
Object* result = GetCodeWithFlags(flags, "CompileCallMegamorphic");
if (!result->IsFailure()) {
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
code, code->arguments_count()));
}
return result;
@ -1032,13 +1079,18 @@ Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
Object* StubCompiler::CompileCallMiss(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
CallIC::GenerateMiss(masm(), argc);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateMiss(masm(), argc);
} else {
KeyedCallIC::GenerateMiss(masm(), argc);
}
Object* result = GetCodeWithFlags(flags, "CompileCallMiss");
if (!result->IsFailure()) {
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::CALL_MISS_TAG,
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
code, code->arguments_count()));
}
return result;
@ -1053,7 +1105,8 @@ Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
Code::Kind kind = Code::ExtractKindFromFlags(flags);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
code, code->arguments_count()));
}
return result;
@ -1065,18 +1118,26 @@ Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
// Use the same code for the step in preparations as we do for

// the miss case.
int argc = Code::ExtractArgumentsCountFromFlags(flags);
CallIC::GenerateMiss(masm(), argc);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateMiss(masm(), argc);
} else {
KeyedCallIC::GenerateMiss(masm(), argc);
}
Object* result = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
code, code->arguments_count()));
PROFILE(CodeCreateEvent(
CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
code,
code->arguments_count()));
}
return result;
}
#endif
#undef CALL_LOGGER_TAG
Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) {
// Check for allocation failures during stub compilation.
@ -1167,7 +1228,7 @@ Object* CallStubCompiler::CompileCustomCall(int generator_id,
Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
in_loop_,
argc);
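
Since CALL_LOGGER_TAG, defined near the top of this file, is what keeps profiler output distinguishing the two call-IC families, a quick look at how it evaluates (the KEYED_* names are assumed to be the Logger enum counterparts this commit introduces):

// The ## paste turns a plain tag into its keyed sibling:
CALL_LOGGER_TAG(Code::CALL_IC, CALL_IC_TAG);          // Logger::CALL_IC_TAG
CALL_LOGGER_TAG(Code::KEYED_CALL_IC, CALL_IC_TAG);    // Logger::KEYED_CALL_IC_TAG
CALL_LOGGER_TAG(Code::KEYED_CALL_IC, CALL_MISS_TAG);  // Logger::KEYED_CALL_MISS_TAG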

59
deps/v8/src/stub-cache.h

@ -142,6 +142,7 @@ class StubCache : public AllStatic {
static Object* ComputeCallField(int argc,
InLoopFlag in_loop,
Code::Kind,
String* name,
Object* object,
JSObject* holder,
@ -149,6 +150,7 @@ class StubCache : public AllStatic {
static Object* ComputeCallConstant(int argc,
InLoopFlag in_loop,
Code::Kind,
String* name,
Object* object,
JSObject* holder,
@ -156,16 +158,19 @@ class StubCache : public AllStatic {
static Object* ComputeCallNormal(int argc,
InLoopFlag in_loop,
Code::Kind,
String* name,
JSObject* receiver);
static Object* ComputeCallInterceptor(int argc,
Code::Kind,
String* name,
Object* object,
JSObject* holder);
static Object* ComputeCallGlobal(int argc,
InLoopFlag in_loop,
Code::Kind,
String* name,
JSObject* receiver,
GlobalObject* holder,
@ -174,18 +179,33 @@ class StubCache : public AllStatic {
// ---
static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop);
static Object* ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop);
static Object* ComputeCallNormal(int argc, InLoopFlag in_loop);
static Object* ComputeCallMegamorphic(int argc, InLoopFlag in_loop);
static Object* ComputeCallMiss(int argc);
static Object* ComputeCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind);
static Object* ComputeCallPreMonomorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind);
static Object* ComputeCallNormal(int argc,
InLoopFlag in_loop,
Code::Kind kind);
static Object* ComputeCallMegamorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind);
static Object* ComputeCallMiss(int argc, Code::Kind kind);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
static Code* FindCallInitialize(int argc, InLoopFlag in_loop);
static Code* FindCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* ComputeCallDebugBreak(int argc);
static Object* ComputeCallDebugPrepareStepIn(int argc);
static Object* ComputeCallDebugBreak(int argc, Code::Kind kind);
static Object* ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
#endif
static Object* ComputeLazyCompile(int argc);
@ -197,9 +217,6 @@ class StubCache : public AllStatic {
// Clear the lookup table (@ mark compact collection).
static void Clear();
// Functions for generating stubs at startup.
static void GenerateMiss(MacroAssembler* masm);
// Generate code for probing the stub cache table.
// If extra != no_reg it might be used as an extra scratch register.
static void GenerateProbe(MacroAssembler* masm,
@ -318,7 +335,7 @@ Object* KeyedLoadPropertyWithInterceptor(Arguments args);
// Support function for computing call IC miss stubs.
Handle<Code> ComputeCallMiss(int argc);
Handle<Code> ComputeCallMiss(int argc, Code::Kind kind);
// The stub compiler compiles stubs for the stub cache.
@ -349,6 +366,15 @@ class StubCompiler BASE_EMBEDDED {
int index,
Register prototype);
// Generates prototype loading code that uses the objects from the
// context we were in when this function was called. This ties the
// generated code to a particular context and so must not be used in
// cases where the generated code is not allowed to have references
// to objects from a context.
static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype);
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
JSObject* holder, int index);
@ -585,8 +611,8 @@ class CallStubCompiler: public StubCompiler {
kNumCallGenerators
};
CallStubCompiler(int argc, InLoopFlag in_loop)
: arguments_(argc), in_loop_(in_loop) { }
CallStubCompiler(int argc, InLoopFlag in_loop, Code::Kind kind)
: arguments_(argc), in_loop_(in_loop), kind_(kind) { }
Object* CompileCallField(JSObject* object,
JSObject* holder,
@ -626,6 +652,7 @@ class CallStubCompiler: public StubCompiler {
private:
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
const ParameterCount& arguments() { return arguments_; }
@ -634,6 +661,10 @@ class CallStubCompiler: public StubCompiler {
// Convenience function. Calls GetCode above passing
// CONSTANT_FUNCTION type and the name of the given function.
Object* GetCode(JSFunction* function);
void GenerateNameCheck(String* name, Label* miss);
void GenerateMissBranch();
};
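
A minimal usage sketch of the widened constructor, mirroring the ComputeCallField call site in stub-cache.cc above; the kind argument is the only new parameter:

// Code::CALL_IC compiles stubs for o.f(...) sites,
// Code::KEYED_CALL_IC for o[expr](...) sites.
CallStubCompiler compiler(argc, in_loop, Code::KEYED_CALL_IC);
Object* code = compiler.CompileCallField(JSObject::cast(object), holder, index, name);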

2
deps/v8/src/type-info.h

@ -47,7 +47,7 @@ namespace internal {
class TypeInfo {
public:
TypeInfo() { }
TypeInfo() : type_(kUnknownType) { }
static inline TypeInfo Unknown();
// We know it's a primitive type.

8
deps/v8/src/unbound-queue-inl.h

@ -82,6 +82,14 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) {
while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
}
template<typename Record>
Record* UnboundQueue<Record>::Peek() {
ASSERT(divider_ != last_);
Node* next = reinterpret_cast<Node*>(divider_)->next;
return &next->value;
}
} } // namespace v8::internal
#endif // V8_UNBOUND_QUEUE_INL_H_

1
deps/v8/src/unbound-queue.h

@ -47,6 +47,7 @@ class UnboundQueue BASE_EMBEDDED {
INLINE(void Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
INLINE(bool IsEmpty()) { return divider_ == last_; }
INLINE(Record* Peek());
private:
INLINE(void DeleteFirst());
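
A sketch of the single-consumer pattern the new Peek() enables: inspect the head record without removing it, and Dequeue only when it can actually be handled. Record, IsReady and Process are placeholders, not part of the diff; the likely consumer is the profiler event processor:

template<typename Record>
static void DrainOne(UnboundQueue<Record>* queue) {
  if (queue->IsEmpty()) return;  // Peek() asserts a non-empty queue
  Record* head = queue->Peek();  // still owned by the queue
  if (!IsReady(*head)) return;   // IsReady: hypothetical readiness check
  Record rec;
  queue->Dequeue(&rec);          // now actually consume it
  Process(rec);                  // Process: hypothetical handler
}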

46
deps/v8/src/utils.h

@ -37,11 +37,13 @@ namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
// Returns true iff x is a power of 2 (or zero). Cannot be used with the
// maximally negative value of the type T (the -1 overflows).
template <typename T>
static inline bool IsPowerOf2(T x) {
return (x & (x - 1)) == 0;
return IS_POWER_OF_TWO(x);
}
@ -525,12 +527,54 @@ class StringBuilder {
};
// Custom memcpy implementation for platforms where the standard version
// may not be good enough.
// TODO(lrn): Check whether some IA32 platforms should be excluded.
#if defined(V8_TARGET_ARCH_IA32)
// TODO(lrn): Extend to other platforms as needed.
typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
// Implemented in codegen-<arch>.cc.
MemCopyFunction CreateMemCopyFunction();
// Copy memory area to disjoint memory area.
static inline void MemCopy(void* dest, const void* src, size_t size) {
static MemCopyFunction memcopy = CreateMemCopyFunction();
(*memcopy)(dest, src, size);
#ifdef DEBUG
CHECK_EQ(0, memcmp(dest, src, size));
#endif
}
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
// TODO(lrn): Try to find a more precise value.
static const int kMinComplexMemCopy = 256;
#else // V8_TARGET_ARCH_IA32
static inline void MemCopy(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
static const int kMinComplexMemCopy = 256;
#endif // V8_TARGET_ARCH_IA32
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
MemCopy(dest, src, chars * sizeof(*dest));
return;
}
// Number of characters in a uintptr_t.
static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
while (dest <= limit - kStepSize) {
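
The rewritten IsPowerOf2 keeps the classic x & (x - 1) trick but now spells out its edge cases; a few concrete values (T deduced as int):

IsPowerOf2(1);    // true: 2^0
IsPowerOf2(64);   // true: 2^6
IsPowerOf2(48);   // false: 0b110000 has two bits set
IsPowerOf2(0);    // true: the documented "(or zero)" case, since 0 & (0 - 1) == 0
// Per the comment above, the most negative value of T is off limits:
// computing x - 1 overflows there.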

4
deps/v8/src/v8-counters.h

@ -123,6 +123,7 @@ namespace internal {
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
/* Count how much the monomorphic keyed-load stubs are hit. */ \
@ -156,6 +157,9 @@ namespace internal {
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
SC(memcopy_aligned, V8.MemCopyAligned) \
SC(memcopy_unaligned, V8.MemCopyUnaligned) \
SC(memcopy_noxmm, V8.MemCopyNoXMM) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(reloc_info_count, V8.RelocInfoCount) \

2
deps/v8/src/v8threads.cc

@ -331,7 +331,7 @@ void ThreadManager::Iterate(ObjectVisitor* v) {
}
void ThreadManager::IterateThreads(ThreadVisitor* v) {
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
for (ThreadState* state = ThreadState::FirstInUse();
state != NULL;
state = state->Next()) {

2
deps/v8/src/v8threads.h

@ -104,7 +104,7 @@ class ThreadManager : public AllStatic {
static bool IsArchived();
static void Iterate(ObjectVisitor* v);
static void IterateThreads(ThreadVisitor* v);
static void IterateArchivedThreads(ThreadVisitor* v);
static void MarkCompactPrologue(bool is_compacting);
static void MarkCompactEpilogue(bool is_compacting);
static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 13
#define BUILD_NUMBER 15
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

10
deps/v8/src/virtual-frame-light-inl.h

@ -42,7 +42,8 @@ namespace internal {
VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
: element_count_(0),
top_of_stack_state_(NO_TOS_REGISTERS),
register_allocation_map_(0) { }
register_allocation_map_(0),
tos_known_smi_map_(0) { }
// On entry to a function, the virtual frame already contains the receiver,
@ -50,20 +51,23 @@ VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
VirtualFrame::VirtualFrame()
: element_count_(parameter_count() + 2),
top_of_stack_state_(NO_TOS_REGISTERS),
register_allocation_map_(0) { }
register_allocation_map_(0),
tos_known_smi_map_(0) { }
// When cloned, a frame is a deep copy of the original.
VirtualFrame::VirtualFrame(VirtualFrame* original)
: element_count_(original->element_count()),
top_of_stack_state_(original->top_of_stack_state_),
register_allocation_map_(original->register_allocation_map_) { }
register_allocation_map_(original->register_allocation_map_),
tos_known_smi_map_(0) { }
bool VirtualFrame::Equals(const VirtualFrame* other) {
ASSERT(element_count() == other->element_count());
if (top_of_stack_state_ != other->top_of_stack_state_) return false;
if (register_allocation_map_ != other->register_allocation_map_) return false;
if (tos_known_smi_map_ != other->tos_known_smi_map_) return false;
return true;
}
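
A note on the new member, inferred from the constructors and Equals above: tos_known_smi_map_ is a bitmap kept alongside register_allocation_map_.

// Bit i set => the i-th top-of-stack register is known to hold a smi, so the
// code generator may omit smi checks. Frames that disagree on this knowledge
// must not compare equal: merging them could skip a check that one
// control-flow path still needs.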

2
deps/v8/src/virtual-frame-light.cc

@ -36,7 +36,7 @@ namespace internal {
void VirtualFrame::Adjust(int count) {
ASSERT(count >= 0);
element_count_ += count;
RaiseHeight(count, 0);
}

7
deps/v8/src/x64/builtins-x64.cc

@ -418,9 +418,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movq(rax, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
__ movq(rcx, Operand(rbp, kArgumentsOffset)); // load arguments
__ push(rcx);
__ push(rax);
__ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@ -430,8 +428,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// we have generated an inline version of the keyed load. In this
// case, we know that we are not generating a test instruction next.
// Remove IC arguments from the stack and push the nth argument.
__ addq(rsp, Immediate(2 * kPointerSize));
// Push the nth argument.
__ push(rax);
// Update the index on the stack and in register rax.

123
deps/v8/src/x64/codegen-x64.cc

@ -660,9 +660,25 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
void DeferredReferenceGetKeyedValue::Generate() {
__ push(receiver_); // First IC argument.
__ push(key_); // Second IC argument.
if (receiver_.is(rdx)) {
if (!key_.is(rax)) {
__ movq(rax, key_);
} // else do nothing.
} else if (receiver_.is(rax)) {
if (key_.is(rdx)) {
__ xchg(rax, rdx);
} else if (key_.is(rax)) {
__ movq(rdx, receiver_);
} else {
__ movq(rdx, receiver_);
__ movq(rax, key_);
}
} else if (key_.is(rax)) {
__ movq(rdx, receiver_);
} else {
__ movq(rax, key_);
__ movq(rdx, receiver_);
}
// Calculate the delta from the IC call instruction to the map check
// movq instruction in the inlined version. This delta is stored in
// a test(rax, delta) instruction after the call so that we can find
@ -686,8 +702,6 @@ void DeferredReferenceGetKeyedValue::Generate() {
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
__ pop(key_);
__ pop(receiver_);
}
@ -794,6 +808,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
frame()->Dup();
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
@ -2868,26 +2883,66 @@ void CodeGenerator::VisitCall(Call* node) {
// Allocate a frame slot for the receiver.
frame_->Push(Factory::undefined_value());
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval.
// Result to hold the result of the function resolution and the
// final result of the eval call.
Result result;
// If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
JumpTarget done;
if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
ASSERT(var->slot()->type() == Slot::LOOKUP);
JumpTarget slow;
// Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the
// receiver.
Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->Push(&fun);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
frame_->Push(Factory::undefined_value());
}
frame_->PushParameterAt(-1);
// Resolve the call.
result =
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
done.Jump(&result);
slow.Bind();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval by
// pushing the loaded function, the first argument to the eval
// call and the receiver.
frame_->PushElementAt(arg_count + 1);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
frame_->Push(Factory::undefined_value());
}
// Push the receiver.
frame_->PushParameterAt(-1);
// Resolve the call.
Result result =
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
if (done.is_linked()) done.Bind(&result);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@ -5791,8 +5846,6 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// property case was inlined. Ensure that there is not a test rax
// instruction here.
masm_->nop();
// Discard the global object. The result is in answer.
frame_->Drop();
return answer;
}
@ -5853,7 +5906,6 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
frame_->Push(&arguments);
frame_->Push(key_literal->handle());
*result = EmitKeyedLoad();
frame_->Drop(2); // Drop key and receiver.
done->Jump(result);
}
}
@ -6740,7 +6792,9 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
void DeferredReferenceGetNamedValue::Generate() {
__ push(receiver_);
if (!receiver_.is(rax)) {
__ movq(rax, receiver_);
}
__ Move(rcx, name_);
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@ -6757,7 +6811,6 @@ void DeferredReferenceGetNamedValue::Generate() {
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
__ pop(receiver_);
}
@ -7418,9 +7471,8 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
__ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit();
frame()->Push(&receiver);
}
ASSERT(frame()->height() == original_height);
ASSERT(frame()->height() == original_height - 1);
return result;
}
@ -7448,6 +7500,9 @@ Result CodeGenerator::EmitKeyedLoad() {
key.ToRegister();
receiver.ToRegister();
// If key and receiver are shared registers on the frame, their values will
// be automatically saved and restored when going to deferred code.
// The result is returned in elements, which is not shared.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
@ -7460,9 +7515,9 @@ Result CodeGenerator::EmitKeyedLoad() {
// initialization code.
__ bind(deferred->patch_site());
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching. Do not use
// root array to load null_value, since it must be patched with
// the expected receiver map.
// coverage code can interfere with the patching. Do not use a load
// from the root array to load null_value, since the load must be patched
// with the expected receiver map, which is not in the root array.
masm_->movq(kScratchRegister, Factory::null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
@ -7505,8 +7560,6 @@ Result CodeGenerator::EmitKeyedLoad() {
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
frame_->Push(&receiver);
frame_->Push(&key);
} else {
Comment cmnt(masm_, "[ Load from keyed Property");
result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
@ -7517,7 +7570,7 @@ Result CodeGenerator::EmitKeyedLoad() {
// the push that follows might be peep-hole optimized away.
__ nop();
}
ASSERT(frame()->height() == original_height);
ASSERT(frame()->height() == original_height - 2);
return result;
}
@ -7561,7 +7614,6 @@ void Reference::GetValue() {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
if (!persist_after_get_) set_unloaded();
break;
}
@ -7569,29 +7621,33 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
if (persist_after_get_) {
cgen_->frame()->Dup();
}
Result result = cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->Push(&result);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
break;
}
case KEYED: {
// A load of a bare identifier (load from global) cannot be keyed.
ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
if (persist_after_get_) {
cgen_->frame()->PushElementAt(1);
cgen_->frame()->PushElementAt(1);
}
Result value = cgen_->EmitKeyedLoad();
cgen_->frame()->Push(&value);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
break;
}
default:
UNREACHABLE();
}
if (!persist_after_get_) {
set_unloaded();
}
}
@ -10920,7 +10976,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
__ push(result_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@ -10934,9 +10989,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
// have a chance to overwrite it.
__ movq(scratch_, rax);
}
__ pop(result_);
__ pop(index_);
__ pop(object_);
// Reload the instance type.
__ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ JumpIfNotSmi(scratch_, index_out_of_range_);
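
The register shuffle added to DeferredReferenceGetKeyedValue::Generate() above deserves a summary: it must end with the receiver in rdx and the key in rax (the new KeyedLoadIC convention) without a scratch register, so the aliasing pattern picks the instruction sequence. Condensed from the diff, not new behavior:

// Post-condition: rdx == receiver, rax == key.
//   receiver already in rdx           -> movq(rax, key), unless key is rax already.
//   receiver in rax, key in rdx       -> xchg(rax, rdx)  (a pure swap).
//   receiver in rax, key also in rax  -> movq(rdx, rax)  (they were equal).
//   receiver in rax, key elsewhere    -> movq(rdx, rax); movq(rax, key).
//   key in rax, receiver elsewhere    -> movq(rdx, receiver); rax already correct.
//   no aliasing                       -> movq(rax, key); movq(rdx, receiver).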

2
deps/v8/src/x64/codegen-x64.h

@ -542,6 +542,8 @@ class CodeGenerator: public AstVisitor {
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);

8
deps/v8/src/x64/debug-x64.cc

@ -124,9 +124,10 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
// No registers used on entry.
// -- rax : key
// -- rdx : receiver
// -----------------------------------
Generate_DebugBreakCallHelper(masm, 0, false);
Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), false);
}
@ -144,9 +145,10 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -----------------------------------
Generate_DebugBreakCallHelper(masm, rcx.bit(), false);
Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
}
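
The register-state comments above are the most compact statement of the convention this commit migrates x64 to; summarized:

// LoadIC:      rax = receiver, rcx = name  -> result in rax
// KeyedLoadIC: rdx = receiver, rax = key   -> result in rax
// Neither IC leaves its arguments on the stack anymore, which is why the
// call sites in full-codegen-x64.cc below lose their DropAndApply cleanups.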

42
deps/v8/src/x64/full-codegen-x64.cc

@ -1127,15 +1127,15 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object in rax.
__ push(CodeGenerator::GlobalObject());
__ Move(rcx, var->name());
__ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A test rax instruction following the call is used by the IC to
// indicate that the inobject property case was inlined. Ensure there
// is no test rax instruction here.
__ nop();
DropAndApply(1, context, rax);
Apply(context, rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Comment cmnt(masm_, "Lookup slot");
@ -1176,7 +1176,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Load the object.
MemOperand object_loc = EmitSlotSearch(object_slot, rax);
__ push(object_loc);
__ movq(rdx, object_loc);
// Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
@ -1184,7 +1184,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
__ Push(key_literal->handle());
__ Move(rax, key_literal->handle());
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@ -1192,8 +1192,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Notice: We must not have a "test rax, ..." instruction after the
// call. It is treated specially by the LoadIC code.
__ nop();
// Drop key and object left on the stack by IC, and push the result.
DropAndApply(2, context, rax);
Apply(context, rax);
}
}
@ -1693,18 +1692,16 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
// Evaluate receiver.
VisitForValue(expr->obj(), kStack);
if (key->IsPropertyName()) {
VisitForValue(expr->obj(), kAccumulator);
EmitNamedPropertyLoad(expr);
// Drop receiver left on the stack by IC.
DropAndApply(1, context_, rax);
Apply(context_, rax);
} else {
VisitForValue(expr->key(), kStack);
VisitForValue(expr->obj(), kStack);
VisitForValue(expr->key(), kAccumulator);
__ pop(rdx);
EmitKeyedPropertyLoad(expr);
// Drop key and receiver left on the stack by IC.
DropAndApply(2, context_, rax);
Apply(context_, rax);
}
}
@ -1826,7 +1823,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call to a keyed property, use keyed load IC followed by function
// call.
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kStack);
VisitForValue(prop->key(), kAccumulator);
__ movq(rdx, Operand(rsp, 0));
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@ -1834,8 +1832,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// By emitting a nop we make sure that we do not have a "test rax,..."
// instruction after the call it is treated specially by the LoadIC code.
__ nop();
// Drop key left on the stack by IC.
__ Drop(1);
// Pop receiver.
__ pop(rbx);
// Push result (function).
@ -2745,13 +2741,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
!proxy->var()->is_this() &&
proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
__ push(CodeGenerator::GlobalObject());
__ Move(rcx, proxy->name());
__ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic, RelocInfo::CODE_TARGET);
__ movq(Operand(rsp, 0), rax);
__ push(rax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
proxy->var()->slot()->type() == Slot::LOOKUP) {
@ -2861,11 +2857,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (expr->is_postfix() && context_ != Expression::kEffect) {
__ Push(Smi::FromInt(0));
}
VisitForValue(prop->obj(), kStack);
if (assign_type == NAMED_PROPERTY) {
VisitForValue(prop->obj(), kAccumulator);
__ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
VisitForValue(prop->key(), kStack);
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kAccumulator);
__ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
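
Pulling the recurring pattern out of the hunks above, the new call-site shape for a keyed property load in full codegen is, as a sketch (frame bookkeeping elided):

VisitForValue(prop->obj(), kStack);        // receiver on the stack...
VisitForValue(prop->key(), kAccumulator);  // ...key ends up in rax
__ movq(rdx, Operand(rsp, 0));             // receiver into rdx, still on the stack
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
__ nop();  // no "test rax" after the call marks this as a non-inlined site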

309
deps/v8/src/x64/ic-x64.cc

@ -56,18 +56,20 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register r1,
Register r2,
Register name,
Register r4,
DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
//
// r1 - initially the receiver
// - used for the index into the property dictionary
// r1 - initially the receiver.
// - unchanged on any jump to miss_label.
// - holds the result on exit.
//
// r2 - used to hold the capacity of the property dictionary.
//
// name - holds the name of the property and is unchanged.
// r4 - used to hold the index into the property dictionary.
Label done;
@ -116,19 +118,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ movl(r1, FieldOperand(name, String::kHashFieldOffset));
__ shrl(r1, Immediate(String::kHashShift));
__ movl(r4, FieldOperand(name, String::kHashFieldOffset));
__ shrl(r4, Immediate(String::kHashShift));
if (i > 0) {
__ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
__ addl(r4, Immediate(StringDictionary::GetProbeOffset(i)));
}
__ and_(r1, r2);
__ and_(r4, r2);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
__ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
__ lea(r4, Operand(r4, r4, times_2, 0)); // r4 = r4 * 3
// Check if the key is identical to the name.
__ cmpq(name, Operand(r0, r1, times_pointer_size,
__ cmpq(name, Operand(r0, r4, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
__ j(equal, &done);
@ -140,14 +142,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Check that the value is a normal property.
__ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ Test(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
__ Test(Operand(r0, r4, times_pointer_size, kDetailsOffset - kHeapObjectTag),
Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ movq(r1,
Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
}
@ -311,14 +313,14 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
__ pop(rbx);
__ push(Operand(rsp, 1 * kPointerSize)); // receiver
__ push(Operand(rsp, 1 * kPointerSize)); // name
__ push(rdx); // receiver
__ push(rax); // name
__ push(rbx); // return address
// Perform tail call to the entry.
@ -329,14 +331,14 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
__ pop(rbx);
__ push(Operand(rsp, 1 * kPointerSize)); // receiver
__ push(Operand(rsp, 1 * kPointerSize)); // name
__ push(rdx); // receiver
__ push(rax); // name
__ push(rbx); // return address
// Perform tail call to the entry.
@ -346,40 +348,35 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
Label check_pixel_array, probe_dictionary, check_number_dictionary;
// Check that the object isn't a smi.
__ JumpIfSmi(rcx, &slow);
__ JumpIfSmi(rdx, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
// into string objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(below, &slow);
// Check bit field.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
// Get the elements array of the object.
__ bind(&index_smi);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
@ -388,91 +385,99 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
__ movq(rax, FieldOperand(rcx,
SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
__ movq(rbx, FieldOperand(rcx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow);
__ movq(rax, rbx);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
// Check whether the elements object is a pixel array.
// rdx: receiver
// rax: key
// rcx: elements array
__ bind(&check_pixel_array);
__ SmiToInteger32(rbx, rax); // Used in both directions of the next branch.
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &check_number_dictionary);
__ SmiToInteger32(rax, rax);
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
__ movzxbq(rax, Operand(rcx, rax, times_1, 0));
__ movq(rax, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
__ movzxbq(rax, Operand(rax, rbx, times_1, 0));
__ Integer32ToSmi(rax, rax);
__ ret(0);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// rdx: receiver
// rax: key
// rbx: key as untagged int32
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
__ SmiToInteger32(rbx, rax);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, rdx, rdi);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi);
__ ret(0);
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
// Slow case: Jump to runtime.
// rdx: receiver
// rax: key
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
// rdx: receiver
// rax: key
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
__ testl(rbx, Immediate(String::kContainsCachedArrayIndexMask));
__ j(zero, &index_string); // The value in rbx is used at jump target.
// Is the string a symbol?
__ j(not_zero, &index_string); // The value in rbx is used at jump target.
ASSERT(kSymbolTag != 0);
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
__ testb(FieldOperand(rcx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
__ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::hash_table_map());
__ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
__ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movl(rdx, rbx);
__ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rax, Immediate(String::kHashShift));
__ xor_(rdx, rax);
__ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movl(rcx, rbx);
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
__ xor_(rcx, rdi);
__ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
__ movq(rdi, rdx);
__ movq(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ movq(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &slow);
__ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
__ cmpq(Operand(rsp, kPointerSize), rdi);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
__ j(not_equal, &slow);
// Get field offset which is a 32-bit integer and check that it is
@ -480,29 +485,32 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ movq(kScratchRegister, cache_field_offsets);
__ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
__ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ cmpq(rax, rdx);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ subq(rdi, rcx);
__ j(above_equal, &slow);
// Load in-object property.
__ subq(rax, rdx);
__ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rax, rdx);
__ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
__ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// rdx: receiver
// rax: key
GenerateDictionaryLoad(masm,
&slow,
rbx,
rcx,
rdx,
rcx,
rax,
rdi,
DICTIONARY_CHECK_DONE);
__ movq(rax, rcx);
__ movq(rax, rdx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
// If the hash field contains an array index pick it out. The assert checks
@ -512,19 +520,27 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// We want the smi-tagged index in rax.
// We want the smi-tagged index in rax. Even if we subsequently go to
// the slow case, converting the key to a smi is always valid.
// rdx: receiver
// rax: key (a string)
// rbx: key's hash field, including its array index value.
__ and_(rbx, Immediate(String::kArrayIndexValueMask));
__ shr(rbx, Immediate(String::kHashShift));
// Here we actually clobber the key (rax), which will be used again if we
// call into the runtime later. However, since the new key is the numeric
// value of the string key, either key gives the same result.
__ Integer32ToSmi(rax, rbx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name (index)
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
Label index_out_of_range;
@ -535,9 +551,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register scratch2 = rcx;
Register result = rax;
__ movq(index, Operand(rsp, 1 * kPointerSize));
__ movq(receiver, Operand(rsp, 2 * kPointerSize));
StringCharAtGenerator char_at_generator(receiver,
index,
scratch1,
@ -565,80 +578,80 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label slow, failed_allocation;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ JumpIfSmi(rcx, &slow);
__ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Check that the object is a JS object.
__ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks. The map is already in rdx.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: index (as a smi)
// rcx: JSObject
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
// rdx: JSObject
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
__ SmiToInteger32(rax, rax);
__ cmpl(rax, FieldOperand(rcx, ExternalArray::kLengthOffset));
__ SmiToInteger32(rcx, rax);
__ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// rax: untagged index
// rcx: elements array
__ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
// rcx: base pointer of external storage
// rax: index (as a smi)
// rdx: receiver (JSObject)
// rcx: untagged index
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsxbq(rax, Operand(rcx, rax, times_1, 0));
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzxbq(rax, Operand(rcx, rax, times_1, 0));
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalShortArray:
__ movsxwq(rax, Operand(rcx, rax, times_2, 0));
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzxwq(rax, Operand(rcx, rax, times_2, 0));
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalIntArray:
__ movsxlq(rax, Operand(rcx, rax, times_4, 0));
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalUnsignedIntArray:
__ movl(rax, Operand(rcx, rax, times_4, 0));
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalFloatArray:
__ fld_s(Operand(rcx, rax, times_4, 0));
__ fld_s(Operand(rbx, rcx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// rax: index
// rdx: receiver
// For integer array types:
// rax: value
// rcx: value
// For floating-point array type:
// FP(0): value
@ -649,42 +662,45 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
__ JumpIfNotValidSmiValue(rax, &box_int);
__ JumpIfNotValidSmiValue(rcx, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
__ JumpIfUIntNotValidSmiValue(rax, &box_int);
__ JumpIfUIntNotValidSmiValue(rcx, &box_int);
}
__ Integer32ToSmi(rax, rax);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
__ push(rax);
__ push(rcx);
if (array_type == kExternalIntArray) {
__ fild_s(Operand(rsp, 0));
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
// Need to zero-extend the value.
// The value is zero-extended on the stack, because all pushes are
// 64-bit and we loaded the value from memory with movl.
__ fild_d(Operand(rsp, 0));
}
__ pop(rax);
__ pop(rcx);
// FP(0): value
__ AllocateHeapNumber(rax, rbx, &failed_allocation);
__ AllocateHeapNumber(rcx, rbx, &failed_allocation);
// Set the value.
__ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rax, rbx, &failed_allocation);
__ AllocateHeapNumber(rcx, rbx, &failed_allocation);
// Set the value.
__ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ Integer32ToSmi(rax, rax);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
}
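
The three-way split above exists because a 32-bit element may not be representable as a smi: a signed load can exceed the payload range in either direction, and an unsigned load can be too large even when the same bit pattern would be a valid negative int32. A hedged sketch of those range checks, assuming a 31-bit smi payload for illustration (the real payload width is platform-dependent):

#include <cstdint>

const int32_t kToySmiMax = (1 << 30) - 1;  // assumed 31-bit payload
const int32_t kToySmiMin = -(1 << 30);

bool FitsInSmi(int32_t value) {
  return value >= kToySmiMin && value <= kToySmiMax;
}

bool UnsignedFitsInSmi(uint32_t value) {
  // A uint32 can exceed the positive smi range even when an int32
  // with the same bit pattern would be negative and representable.
  return value <= static_cast<uint32_t>(kToySmiMax);
}

Values that fail the check get boxed into a freshly allocated HeapNumber via the FPU, as the stub does above.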
@ -695,7 +711,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ fincstp();
// Fall through to slow case.
// Slow case: Load name and receiver from stack and jump to runtime.
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
@ -704,37 +720,33 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
Label slow;
// Load key and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
// Check that the receiver isn't a smi.
__ JumpIfSmi(rcx, &slow);
__ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Get the map of the receiver.
__ movq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ movb(rdx, FieldOperand(rdx, Map::kBitFieldOffset));
__ andb(rdx, Immediate(kSlowCaseBitFieldMask));
__ cmpb(rdx, Immediate(1 << Map::kHasIndexedInterceptor));
__ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
__ andb(rcx, Immediate(kSlowCaseBitFieldMask));
__ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
__ pop(rdx);
__ push(rcx); // receiver
__ pop(rcx);
__ push(rdx); // receiver
__ push(rax); // key
__ push(rdx); // return address
__ push(rcx); // return address
// Perform tail call to the entry.
__ TailCallExternalReference(ExternalReference(
@ -1228,7 +1240,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Search dictionary - put result in register rdx.
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, rdi, CHECK_DICTIONARY);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
@ -1318,6 +1330,21 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNREACHABLE();
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNREACHABLE();
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
UNREACHABLE();
}
// The offset from the inlined patch site to the start of the
// inlined load instruction.
const int LoadIC::kOffsetToLoadInstruction = 20;
@ -1333,13 +1360,13 @@ void LoadIC::ClearInlinedVersion(Address address) {
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
__ pop(rbx);
__ push(Operand(rsp, 0)); // receiver
__ push(rax); // receiver
__ push(rcx); // name
__ push(rbx); // return address
@ -1351,14 +1378,12 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@ -1367,14 +1392,12 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@ -1383,13 +1406,11 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
__ movq(rax, Operand(rsp, kPointerSize));
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
@ -1403,14 +1424,12 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss, probe, global;
__ movq(rax, Operand(rsp, kPointerSize));
// Check that the receiver isn't a smi.
__ JumpIfSmi(rax, &miss);
@ -1432,7 +1451,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in rax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
rcx, rdi, CHECK_DICTIONARY);
__ ret(0);
// Global object access: Check access rights.
@ -1440,23 +1460,20 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ CheckAccessGlobalProxy(rax, rdx, &miss);
__ jmp(&probe);
// Cache miss: Restore receiver from stack and jump to runtime.
// Cache miss: Jump to runtime.
__ bind(&miss);
__ movq(rax, Operand(rsp, 1 * kPointerSize));
GenerateMiss(masm);
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);

143
deps/v8/src/x64/stub-cache-x64.cc

@ -114,6 +114,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype) {
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
__ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
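
The new helper trades the generic prototype lookup for two loads baked directly into the stub: global function to initial map, initial map to prototype. A toy illustration of that dereference chain, using stand-in structs rather than V8's real object layout:

struct ToyMap {
  void* prototype;
};

struct ToyFunction {
  ToyMap* initial_map;
};

void* DirectLoadPrototype(ToyFunction* global_function) {
  // No null check needed: the global functions all have initial maps,
  // as the comment above notes.
  return global_function->initial_map->prototype;
}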
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
@ -695,6 +706,12 @@ static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
#define __ ACCESS_MASM((masm()))
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ Jump(ic, RelocInfo::CODE_TARGET);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@ -776,9 +793,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
rax);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
@ -796,9 +812,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
rax);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
@ -818,9 +833,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
rax);
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
@ -845,8 +859,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss_in_smi_check);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -897,8 +910,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@ -1052,8 +1064,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1137,8 +1148,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@ -1221,8 +1231,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(argc);
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@ -1305,8 +1314,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
@ -1318,13 +1326,12 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
callback, name, &miss, &failure);
@ -1343,13 +1350,12 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
Object* value,
String* name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -1363,15 +1369,12 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
// Load receiver.
__ movq(rax, Operand(rsp, kPointerSize));
// Check that the receiver is not a smi.
__ JumpIfSmi(rax, &miss);
@ -1409,13 +1412,12 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
int index,
String* name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -1429,16 +1431,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
__ movq(rax, Operand(rsp, kPointerSize));
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
GenerateLoadInterceptor(receiver,
@ -1465,15 +1466,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
// Get the receiver from the stack.
__ movq(rax, Operand(rsp, kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
@ -1485,19 +1483,20 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss);
// Get the value from the cell.
__ Move(rax, Handle<JSGlobalPropertyCell>(cell));
__ movq(rax, FieldOperand(rax, JSGlobalPropertyCell::kValueOffset));
__ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
__ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
} else if (FLAG_debug_code) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
__ IncrementCounter(&Counters::named_load_global_inline, 1);
__ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
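
The hole check above works because deleting a global property writes a sentinel ("the hole") into its property cell rather than removing the cell; DontDelete cells can skip the check since the sentinel can never appear there. A toy version of that protocol, with stand-in types rather than V8's cell layout:

#include <cstdint>

struct ToyCell {
  const void* value;
};

const void* const kToyTheHole =
    reinterpret_cast<const void*>(static_cast<uintptr_t>(-1));

// Returns the value, or NULL to signal "take the miss path".
const void* LoadGlobalCell(const ToyCell* cell, bool is_dont_delete) {
  if (!is_dont_delete && cell->value == kToyTheHole) {
    return 0;  // deleted property: fall back to the runtime
  }
  return cell->value;
}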
@ -1514,14 +1513,12 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_callback, 1);
// Check that the name has not changed.
@ -1529,7 +1526,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ j(not_equal, &miss);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx,
callback, name, &miss, &failure);
if (!success) return failure;
@ -1544,21 +1541,19 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_array_length, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), rcx, rdx, &miss);
GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_array_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1573,21 +1568,19 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
JSObject* holder,
Object* value) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_constant_function, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, rcx, rbx, rdx,
GenerateLoadConstant(receiver, holder, rdx, rbx, rcx,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@ -1600,21 +1593,19 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss);
GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1628,14 +1619,12 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_interceptor, 1);
// Check that the name has not changed.
@ -1647,9 +1636,9 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
GenerateLoadInterceptor(receiver,
holder,
&lookup,
rcx,
rax,
rdx,
rax,
rcx,
rbx,
name,
&miss);
@ -1664,21 +1653,19 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_string_length, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
GenerateLoadStringLength(masm(), rcx, rdx, rbx, &miss);
GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1856,21 +1843,19 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
JSObject* holder,
int index) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_field, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
GenerateLoadField(receiver, holder, rdx, rbx, rcx, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);

17
deps/v8/src/x64/virtual-frame-x64.cc

@ -1072,14 +1072,14 @@ void VirtualFrame::MoveResultsToRegisters(Result* a,
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
// Name and receiver are on the top of the frame. The IC expects
// name in rcx and receiver on the stack. It does not drop the
// receiver.
// Name and receiver are on the top of the frame. Both are dropped.
// The IC expects name in rcx and receiver in rax.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
Result name = Pop();
PrepareForCall(1, 0); // One stack arg, not callee-dropped.
name.ToRegister(rcx);
name.Unuse();
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&name, &receiver, rcx, rax);
return RawCallCodeObject(ic, mode);
}
@ -1088,7 +1088,10 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
// Key and receiver are on top of the frame. The IC expects them on
// the stack. It does not drop them.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
Result name = Pop();
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&name, &receiver, rax, rdx);
return RawCallCodeObject(ic, mode);
}
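
Both changes move the load ICs from a stack-based to a register-based convention: operands are popped off the virtual frame and materialized in fixed registers (name in rcx, receiver in rax for LoadIC; key in rax, receiver in rdx for KeyedLoadIC), and are no longer left on the stack for the IC to re-read. A toy model of the pop-into-registers protocol; the frame and operand types here are invented for the sketch.

#include <cassert>
#include <string>
#include <vector>

struct ToyFrame {
  std::vector<std::string> elements;  // back() is the top of the frame

  std::string Pop() {
    assert(!elements.empty());
    std::string top = elements.back();
    elements.pop_back();
    return top;
  }
};

// Slots standing in for rcx/rax in the LoadIC convention.
struct LoadICOperands {
  std::string name_in_rcx;
  std::string receiver_in_rax;
};

LoadICOperands PrepareLoadICCall(ToyFrame* frame) {
  LoadICOperands ops;
  ops.name_in_rcx = frame->Pop();      // name is on top, as in the diff
  ops.receiver_in_rax = frame->Pop();  // receiver beneath; both dropped
  return ops;
}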

238
deps/v8/test/cctest/test-api.cc

@ -27,6 +27,8 @@
#include <limits.h>
#define USE_NEW_QUERY_CALLBACKS
#include "v8.h"
#include "api.h"
@ -610,6 +612,71 @@ THREADED_TEST(ScavengeExternalAsciiString) {
}
static int dispose_count = 0;
static void DisposeExternalStringCount(
String::ExternalStringResourceBase* resource) {
dispose_count++;
}
static void DisposeExternalStringDeleteAndCount(
String::ExternalStringResourceBase* resource) {
delete resource;
dispose_count++;
}
TEST(ExternalStringWithResourceDisposeCallback) {
const char* c_source = "1 + 2 * 3";
// Set an external string collected callback which does not delete the object.
v8::V8::SetExternalStringDiposeCallback(DisposeExternalStringCount);
// Use a stack-allocated external string resource.
dispose_count = 0;
TestAsciiResource::dispose_count = 0;
TestAsciiResource res_stack(i::StrDup(c_source));
{
v8::HandleScope scope;
LocalContext env;
Local<String> source = String::NewExternal(&res_stack);
Local<Script> script = Script::Compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
v8::internal::CompilationCache::Clear();
v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(1, dispose_count);
CHECK_EQ(0, TestAsciiResource::dispose_count);
// Set an external string collected callback which does delete the object.
v8::V8::SetExternalStringDiposeCallback(DisposeExternalStringDeleteAndCount);
// Use a heap-allocated external string resource.
dispose_count = 0;
TestAsciiResource::dispose_count = 0;
TestAsciiResource* res_heap = new TestAsciiResource(i::StrDup(c_source));
{
v8::HandleScope scope;
LocalContext env;
Local<String> source = String::NewExternal(res_heap);
Local<Script> script = Script::Compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
v8::internal::CompilationCache::Clear();
v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(1, dispose_count);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
THREADED_TEST(StringConcat) {
{
v8::HandleScope scope;
@ -1120,11 +1187,11 @@ v8::Handle<v8::Boolean> CheckThisIndexedPropertyQuery(
}
v8::Handle<v8::Boolean> CheckThisNamedPropertyQuery(Local<String> property,
v8::Handle<v8::Integer> CheckThisNamedPropertyQuery(Local<String> property,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
return v8::Handle<v8::Boolean>();
return v8::Handle<v8::Integer>();
}
@ -1221,13 +1288,13 @@ static v8::Handle<Value> PrePropertyHandlerGet(Local<String> key,
}
static v8::Handle<v8::Boolean> PrePropertyHandlerHas(Local<String> key,
const AccessorInfo&) {
static v8::Handle<v8::Integer> PrePropertyHandlerQuery(Local<String> key,
const AccessorInfo&) {
if (v8_str("pre")->Equals(key)) {
return v8::True();
return v8::Integer::New(v8::None);
}
return v8::Handle<v8::Boolean>(); // do not intercept the call
return v8::Handle<v8::Integer>(); // do not intercept the call
}
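
This is part of the API shift from boolean "has" interceptors to integer "query" interceptors: the callback now answers with property attributes (or an empty handle to decline interception) rather than true/false. A sketch of a query callback written against the v8.h of this era, mirroring PrePropertyHandlerQuery above; the property names and attribute choices are invented for the example.

#include "v8.h"

static v8::Handle<v8::Integer> ExampleQuery(v8::Local<v8::String> key,
                                            const v8::AccessorInfo& info) {
  if (key->Equals(v8::String::New("readonly_prop"))) {
    return v8::Integer::New(v8::ReadOnly);  // present, read-only
  }
  if (key->Equals(v8::String::New("plain_prop"))) {
    return v8::Integer::New(v8::None);      // present, no attributes
  }
  return v8::Handle<v8::Integer>();         // not intercepted
}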
@ -1236,7 +1303,7 @@ THREADED_TEST(PrePropertyHandler) {
v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
desc->InstanceTemplate()->SetNamedPropertyHandler(PrePropertyHandlerGet,
0,
PrePropertyHandlerHas);
PrePropertyHandlerQuery);
LocalContext env(NULL, desc->InstanceTemplate());
Script::Compile(v8_str(
"var pre = 'Object: pre'; var on = 'Object: on';"))->Run();
@ -7076,6 +7143,163 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
}
v8::Handle<Value> keyed_call_ic_function;
static v8::Handle<Value> InterceptorKeyedCallICGetter(
Local<String> name, const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
if (v8_str("x")->Equals(name)) {
return keyed_call_ic_function;
}
return v8::Handle<Value>();
}
// Test the case when we stored a cacheable lookup into a stub, but the
// function name changed (to another cacheable function).
THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
v8::Handle<Value> value = CompileRun(
"proto = new Object();"
"proto.y = function(x) { return x + 1; };"
"proto.z = function(x) { return x - 1; };"
"o.__proto__ = proto;"
"var result = 0;"
"var method = 'y';"
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { method = 'z'; };"
" result += o[method](41);"
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
// Test the case when we stored a cacheable lookup into a stub, but the
// function name changed (and the new function is present both before and
// after the interceptor in the prototype chain).
THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorKeyedCallICGetter);
LocalContext context;
context->Global()->Set(v8_str("proto1"), templ->NewInstance());
keyed_call_ic_function =
v8_compile("function f(x) { return x - 1; }; f")->Run();
v8::Handle<Value> value = CompileRun(
"o = new Object();"
"proto2 = new Object();"
"o.y = function(x) { return x + 1; };"
"proto2.y = function(x) { return x + 2; };"
"o.__proto__ = proto1;"
"proto1.__proto__ = proto2;"
"var result = 0;"
"var method = 'x';"
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { method = 'y'; };"
" result += o[method](41);"
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
// Same as InterceptorKeyedCallICKeyChange1, only the cacheable function sits
// on the global object.
THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
v8::Handle<Value> value = CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
"function dec(x) { return x - 1; };"
"dec(1);"
"o.__proto__ = this;"
"this.__proto__.x = inc;"
"this.__proto__.y = dec;"
"var result = 0;"
"var method = 'x';"
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { method = 'y'; };"
" result += o[method](41);"
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
// Test the case when the actual function to call sits on the global object.
THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
v8::Handle<Value> value = CompileRun(
"function len(x) { return x.length; };"
"o.__proto__ = this;"
"var m = 'parseFloat';"
"var result = 0;"
"for (var i = 0; i < 10; i++) {"
" if (i == 5) {"
" m = 'len';"
" saved_result = result;"
" };"
" result = o[m]('239');"
"}");
CHECK_EQ(3, context->Global()->Get(v8_str("result"))->Int32Value());
CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
// Test the map transition before the interceptor.
THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
v8::Handle<Value> value = CompileRun(
"var o = new Object();"
"o.__proto__ = proto;"
"o.method = function(x) { return x + 1; };"
"var m = 'method';"
"var result = 0;"
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { o.method = function(x) { return x - 1; }; };"
" result += o[m](41);"
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
// Test the map transition after the interceptor.
THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
v8::Handle<Value> value = CompileRun(
"var proto = new Object();"
"o.__proto__ = proto;"
"proto.method = function(x) { return x + 1; };"
"var m = 'method';"
"var result = 0;"
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { proto.method = function(x) { return x - 1; }; };"
" result += o[m](41);"
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
static int interceptor_call_count = 0;
static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name,

7
deps/v8/test/cctest/test-debug.cc

@ -27,6 +27,8 @@
#include <stdlib.h>
#define USE_NEW_QUERY_CALLBACKS
#include "v8.h"
#include "api.h"
@ -395,8 +397,9 @@ Handle<FixedArray> GetDebuggedFunctions() {
static Handle<Code> ComputeCallDebugBreak(int argc) {
CALL_HEAP_FUNCTION(v8::internal::StubCache::ComputeCallDebugBreak(argc),
Code);
CALL_HEAP_FUNCTION(
v8::internal::StubCache::ComputeCallDebugBreak(argc, Code::CALL_IC),
Code);
}

68
deps/v8/test/cctest/test-decls.cc

@ -27,6 +27,8 @@
#include <stdlib.h>
#define USE_NEW_QUERY_CALLBACKS
#include "v8.h"
#include "heap.h"
@ -63,12 +65,12 @@ class DeclarationContext {
int get_count() const { return get_count_; }
int set_count() const { return set_count_; }
int has_count() const { return has_count_; }
int query_count() const { return query_count_; }
protected:
virtual v8::Handle<Value> Get(Local<String> key);
virtual v8::Handle<Value> Set(Local<String> key, Local<Value> value);
virtual v8::Handle<Boolean> Has(Local<String> key);
virtual v8::Handle<Integer> Query(Local<String> key);
void InitializeIfNeeded();
@ -85,8 +87,8 @@ class DeclarationContext {
static v8::Handle<Value> HandleSet(Local<String> key,
Local<Value> value,
const AccessorInfo& info);
static v8::Handle<Boolean> HandleHas(Local<String> key,
const AccessorInfo& info);
static v8::Handle<Integer> HandleQuery(Local<String> key,
const AccessorInfo& info);
private:
bool is_initialized_;
@ -95,14 +97,14 @@ class DeclarationContext {
int get_count_;
int set_count_;
int has_count_;
int query_count_;
static DeclarationContext* GetInstance(const AccessorInfo& info);
};
DeclarationContext::DeclarationContext()
: is_initialized_(false), get_count_(0), set_count_(0), has_count_(0) {
: is_initialized_(false), get_count_(0), set_count_(0), query_count_(0) {
// Do nothing.
}
@ -114,7 +116,7 @@ void DeclarationContext::InitializeIfNeeded() {
Local<Value> data = External::New(this);
GetHolder(function)->SetNamedPropertyHandler(&HandleGet,
&HandleSet,
&HandleHas,
&HandleQuery,
0, 0,
data);
context_ = Context::New(0, function->InstanceTemplate(), Local<Value>());
@ -124,7 +126,7 @@ void DeclarationContext::InitializeIfNeeded() {
void DeclarationContext::Check(const char* source,
int get, int set, int has,
int get, int set, int query,
Expectations expectations,
v8::Handle<Value> value) {
InitializeIfNeeded();
@ -137,7 +139,7 @@ void DeclarationContext::Check(const char* source,
Local<Value> result = Script::Compile(String::New(source))->Run();
CHECK_EQ(get, get_count());
CHECK_EQ(set, set_count());
CHECK_EQ(has, has_count());
CHECK_EQ(query, query_count());
if (expectations == EXPECT_RESULT) {
CHECK(!catcher.HasCaught());
if (!value.IsEmpty()) {
@ -170,11 +172,11 @@ v8::Handle<Value> DeclarationContext::HandleSet(Local<String> key,
}
v8::Handle<Boolean> DeclarationContext::HandleHas(Local<String> key,
const AccessorInfo& info) {
v8::Handle<Integer> DeclarationContext::HandleQuery(Local<String> key,
const AccessorInfo& info) {
DeclarationContext* context = GetInstance(info);
context->has_count_++;
return context->Has(key);
context->query_count_++;
return context->Query(key);
}
@ -194,8 +196,8 @@ v8::Handle<Value> DeclarationContext::Set(Local<String> key,
}
v8::Handle<Boolean> DeclarationContext::Has(Local<String> key) {
return v8::Handle<Boolean>();
v8::Handle<Integer> DeclarationContext::Query(Local<String> key) {
return v8::Handle<Integer>();
}
@ -249,8 +251,8 @@ TEST(Unknown) {
class PresentPropertyContext: public DeclarationContext {
protected:
virtual v8::Handle<Boolean> Has(Local<String> key) {
return True();
virtual v8::Handle<Integer> Query(Local<String> key) {
return Integer::New(v8::None);
}
};
@ -304,8 +306,8 @@ TEST(Present) {
class AbsentPropertyContext: public DeclarationContext {
protected:
virtual v8::Handle<Boolean> Has(Local<String> key) {
return False();
virtual v8::Handle<Integer> Query(Local<String> key) {
return v8::Handle<Integer>();
}
};
@ -316,7 +318,7 @@ TEST(Absent) {
{ AbsentPropertyContext context;
context.Check("var x; x",
1, // access
2, // declaration + initialization
1, // declaration
2, // declaration + initialization
EXPECT_RESULT, Undefined());
}
@ -375,24 +377,24 @@ class AppearingPropertyContext: public DeclarationContext {
AppearingPropertyContext() : state_(DECLARE) { }
protected:
virtual v8::Handle<Boolean> Has(Local<String> key) {
virtual v8::Handle<Integer> Query(Local<String> key) {
switch (state_) {
case DECLARE:
// Force declaration by returning that the
// property is absent.
state_ = INITIALIZE_IF_ASSIGN;
return False();
return Handle<Integer>();
case INITIALIZE_IF_ASSIGN:
// Return that the property is present so we only get the
// setter called when initializing with a value.
state_ = UNKNOWN;
return True();
return Integer::New(v8::None);
default:
CHECK(state_ == UNKNOWN);
break;
}
// Do the lookup in the object.
return v8::Local<Boolean>();
return v8::Handle<Integer>();
}
private:
@ -458,31 +460,31 @@ class ReappearingPropertyContext: public DeclarationContext {
ReappearingPropertyContext() : state_(DECLARE) { }
protected:
virtual v8::Handle<Boolean> Has(Local<String> key) {
virtual v8::Handle<Integer> Query(Local<String> key) {
switch (state_) {
case DECLARE:
// Force the first declaration by returning that
// the property is absent.
state_ = DONT_DECLARE;
return False();
return Handle<Integer>();
case DONT_DECLARE:
// Ignore the second declaration by returning
// that the property is already there.
state_ = INITIALIZE;
return True();
return Integer::New(v8::None);
case INITIALIZE:
// Force an initialization by returning that
// the property is absent. This will make sure
// that the setter is called and it will not
// lead to redeclaration conflicts (yet).
state_ = UNKNOWN;
return False();
return Handle<Integer>();
default:
CHECK(state_ == UNKNOWN);
break;
}
// Do the lookup in the object.
return v8::Local<Boolean>();
return Handle<Integer>();
}
private:
@ -506,9 +508,9 @@ TEST(Reappearing) {
class ExistsInPrototypeContext: public DeclarationContext {
protected:
virtual v8::Handle<Boolean> Has(Local<String> key) {
virtual v8::Handle<Integer> Query(Local<String> key) {
// Let it seem that the property exists in the prototype object.
return True();
return Integer::New(v8::None);
}
// Use the prototype as the holder for the interceptors.
@ -568,9 +570,9 @@ TEST(ExistsInPrototype) {
class AbsentInPrototypeContext: public DeclarationContext {
protected:
virtual v8::Handle<Boolean> Has(Local<String> key) {
virtual v8::Handle<Integer> Query(Local<String> key) {
// Let it seem that the property is absent in the prototype object.
return False();
return Handle<Integer>();
}
// Use the prototype as the holder for the interceptors.

107
deps/v8/test/cctest/test-profile-generator.cc

@ -7,12 +7,14 @@
#include "v8.h"
#include "profile-generator-inl.h"
#include "cctest.h"
#include "../include/v8-profiler.h"
namespace i = v8::internal;
using i::CodeEntry;
using i::CodeMap;
using i::CpuProfile;
using i::CpuProfiler;
using i::CpuProfilesCollection;
using i::ProfileNode;
using i::ProfileTree;
@ -668,4 +670,109 @@ TEST(SampleRateCalculator) {
CHECK_EQ(kSamplingIntervalMs * 0.66666, calc3.ticks_per_ms());
}
// --- P r o f i l e r E x t e n s i o n ---
class ProfilerExtension : public v8::Extension {
public:
ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> StartProfiling(const v8::Arguments& args);
static v8::Handle<v8::Value> StopProfiling(const v8::Arguments& args);
private:
static const char* kSource;
};
const char* ProfilerExtension::kSource =
"native function startProfiling();"
"native function stopProfiling();";
v8::Handle<v8::FunctionTemplate> ProfilerExtension::GetNativeFunction(
v8::Handle<v8::String> name) {
if (name->Equals(v8::String::New("startProfiling"))) {
return v8::FunctionTemplate::New(ProfilerExtension::StartProfiling);
} else if (name->Equals(v8::String::New("stopProfiling"))) {
return v8::FunctionTemplate::New(ProfilerExtension::StopProfiling);
} else {
CHECK(false);
return v8::Handle<v8::FunctionTemplate>();
}
}
v8::Handle<v8::Value> ProfilerExtension::StartProfiling(
const v8::Arguments& args) {
if (args.Length() > 0)
v8::CpuProfiler::StartProfiling(args[0].As<v8::String>());
else
v8::CpuProfiler::StartProfiling(v8::String::New(""));
return v8::Undefined();
}
v8::Handle<v8::Value> ProfilerExtension::StopProfiling(
const v8::Arguments& args) {
if (args.Length() > 0)
v8::CpuProfiler::StopProfiling(args[0].As<v8::String>());
else
v8::CpuProfiler::StopProfiling(v8::String::New(""));
return v8::Undefined();
}
static ProfilerExtension kProfilerExtension;
v8::DeclareExtension kProfilerExtensionDeclaration(&kProfilerExtension);
static v8::Persistent<v8::Context> env;
static const ProfileNode* PickChild(const ProfileNode* parent,
const char* name) {
for (int i = 0; i < parent->children()->length(); ++i) {
const ProfileNode* child = parent->children()->at(i);
if (strcmp(child->entry()->name(), name) == 0) return child;
}
return NULL;
}
TEST(RecordStackTraceAtStartProfiling) {
if (env.IsEmpty()) {
v8::HandleScope scope;
const char* extensions[] = { "v8/profiler" };
v8::ExtensionConfiguration config(1, extensions);
env = v8::Context::New(&config);
}
v8::HandleScope scope;
env->Enter();
CHECK_EQ(0, CpuProfiler::GetProfilesCount());
CompileRun(
"function c() { startProfiling(); }\n"
"function b() { c(); }\n"
"function a() { b(); }\n"
"a();\n"
"stopProfiling();");
CHECK_EQ(1, CpuProfiler::GetProfilesCount());
CpuProfile* profile =
CpuProfiler::GetProfile(NULL, 0);
const ProfileTree* topDown = profile->top_down();
const ProfileNode* current = topDown->root();
// The tree should look like this:
// (root)
// (anonymous function)
// a
// b
// c
current = PickChild(current, "(anonymous function)");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
current = PickChild(current, "a");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
current = PickChild(current, "b");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
current = PickChild(current, "c");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
CHECK_EQ(0, current->children()->length());
}
#endif // ENABLE_LOGGING_AND_PROFILING

8
deps/v8/test/cctest/test-strings.cc

@ -473,11 +473,11 @@ TEST(CachedHashOverflow) {
printf("%s\n", line);
v8::Local<v8::Value> result =
v8::Script::Compile(v8::String::New(line))->Run();
ASSERT_EQ(results[i]->IsUndefined(), result->IsUndefined());
ASSERT_EQ(results[i]->IsNumber(), result->IsNumber());
CHECK_EQ(results[i]->IsUndefined(), result->IsUndefined());
CHECK_EQ(results[i]->IsNumber(), result->IsNumber());
if (result->IsNumber()) {
ASSERT_EQ(Smi::cast(results[i]->ToSmi())->value(),
result->ToInt32()->Value());
CHECK_EQ(Smi::cast(results[i]->ToSmi())->value(),
result->ToInt32()->Value());
}
}
}

52
deps/v8/test/cctest/test-utils.cc

@ -79,3 +79,55 @@ TEST(SNPrintF) {
buffer.Dispose();
}
}
void TestMemCopy(Vector<byte> src,
Vector<byte> dst,
int source_alignment,
int destination_alignment,
int length_alignment) {
memset(dst.start(), 0xFF, dst.length());
byte* to = dst.start() + 32 + destination_alignment;
byte* from = src.start() + source_alignment;
int length = kMinComplexMemCopy + length_alignment;
MemCopy(to, from, static_cast<size_t>(length));
printf("[%d,%d,%d]\n",
source_alignment, destination_alignment, length_alignment);
for (int i = 0; i < length; i++) {
CHECK_EQ(from[i], to[i]);
}
CHECK_EQ(0xFF, to[-1]);
CHECK_EQ(0xFF, to[length]);
}
TEST(MemCopy) {
const int N = kMinComplexMemCopy + 128;
Vector<byte> buffer1 = Vector<byte>::New(N);
Vector<byte> buffer2 = Vector<byte>::New(N);
for (int i = 0; i < N; i++) {
buffer1[i] = static_cast<byte>(i & 0x7F);
}
// Same alignment.
for (int i = 0; i < 32; i++) {
TestMemCopy(buffer1, buffer2, i, i, i * 2);
}
// Different alignment.
for (int i = 0; i < 32; i++) {
for (int j = 1; j < 32; j++) {
TestMemCopy(buffer1, buffer2, i, (i + j) & 0x1F, 0);
}
}
// Different lengths.
for (int i = 0; i < 32; i++) {
TestMemCopy(buffer1, buffer2, 3, 7, i);
}
buffer2.Dispose();
buffer1.Dispose();
}

27
deps/v8/test/mjsunit/delete.js

@ -44,16 +44,11 @@ assertEquals(42, x);
assertTrue(delete x);
assertTrue(typeof x === 'undefined', "x is gone");
/****
* This test relies on DontDelete attributes. This is not
* working yet.
var y = 87; // should have DontDelete attribute
assertEquals(87, y);
assertFalse(delete y, "don't delete");
assertFalse(typeof y === 'undefined');
assertEquals(87, y);
*/
var o = { x: 42, y: 87 };
assertTrue(has(o, 'x'));
@ -161,3 +156,25 @@ assertFalse(has(a, 1), "delete 1");
assertFalse(has(a, Math.pow(2,30)-1), "delete 2^30-1");
assertFalse(has(a, Math.pow(2,31)-1), "delete 2^31-1");
assertEquals(Math.pow(2,31), a.length);
// Check that a LoadIC for a dictionary field works, even
// when the dictionary probe misses.
function load_deleted_property_using_IC() {
var x = new Object();
x.a = 3;
x.b = 4;
x.c = 5;
delete x.c;
assertEquals(3, load_a(x));
assertEquals(3, load_a(x));
delete x.a;
assertTrue(typeof load_a(x) === 'undefined', "x.a is gone");
assertTrue(typeof load_a(x) === 'undefined', "x.a is gone");
}
function load_a(x) {
return x.a;
}
load_deleted_property_using_IC();

2
deps/v8/test/mjsunit/eval.js

@ -50,7 +50,7 @@ global_eval = eval;
assertEquals(void 0, eval(eval("var eval = function f(x) { return 'hest';}")))
eval = global_eval;
//Test eval with different number of parameters.
// Test eval with different number of parameters.
global_eval = eval;
eval = function(x, y) { return x + y; };
assertEquals(4, eval(2, 2));

205
deps/v8/test/mjsunit/keyed-call-ic.js

@ -0,0 +1,205 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// A test for keyed call ICs.
var toStringName = 'toString';
var global = this;
function globalFunction1() {
return 'function1';
}
function globalFunction2() {
return 'function2';
}
assertEquals("[object global]", this[toStringName]());
assertEquals("[object global]", global[toStringName]());
function testGlobals() {
assertEquals("[object global]", this[toStringName]());
assertEquals("[object global]", global[toStringName]());
}
testGlobals();
function F() {}
F.prototype.one = function() {return 'one'; }
F.prototype.two = function() {return 'two'; }
F.prototype.three = function() {return 'three'; }
var keys =
['one', 'one', 'one', 'one', 'two', 'two', 'one', 'three', 'one', 'two'];
function testKeyTransitions() {
var i, key, result, message;
var f = new F();
// Custom call generators
var array = [];
for (i = 0; i != 10; i++) {
key = (i < 8) ? 'push' : 'pop';
array[key](i);
}
assertEquals(6, array.length);
for (i = 0; i != array.length; i++) {
assertEquals(i, array[i]);
}
for (i = 0; i != 10; i++) {
key = (i < 3) ? 'pop' : 'push';
array[key](i);
}
assertEquals(10, array.length);
for (i = 0; i != array.length; i++) {
assertEquals(i, array[i]);
}
var string = 'ABCDEFGHIJ';
for (i = 0; i != 10; i++) {
key = ((i < 5) ? 'charAt' : 'charCodeAt');
result = string[key](i);
message = '\'' + string + '\'[\'' + key + '\'](' + i + ')';
if (i < 5) {
assertEquals(string.charAt(i), result, message);
} else {
assertEquals(string.charCodeAt(i), result, message);
}
}
for (i = 0; i != 10; i++) {
key = ((i < 5) ? 'charCodeAt' : 'charAt');
result = string[key](i);
message = '\'' + string + '\'[\'' + key + '\'](' + i + ')';
if (i < 5) {
assertEquals(string.charCodeAt(i), result, message);
} else {
assertEquals(string.charAt(i), result, message);
}
}
// Function is a constant property
key = 'one';
for (i = 0; i != 10; i++) {
assertEquals(key, f[key]());
if (i == 5) {
key = 'two'; // the name change should cause a miss
}
}
// Function is a fast property
f.field = function() { return 'field'; }
key = 'field';
for (i = 0; i != 10; i++) {
assertEquals(key, f[key]());
if (i == 5) {
key = 'two'; // the name change should cause a miss
}
}
// Calling on slow case object
f.prop = 0;
delete f.prop; // force the object to the slow case
f.four = function() { return 'four'; }
f.five = function() { return 'five'; }
key = 'four';
for (i = 0; i != 10; i++) {
assertEquals(key, f[key]());
if (i == 5) {
key = 'five';
}
}
// Calling on global object
key = 'globalFunction1';
var expect = 'function1';
for (i = 0; i != 10; i++) {
assertEquals(expect, global[key]());
if (i == 5) {
key = 'globalFunction2';
expect = 'function2';
}
}
}
testKeyTransitions();
function testTypeTransitions() {
var f = new F();
var s = '';
var m = 'one';
var i;
s = '';
for (i = 0; i != 10; i++) {
if (i == 5) { F.prototype.one = function() { return '1'; } }
s += f[m]();
}
assertEquals("oneoneoneoneone11111", s);
s = '';
for (i = 0; i != 10; i++) {
if (i == 5) { f.__proto__ = { one: function() { return 'I'; } } }
s += f[m]();
}
assertEquals("11111IIIII", s);
s = '';
for (i = 0; i != 10; i++) {
if (i == 5) { f.one = function() { return 'ONE'; } }
s += f[m]();
}
assertEquals("IIIIIONEONEONEONEONE", s);
m = 'toString';
s = '';
var obj = { toString: function() { return '2'; } };
for (i = 0; i != 10; i++) {
if (i == 5) { obj = "TWO"; }
s += obj[m]();
}
assertEquals("22222TWOTWOTWOTWOTWO", s);
s = '';
obj = { toString: function() { return 'ONE'; } };
m = 'toString';
for (i = 0; i != 10; i++) {
if (i == 5) { obj = 1; }
s += obj[m]();
}
assertEquals("ONEONEONEONEONE11111", s);
}
testTypeTransitions();

42
deps/v8/test/mjsunit/regress/regress-728.js

@ -0,0 +1,42 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var obj = { 0: "obj0" };
// Array index k is too big to fit into the string hash field.
var k = 16777217;
var h = "" + k;
obj[k] = "obj" + k;
// Force computation of the hash for the string representation of the array index.
for (var i = 0; i < 10; i++) { ({})[h]; }
function get(idx) { return obj[idx]; }
assertEquals(get(0), "obj0");
assertEquals(get(h), "obj" + h);

46
deps/v8/test/mjsunit/regress/regress-732.js

@ -0,0 +1,46 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// idx is a valid array index but is too big to be cached in hash field.
var idx = 10000000;
// Create a JSObject with NumberDictionary as a backing store for elements.
var obj = { };
for (var i = 0; i < 100000; i += 100) { obj[i] = "obj" + i; }
// Set value using numeric index.
obj[idx] = "obj" + idx;
// Make a string from index.
var str = "" + idx;
// Force hash computation for the string representation of index.
for (var i = 0; i < 10; i++) { ({})[str]; }
// Try getting value back using string and number representations of
// the same index.
assertEquals(obj[str], obj[idx]);
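A natural companion check, sketched here but not part of the commit, exercises the symmetric path: store through the string spelling and read back through the number (idx2, obj2, str2 are illustrative names):

// Hedged variant: store via the string key, read via the numeric key.
var idx2 = 10000000;
var obj2 = {};
for (var j = 0; j < 100000; j += 100) { obj2[j] = "obj" + j; }  // dictionary elements, as above
var str2 = "" + idx2;
for (var j = 0; j < 10; j++) { ({})[str2]; }  // force hash computation first
obj2[str2] = "obj" + idx2;                    // store via the string representation
assertEquals(obj2[idx2], obj2[str2]);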

91
deps/v8/test/mjsunit/string-charat.js

@@ -27,52 +27,58 @@
 var s = "test";
 function getTwoByteString() { return "\u1234t"; }
 function getCons() { return "testtesttesttest" + getTwoByteString() }
 var slowIndex1 = { valueOf: function() { return 1; } };
 var slowIndex2 = { toString: function() { return "2"; } };
 var slowIndexOutOfRange = { valueOf: function() { return -1; } };
-function basicTest() {
-  assertEquals("t", s.charAt());
-  assertEquals("t", s.charAt("string"));
-  assertEquals("t", s.charAt(null));
-  assertEquals("t", s.charAt(void 0));
-  assertEquals("t", s.charAt(false));
-  assertEquals("e", s.charAt(true));
-  assertEquals("", s.charAt(-1));
-  assertEquals("", s.charAt(4));
-  assertEquals("", s.charAt(slowIndexOutOfRange));
-  assertEquals("", s.charAt(1/0));
-  assertEquals("", s.charAt(-1/0));
-  assertEquals("t", s.charAt(0));
-  assertEquals("t", s.charAt(-0.0));
-  assertEquals("t", s.charAt(0.4));
-  assertEquals("e", s.charAt(slowIndex1));
-  assertEquals("s", s.charAt(slowIndex2));
-  assertEquals("t", s.charAt(3));
-  assertEquals("t", s.charAt(3.4));
-  assertEquals("t", s.charAt(NaN));
-  assertEquals(116, s.charCodeAt());
-  assertEquals(116, s.charCodeAt("string"));
-  assertEquals(116, s.charCodeAt(null));
-  assertEquals(116, s.charCodeAt(void 0));
-  assertEquals(116, s.charCodeAt(false));
-  assertEquals(101, s.charCodeAt(true));
-  assertEquals(116, s.charCodeAt(0));
-  assertEquals(116, s.charCodeAt(-0.0));
-  assertEquals(116, s.charCodeAt(0.4));
-  assertEquals(101, s.charCodeAt(slowIndex1));
-  assertEquals(115, s.charCodeAt(slowIndex2));
-  assertEquals(116, s.charCodeAt(3));
-  assertEquals(116, s.charCodeAt(3.4));
-  assertEquals(116, s.charCodeAt(NaN));
-  assertTrue(isNaN(s.charCodeAt(-1)));
-  assertTrue(isNaN(s.charCodeAt(4)));
-  assertTrue(isNaN(s.charCodeAt(slowIndexOutOfRange)));
-  assertTrue(isNaN(s.charCodeAt(1/0)));
-  assertTrue(isNaN(s.charCodeAt(-1/0)));
+function basicTest(s, len) {
+  assertEquals("t", s().charAt());
+  assertEquals("t", s().charAt("string"));
+  assertEquals("t", s().charAt(null));
+  assertEquals("t", s().charAt(void 0));
+  assertEquals("t", s().charAt(false));
+  assertEquals("e", s().charAt(true));
+  assertEquals("", s().charAt(-1));
+  assertEquals("", s().charAt(len));
+  assertEquals("", s().charAt(slowIndexOutOfRange));
+  assertEquals("", s().charAt(1/0));
+  assertEquals("", s().charAt(-1/0));
+  assertEquals("t", s().charAt(0));
+  assertEquals("t", s().charAt(-0.0));
+  assertEquals("t", s().charAt(-0.1));
+  assertEquals("t", s().charAt(0.4));
+  assertEquals("e", s().charAt(slowIndex1));
+  assertEquals("s", s().charAt(slowIndex2));
+  assertEquals("t", s().charAt(3));
+  assertEquals("t", s().charAt(3.4));
+  assertEquals("t", s().charAt(NaN));
+  assertEquals(116, s().charCodeAt());
+  assertEquals(116, s().charCodeAt("string"));
+  assertEquals(116, s().charCodeAt(null));
+  assertEquals(116, s().charCodeAt(void 0));
+  assertEquals(116, s().charCodeAt(false));
+  assertEquals(101, s().charCodeAt(true));
+  assertEquals(116, s().charCodeAt(0));
+  assertEquals(116, s().charCodeAt(-0.0));
+  assertEquals(116, s().charCodeAt(-0.1));
+  assertEquals(116, s().charCodeAt(0.4));
+  assertEquals(101, s().charCodeAt(slowIndex1));
+  assertEquals(115, s().charCodeAt(slowIndex2));
+  assertEquals(116, s().charCodeAt(3));
+  assertEquals(116, s().charCodeAt(3.4));
+  assertEquals(116, s().charCodeAt(NaN));
+  assertTrue(isNaN(s().charCodeAt(-1)));
+  assertTrue(isNaN(s().charCodeAt(len)));
+  assertTrue(isNaN(s().charCodeAt(slowIndexOutOfRange)));
+  assertTrue(isNaN(s().charCodeAt(1/0)));
+  assertTrue(isNaN(s().charCodeAt(-1/0)));
 }
-basicTest();
+basicTest(function() { return s; }, s.length);
+basicTest(getCons, getCons().length);
 // Make sure enough of the one-char string cache is filled.
 var alpha = ['@'];
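The rewrite threads the receiver in as a thunk (the s parameter is now a function) plus an explicit length, so the identical assertions run against both a flat one-byte string and a cons string with a two-byte tail. Read that way, the harness extends naturally to further representations; getSlice below is a hypothetical third caller, not part of the commit:

// Hypothetical extra caller: a substring-backed "test", same expected values.
function getSlice() { return "xtesty".substring(1, 5); }  // yields "test"
basicTest(getSlice, getSlice().length);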
@@ -122,7 +128,8 @@ stealTest();
 // Test custom string IC-s.
 for (var i = 0; i < 20; i++) {
-  basicTest();
+  basicTest(function() { return s; }, s.length);
+  basicTest(getCons, getCons().length);
   stealTest();
 }
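The twenty iterations plausibly exist to let the specialized charAt/charCodeAt inline caches install and then get hit repeatedly with two receiver shapes (the flat s and the cons result of getCons); a single pass would only cover the uninitialized path. A stripped-down illustration of that warm-up pattern (hot is an illustrative name, and the IC behavior described is an assumption):

// One call site, two string shapes, many iterations.
function hot(getStr) { return getStr().charCodeAt(0); }
for (var i = 0; i < 20; i++) {
  assertEquals(116, hot(function() { return s; }));  // flat "test"
  assertEquals(116, hot(getCons));                   // cons with a two-byte part
}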

4
deps/v8/test/mjsunit/string-charcodeat.js

@@ -153,6 +153,10 @@ TestStringType(Slice16End, true);
 TestStringType(Flat16, true);
 TestStringType(NotAString16, true);
+for (var i = 0; i != 10; i++) {
+  assertEquals(101, Cons16().charCodeAt(1.1));
+  assertEquals('e', Cons16().charAt(1.1));
+}
 function StupidThing() {
   // Doesn't return a string from toString!
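The four added lines hit the charCodeAt/charAt string ICs with a cons receiver and a non-integer index: 1.1 truncates to index 1, so 'e' (code 101) is expected on every iteration. A self-contained analogue for a plain two-byte receiver, with TwoByte as an illustrative helper rather than one of this file's string builders:

// Hypothetical analogue: "\u2603" guarantees a two-byte string.
function TwoByte() { return "\u2603e"; }
for (var i = 0; i != 10; i++) {
  assertEquals(101, TwoByte().charCodeAt(1.1));  // 1.1 truncates to index 1
  assertEquals('e', TwoByte().charAt(1.1));
}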
