
Upgrade V8 to 2.2.18

tag: v0.7.4-release
author: Ryan Dahl
commit: 1c6671aa67
48 changed files:

  1. deps/v8/ChangeLog (14)
  2. deps/v8/include/v8-profiler.h (139)
  3. deps/v8/include/v8.h (62)
  4. deps/v8/src/api.cc (287)
  5. deps/v8/src/arm/assembler-arm.cc (54)
  6. deps/v8/src/arm/assembler-arm.h (24)
  7. deps/v8/src/arm/builtins-arm.cc (18)
  8. deps/v8/src/arm/codegen-arm.cc (267)
  9. deps/v8/src/arm/codegen-arm.h (6)
  10. deps/v8/src/arm/full-codegen-arm.cc (43)
  11. deps/v8/src/arm/ic-arm.cc (444)
  12. deps/v8/src/arm/macro-assembler-arm.cc (54)
  13. deps/v8/src/arm/macro-assembler-arm.h (8)
  14. deps/v8/src/arm/stub-cache-arm.cc (20)
  15. deps/v8/src/arm/virtual-frame-arm.cc (1)
  16. deps/v8/src/arm/virtual-frame-arm.h (5)
  17. deps/v8/src/heap-profiler.cc (71)
  18. deps/v8/src/heap-profiler.h (25)
  19. deps/v8/src/heap.h (4)
  20. deps/v8/src/ia32/codegen-ia32.cc (74)
  21. deps/v8/src/ia32/ic-ia32.cc (134)
  22. deps/v8/src/ic.cc (15)
  23. deps/v8/src/objects.cc (13)
  24. deps/v8/src/objects.h (15)
  25. deps/v8/src/profile-generator.cc (19)
  26. deps/v8/src/profile-generator.h (18)
  27. deps/v8/src/runtime.cc (27)
  28. deps/v8/src/serialize.cc (52)
  29. deps/v8/src/stub-cache.cc (33)
  30. deps/v8/src/utils.h (35)
  31. deps/v8/src/v8.cc (4)
  32. deps/v8/src/version.cc (2)
  33. deps/v8/src/virtual-frame-light-inl.h (4)
  34. deps/v8/src/x64/codegen-x64.cc (108)
  35. deps/v8/src/x64/full-codegen-x64.cc (38)
  36. deps/v8/src/x64/ic-x64.cc (470)
  37. deps/v8/src/x64/stub-cache-x64.cc (21)
  38. deps/v8/src/x64/virtual-frame-x64.cc (19)
  39. deps/v8/src/x64/virtual-frame-x64.h (2)
  40. deps/v8/test/cctest/test-api.cc (72)
  41. deps/v8/test/cctest/test-debug.cc (2)
  42. deps/v8/test/cctest/test-decls.cc (2)
  43. deps/v8/test/cctest/test-disasm-arm.cc (39)
  44. deps/v8/test/cctest/test-heap-profiler.cc (175)
  45. deps/v8/test/cctest/test-serialize.cc (8)
  46. deps/v8/test/mjsunit/keyed-call-generic.js (17)
  47. deps/v8/test/mjsunit/object-define-property.js (153)
  48. deps/v8/test/mjsunit/regress/regress-619.js (5)

deps/v8/ChangeLog (14)

@@ -1,3 +1,17 @@
2010-06-16: Version 2.2.18

        Added API functions to retrieve information on indexed properties
        managed by the embedding layer. Fixes bug 737.

        Make ES5 Object.defineProperty support array elements. Fixes bug 619.

        Add heap profiling to the API.

        Remove old named property query from the API.

        Incremental performance improvements.

2010-06-14: Version 2.2.17

        Improved debugger support for stepping out of functions.

deps/v8/include/v8-profiler.h (139)

@@ -184,6 +184,145 @@ class V8EXPORT CpuProfiler {
};
class HeapGraphNode;
/**
* HeapSnapshotEdge represents a directed connection between heap
* graph nodes: from retainers to retained nodes.
*/
class V8EXPORT HeapGraphEdge {
public:
enum Type {
CONTEXT_VARIABLE = 0, // A variable from a function context.
ELEMENT = 1, // An element of an array.
PROPERTY = 2 // A named object property.
};
/** Returns edge type (see HeapGraphEdge::Type). */
Type GetType() const;
/**
* Returns edge name. This can be a variable name, an element index, or
* a property name.
*/
Handle<Value> GetName() const;
/** Returns origin node. */
const HeapGraphNode* GetFromNode() const;
/** Returns destination node. */
const HeapGraphNode* GetToNode() const;
};
class V8EXPORT HeapGraphPath {
public:
/** Returns the number of edges in the path. */
int GetEdgesCount() const;
/** Returns an edge from the path. */
const HeapGraphEdge* GetEdge(int index) const;
/** Returns origin node. */
const HeapGraphNode* GetFromNode() const;
/** Returns destination node. */
const HeapGraphNode* GetToNode() const;
};
/**
* HeapGraphNode represents a node in a heap graph.
*/
class V8EXPORT HeapGraphNode {
public:
enum Type {
INTERNAL = 0, // Internal node, a virtual one, for housekeeping.
ARRAY = 1, // An array of elements.
STRING = 2, // A string.
OBJECT = 3, // A JS object (except for arrays and strings).
CODE = 4, // Compiled code.
CLOSURE = 5 // Function closure.
};
/** Returns node type (see HeapGraphNode::Type). */
Type GetType() const;
/**
* Returns node name. Depending on node's type this can be the name
* of the constructor (for objects), the name of the function (for
* closures), string value, or an empty string (for compiled code).
*/
Handle<String> GetName() const;
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
/** Returns node's network (self + reachable nodes) size, in bytes. */
int GetTotalSize() const;
/**
* Returns node's private size, in bytes. That is, the size of memory
* that will be reclaimed having this node collected.
*/
int GetPrivateSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
/** Retrieves a child by index. */
const HeapGraphEdge* GetChild(int index) const;
/** Returns retainer nodes count of the node. */
int GetRetainersCount() const;
/** Returns a retainer by index. */
const HeapGraphEdge* GetRetainer(int index) const;
/** Returns the number of simple retaining paths from the root to the node. */
int GetRetainingPathsCount() const;
/** Returns a retaining path by index. */
const HeapGraphPath* GetRetainingPath(int index) const;
};
/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
class V8EXPORT HeapSnapshot {
public:
/** Returns heap snapshot UID (assigned by the profiler). */
unsigned GetUid() const;
/** Returns heap snapshot title. */
Handle<String> GetTitle() const;
/** Returns the root node of the heap graph. */
const HeapGraphNode* GetHead() const;
};
/**
* Interface for controlling heap profiling.
*/
class V8EXPORT HeapProfiler {
public:
/** Returns the number of snapshots taken. */
static int GetSnapshotsCount();
/** Returns a snapshot by index. */
static const HeapSnapshot* GetSnapshot(int index);
/** Returns a profile by uid. */
static const HeapSnapshot* FindSnapshot(unsigned uid);
/** Takes a heap snapshot and returns it. Title may be an empty string. */
static const HeapSnapshot* TakeSnapshot(Handle<String> title);
};
} // namespace v8
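For orientation, here is a minimal embedder sketch of the new heap profiling API (assumptions: V8 is initialized with an entered context, the build has ENABLE_LOGGING_AND_PROFILING, and DumpNode/DumpHeapSnapshot are hypothetical helper names, not part of this patch):

#include <cstdio>
#include <v8.h>
#include <v8-profiler.h>

// Print a node's name and self size, then recurse shallowly into children.
static void DumpNode(const v8::HeapGraphNode* node, int depth) {
  v8::String::AsciiValue name(node->GetName());
  std::printf("%*s%s (self: %d bytes)\n", depth * 2, "", *name,
              node->GetSelfSize());
  if (depth >= 2) return;  // keep the example output small
  for (int i = 0; i < node->GetChildrenCount(); i++) {
    DumpNode(node->GetChild(i)->GetToNode(), depth + 1);
  }
}

void DumpHeapSnapshot() {
  v8::HandleScope scope;
  const v8::HeapSnapshot* snapshot =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("example"));
  std::printf("snapshot uid=%u\n", snapshot->GetUid());
  DumpNode(snapshot->GetHead(), 0);
}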

deps/v8/include/v8.h (62)

@@ -1570,6 +1570,9 @@ class V8EXPORT Object : public Value {
* the backing store is preserved while V8 has a reference.
*/
void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
bool HasIndexedPropertiesInPixelData();
uint8_t* GetIndexedPropertiesPixelData();
int GetIndexedPropertiesPixelDataLength();
/**
* Set the backing store of the indexed properties to be managed by the
@@ -1581,6 +1584,10 @@ class V8EXPORT Object : public Value {
void SetIndexedPropertiesToExternalArrayData(void* data,
ExternalArrayType array_type,
int number_of_elements);
bool HasIndexedPropertiesInExternalArrayData();
void* GetIndexedPropertiesExternalArrayData();
ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
int GetIndexedPropertiesExternalArrayDataLength();
static Local<Object> New();
static inline Object* Cast(Value* obj);
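A usage sketch for the external-array introspection calls (hypothetical embedder code; per the contract above, the backing store must stay alive while V8 references it):

#include <v8.h>

static float buffer[256];  // backing store, owned by the embedder

void AttachExternalData(v8::Handle<v8::Object> obj) {
  obj->SetIndexedPropertiesToExternalArrayData(
      buffer, v8::kExternalFloatArray, 256);
  // The new getters read the state back:
  if (obj->HasIndexedPropertiesInExternalArrayData()) {
    void* data = obj->GetIndexedPropertiesExternalArrayData();  // == buffer
    int length = obj->GetIndexedPropertiesExternalArrayDataLength();  // 256
    v8::ExternalArrayType type =
        obj->GetIndexedPropertiesExternalArrayDataType();
    (void)data; (void)length; (void)type;  // type == v8::kExternalFloatArray
  }
}

The pixel-data variants above follow the same pattern for uint8_t buffers.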
@@ -1761,20 +1768,11 @@ typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
/**
* Returns a non-empty handle if the interceptor intercepts the request.
* The result is either boolean (true if property exists and false
* otherwise) or an integer encoding property attributes.
* The result is an integer encoding property attributes (like v8::None,
* v8::DontEnum, etc.)
*/
#ifdef USE_NEW_QUERY_CALLBACKS
typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
const AccessorInfo& info);
#else
typedef Handle<Boolean> (*NamedPropertyQuery)(Local<String> property,
const AccessorInfo& info);
#endif
typedef Handle<Value> (*NamedPropertyQueryImpl)(Local<String> property,
const AccessorInfo& info);
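Under the new signature a query callback reports property attributes rather than a boolean; a minimal sketch (MyNamedQuery is a hypothetical name, not from this patch):

static v8::Handle<v8::Integer> MyNamedQuery(v8::Local<v8::String> property,
                                            const v8::AccessorInfo& info) {
  // Report that the property exists and is read-only. Returning an empty
  // handle instead would mean "not intercepted".
  return v8::Integer::New(v8::ReadOnly);
}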
/**
@@ -2026,16 +2024,7 @@ class V8EXPORT FunctionTemplate : public Template {
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
NamedPropertyQueryImpl casted =
reinterpret_cast<NamedPropertyQueryImpl>(query);
SetNamedInstancePropertyHandlerImpl(getter,
setter,
casted,
remover,
enumerator,
data);
}
Handle<Value> data);
void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
IndexedPropertySetter setter,
IndexedPropertyQuery query,
@@ -2047,13 +2036,6 @@ class V8EXPORT FunctionTemplate : public Template {
friend class Context;
friend class ObjectTemplate;
private:
void SetNamedInstancePropertyHandlerImpl(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQueryImpl query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data);
};
@@ -2111,7 +2093,8 @@ class V8EXPORT ObjectTemplate : public Template {
*
* \param getter The callback to invoke when getting a property.
* \param setter The callback to invoke when setting a property.
* \param query The callback to invoke to check if an object has a property.
* \param query The callback to invoke to check if a property is present,
* and if present, get its attributes.
* \param deleter The callback to invoke when deleting a property.
* \param enumerator The callback to invoke to enumerate all the named
* properties of an object.
@@ -2123,26 +2106,7 @@ class V8EXPORT ObjectTemplate : public Template {
NamedPropertyQuery query = 0,
NamedPropertyDeleter deleter = 0,
NamedPropertyEnumerator enumerator = 0,
Handle<Value> data = Handle<Value>()) {
NamedPropertyQueryImpl casted =
reinterpret_cast<NamedPropertyQueryImpl>(query);
SetNamedPropertyHandlerImpl(getter,
setter,
casted,
deleter,
enumerator,
data);
}
private:
void SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQueryImpl query,
NamedPropertyDeleter deleter,
NamedPropertyEnumerator enumerator,
Handle<Value> data);
public:
Handle<Value> data = Handle<Value>());
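Installation is unchanged on the surface; a sketch reusing the hypothetical MyNamedQuery callback from earlier:

v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
templ->SetNamedPropertyHandler(0,              // getter: none
                               0,              // setter: none
                               MyNamedQuery);  // query: reports attributes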
/**
* Sets an indexed property handler on the object template.

deps/v8/src/api.cc (287)

@@ -34,6 +34,7 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "messages.h"
#include "platform.h"
#include "profile-generator-inl.h"
@@ -853,10 +854,10 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
}
void FunctionTemplate::SetNamedInstancePropertyHandlerImpl(
void FunctionTemplate::SetNamedInstancePropertyHandler(
NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQueryImpl query,
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
@@ -987,12 +988,11 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
}
void ObjectTemplate::SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQueryImpl query,
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator
enumerator,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
ENTER_V8;
@@ -1001,7 +1001,7 @@ void ObjectTemplate::SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
Utils::ToLocal(cons)->SetNamedInstancePropertyHandlerImpl(getter,
Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
setter,
query,
remover,
@@ -2613,6 +2613,35 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
}
bool v8::Object::HasIndexedPropertiesInPixelData() {
ON_BAILOUT("v8::HasIndexedPropertiesInPixelData()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return self->HasPixelElements();
}
uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
ON_BAILOUT("v8::GetIndexedPropertiesPixelData()", return NULL);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasPixelElements()) {
return i::PixelArray::cast(self->elements())->external_pointer();
} else {
return NULL;
}
}
int v8::Object::GetIndexedPropertiesPixelDataLength() {
ON_BAILOUT("v8::GetIndexedPropertiesPixelDataLength()", return -1);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasPixelElements()) {
return i::PixelArray::cast(self->elements())->length();
} else {
return -1;
}
}
void v8::Object::SetIndexedPropertiesToExternalArrayData(
void* data,
ExternalArrayType array_type,
@@ -2637,6 +2666,60 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
}
bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
ON_BAILOUT("v8::HasIndexedPropertiesInExternalArrayData()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return self->HasExternalArrayElements();
}
void* v8::Object::GetIndexedPropertiesExternalArrayData() {
ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayData()", return NULL);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->external_pointer();
} else {
return NULL;
}
}
ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataType()",
return static_cast<ExternalArrayType>(-1));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
switch (self->elements()->map()->instance_type()) {
case i::EXTERNAL_BYTE_ARRAY_TYPE:
return kExternalByteArray;
case i::EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
return kExternalUnsignedByteArray;
case i::EXTERNAL_SHORT_ARRAY_TYPE:
return kExternalShortArray;
case i::EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
return kExternalUnsignedShortArray;
case i::EXTERNAL_INT_ARRAY_TYPE:
return kExternalIntArray;
case i::EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
return kExternalUnsignedIntArray;
case i::EXTERNAL_FLOAT_ARRAY_TYPE:
return kExternalFloatArray;
default:
return static_cast<ExternalArrayType>(-1);
}
}
int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataLength()", return 0);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->length();
} else {
return -1;
}
}
Local<v8::Object> Function::NewInstance() const {
return NewInstance(0, NULL);
}
@@ -4363,6 +4446,196 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
*Utils::OpenHandle(*title)));
}
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
IsDeadCheck("v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>(
reinterpret_cast<const i::HeapGraphEdge*>(this)->type());
}
Handle<Value> HeapGraphEdge::GetName() const {
IsDeadCheck("v8::HeapGraphEdge::GetName");
const i::HeapGraphEdge* edge =
reinterpret_cast<const i::HeapGraphEdge*>(this);
switch (edge->type()) {
case i::HeapGraphEdge::CONTEXT_VARIABLE:
case i::HeapGraphEdge::PROPERTY:
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
edge->name())));
case i::HeapGraphEdge::ELEMENT:
return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
edge->index())));
default: UNREACHABLE();
}
return ImplementationUtilities::Undefined();
}
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from =
reinterpret_cast<const i::HeapGraphEdge*>(this)->from();
return reinterpret_cast<const HeapGraphNode*>(from);
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
IsDeadCheck("v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to =
reinterpret_cast<const i::HeapGraphEdge*>(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
}
int HeapGraphPath::GetEdgesCount() const {
return reinterpret_cast<const i::HeapGraphPath*>(this)->path()->length();
}
const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
return reinterpret_cast<const HeapGraphEdge*>(
reinterpret_cast<const i::HeapGraphPath*>(this)->path()->at(index));
}
const HeapGraphNode* HeapGraphPath::GetFromNode() const {
return GetEdgesCount() > 0 ? GetEdge(0)->GetFromNode() : NULL;
}
const HeapGraphNode* HeapGraphPath::GetToNode() const {
const int count = GetEdgesCount();
return count > 0 ? GetEdge(count - 1)->GetToNode() : NULL;
}
HeapGraphNode::Type HeapGraphNode::GetType() const {
IsDeadCheck("v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>(
reinterpret_cast<const i::HeapEntry*>(this)->type());
}
Handle<String> HeapGraphNode::GetName() const {
IsDeadCheck("v8::HeapGraphNode::GetName");
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
reinterpret_cast<const i::HeapEntry*>(this)->name())));
}
int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
}
int HeapGraphNode::GetTotalSize() const {
IsDeadCheck("v8::HeapSnapshot::GetHead");
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(this))->TotalSize();
}
int HeapGraphNode::GetPrivateSize() const {
IsDeadCheck("v8::HeapSnapshot::GetPrivateSize");
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(this))->NonSharedTotalSize();
}
int HeapGraphNode::GetChildrenCount() const {
IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
return reinterpret_cast<const i::HeapEntry*>(this)->children()->length();
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
reinterpret_cast<const i::HeapEntry*>(this)->children()->at(index));
}
int HeapGraphNode::GetRetainersCount() const {
IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
return reinterpret_cast<const i::HeapEntry*>(this)->retainers()->length();
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
reinterpret_cast<const i::HeapEntry*>(this)->retainers()->at(index));
}
int HeapGraphNode::GetRetainingPathsCount() const {
IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(
this))->GetRetainingPaths()->length();
}
const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
return reinterpret_cast<const HeapGraphPath*>(
const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(
this))->GetRetainingPaths()->at(index));
}
unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid");
return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
}
Handle<String> HeapSnapshot::GetTitle() const {
IsDeadCheck("v8::HeapSnapshot::GetTitle");
const i::HeapSnapshot* snapshot =
reinterpret_cast<const i::HeapSnapshot*>(this);
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
snapshot->title())));
}
const HeapGraphNode* HeapSnapshot::GetHead() const {
IsDeadCheck("v8::HeapSnapshot::GetHead");
const i::HeapSnapshot* snapshot =
reinterpret_cast<const i::HeapSnapshot*>(this);
return reinterpret_cast<const HeapGraphNode*>(snapshot->const_root());
}
int HeapProfiler::GetSnapshotsCount() {
IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();
}
const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
IsDeadCheck("v8::HeapProfiler::GetSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::GetSnapshot(index));
}
const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
IsDeadCheck("v8::HeapProfiler::FindSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::FindSnapshot(uid));
}
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title) {
IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title)));
}
#endif // ENABLE_LOGGING_AND_PROFILING

deps/v8/src/arm/assembler-arm.cc (54)

@@ -279,6 +279,20 @@ const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kALUMask = 0x6f * B21;
const Instr kAddPattern = 0x4 * B21;
const Instr kSubPattern = 0x2 * B21;
const Instr kBicPattern = 0xe * B21;
const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
@@ -627,6 +641,9 @@ void Assembler::next(Label* L) {
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
@@ -640,13 +657,36 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
// If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
// If the opcode is one with a complementary version and the complementary
// immediate fits, change the opcode.
if (instr != NULL) {
if ((*instr & kMovMvnMask) == kMovMvnPattern) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kMovMvnFlip;
return true;
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
} else {
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == kAddPattern ||
alu_insn == kSubPattern) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
} else if (alu_insn == kAndPattern ||
alu_insn == kBicPattern) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= 0x2*B21;
*instr ^= kAndBicFlip;
return true;
}
}
}
}
return false;
}
@@ -670,6 +710,14 @@ static bool MustUseIp(RelocInfo::Mode rmode) {
}
bool Operand::is_single_instruction() const {
if (rm_.is_valid()) return true;
if (MustUseIp(rmode_)) return false;
uint32_t dummy1, dummy2;
return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
}
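For intuition: an ARM data-processing immediate is an 8-bit value rotated right by an even amount. A standalone sketch of that encodability test (illustrative C++, not V8 code):

#include <stdint.h>

static bool FitsArmImmediate(uint32_t imm32) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Rotating left by 'rot' undoes an encoder's rotate-right by 'rot'.
    uint32_t v = (rot == 0) ? imm32
                            : (imm32 << rot) | (imm32 >> (32 - rot));
    if ((v & ~0xffu) == 0) return true;  // fits the 8-bit immediate field
  }
  return false;
}

For example, 0x3fc fits (0xff rotated), while 0x41d (HeapNumber::kExponentBias + 30, needed by GetInt32 later in this patch) does not, which is why that code splits it with a fudge factor.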
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,

deps/v8/src/arm/assembler-arm.h (24)

@@ -418,6 +418,15 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
return imm32_;
}
Register rm() const { return rm_; }
private:
@@ -532,6 +541,21 @@ extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kALUMask;
extern const Instr kAddPattern;
extern const Instr kSubPattern;
extern const Instr kAndPattern;
extern const Instr kBicPattern;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
class Assembler : public Malloced {
public:

deps/v8/src/arm/builtins-arm.cc (18)

@@ -136,7 +136,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
ASSERT(kSmiTag == 0);
__ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
@@ -240,9 +241,10 @@ static void AllocateJSArray(MacroAssembler* masm,
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ and_(elements_array_storage,
ASSERT(kSmiTag == 0);
__ sub(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
Operand(kHeapObjectTag));
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
@@ -617,12 +619,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ and_(r6,
r0,
Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
__ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8));
__ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
__ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC);
__ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
__ add(r3, r3, Operand(r6));
__ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
__ sub(r3, r3, Operand(r6), SetCC);
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
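A plain-C++ sketch of the tag arithmetic involved (kHeapObjectTag is 1 in V8, so on a pointer whose low bits hold exactly the tag, subtracting it and masking it away agree):

#include <stdint.h>

static const intptr_t kTag = 1;  // stands in for kHeapObjectTag

static intptr_t ClearHeapTag(intptr_t tagged) {
  // The low bit of a heap-object pointer is the tag, so this equals
  // 'tagged & ~1' for any properly tagged pointer.
  return tagged - kTag;
}

Subtracting works because a heap-object pointer is known to carry exactly that tag value in its low bits.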

deps/v8/src/arm/codegen-arm.cc (267)

@@ -342,56 +342,27 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
}
// Generate the return sequence if necessary.
if (has_valid_frame() || function_return_.is_linked()) {
if (!function_return_.is_linked()) {
CodeForReturnPosition(info->function());
}
// exit
// r0: result
// sp: stack pointer
// fp: frame pointer
// cp: callee's context
// Handle the return from the function.
if (has_valid_frame()) {
// If there is a valid frame, control flow can fall off the end of
// the body. In that case there is an implicit return statement.
ASSERT(!function_return_is_shadowed_);
frame_->PrepareForReturn();
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
if (function_return_.is_bound()) {
function_return_.Jump();
} else {
function_return_.Bind();
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns the parameter as it is.
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kTraceExit, 1);
}
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. If the sp_delta above cannot be encoded in
// the add instruction, the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length ==
Assembler::kJSReturnSequenceInstructions ||
return_sequence_length ==
Assembler::kJSReturnSequenceInstructions + 1);
#endif
}
GenerateReturnSequence();
}
} else if (function_return_.is_linked()) {
// If the return target has dangling jumps to it, then we have not
// yet generated the return sequence. This can happen when (a)
// control does not flow off the end of the body so we did not
// compile an artificial return statement just above, and (b) there
// are return statements in the body but (c) they are all shadowed.
function_return_.Bind();
GenerateReturnSequence();
}
// Adjust for function-level loop nesting.
@@ -1203,7 +1174,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
frame_->EmitPush(tos, TypeInfo::Smi());
@@ -1215,7 +1186,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
deferred->BindExit();
@@ -1958,8 +1929,56 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
// returning thus making it easier to merge.
frame_->EmitPop(r0);
frame_->PrepareForReturn();
if (function_return_.is_bound()) {
// If the function return label is already bound we reuse the
// code by jumping to the return site.
function_return_.Jump();
} else {
function_return_.Bind();
GenerateReturnSequence();
}
}
}
void CodeGenerator::GenerateReturnSequence() {
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns the parameter as it is.
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kTraceExit, 1);
}
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
DeleteFrame();
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. If the sp_delta above cannot be encoded in
// the add instruction, the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length ==
Assembler::kJSReturnSequenceInstructions ||
return_sequence_length ==
Assembler::kJSReturnSequenceInstructions + 1);
#endif
}
}
@@ -4069,29 +4088,35 @@ void CodeGenerator::VisitCall(Call* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Load(property->obj());
if (!property->is_synthetic()) {
// Duplicate receiver for later use.
__ ldr(r0, MemOperand(sp, 0));
frame_->EmitPush(r0);
}
if (property->is_synthetic()) {
Load(property->key());
EmitKeyedLoad();
// Put the function below the receiver.
if (property->is_synthetic()) {
// Use the global receiver.
frame_->EmitPush(r0); // Function.
LoadGlobalReceiver(r0);
// Call the function.
CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
frame_->EmitPush(r0);
} else {
// Switch receiver and function.
frame_->EmitPop(r1); // Receiver.
frame_->EmitPush(r0); // Function.
frame_->EmitPush(r1); // Receiver.
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
}
// Call the function.
CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
// Set the name register and call the IC initialization code.
Load(property->key());
frame_->EmitPop(r2); // Function name.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub = ComputeKeyedCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
frame_->EmitPush(r0);
}
}
} else {
// ----------------------------------
@@ -6628,8 +6653,12 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
// Gets the wrong answer for 0, but we already checked for that case above.
__ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here.
__ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
// We use mantissa as a scratch register here. Use a fudge factor to
// divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
// that fit in the ARM's constant field.
int fudge = 0x400;
__ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
__ add(mantissa, mantissa, Operand(fudge));
__ orr(exponent,
exponent,
Operand(mantissa, LSL, HeapNumber::kExponentShift));
@@ -6702,15 +6731,12 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = r5;
__ cmp(r0, r1);
__ b(ne, &not_identical);
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cc != eq || !never_nan_nan) {
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
@@ -6771,8 +6797,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Read top bits of double representation (second word of value).
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ and_(r3, r2, Operand(exp_mask_reg));
__ cmp(r3, Operand(exp_mask_reg));
__ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs have all-one exponents so they sign extend to -1.
__ cmp(r3, Operand(-1));
__ b(ne, &return_equal);
// Shift out flag and all exponent bits, retaining only mantissa.
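The Sbfx-based test relies on IEEE-754 layout: a double's 11 exponent bits are all ones exactly for NaNs and infinities, so sign-extending the extracted field gives -1. A plain-C++ sketch over the high word (illustrative; assumes arithmetic right shift, as on V8's targets):

#include <stdint.h>

static bool ExponentAllOnes(uint32_t high_word) {
  // Exponent is bits 20..30 of the high word (kExponentShift = 20,
  // kExponentBits = 11). Shift it to the top, then sign-extend like Sbfx.
  int32_t exp = static_cast<int32_t>(high_word << 1) >> 21;
  return exp == -1;  // NaN or infinity
}

The surrounding code then inspects the mantissa to separate NaN (non-zero mantissa) from infinity.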
@@ -6893,14 +6920,14 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Register rhs_mantissa = exp_first ? r1 : r0;
Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
Label lhs_not_nan_exp_mask_is_loaded;
Register exp_mask_reg = r5;
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
__ and_(r4, lhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
__ b(ne, &lhs_not_nan_exp_mask_is_loaded);
__ Sbfx(r4,
lhs_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// NaNs have all-one exponents so they sign extend to -1.
__ cmp(r4, Operand(-1));
__ b(ne, lhs_not_nan);
__ mov(r4,
Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
@@ -6909,10 +6936,12 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
__ b(ne, &one_is_nan);
__ bind(lhs_not_nan);
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
__ bind(&lhs_not_nan_exp_mask_is_loaded);
__ and_(r4, rhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
__ Sbfx(r4,
rhs_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// NaNs have all-one exponents so they sign extend to -1.
__ cmp(r4, Operand(-1));
__ b(ne, &neither_is_nan);
__ mov(r4,
Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
@@ -7633,7 +7662,10 @@ static void GetInt32(MacroAssembler* masm,
// Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
__ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
__ Ubfx(scratch2,
scratch,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
__ mov(dest, Operand(0));
@@ -7641,9 +7673,14 @@ static void GetInt32(MacroAssembler* masm,
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
const uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
__ cmp(scratch2, Operand(non_smi_exponent));
const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
// The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
// split it up to avoid a constant pool entry. You can't do that in general
// for cmp because of the overflow flag, but we know the exponent is in the
// range 0-2047 so there is no overflow.
int fudge_factor = 0x400;
__ sub(scratch2, scratch2, Operand(fudge_factor));
__ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
// If we have a match of the int32-but-not-Smi exponent then skip some logic.
__ b(eq, &right_exponent);
// If the exponent is higher than that then go to slow case. This catches
@@ -7653,17 +7690,14 @@ static void GetInt32(MacroAssembler* masm,
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
__ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
if (!CpuFeatures::IsSupported(VFP3)) {
// We have a shifted exponent between 0 and 30 in scratch2.
__ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
// We now have the exponent in dest. Subtract from 30 to get
// how much to shift down.
__ rsb(dest, dest, Operand(30));
// We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
// get how much to shift down.
__ rsb(dest, scratch2, Operand(30));
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(VFP3)) {
@@ -8276,20 +8310,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
// r2 = low 32 bits of double value
// r3 = high 32 bits of double value
// Compute hash:
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ eor(r1, r2, Operand(r3));
__ eor(r1, r1, Operand(r1, LSR, 16));
__ eor(r1, r1, Operand(r1, LSR, 8));
__ eor(r1, r1, Operand(r1, ASR, 16));
__ eor(r1, r1, Operand(r1, ASR, 8));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
if (CpuFeatures::IsSupported(ARMv7)) {
const int kTranscendentalCacheSizeBits = 9;
ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
TranscendentalCache::kCacheSize);
__ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
} else {
__ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
}
__ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.
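The same hash in scalar form (sketch; kCacheSize is 512 here, per the kTranscendentalCacheSizeBits == 9 assertion in the removed lines):

#include <stdint.h>

static int TranscendentalHash(uint32_t low, uint32_t high) {
  int32_t h = static_cast<int32_t>(low ^ high);
  h ^= h >> 16;  // arithmetic shifts, matching the ASR operands above
  h ^= h >> 8;
  return h & (512 - 1);  // kCacheSize - 1, a power-of-two mask
}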
@@ -9248,15 +9275,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
Label seq_string;
const int kStringRepresentationEncodingMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
__ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
// First check for sequential string.
ASSERT_EQ(0, kStringTag);
ASSERT_EQ(0, kSeqStringTag);
__ tst(r1, Operand(kIsNotStringMask | kStringRepresentationMask));
// First check for flat string.
__ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
ASSERT_EQ(0, kStringTag | kSeqStringTag);
__ b(eq, &seq_string);
// subject: Subject string
@@ -9266,8 +9289,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
__ and_(r0, r0, Operand(kStringRepresentationMask));
__ cmp(r0, Operand(kConsStringTag));
ASSERT(kExternalStringTag != 0);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
__ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
__ b(ne, &runtime);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ LoadRoot(r1, Heap::kEmptyStringRootIndex);
@@ -9276,25 +9300,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
// Is first part a flat string?
ASSERT_EQ(0, kSeqStringTag);
__ tst(r0, Operand(kStringRepresentationMask));
__ b(nz, &runtime);
__ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
__ bind(&seq_string);
// r1: subject string type & kStringRepresentationEncodingMask
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
// it has, the field contains a code object otherwise it contains the hole.
#ifdef DEBUG
const int kSeqAsciiString = kStringTag | kSeqStringTag | kAsciiStringTag;
const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
CHECK_EQ(4, kSeqAsciiString);
CHECK_EQ(0, kSeqTwoByteString);
#endif
// r0: Instance type of subject string
ASSERT_EQ(4, kAsciiStringTag);
ASSERT_EQ(0, kTwoByteStringTag);
// Find the code object based on the assumptions above.
__ mov(r3, Operand(r1, ASR, 2), SetCC);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);

deps/v8/src/arm/codegen-arm.h (6)

@@ -315,6 +315,12 @@ class CodeGenerator: public AstVisitor {
// Main code generation function
void Generate(CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
// target (which cannot be done more than once). The return value should
// be in r0.
void GenerateReturnSequence();
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();

deps/v8/src/arm/full-codegen-arm.cc (43)

@@ -1648,6 +1648,30 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForValue(args->at(i), kStack);
}
VisitForValue(key, kAccumulator);
__ mov(r2, r0);
// Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
in_loop);
__ Call(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
Apply(context_, r0);
}
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@@ -1743,35 +1767,28 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed CallIC.
VisitForValue(prop->obj(), kStack);
if (prop->is_synthetic()) {
VisitForValue(prop->key(), kAccumulator);
// Record source code position for IC call.
SetSourcePosition(prop->position());
if (prop->is_synthetic()) {
__ pop(r1); // We do not need to keep the receiver.
} else {
__ ldr(r1, MemOperand(sp, 0)); // Keep receiver, to call function on.
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
if (prop->is_synthetic()) {
// Push result (function).
__ push(r0);
// Push Global receiver.
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ push(r1);
EmitCallWithStub(expr);
} else {
// Pop receiver.
__ pop(r1);
// Push result (function).
__ push(r0);
__ push(r1);
EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous

deps/v8/src/arm/ic-arm.cc (444)

@@ -167,16 +167,22 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
Register result,
Register t0,
Register t1,
Register t2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'result'.
// Unchanged on bailout so 'key' or 'result' can be used
// in further computation.
//
// Scratch registers:
//
@@ -248,7 +254,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ ldr(key, FieldMemOperand(t2, kValueOffset));
__ ldr(result, FieldMemOperand(t2, kValueOffset));
}
@@ -298,22 +304,159 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* slow) {
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, slow);
// Get the map of the receiver.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
__ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
__ b(ne, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ cmp(scratch1, Operand(JS_OBJECT_TYPE));
__ b(lt, slow);
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register elements,
Register scratch1,
Register scratch2,
Register result,
Label* not_fast_array,
Label* out_of_range) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// elements - holds the elements of the receiver on exit.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
// scratch1 - used to hold elements map and elements length.
// Holds the elements map if not_fast_array branch is taken.
//
// scratch2 - used to hold the loaded value.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch1, ip);
__ b(ne, not_fast_array);
// Check that the key (index) is within bounds.
__ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch1));
__ b(hs, out_of_range);
// Fast case: Do the load.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ ldr(scratch2,
MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, out_of_range);
__ mov(result, scratch2);
}
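Stripped of assembler detail, the fast path above has this shape (illustrative stand-in types, not real V8 internals; the unsigned compare mirrors the 'hs' branch):

struct ElementsLike {
  bool is_fixed_array;  // result of the map check against FixedArray
  int length;
  void** slots;
};

static bool FastArrayLoad(const ElementsLike& elements, int index,
                          void* the_hole, void** result) {
  if (!elements.is_fixed_array) return false;  // dictionary mode: bail out
  if (static_cast<unsigned>(index) >=
      static_cast<unsigned>(elements.length)) {
    return false;                              // out of range: bail out
  }
  void* value = elements.slots[index];
  if (value == the_hole) return false;  // hole: must search prototype chain
  *result = value;
  return true;
}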
// Checks whether a key is an array index string or a symbol string.
// Falls through if a key is a symbol.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
Label* not_symbol) {
// The key is not a smi.
// Is it a string?
__ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
__ b(ge, not_symbol);
// Is the string an array index, with cached numeric value?
__ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
__ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
__ b(eq, index_string);
// Is the string a symbol?
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
ASSERT(kSymbolTag != 0);
__ tst(hash, Operand(kIsSymbolMask));
__ b(eq, not_symbol);
}
// Picks out an array index from the hash field.
static void GenerateIndexFromHash(MacroAssembler* masm,
Register key,
Register hash) {
// Register use:
// key - holds the overwritten key on exit.
// hash - holds the key's hash. Clobbered.
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
// We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
// Here we actually clobber the key which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
ASSERT(String::kHashShift >= kSmiTagSize);
__ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
__ mov(key, Operand(hash, LSL, kSmiTagSize));
}
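In scalar form the extraction is (sketch; the first two constants are illustrative stand-ins, the real values live in objects.h):

#include <stdint.h>

static int32_t IndexFromHash(uint32_t hash_field) {
  const int kHashShift = 2;             // assumed value, for illustration
  const int kArrayIndexValueBits = 24;  // assumed value, for illustration
  const int kSmiTagSize = 1;
  uint32_t index =
      (hash_field >> kHashShift) & ((1u << kArrayIndexValueBits) - 1);
  return static_cast<int32_t>(index << kSmiTagSize);  // smi-tag the result
}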
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind) {
// ----------- S t a t e -------------
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// If the stub cache probing failed, the receiver might be a value.
@@ -355,9 +498,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -390,7 +531,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -443,13 +584,11 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ CheckAccessGlobalProxy(r1, r0, &miss);
__ b(&invoke);
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -465,7 +604,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
__ mov(r0, Operand(2));
__ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
__ mov(r1, Operand(ExternalReference(IC_Utility(id))));
CEntryStub stub(1);
__ CallStub(&stub);
@@ -496,18 +635,165 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
}
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
GenerateMiss(masm, argc);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNREACHABLE();
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNREACHABLE();
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
Label check_number_dictionary, check_string, lookup_monomorphic_cache;
Label index_smi, index_string;
// Check that the key is a smi.
__ BranchOnNotSmi(r2, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &slow_call);
GenerateFastArrayLoad(
masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
__ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3);
__ bind(&do_call);
// receiver in r1 is not used after this point.
// r2: key
// r1: function
// Check that the value in r1 is a JSFunction.
__ BranchOnSmi(r1, &slow_call);
__ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
__ b(ne, &slow_call);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(r1, actual, JUMP_FUNCTION);
__ bind(&check_number_dictionary);
// r2: key
// r3: elements map
// r4: elements
// Check whether the elements is a number dictionary.
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow_load);
__ mov(r0, Operand(r2, ASR, kSmiTagSize));
// r0: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1, r0, r3);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1, r0, r3);
__ EnterInternalFrame();
__ push(r2); // save the key
__ Push(r1, r2); // pass the receiver and the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(r2); // restore the key
__ LeaveInternalFrame();
__ mov(r1, r0);
__ jmp(&do_call);
__ bind(&check_string);
GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
// The key is known to be a symbol.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &lookup_monomorphic_cache);
__ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &lookup_monomorphic_cache);
GenerateDictionaryLoad(
masm, &slow_load, r1, r2, r1, r0, r3, r4, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1, r0, r3);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
// Fall through on miss.
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
// - the key is neither smi nor symbol,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
__ IncrementCounter(&Counters::keyed_call_generic_slow, 1, r0, r3);
GenerateMiss(masm, argc);
__ bind(&index_string);
GenerateIndexFromHash(masm, r2, r3);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
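The generic stub above is, in effect, a four-way dispatch on the key: smi keys take the fast-elements path (falling back to the number dictionary and then the runtime), numeric strings recover their cached index and retry as smis, and symbols probe the property dictionary or the monomorphic cache. A minimal sketch of that decision tree, with illustrative names rather than V8's:

// Sketch only: KeyKind and the strings are stand-ins for the label
// structure of KeyedCallIC::GenerateMegamorphic above.
enum class KeyKind { kSmi, kIndexString, kSymbol, kOther };

const char* DispatchKeyedCall(KeyKind key) {
  switch (key) {
    case KeyKind::kSmi:           // fast array load, then number dictionary,
      return "element load + call";    // then Runtime::kKeyedGetProperty
    case KeyKind::kIndexString:   // numeric string: index from hash field,
      return "retry as smi";           // jump back to the smi path
    case KeyKind::kSymbol:        // dictionary probe or monomorphic cache
      return "property load + call";
    default:                      // slow_call: let the runtime miss handler
      return "miss";                   // build a monomorphic stub next time
  }
}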
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
UNREACHABLE();
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
@ -759,49 +1045,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Register key = r0;
Register receiver = r1;
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(kSlowCaseBitFieldMask));
__ b(ne, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_OBJECT_TYPE));
__ b(lt, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r3, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ cmp(key, Operand(r3));
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ mov(r0, r2);
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
@ -831,7 +1084,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@ -840,24 +1093,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
// r0: key
// r1: receiver
__ CompareObjectType(r0, r2, r3, FIRST_NONSTRING_TYPE);
__ b(ge, &slow);
// Is the string an array index, with cached numeric value?
__ ldr(r3, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r3, Operand(String::kContainsCachedArrayIndexMask));
__ b(eq, &index_string);
// Is the string a symbol?
// r2: key map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
ASSERT(kSymbolTag != 0);
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, &slow);
GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@ -873,7 +1109,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
__ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
__ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
@ -918,25 +1154,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
__ Ret();
__ b(&slow);
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// r0: key (string)
// r1: receiver
// r3: hash field
// We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(r3, r3, Operand(String::kArrayIndexValueMask));
// Here we actually clobber the key (r0) which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
__ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
GenerateIndexFromHash(masm, key, r3);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@ -1665,32 +1884,29 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (array_type == kExternalFloatArray) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
__ vcvt_f32_f64(s0, d0);
__ vmov(r5, s0);
__ str(r5, MemOperand(r3, r4, LSL, 2));
} else {
Label done;
// Need to perform float-to-int conversion.
// Test for NaN.
__ vcmp(d0, d0);
// Move vector status bits to normal status bits.
__ vmrs(v8::internal::pc);
__ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
__ b(vs, &done);
// Test whether exponent equal to 0x7FF (infinity or NaN).
__ vmov(r6, r7, d0);
__ mov(r5, Operand(0x7FF00000));
__ and_(r6, r6, Operand(r5));
__ teq(r6, Operand(r5));
__ mov(r6, Operand(0), LeaveCC, eq);
// Test for NaN or infinity (both give zero).
__ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
// Hoisted load. vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs and Infinities have all-one exponents so they sign extend to -1.
__ cmp(r6, Operand(-1));
__ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
@ -1698,10 +1914,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
__ vmov(r5, s0, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:

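The conversion above leans on an IEEE-754 fact: NaNs and infinities are exactly the doubles whose 11 exponent bits are all ones, so extracting the exponent with a signed bitfield extract and comparing against -1 covers both at once. A portable sketch of the same test (assumes the standard double layout and arithmetic right shift of signed values, the same caveat utils.h notes for '>>'):

#include <cstdint>
#include <cstring>

// All-ones exponent <=> NaN or Infinity; mirrors the Sbfx + cmp #-1 above.
static bool IsNaNOrInfinity(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  // Extract bits [20, 30] (kExponentShift = 20, kExponentBits = 11),
  // sign-extending the field exactly as sbfx does.
  int32_t exponent = static_cast<int32_t>(high << 1) >> 21;
  return exponent == -1;
}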
54
deps/v8/src/arm/macro-assembler-arm.cc

@ -216,6 +216,60 @@ void MacroAssembler::Move(Register dst, Register src) {
}
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!CpuFeatures::IsSupported(ARMv7) || src2.is_single_instruction()) {
and_(dst, src1, src2, LeaveCC, cond);
return;
}
int32_t immediate = src2.immediate();
if (immediate == 0) {
mov(dst, Operand(0), LeaveCC, cond);
return;
}
if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
ubfx(dst, src1, 0, WhichPowerOf2(immediate + 1), cond);
return;
}
and_(dst, src1, src2, LeaveCC, cond);
}
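The rewrite only fires for immediates of the form 2^n - 1, i.e. masks of the low n bits, which is what the IsPowerOf2(immediate + 1) test combined with a set bit 0 recognizes; for those, a single ubfx of width n replaces a load-constant-plus-and sequence. The predicate in isolation:

#include <cstdint>

// True for 2^n - 1 masks (0x1, 0x3, ..., 0xFF, ...), the immediates that
// And() above turns into ubfx dst, src1, #0, #n.
static bool IsLowBitMask(int32_t immediate) {
  uint32_t u = static_cast<uint32_t>(immediate);
  return (u & 1) != 0 && ((u + 1) & u) == 0;
}
// IsLowBitMask(0xFF) -> true:  And() emits ubfx dst, src1, #0, #8.
// IsLowBitMask(0xF0) -> false: And() falls back to plain and_.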
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
}
} else {
ubfx(dst, src1, lsb, width, cond);
}
}
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
int shift_down = lsb + shift_up;
if (shift_up != 0) {
mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
}
if (shift_down != 0) {
mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
}
} else {
sbfx(dst, src1, lsb, width, cond);
}
}
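On pre-ARMv7 cores the signed extract is emulated with a mask and two shifts: clear everything outside the field, shift left until the field's top bit sits in the sign position, then arithmetic-shift back down. A portable model of the same arithmetic (assumes lsb + width < 32, as the fallback's mask computation already does, and arithmetic '>>' on signed values):

#include <cstdint>

static int32_t SbfxEmulated(int32_t src, int lsb, int width) {
  uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  uint32_t field = static_cast<uint32_t>(src) & mask;
  int shift_up = 32 - lsb - width;   // move top of field into the sign bit
  return static_cast<int32_t>(field << shift_up) >> (shift_up + lsb);
}
// SbfxEmulated(0x7FF00000, 20, 11) == -1: an all-ones exponent field
// sign-extends to -1, the property the external-array store code tests.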
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);

8
deps/v8/src/arm/macro-assembler-arm.h

@ -93,6 +93,14 @@ class MacroAssembler: public Assembler {
Register scratch = no_reg,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void Ubfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// May do nothing if the registers are identical.

20
deps/v8/src/arm/stub-cache-arm.cc

@ -1019,6 +1019,14 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, miss);
}
}
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ Jump(ic, RelocInfo::CODE_TARGET);
@ -1035,6 +1043,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
const int argc = arguments().immediate();
// Get the receiver of the function from the stack into r0.
@ -1078,6 +1088,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@ -1127,6 +1139,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@ -1198,6 +1212,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
GenerateNameCheck(name, &miss_in_smi_check);
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@ -1337,6 +1353,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
Label miss;
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
@ -1384,6 +1402,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();

1
deps/v8/src/arm/virtual-frame-arm.cc

@ -367,6 +367,7 @@ void VirtualFrame::CallCodeObject(Handle<Code> code,
int dropped_args) {
switch (code->kind()) {
case Code::CALL_IC:
case Code::KEYED_CALL_IC:
case Code::FUNCTION:
break;
case Code::KEYED_LOAD_IC:

5
deps/v8/src/arm/virtual-frame-arm.h

@ -212,10 +212,9 @@ class VirtualFrame : public ZoneObject {
void Enter();
void Exit();
// Prepare for returning from the frame by spilling locals and
// dropping all non-local elements in the virtual frame. This
// Prepare for returning from the frame by dropping the elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
// shared return site. Emits code for spills.
// shared return site. No spill code emitted. Value to return should be in r0.
inline void PrepareForReturn();
// Number of local variables above which we use a loop for allocating.

71
deps/v8/src/heap-profiler.cc

@ -30,8 +30,8 @@
#include "heap-profiler.h"
#include "frames-inl.h"
#include "global-handles.h"
#include "profile-generator.h"
#include "string-stream.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
@ -314,6 +314,75 @@ void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
} // namespace
HeapProfiler* HeapProfiler::singleton_ = NULL;
HeapProfiler::HeapProfiler()
: snapshots_(new HeapSnapshotsCollection()),
next_snapshot_uid_(1) {
}
HeapProfiler::~HeapProfiler() {
delete snapshots_;
}
void HeapProfiler::Setup() {
if (singleton_ == NULL) {
singleton_ = new HeapProfiler();
}
}
void HeapProfiler::TearDown() {
delete singleton_;
singleton_ = NULL;
}
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) {
ASSERT(singleton_ != NULL);
return singleton_->TakeSnapshotImpl(name);
}
HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
ASSERT(singleton_ != NULL);
return singleton_->TakeSnapshotImpl(name);
}
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
return result;
}
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name) {
return TakeSnapshotImpl(snapshots_->GetName(name));
}
int HeapProfiler::GetSnapshotsCount() {
ASSERT(singleton_ != NULL);
return singleton_->snapshots_->snapshots()->length();
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
ASSERT(singleton_ != NULL);
return singleton_->snapshots_->snapshots()->at(index);
}
HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
ASSERT(singleton_ != NULL);
return singleton_->snapshots_->GetSnapshot(uid);
}
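Taken together this gives a snapshot lifecycle keyed by uid. A usage sketch from inside v8::internal (a minimal sketch, assuming ENABLE_LOGGING_AND_PROFILING is defined; uids start at 1 per next_snapshot_uid_ above):

HeapProfiler::Setup();                               // create the singleton
HeapSnapshot* before = HeapProfiler::TakeSnapshot("before");
// ... run the workload under test ...
HeapSnapshot* after = HeapProfiler::TakeSnapshot("after");
ASSERT(HeapProfiler::GetSnapshotsCount() == 2);
ASSERT(HeapProfiler::GetSnapshot(0) == before);      // lookup by index
ASSERT(HeapProfiler::FindSnapshot(2) == after);      // lookup by uid
HeapProfiler::TearDown();                            // deletes all snapshots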
const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;

25
deps/v8/src/heap-profiler.h

@ -28,23 +28,46 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
#include "zone.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
class HeapSnapshot;
class HeapSnapshotsCollection;
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
class HeapProfiler {
public:
static void Setup();
static void TearDown();
static HeapSnapshot* TakeSnapshot(const char* name);
static HeapSnapshot* TakeSnapshot(String* name);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
// Obsolete interface.
// Write a single heap sample to the log file.
static void WriteSample();
private:
HeapProfiler();
~HeapProfiler();
HeapSnapshot* TakeSnapshotImpl(const char* name);
HeapSnapshot* TakeSnapshotImpl(String* name);
// Obsolete interface.
// Update the array info with stats from obj.
static void CollectStats(HeapObject* obj, HistogramInfo* info);
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
static HeapProfiler* singleton_;
};

4
deps/v8/src/heap.h

@ -1882,8 +1882,8 @@ class TranscendentalCache {
};
inline static int Hash(const Converter& c) {
uint32_t hash = (c.integers[0] ^ c.integers[1]);
hash ^= hash >> 16;
hash ^= hash >> 8;
hash ^= static_cast<int32_t>(hash) >> 16;
hash ^= static_cast<int32_t>(hash) >> 8;
return (hash & (kCacheSize - 1));
}
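hash is a uint32_t, so without the casts both shifts are logical; the casts make them arithmetic, which keeps this C++ version bit-for-bit in agreement with the sar-based hash in the generated ia32 and x64 stubs further down. As a standalone function (kCacheSize value assumed):

#include <cstdint>

static int TranscendentalHash(uint32_t lo, uint32_t hi) {
  const int kCacheSize = 512;  // assumed; only needs to be a power of two
  uint32_t hash = lo ^ hi;
  hash ^= static_cast<int32_t>(hash) >> 16;  // arithmetic shift, like sar
  hash ^= static_cast<int32_t>(hash) >> 8;
  return hash & (kCacheSize - 1);
}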

74
deps/v8/src/ia32/codegen-ia32.cc

@ -3278,6 +3278,9 @@ void CodeGenerator::VisitAndSpill(Statement* statement) {
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
ASSERT(in_spilled_code());
set_in_spilled_code(false);
VisitStatements(statements);
@ -3285,14 +3288,20 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
frame_->SpillAll();
}
set_in_spilled_code(true);
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
Visit(statements->at(i));
}
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@ -6909,8 +6918,7 @@ void DeferredSearchCache::Generate() {
__ bind(&cache_miss);
__ push(cache_); // store a reference to cache
__ push(key_); // store a key
Handle<Object> receiver(Top::global_context()->global());
__ push(Immediate(receiver));
__ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ push(key_);
// On ia32 function must be in edi.
__ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
@ -10285,15 +10293,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// ST[0] == double value
// ebx = low 32 bits of double value
// edx = high 32 bits of double value
// Compute hash:
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ mov(ecx, ebx);
__ xor_(ecx, Operand(edx));
__ mov(eax, ecx);
__ shr(eax, 16);
__ sar(eax, 16);
__ xor_(ecx, Operand(eax));
__ mov(eax, ecx);
__ shr(eax, 8);
__ sar(eax, 8);
__ xor_(ecx, Operand(eax));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
__ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
@ -11305,58 +11313,58 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
Label seq_string, seq_two_byte_string, check_code;
const int kStringRepresentationEncodingMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
Label seq_ascii_string, seq_two_byte_string, check_code;
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ and_(ebx, kStringRepresentationEncodingMask);
// First check for sequential string.
ASSERT_EQ(0, kStringTag);
ASSERT_EQ(0, kSeqStringTag);
// First check for flat two byte string.
__ and_(ebx,
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be a flat ascii string.
__ test(Operand(ebx),
Immediate(kIsNotStringMask | kStringRepresentationMask));
__ j(zero, &seq_string);
__ j(zero, &seq_ascii_string);
// Check for flat cons string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
__ and_(ebx, kStringRepresentationMask);
__ cmp(ebx, kConsStringTag);
__ j(not_equal, &runtime);
ASSERT(kExternalStringTag != 0);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
__ test(Operand(ebx),
Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
// String is a cons string.
__ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
__ cmp(Operand(edx), Factory::empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
ASSERT_EQ(0, kSeqStringTag);
__ test(ebx, Immediate(kStringRepresentationMask));
// String is a cons string with empty second part.
// eax: first part of cons string.
// ebx: map of first part of cons string.
// Is first part a flat two byte string?
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
kStringRepresentationMask | kStringEncodingMask);
ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be ascii.
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
kStringRepresentationMask);
__ j(not_zero, &runtime);
__ and_(ebx, kStringRepresentationEncodingMask);
__ bind(&seq_string);
// eax: subject string (sequential, either ascii or two byte)
// ebx: subject string type & kStringRepresentationEncodingMask
__ bind(&seq_ascii_string);
// eax: subject string (flat ascii)
// ecx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
// it has, the field contains a code object; otherwise it contains the hole.
const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
__ cmp(ebx, kSeqTwoByteString);
__ j(equal, &seq_two_byte_string);
if (FLAG_debug_code) {
__ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
__ Check(equal, "Expected sequential ascii string");
}
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
__ Set(edi, Immediate(1)); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
// eax: subject string
// eax: subject string (flat two byte)
// ecx: RegExp data (FixedArray)
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
__ Set(edi, Immediate(0)); // Type is two byte.

134
deps/v8/src/ia32/ic-ia32.cc

@ -306,22 +306,22 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register r0,
Register map,
Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
// r0 - used to hold the map of the receiver.
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, slow, not_taken);
// Get the map of the receiver.
__ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ test_b(FieldOperand(r0, Map::kBitFieldOffset),
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
KeyedLoadIC::kSlowCaseBitFieldMask);
__ j(not_zero, slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
@ -330,7 +330,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// into string objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpInstanceType(r0, JS_OBJECT_TYPE);
__ CmpInstanceType(map, JS_OBJECT_TYPE);
__ j(below, slow, not_taken);
}
@ -371,7 +371,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a symbol string.
// Falls through if a key is a symbol.
// Falls through if the key is a symbol.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
@ -399,11 +399,9 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
// Picks out an array index from the hash field.
// The generated code never falls through.
static void GenerateIndexFromHash(MacroAssembler* masm,
Register key,
Register hash,
Label* index_smi) {
Register hash) {
// Register use:
// key - holds the overwritten key on exit.
// hash - holds the key's hash. Clobbered.
@ -415,8 +413,6 @@ static void GenerateIndexFromHash(MacroAssembler* masm,
(1 << String::kArrayIndexValueBits));
// We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
// key: string key
// ebx: hash field.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(hash, String::kArrayIndexValueMask);
__ shr(hash, String::kHashShift - kSmiTagSize);
@ -424,8 +420,6 @@ static void GenerateIndexFromHash(MacroAssembler* masm,
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
__ mov(key, hash);
// Now jump to the place where smi keys are handled.
__ jmp(index_smi);
}
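In other words, the helper turns a string's cached array index straight into a smi: mask off the flag bits, then shift down by one less than the field offset so the result lands pre-multiplied by two. A sketch with assumed constants (the real widths live on String; only the shape of the arithmetic is the point):

#include <cstdint>

static uint32_t SmiTaggedIndexFromHash(uint32_t hash_field) {
  const int kHashShift = 2;                  // assumed flag bits below the index
  const int kSmiTagSize = 1;                 // 32-bit smis are value << 1, tag 0
  const uint32_t kArrayIndexValueMask = 0x3FFFFFFCu;  // assumed field position
  // Shifting by kHashShift - kSmiTagSize leaves the index multiplied by 2,
  // i.e. already smi-tagged; no separate tagging step is needed.
  return (hash_field & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize);
}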
@ -574,7 +568,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&index_string);
GenerateIndexFromHash(masm, eax, ebx, &index_smi);
GenerateIndexFromHash(masm, eax, ebx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@ -1125,13 +1121,12 @@ Object* CallIC_Miss(Arguments args);
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Label* miss) {
Code::Kind kind) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe;
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
Code::Flags flags =
@ -1166,7 +1161,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
__ cmp(edx, Factory::true_value());
__ j(equal, &boolean, not_taken);
__ cmp(edx, Factory::false_value());
__ j(not_equal, miss, taken);
__ j(not_equal, &miss, taken);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@ -1174,6 +1169,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
__ bind(&miss);
}
@ -1214,8 +1210,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
}
// The generated code never falls through.
static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// The generated code falls through if the call should be handled by runtime.
static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@ -1223,20 +1219,20 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label global_object, non_global_object;
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
__ j(zero, &miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_JS_OBJECT_TYPE);
__ j(below, miss, not_taken);
__ j(below, &miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@ -1252,8 +1248,8 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// Check that the global object does not require access checks.
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_equal, miss, not_taken);
GenerateNormalHelper(masm, argc, true, miss);
__ j(not_equal, &miss, not_taken);
GenerateNormalHelper(masm, argc, true, &miss);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
@ -1264,14 +1260,16 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// require access checks.
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_equal, miss, not_taken);
__ j(not_equal, &miss, not_taken);
__ bind(&invoke);
GenerateNormalHelper(masm, argc, false, miss);
GenerateNormalHelper(masm, argc, false, &miss);
// Global object proxy access: Check access rights.
__ bind(&global_proxy);
__ CheckAccessGlobalProxy(edx, eax, miss);
__ CheckAccessGlobalProxy(edx, eax, &miss);
__ jmp(&invoke);
__ bind(&miss);
}
@ -1337,24 +1335,36 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, &miss);
__ bind(&miss);
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
GenerateMiss(masm, argc);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
Label miss;
GenerateCallNormal(masm, argc, &miss);
__ bind(&miss);
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
}
@ -1385,13 +1395,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateKeyedLoadReceiverCheck(masm, edx, eax, &slow_call);
GenerateFastArrayLoad(masm,
edx,
ecx,
eax,
edi,
&check_number_dictionary,
&slow_load);
GenerateFastArrayLoad(
masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
__ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
__ bind(&do_call);
@ -1417,14 +1422,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
GenerateNumberDictionaryLoad(masm,
&slow_reload_receiver,
eax,
ecx,
ebx,
edx,
edi,
edi);
GenerateNumberDictionaryLoad(
masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
__ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
__ jmp(&do_call);
@ -1459,21 +1458,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
Immediate(Factory::hash_table_map()));
__ j(not_equal, &lookup_monomorphic_cache, not_taken);
GenerateDictionaryLoad(masm,
&slow_load,
edx,
ecx,
ebx,
eax,
edi,
edi,
DICTIONARY_CHECK_DONE);
GenerateDictionaryLoad(
masm, &slow_load, edx, ecx, ebx, eax, edi, edi, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC, &slow_call);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
// Fall through on miss.
__ bind(&slow_call);
@ -1487,19 +1479,35 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
GenerateIndexFromHash(masm, ecx, ebx, &index_smi);
GenerateIndexFromHash(masm, ecx, ebx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
Label miss;
GenerateCallNormal(masm, argc, &miss);
__ bind(&miss);
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}

15
deps/v8/src/ic.cc

@ -387,6 +387,7 @@ Object* CallICBase::TryCallAsFunction(Object* object) {
return *delegate;
}
void CallICBase::ReceiverToObject(Handle<Object> object) {
HandleScope scope;
Handle<Object> receiver(object);
@ -588,6 +589,9 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
state == MONOMORPHIC ||
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(Code::cast(code));
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
}
#ifdef DEBUG
@ -664,7 +668,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
Code* target = NULL;
target = Builtins::builtin(Builtins::LoadIC_StringLength);
set_target(target);
StubCache::Set(*name, map, target);
return Smi::FromInt(String::cast(*object)->length());
}
@ -679,7 +682,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
set_target(target);
StubCache::Set(*name, map, target);
return JSArray::cast(*object)->length();
}
@ -691,7 +693,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
#endif
Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
set_target(target);
StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
return Accessors::FunctionGetPrototype(*object, 0);
}
}
@ -847,6 +848,9 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
set_target(Code::cast(code));
} else if (state == MONOMORPHIC) {
set_target(megamorphic_stub());
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
}
#ifdef DEBUG
@ -1110,7 +1114,6 @@ Object* StoreIC::Store(State state,
return *value;
}
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
&& name->Equals(Heap::length_symbol())
@ -1120,7 +1123,6 @@ Object* StoreIC::Store(State state,
#endif
Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
set_target(target);
StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
return receiver->SetProperty(*name, *value, NONE);
}
@ -1210,6 +1212,9 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
} else if (state == MONOMORPHIC) {
// Only move to megamorphic if the target changes.
if (target() != Code::cast(code)) set_target(megamorphic_stub());
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
StubCache::Set(*name, receiver->map(), Code::cast(code));
}
#ifdef DEBUG

13
deps/v8/src/objects.cc

@ -2013,26 +2013,19 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
CustomArguments args(interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQueryImpl query =
v8::ToCData<v8::NamedPropertyQueryImpl>(interceptor->query());
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
v8::Handle<v8::Value> result;
v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
result = query(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) {
// Temporary complicated logic; to be removed soon.
if (result->IsBoolean()) {
// Convert the boolean result to a property attribute
// specification.
return result->IsTrue() ? NONE : ABSENT;
} else {
ASSERT(result->IsInt32());
return static_cast<PropertyAttributes>(result->Int32Value());
}
}
} else if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());

15
deps/v8/src/objects.h

@ -1123,7 +1123,7 @@ class HeapNumber: public HeapObject {
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
static const int kMantissaBits = 52;
static const int KExponentBits = 11;
static const int kExponentBits = 11;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
static const int kMantissaBitsInTopWord = 20;
@ -2151,6 +2151,11 @@ class Dictionary: public HashTable<Shape, Key> {
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
// Check that this value can actually be written.
PropertyDetails details = DetailsAt(entry);
// If the value has not been initialized we allow writing to it even if
// it is read only (a declared const that has not been initialized).
if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) return;
this->set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
}
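The guard encodes const semantics: a read-only slot may be written exactly once, while it still holds the hole left behind by the declaration; later writes are silently dropped rather than raising. A toy model (the types are stand-ins, not V8's):

struct Slot {
  bool read_only;
  bool holds_hole;   // declared but not yet initialized
  int value;
};

static void ValueAtPut(Slot* slot, int value) {
  // The single permitted write to a read-only slot is the const
  // initializer replacing the hole; anything after that is ignored.
  if (slot->read_only && !slot->holds_hole) return;
  slot->holds_hole = false;
  slot->value = value;
}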
@ -2832,14 +2837,14 @@ class Code: public HeapObject {
// Flags layout.
static const int kFlagsICStateShift = 0;
static const int kFlagsICInLoopShift = 3;
static const int kFlagsKindShift = 4;
static const int kFlagsTypeShift = 8;
static const int kFlagsTypeShift = 4;
static const int kFlagsKindShift = 7;
static const int kFlagsArgumentsCountShift = 11;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
static const int kFlagsKindMask = 0x000000F0; // 00011110000
static const int kFlagsTypeMask = 0x00000700; // 11100000000
static const int kFlagsTypeMask = 0x00000070; // 00001110000
static const int kFlagsKindMask = 0x00000780; // 11110000000
static const int kFlagsArgumentsCountMask = 0xFFFFF800;
static const int kFlagsNotUsedInLookup =

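The reshuffle keeps the bitfields contiguous: 3 IC-state bits, 1 in-loop bit, then the 3 type bits at shift 4 and the 4 kind bits at shift 7, with the argument count occupying everything above. Each mask can be rederived from its width and shift, so the constants cross-check mechanically (written with C++11 static_assert for brevity):

#include <cstdint>

constexpr uint32_t Mask(int width, int shift) {
  return ((1u << width) - 1) << shift;
}
static_assert(Mask(3, 0)   == 0x00000007, "IC state");
static_assert(Mask(1, 3)   == 0x00000008, "in loop");
static_assert(Mask(3, 4)   == 0x00000070, "type");
static_assert(Mask(4, 7)   == 0x00000780, "kind");
static_assert(Mask(21, 11) == 0xFFFFF800, "arguments count");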
19
deps/v8/src/profile-generator.cc

@ -1114,7 +1114,7 @@ void HeapEntry::Print(int max_depth, int indent) {
const char* HeapEntry::TypeAsString() {
switch (type_) {
case INTERNAL: return "/internal/";
case JS_OBJECT: return "/object/";
case OBJECT: return "/object/";
case CLOSURE: return "/closure/";
case STRING: return "/string/";
case CODE: return "/code/";
@ -1262,7 +1262,7 @@ HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
return AddEntry(object, HeapEntry::CLOSURE, collection_->GetName(name));
} else if (object->IsJSObject()) {
return AddEntry(object,
HeapEntry::JS_OBJECT,
HeapEntry::OBJECT,
collection_->GetName(
JSObject::cast(object)->constructor_name()));
} else if (object->IsJSGlobalPropertyCell()) {
@ -1276,10 +1276,19 @@ HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
return AddEntry(object,
HeapEntry::STRING,
collection_->GetName(String::cast(object)));
} else if (object->IsCode()
|| object->IsSharedFunctionInfo()
|| object->IsScript()) {
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::CODE);
} else if (object->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
String* name = String::cast(shared->name())->length() > 0 ?
String::cast(shared->name()) : shared->inferred_name();
return AddEntry(object, HeapEntry::CODE, collection_->GetName(name));
} else if (object->IsScript()) {
Script* script = Script::cast(object);
return AddEntry(object,
HeapEntry::CODE,
script->name()->IsString() ?
collection_->GetName(String::cast(script->name())) : "");
} else if (object->IsFixedArray()) {
return AddEntry(object, HeapEntry::ARRAY);
}

18
deps/v8/src/profile-generator.h

@ -429,9 +429,9 @@ class HeapEntry;
class HeapGraphEdge {
public:
enum Type {
CONTEXT_VARIABLE,
ELEMENT,
PROPERTY
CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE,
ELEMENT = v8::HeapGraphEdge::ELEMENT,
PROPERTY = v8::HeapGraphEdge::PROPERTY
};
HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
@ -468,12 +468,12 @@ class CachedHeapGraphPath;
class HeapEntry {
public:
enum Type {
INTERNAL,
ARRAY,
STRING,
JS_OBJECT,
CODE,
CLOSURE
INTERNAL = v8::HeapGraphNode::INTERNAL,
ARRAY = v8::HeapGraphNode::ARRAY,
STRING = v8::HeapGraphNode::STRING,
OBJECT = v8::HeapGraphNode::OBJECT,
CODE = v8::HeapGraphNode::CODE,
CLOSURE = v8::HeapGraphNode::CLOSURE
};
explicit HeapEntry(HeapSnapshot* snapshot)

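Pinning the internal enumerators to the public v8::HeapGraphNode and v8::HeapGraphEdge values is what lets the API layer cast between the two representations without a translation table. Were the invariant made explicit, a compile-time check could look like this (illustrative C++11 syntax, assuming v8-profiler.h and the internal header are both visible):

static_assert(static_cast<int>(HeapEntry::OBJECT) ==
                  static_cast<int>(v8::HeapGraphNode::OBJECT),
              "internal and public node types must stay in sync");
static_assert(static_cast<int>(HeapGraphEdge::ELEMENT) ==
                  static_cast<int>(v8::HeapGraphEdge::ELEMENT),
              "internal and public edge types must stay in sync");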
27
deps/v8/src/runtime.cc

@ -626,9 +626,9 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
PropertyDetails details = dictionary->DetailsAt(entry);
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
return *desc;
} else {
// Elements that are stored as array elements always have:
@ -3849,11 +3849,29 @@ static Object* Runtime_DefineOrRedefineDataProperty(Arguments args) {
int unchecked = flag->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
// Check if this is an element.
uint32_t index;
bool is_element = name->AsArrayIndex(&index);
// Special case for elements if any of the flags are true.
// If elements are in fast case we always implicitly assume that:
// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
is_element) {
// Normalize the elements to enable attributes on the property.
js_object->NormalizeElements();
NumberDictionary* dictionary = js_object->element_dictionary();
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
dictionary->Set(index, *obj_value, details);
}
LookupResult result;
js_object->LocalLookupRealNamedProperty(*name, &result);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
// Take special care when attributes are different and there is already
// a property. For simplicity we normalize the property which enables us
// to not worry about changing the instance_descriptor and creating a new
@ -3869,6 +3887,7 @@ static Object* Runtime_DefineOrRedefineDataProperty(Arguments args) {
*obj_value,
attr);
}
return Runtime::SetObjectProperty(js_object, name, obj_value, attr);
}
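Fast elements cannot carry per-element attributes, so defining an element with any of READ_ONLY, DONT_ENUM, or DONT_DELETE normalizes the backing store into a number dictionary and pins it there. A toy model of that policy (containers and names are stand-ins, not V8 types):

#include <cstdint>
#include <map>

struct ElementDesc { int value; int attributes; };  // attribute bit set

struct Elements {
  bool fast = true;                       // fast case: attributes all default
  std::map<uint32_t, ElementDesc> dict;   // stands in for NumberDictionary

  void Define(uint32_t index, int value, int attributes) {
    if (attributes != 0 && fast) {
      // NormalizeElements + set_requires_slow_elements: once any element
      // carries attributes, never return to the fast representation.
      fast = false;
    }
    dict[index] = ElementDesc{value, attributes};
  }
};

In JS terms, this is what lets Object.defineProperty(a, '1', {value: 42, writable: false}) hold on an array element.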

52
deps/v8/src/serialize.cc

@ -364,90 +364,102 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
6,
"RegExpStack::limit_address()");
Add(ExternalReference::new_space_start().address(),
Add(ExternalReference::address_of_regexp_stack_memory_address().address(),
UNCLASSIFIED,
7,
"RegExpStack::memory_address()");
Add(ExternalReference::address_of_regexp_stack_memory_size().address(),
UNCLASSIFIED,
8,
"RegExpStack::memory_size()");
Add(ExternalReference::address_of_static_offsets_vector().address(),
UNCLASSIFIED,
9,
"OffsetsVector::static_offsets_vector");
Add(ExternalReference::new_space_start().address(),
UNCLASSIFIED,
10,
"Heap::NewSpaceStart()");
Add(ExternalReference::new_space_mask().address(),
UNCLASSIFIED,
8,
11,
"Heap::NewSpaceMask()");
Add(ExternalReference::heap_always_allocate_scope_depth().address(),
UNCLASSIFIED,
9,
12,
"Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address().address(),
UNCLASSIFIED,
10,
13,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address().address(),
UNCLASSIFIED,
11,
14,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break().address(),
UNCLASSIFIED,
12,
15,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address().address(),
UNCLASSIFIED,
13,
16,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
14,
17,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB).address(),
UNCLASSIFIED,
15,
18,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL).address(),
UNCLASSIFIED,
16,
19,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
17,
20,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD).address(),
UNCLASSIFIED,
18,
21,
"mod_two_doubles");
Add(ExternalReference::compare_doubles().address(),
UNCLASSIFIED,
19,
22,
"compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
UNCLASSIFIED,
20,
23,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state().address(),
UNCLASSIFIED,
21,
24,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack().address(),
UNCLASSIFIED,
22,
25,
"NativeRegExpMacroAssembler::GrowStack()");
Add(ExternalReference::re_word_character_map().address(),
UNCLASSIFIED,
23,
26,
"NativeRegExpMacroAssembler::word_character_map");
#endif // V8_INTERPRETED_REGEXP
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(),
UNCLASSIFIED,
24,
27,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
UNCLASSIFIED,
25,
28,
"KeyedLookupCache::field_offsets()");
Add(ExternalReference::transcendental_cache_array_address().address(),
UNCLASSIFIED,
26,
29,
"TranscendentalCache::caches()");
}

33
deps/v8/src/stub-cache.cc

@ -121,7 +121,7 @@ Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
receiver->map()->UpdateCodeCache(cache_name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -139,7 +139,7 @@ Object* StubCache::ComputeLoadField(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -158,7 +158,7 @@ Object* StubCache::ComputeLoadCallback(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -177,7 +177,7 @@ Object* StubCache::ComputeLoadConstant(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -194,13 +194,12 @@ Object* StubCache::ComputeLoadInterceptor(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
Code* code = Builtins::builtin(Builtins::LoadIC_Normal);
return Set(name, receiver->map(), code);
return Builtins::builtin(Builtins::LoadIC_Normal);
}
@ -223,7 +222,7 @@ Object* StubCache::ComputeLoadGlobal(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -368,7 +367,7 @@ Object* StubCache::ComputeStoreField(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -385,7 +384,7 @@ Object* StubCache::ComputeStoreGlobal(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -403,7 +402,7 @@ Object* StubCache::ComputeStoreCallback(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -420,7 +419,7 @@ Object* StubCache::ComputeStoreInterceptor(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -486,7 +485,7 @@ Object* StubCache::ComputeCallConstant(int argc,
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, map, Code::cast(code));
return code;
}
@ -525,7 +524,7 @@ Object* StubCache::ComputeCallField(int argc,
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, map, Code::cast(code));
return code;
}
@ -563,7 +562,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, map, Code::cast(code));
return code;
}
@ -574,7 +573,7 @@ Object* StubCache::ComputeCallNormal(int argc,
JSObject* receiver) {
Object* code = ComputeCallNormal(argc, in_loop, kind);
if (code->IsFailure()) return code;
return Set(name, receiver->map(), Code::cast(code));
return code;
}
@ -607,7 +606,7 @@ Object* StubCache::ComputeCallGlobal(int argc,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
return code;
}

35
deps/v8/src/utils.h

@ -47,6 +47,41 @@ static inline bool IsPowerOf2(T x) {
}
// X must be a power of 2. Returns the number of trailing zeros.
template <typename T>
static inline int WhichPowerOf2(T x) {
ASSERT(IsPowerOf2(x));
ASSERT(x != 0);
if (x < 0) return 31;
int bits = 0;
#ifdef DEBUG
int original_x = x;
#endif
if (x >= 0x10000) {
bits += 16;
x >>= 16;
}
if (x >= 0x100) {
bits += 8;
x >>= 8;
}
if (x >= 0x10) {
bits += 4;
x >>= 4;
}
switch (x) {
default: UNREACHABLE();
case 8: bits++; // Fall through.
case 4: bits++; // Fall through.
case 2: bits++; // Fall through.
case 1: break;
}
ASSERT_EQ(1 << bits, original_x);
return bits;
}
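A quick walk for x = 0x10: the 16- and 8-bit steps are skipped, the 4-bit step fires (bits = 4, x = 1), and the switch contributes nothing, so the function returns 4 and the ASSERT confirms 1 << 4 == original_x. As spot checks (assuming utils.h is included):

#include <cassert>

int main() {
  assert(WhichPowerOf2(1) == 0);
  assert(WhichPowerOf2(0x10) == 4);        // 4-bit step only
  assert(WhichPowerOf2(0x10000) == 16);    // 16-bit step only
  assert(WhichPowerOf2(0x20000000) == 29); // 16 + 8 + 4 + switch case 2
  return 0;
}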
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.

4
deps/v8/src/v8.cc

@ -32,6 +32,7 @@
#include "serialize.h"
#include "simulator.h"
#include "stub-cache.h"
#include "heap-profiler.h"
#include "oprofile-agent.h"
#include "log.h"
@ -61,6 +62,7 @@ bool V8::Initialize(Deserializer* des) {
Logger::Setup();
CpuProfiler::Setup();
HeapProfiler::Setup();
// Setup the platform OS support.
OS::Setup();
@ -149,6 +151,8 @@ void V8::TearDown() {
Top::TearDown();
HeapProfiler::TearDown();
CpuProfiler::TearDown();
Heap::TearDown();

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 17
#define BUILD_NUMBER 18
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

4
deps/v8/src/virtual-frame-light-inl.h

@ -74,7 +74,9 @@ bool VirtualFrame::Equals(const VirtualFrame* other) {
void VirtualFrame::PrepareForReturn() {
SpillAll();
// Don't bother flushing tos registers as returning does not require more
// access to the expression stack.
top_of_stack_state_ = NO_TOS_REGISTERS;
}

108
deps/v8/src/x64/codegen-x64.cc

@ -3108,25 +3108,31 @@ void CodeGenerator::VisitCall(Call* node) {
ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
Reference ref(this, property, false);
ASSERT(ref.size() == 2);
Result key = frame_->Pop();
frame_->Dup(); // Duplicate the receiver.
frame_->Push(&key);
ref.GetValue();
// Top of frame contains function to call, with duplicate copy of
// receiver below it. Swap them.
Result function = frame_->Pop();
Result receiver = frame_->Pop();
frame_->Push(&function);
frame_->Push(&receiver);
}
// Call the function.
CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
} else {
// Push the receiver onto the frame.
Load(property->obj());
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Load the name of the function.
Load(property->key());
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting());
frame_->RestoreContextRegister();
frame_->Push(&result);
}
}
} else {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is not global
@ -4793,8 +4799,7 @@ void DeferredSearchCache::Generate() {
__ bind(&cache_miss);
__ push(cache_); // store a reference to cache
__ push(key_); // store a key
Handle<Object> receiver(Top::global_context()->global());
__ Push(receiver);
__ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ push(key_);
// On x64 function must be in rdi.
__ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
@ -8178,7 +8183,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// ST[0] == double value
// rbx = bits of double value.
// rdx = also bits of double value.
// Compute hash (h is 32 bits, bits are 64):
// Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
// h = h0 = bits ^ (bits >> 32);
// h ^= h >> 16;
// h ^= h >> 8;
@ -8189,9 +8194,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ movl(rcx, rdx);
__ movl(rax, rdx);
__ movl(rdi, rdx);
__ shrl(rdx, Immediate(8));
__ shrl(rcx, Immediate(16));
__ shrl(rax, Immediate(24));
__ sarl(rdx, Immediate(8));
__ sarl(rcx, Immediate(16));
__ sarl(rax, Immediate(24));
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
@ -8293,7 +8298,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
// Move exponent and sign bits to low bits.
__ shr(rdi, Immediate(HeapNumber::kMantissaBits));
// Remove sign bit.
__ andl(rdi, Immediate((1 << HeapNumber::KExponentBits) - 1));
__ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
__ cmpl(rdi, Immediate(supported_exponent_limit));
__ j(below, &in_range);
@ -8370,7 +8375,7 @@ void IntegerConvert(MacroAssembler* masm,
// Double to remove sign bit, shift exponent down to least significant bits.
// and subtract bias to get the unshifted, unbiased exponent.
__ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
__ shr(double_exponent, Immediate(64 - HeapNumber::KExponentBits));
__ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
__ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
// Check whether the exponent is too big for a 63 bit unsigned integer.
__ cmpl(double_exponent, Immediate(63));
@ -8601,59 +8606,58 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmpl(rdx, rax);
__ j(greater, &runtime);
// ecx: RegExp data (FixedArray)
// rcx: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
Label seq_string, seq_two_byte_string, check_code;
const int kStringRepresentationEncodingMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
Label seq_ascii_string, seq_two_byte_string, check_code;
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ andb(rbx, Immediate(kStringRepresentationEncodingMask));
// First check for sequential string.
ASSERT_EQ(0, kStringTag);
ASSERT_EQ(0, kSeqStringTag);
// First check for flat two byte string.
__ andb(rbx, Immediate(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be a flat ascii string.
__ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
__ j(zero, &seq_string);
__ j(zero, &seq_ascii_string);
// Check for flat cons string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
__ andb(rbx, Immediate(kStringRepresentationMask));
__ cmpb(rbx, Immediate(kConsStringTag));
__ j(not_equal, &runtime);
ASSERT(kExternalStringTag != 0);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
__ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
// String is a cons string.
__ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
__ Cmp(rdx, Factory::empty_string());
__ j(not_equal, &runtime);
__ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
ASSERT_EQ(0, kSeqStringTag);
__ testb(rbx, Immediate(kStringRepresentationMask));
// String is a cons string with empty second part.
// rax: first part of cons string.
// rbx: map of first part of cons string.
// Is first part a flat two byte string?
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask | kStringEncodingMask));
ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be ascii.
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask));
__ j(not_zero, &runtime);
__ andb(rbx, Immediate(kStringRepresentationEncodingMask));
__ bind(&seq_string);
// rax: subject string (sequential, either ascii or two byte)
// rbx: subject string type & kStringRepresentationEncodingMask
__ bind(&seq_ascii_string);
// rax: subject string (sequential ascii)
// rcx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
// it has, the field contains a code object; otherwise it contains the hole.
const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
__ cmpb(rbx, Immediate(kSeqTwoByteString));
__ j(equal, &seq_two_byte_string);
if (FLAG_debug_code) {
__ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
__ Check(equal, "Expected sequential ascii string");
}
__ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
__ Set(rdi, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
// rax: subject string
// rax: subject string (flat two-byte)
// rcx: RegExp data (FixedArray)
__ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
__ Set(rdi, 0); // Type is two byte.
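For context on the cons-string branch above: concatenation may produce a cons (rope) string, while the irregexp code requires a flat sequential subject, so the stub unwraps a cons whose second part is the empty string and otherwise bails out to the runtime. A JS-level sketch of the user-visible behavior (whether a cons string actually appears is an engine-internal detail):

var prefix = new Array(100).join('ab');  // a longish string
var subject = prefix + 'needle';         // may be represented internally as a cons string
/needle$/.test(subject);                 // true either way: the stub flattens or defers to the runtime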

38
deps/v8/src/x64/full-codegen-x64.cc

@ -1729,6 +1729,30 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForValue(args->at(i), kStack);
}
VisitForValue(key, kAccumulator);
__ movq(rcx, rax);
// Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
in_loop);
__ Call(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
Apply(context_, rax);
}
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@ -1820,9 +1844,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use KeyedCallIC.
VisitForValue(prop->obj(), kStack);
if (prop->is_synthetic()) {
VisitForValue(prop->key(), kAccumulator);
__ movq(rdx, Operand(rsp, 0));
// Record source code position for IC call.
@ -1830,20 +1856,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// By emitting a nop we make sure that we do not have a "test rax,..."
// instruction after the call it is treated specially by the LoadIC code.
// instruction after the call as it is treated specially
// by the LoadIC code.
__ nop();
// Pop receiver.
__ pop(rbx);
// Push result (function).
__ push(rax);
// Push receiver object on stack.
if (prop->is_synthetic()) {
__ movq(rcx, CodeGenerator::GlobalObject());
__ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
EmitCallWithStub(expr);
} else {
__ push(rbx);
EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous

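A small sketch of the distinction the full-codegen change above draws: for a regular keyed property call o[k](), `this` inside the callee must be o, which the KeyedCallIC path preserves, while the synthetic-property path loads the function with the keyed load IC and pushes the global receiver explicitly. Illustrative JS only:

var o = {
  name: 'o',
  f: function() { return this.name; }
};
var k = 'f';
o[k]();  // 'o' -- the receiver stays on the stack and is passed to f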
470
deps/v8/src/x64/ic-x64.cc

@ -57,19 +57,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register r2,
Register name,
Register r4,
Register result,
DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
// r0 - used to hold the property dictionary and is unchanged.
//
// r1 - initially the receiver.
// - unchanged on any jump to miss_label.
// - holds the result on exit.
// r1 - used to hold the receiver and is unchanged.
//
// r2 - used to hold the capacity of the property dictionary.
//
// name - holds the name of the property and is unchanged.
//
// r4 - used to hold the index into the property dictionary.
//
// result - holds the result on exit if the load succeeded.
Label done;
@ -148,7 +150,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ movq(r1,
__ movq(result,
Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
}
@ -159,14 +161,15 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Register key,
Register r0,
Register r1,
Register r2) {
Register r2,
Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// Scratch registers:
//
@ -175,6 +178,12 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'elements' or 'key'.
// Unchanged on bailout so 'elements' or 'key' can be used
// in further computation.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
@ -246,7 +255,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
__ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@ -346,55 +355,167 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary;
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register map,
Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
__ JumpIfSmi(receiver, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
// into string objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(below, &slow);
__ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
__ j(below, slow);
// Check bit field.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow);
__ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
__ j(not_zero, slow);
}
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register elements,
Register scratch,
Register result,
Label* not_fast_array,
Label* out_of_range) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// elements - holds the elements of the receiver on exit.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used in further computation.
//
// Scratch registers:
//
// scratch - used to hold elements of the receiver and the loaded value.
__ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
__ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
__ j(not_equal, not_fast_array);
// Check that the key (index) is within bounds.
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
__ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
// Unsigned comparison rejects negative indices.
__ j(above_equal, out_of_range);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
__ movq(rbx, FieldOperand(rcx,
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
__ movq(scratch, FieldOperand(elements,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow);
__ movq(rax, rbx);
__ j(equal, out_of_range);
if (!result.is(scratch)) {
__ movq(result, scratch);
}
}
// Checks whether a key is an array index string or a symbol string.
// Falls through if the key is a symbol.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
Label* not_symbol) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
__ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
__ j(above_equal, not_symbol);
// Is the string an array index, with cached numeric value?
__ movl(hash, FieldOperand(key, String::kHashFieldOffset));
__ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
__ j(zero, index_string); // The value in hash is used at jump target.
// Is the string a symbol?
ASSERT(kSymbolTag != 0);
__ testb(FieldOperand(map, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, not_symbol);
}
// Picks out an array index from the hash field.
static void GenerateIndexFromHash(MacroAssembler* masm,
Register key,
Register hash) {
// Register use:
// key - holds the string key on entry; overwritten with the smi-tagged
// index on exit.
// hash - holds the key's hash field. Clobbered.
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
// reserved for it do not conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
// We want the smi-tagged index in key. Even if we subsequently go to
// the slow case, converting the key to a smi is always valid.
// key: string key
// hash: key's hash field, including its array index value.
__ and_(hash, Immediate(String::kArrayIndexValueMask));
__ shr(hash, Immediate(String::kHashShift));
// Here we actually clobber the key, which will be used if we call into the
// runtime later. However, as the new key is the numeric value of the string
// key, there is no difference in using either key.
__ Integer32ToSmi(key, hash);
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary;
GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateFastArrayLoad(masm,
rdx,
rax,
rcx,
rbx,
rax,
&check_pixel_array,
&slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
@ -423,7 +544,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax);
__ ret(0);
__ bind(&slow);
@ -434,22 +555,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
// rdx: receiver
// rax: key
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kContainsCachedArrayIndexMask));
__ j(zero, &index_string); // The value in rbx is used at jump target.
// Is the string a symbol?
ASSERT(kSymbolTag != 0);
__ testb(FieldOperand(rcx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
@ -509,29 +615,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
rcx,
rax,
rdi,
rax,
DICTIONARY_CHECK_DONE);
__ movq(rax, rdx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// We want the smi-tagged index in rax. Even if we subsequently go to
// the slow case, converting the key to a smi is always valid.
// rdx: receiver
// rax: key (a string)
// rbx: key's hash field, including its array index value.
__ and_(rbx, Immediate(String::kArrayIndexValueMask));
__ shr(rbx, Immediate(String::kHashShift));
// Here we actually clobber the key (rax) which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
__ Integer32ToSmi(rax, rbx);
// Now jump to the place where smi keys are handled.
GenerateIndexFromHash(masm, rax, rbx);
__ jmp(&index_smi);
}
@ -1109,7 +1199,11 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@ -1132,7 +1226,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
CEntryStub stub(1);
__ movq(rax, Immediate(2));
__ movq(rbx, ExternalReference(IC_Utility(kCallIC_Miss)));
__ movq(rbx, ExternalReference(IC_Utility(id)));
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@ -1160,27 +1254,20 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// rdx : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.
@ -1219,9 +1306,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
}
@ -1240,19 +1325,16 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Search dictionary - put result in register rdi.
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, rdi, CHECK_DICTIONARY);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
__ JumpIfSmi(rdx, miss);
GenerateDictionaryLoad(
masm, miss, rax, rdx, rbx, rcx, rdi, rdi, CHECK_DICTIONARY);
__ JumpIfSmi(rdi, miss);
// Check that the value is a JavaScript function.
__ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
__ j(not_equal, miss);
// Patch the receiver with the global proxy if necessary.
if (is_global_object) {
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
@ -1263,7 +1345,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// The generated code falls through if the call should be handled by runtime.
static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@ -1324,24 +1407,197 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ CheckAccessGlobalProxy(rdx, rax, &miss);
__ jmp(&invoke);
// Cache miss: Jump to runtime.
__ bind(&miss);
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
}
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
GenerateMiss(masm, argc);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNREACHABLE();
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNREACHABLE();
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
Label check_number_dictionary, check_string, lookup_monomorphic_cache;
Label index_smi, index_string;
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &slow_call);
GenerateFastArrayLoad(
masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
__ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
__ bind(&do_call);
// receiver in rdx is not used after this point.
// rcx: key
// rdi: function
// Check that the value in rdi is a JavaScript function.
__ JumpIfSmi(rdi, &slow_call);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
__ j(not_equal, &slow_call);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(rdi, actual, JUMP_FUNCTION);
__ bind(&check_number_dictionary);
// rax: elements
// rcx: smi key
// Check whether the elements object is a number dictionary.
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow_call);
__ SmiToInteger32(rbx, rcx);
// rbx: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
__ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
__ EnterInternalFrame();
__ push(rcx); // save the key
__ push(rdx); // pass the receiver
__ push(rcx); // pass the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(rcx); // restore the key
__ LeaveInternalFrame();
__ movq(rdi, rax);
__ jmp(&do_call);
__ bind(&check_string);
GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
// The key is known to be a symbol.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &lookup_monomorphic_cache);
__ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &lookup_monomorphic_cache);
GenerateDictionaryLoad(
masm, &slow_load, rbx, rdx, rax, rcx, rdi, rdi, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
// Fall through on miss.
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
// - the key is neither smi nor symbol,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
__ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
GenerateMiss(masm, argc);
__ bind(&index_string);
GenerateIndexFromHash(masm, rcx, rbx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
UNREACHABLE();
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
@ -1452,7 +1708,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in rax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
rcx, rdi, CHECK_DICTIONARY);
rcx, rdi, rax, CHECK_DICTIONARY);
__ ret(0);
// Global object access: Check access rights.
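The generic keyed stubs above dispatch on the class of the key: smi keys take the fast array path, numeric-string keys reuse the array index cached in the string's hash field and re-enter the smi path via GenerateIndexFromHash, and symbol (interned string) keys probe the dictionary or stub cache. A JS-level sketch of the key classes involved (which internal path each one takes is an engine detail):

var a = ['x', 'y', 'z'];
a[1];      // smi key: fast array load
a['2'];    // array-index string: converted to a smi key from the cached hash
var o = { foo: 42 };
o['foo'];  // symbol key: dictionary or stub-cache probe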

21
deps/v8/src/x64/stub-cache-x64.cc

@ -706,6 +706,15 @@ static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
#define __ ACCESS_MASM((masm()))
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ Cmp(rcx, Handle<String>(name));
__ j(not_equal, miss);
}
}
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ Jump(ic, RelocInfo::CODE_TARGET);
@ -740,6 +749,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
GenerateNameCheck(name, &miss_in_smi_check);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@ -881,6 +892,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@ -938,6 +951,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@ -1092,6 +1107,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss, return_undefined, call_builtin;
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@ -1190,6 +1207,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
@ -1254,6 +1273,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
Label miss;
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();

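Why the GenerateNameCheck calls above are needed for KEYED_CALL_IC but not CALL_IC: a named call site always invokes the same property name, whereas a keyed call site can see a different name on each execution, so a stub compiled for one name must re-verify the name in rcx and miss otherwise. A sketch of such a call site:

var arr = [1, 2, 3];
var names = ['push', 'pop'];
for (var i = 0; i < names.length; i++) {
  arr[names[i]](4);  // one call site, different function names over time
}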
19
deps/v8/src/x64/virtual-frame-x64.cc

@ -1164,6 +1164,25 @@ Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
}
Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
int arg_count,
int loop_nesting) {
// Function name, arguments, and receiver are found on top of the frame
// and dropped by the call. The IC expects the name in rcx and the rest
// on the stack, and drops them all.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
cgen()->ComputeKeyedCallInitialize(arg_count, in_loop);
Result name = Pop();
// Spill args, receiver, and function. The call will drop args and
// receiver.
PrepareForCall(arg_count + 1, arg_count + 1);
name.ToRegister(rcx);
name.Unuse();
return RawCallCodeObject(ic, mode);
}
Result VirtualFrame::CallConstructor(int arg_count) {
// Arguments, receiver, and function are on top of the frame. The
// IC expects arg count in rax, function in rdi, and the arguments

2
deps/v8/src/x64/virtual-frame-x64.h

@ -369,6 +369,8 @@ class VirtualFrame : public ZoneObject {
// The argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments,
// receiver (global object), and function are found on top of the
// frame. Function is not dropped. The argument count does not

72
deps/v8/test/cctest/test-api.cc

@ -27,8 +27,6 @@
#include <limits.h>
#define USE_NEW_QUERY_CALLBACKS
#include "v8.h"
#include "api.h"
@ -9637,32 +9635,53 @@ THREADED_TEST(PixelArray) {
}
template <class ExternalArrayClass, class ElementType>
static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
int64_t low,
int64_t high) {
THREADED_TEST(PixelArrayInfo) {
v8::HandleScope scope;
LocalContext context;
const int kElementCount = 40;
int element_size = 0;
for (int size = 0; size < 100; size += 10) {
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(size));
v8::Handle<v8::Object> obj = v8::Object::New();
obj->SetIndexedPropertiesToPixelData(pixel_data, size);
CHECK(obj->HasIndexedPropertiesInPixelData());
CHECK_EQ(pixel_data, obj->GetIndexedPropertiesPixelData());
CHECK_EQ(size, obj->GetIndexedPropertiesPixelDataLength());
free(pixel_data);
}
}
static int ExternalArrayElementSize(v8::ExternalArrayType array_type) {
switch (array_type) {
case v8::kExternalByteArray:
case v8::kExternalUnsignedByteArray:
element_size = 1;
return 1;
break;
case v8::kExternalShortArray:
case v8::kExternalUnsignedShortArray:
element_size = 2;
return 2;
break;
case v8::kExternalIntArray:
case v8::kExternalUnsignedIntArray:
case v8::kExternalFloatArray:
element_size = 4;
return 4;
break;
default:
UNREACHABLE();
break;
return -1;
}
UNREACHABLE();
return -1;
}
template <class ExternalArrayClass, class ElementType>
static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
int64_t low,
int64_t high) {
v8::HandleScope scope;
LocalContext context;
const int kElementCount = 40;
int element_size = ExternalArrayElementSize(array_type);
ElementType* array_data =
static_cast<ElementType*>(malloc(kElementCount * element_size));
i::Handle<ExternalArrayClass> array =
@ -10043,6 +10062,35 @@ THREADED_TEST(ExternalArrays) {
}
void ExternalArrayInfoTestHelper(v8::ExternalArrayType array_type) {
v8::HandleScope scope;
LocalContext context;
for (int size = 0; size < 100; size += 10) {
int element_size = ExternalArrayElementSize(array_type);
void* external_data = malloc(size * element_size);
v8::Handle<v8::Object> obj = v8::Object::New();
obj->SetIndexedPropertiesToExternalArrayData(
external_data, array_type, size);
CHECK(obj->HasIndexedPropertiesInExternalArrayData());
CHECK_EQ(external_data, obj->GetIndexedPropertiesExternalArrayData());
CHECK_EQ(array_type, obj->GetIndexedPropertiesExternalArrayDataType());
CHECK_EQ(size, obj->GetIndexedPropertiesExternalArrayDataLength());
free(external_data);
}
}
THREADED_TEST(ExternalArrayInfo) {
ExternalArrayInfoTestHelper(v8::kExternalByteArray);
ExternalArrayInfoTestHelper(v8::kExternalUnsignedByteArray);
ExternalArrayInfoTestHelper(v8::kExternalShortArray);
ExternalArrayInfoTestHelper(v8::kExternalUnsignedShortArray);
ExternalArrayInfoTestHelper(v8::kExternalIntArray);
ExternalArrayInfoTestHelper(v8::kExternalUnsignedIntArray);
ExternalArrayInfoTestHelper(v8::kExternalFloatArray);
}
THREADED_TEST(ScriptContextDependence) {
v8::HandleScope scope;
LocalContext c1;

2
deps/v8/test/cctest/test-debug.cc

@ -27,8 +27,6 @@
#include <stdlib.h>
#define USE_NEW_QUERY_CALLBACKS
#include "v8.h"
#include "api.h"

2
deps/v8/test/cctest/test-decls.cc

@ -27,8 +27,6 @@
#include <stdlib.h>
#define USE_NEW_QUERY_CALLBACKS
#include "v8.h"
#include "heap.h"

39
deps/v8/test/cctest/test-disasm-arm.cc

@ -248,6 +248,45 @@ TEST(Type0) {
COMPARE(mvn(r5, Operand(r4), SetCC, cc),
"31f05004 mvnccs r5, r4");
// Instructions autotransformed by the assembler.
// mov -> mvn.
COMPARE(mov(r3, Operand(-1), LeaveCC, al),
"e3e03000 mvn r3, #0");
COMPARE(mov(r4, Operand(-2), SetCC, al),
"e3f04001 mvns r4, #1");
COMPARE(mov(r5, Operand(0x0ffffff0), SetCC, ne),
"13f052ff mvnnes r5, #-268435441");
COMPARE(mov(r6, Operand(-1), LeaveCC, ne),
"13e06000 mvnne r6, #0");
// mvn -> mov.
COMPARE(mvn(r3, Operand(-1), LeaveCC, al),
"e3a03000 mov r3, #0");
COMPARE(mvn(r4, Operand(-2), SetCC, al),
"e3b04001 movs r4, #1");
COMPARE(mvn(r5, Operand(0x0ffffff0), SetCC, ne),
"13b052ff movnes r5, #-268435441");
COMPARE(mvn(r6, Operand(-1), LeaveCC, ne),
"13a06000 movne r6, #0");
// and <-> bic.
COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
"e3c537ff bic r3, r5, #66846720");
COMPARE(bic(r3, r5, Operand(0xfc03ffff)),
"e20537ff and r3, r5, #66846720");
// sub <-> add.
COMPARE(add(r3, r5, Operand(-1024)),
"e2453b01 sub r3, r5, #1024");
COMPARE(sub(r3, r5, Operand(-1024)),
"e2853b01 add r3, r5, #1024");
// cmp <-> cmn.
COMPARE(cmp(r3, Operand(-1024)),
"e3730b01 cmn r3, #1024");
COMPARE(cmn(r3, Operand(-1024)),
"e3530b01 cmp r3, #1024");
// Miscellaneous instructions encoded as type 0.
COMPARE(blx(ip),
"e12fff3c blx ip");

175
deps/v8/test/cctest/test-heap-profiler.cc

@ -6,9 +6,11 @@
#include "v8.h"
#include "heap-profiler.h"
#include "snapshot.h"
#include "string-stream.h"
#include "cctest.h"
#include "zone-inl.h"
#include "../include/v8-profiler.h"
namespace i = v8::internal;
using i::ClustersCoarser;
@ -390,4 +392,177 @@ TEST(RetainerProfile) {
CHECK_EQ("(global property);1", printer.GetRetainers("C"));
}
namespace {
class NamedEntriesDetector {
public:
NamedEntriesDetector()
: has_A1(false), has_B1(false), has_C1(false),
has_A2(false), has_B2(false), has_C2(false) {
}
void Apply(i::HeapEntry* entry) {
const char* node_name = entry->name();
if (strcmp("A1", node_name) == 0
&& entry->GetRetainingPaths()->length() > 0) has_A1 = true;
if (strcmp("B1", node_name) == 0
&& entry->GetRetainingPaths()->length() > 0) has_B1 = true;
if (strcmp("C1", node_name) == 0
&& entry->GetRetainingPaths()->length() > 0) has_C1 = true;
if (strcmp("A2", node_name) == 0
&& entry->GetRetainingPaths()->length() > 0) has_A2 = true;
if (strcmp("B2", node_name) == 0
&& entry->GetRetainingPaths()->length() > 0) has_B2 = true;
if (strcmp("C2", node_name) == 0
&& entry->GetRetainingPaths()->length() > 0) has_C2 = true;
}
bool has_A1;
bool has_B1;
bool has_C1;
bool has_A2;
bool has_B2;
bool has_C2;
};
} // namespace
TEST(HeapSnapshot) {
v8::HandleScope scope;
v8::Handle<v8::String> token1 = v8::String::New("token1");
v8::Handle<v8::Context> env1 = v8::Context::New();
env1->SetSecurityToken(token1);
env1->Enter();
CompileAndRunScript(
"function A1() {}\n"
"function B1(x) { this.x = x; }\n"
"function C1(x) { this.x1 = x; this.x2 = x; }\n"
"var a1 = new A1();\n"
"var b1_1 = new B1(a1), b1_2 = new B1(a1);\n"
"var c1 = new C1(a1);");
v8::Handle<v8::String> token2 = v8::String::New("token2");
v8::Handle<v8::Context> env2 = v8::Context::New();
env2->SetSecurityToken(token2);
env2->Enter();
CompileAndRunScript(
"function A2() {}\n"
"function B2(x) { return function() { return typeof x; }; }\n"
"function C2(x) { this.x1 = x; this.x2 = x; this[1] = x; }\n"
"var a2 = new A2();\n"
"var b2_1 = new B2(a2), b2_2 = new B2(a2);\n"
"var c2 = new C2(a2);");
const v8::HeapSnapshot* snapshot_env2 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
const v8::HeapGraphNode* global_env2;
if (i::Snapshot::IsEnabled()) {
// If snapshots are enabled, there will be a vanilla deserialized
// global object, without the properties added by the test code.
CHECK_EQ(2, snapshot_env2->GetHead()->GetChildrenCount());
// Choose the global object of a bigger size.
const v8::HeapGraphNode* node0 =
snapshot_env2->GetHead()->GetChild(0)->GetToNode();
const v8::HeapGraphNode* node1 =
snapshot_env2->GetHead()->GetChild(1)->GetToNode();
global_env2 = node0->GetTotalSize() > node1->GetTotalSize() ?
node0 : node1;
} else {
CHECK_EQ(1, snapshot_env2->GetHead()->GetChildrenCount());
global_env2 = snapshot_env2->GetHead()->GetChild(0)->GetToNode();
}
// Verify that the JS global object of env2 doesn't have '..1'
// properties, but has '..2' properties.
bool has_a1 = false, has_b1_1 = false, has_b1_2 = false, has_c1 = false;
bool has_a2 = false, has_b2_1 = false, has_b2_2 = false, has_c2 = false;
// This will be needed further below.
const v8::HeapGraphNode* a2_node = NULL;
for (int i = 0, count = global_env2->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = global_env2->GetChild(i);
v8::String::AsciiValue prop_name(prop->GetName());
if (strcmp("a1", *prop_name) == 0) has_a1 = true;
if (strcmp("b1_1", *prop_name) == 0) has_b1_1 = true;
if (strcmp("b1_2", *prop_name) == 0) has_b1_2 = true;
if (strcmp("c1", *prop_name) == 0) has_c1 = true;
if (strcmp("a2", *prop_name) == 0) {
has_a2 = true;
a2_node = prop->GetToNode();
}
if (strcmp("b2_1", *prop_name) == 0) has_b2_1 = true;
if (strcmp("b2_2", *prop_name) == 0) has_b2_2 = true;
if (strcmp("c2", *prop_name) == 0) has_c2 = true;
}
CHECK(!has_a1);
CHECK(!has_b1_1);
CHECK(!has_b1_2);
CHECK(!has_c1);
CHECK(has_a2);
CHECK(has_b2_1);
CHECK(has_b2_2);
CHECK(has_c2);
// Verify that anything related to '[ABC]1' is not reachable.
NamedEntriesDetector det;
i::HeapSnapshot* i_snapshot_env2 =
const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
i_snapshot_env2->IterateEntries(&det);
CHECK(!det.has_A1);
CHECK(!det.has_B1);
CHECK(!det.has_C1);
CHECK(det.has_A2);
CHECK(det.has_B2);
CHECK(det.has_C2);
// Verify 'a2' object retainers. They are:
// - (global object).a2
// - c2.x1, c2.x2, c2[1]
// - b2_1 and b2_2 closures: via 'x' variable
CHECK_EQ(6, a2_node->GetRetainingPathsCount());
bool has_global_obj_a2_ref = false;
bool has_c2_x1_ref = false, has_c2_x2_ref = false, has_c2_1_ref = false;
bool has_b2_1_x_ref = false, has_b2_2_x_ref = false;
for (int i = 0; i < a2_node->GetRetainingPathsCount(); ++i) {
const v8::HeapGraphPath* path = a2_node->GetRetainingPath(i);
const int edges_count = path->GetEdgesCount();
CHECK_GT(edges_count, 0);
const v8::HeapGraphEdge* last_edge = path->GetEdge(edges_count - 1);
v8::String::AsciiValue last_edge_name(last_edge->GetName());
if (strcmp("a2", *last_edge_name) == 0
&& last_edge->GetType() == v8::HeapGraphEdge::PROPERTY) {
has_global_obj_a2_ref = true;
continue;
}
CHECK_GT(edges_count, 1);
const v8::HeapGraphEdge* prev_edge = path->GetEdge(edges_count - 2);
v8::String::AsciiValue prev_edge_name(prev_edge->GetName());
if (strcmp("x1", *last_edge_name) == 0
&& last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
&& strcmp("c2", *prev_edge_name) == 0) has_c2_x1_ref = true;
if (strcmp("x2", *last_edge_name) == 0
&& last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
&& strcmp("c2", *prev_edge_name) == 0) has_c2_x2_ref = true;
if (strcmp("1", *last_edge_name) == 0
&& last_edge->GetType() == v8::HeapGraphEdge::ELEMENT
&& strcmp("c2", *prev_edge_name) == 0) has_c2_1_ref = true;
if (strcmp("x", *last_edge_name) == 0
&& last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
&& strcmp("b2_1", *prev_edge_name) == 0) has_b2_1_x_ref = true;
if (strcmp("x", *last_edge_name) == 0
&& last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
&& strcmp("b2_2", *prev_edge_name) == 0) has_b2_2_x_ref = true;
}
CHECK(has_global_obj_a2_ref);
CHECK(has_c2_x1_ref);
CHECK(has_c2_x2_ref);
CHECK(has_c2_1_ref);
CHECK(has_b2_1_x_ref);
CHECK(has_b2_2_x_ref);
}
#endif // ENABLE_LOGGING_AND_PROFILING

8
deps/v8/test/cctest/test-serialize.cc

@ -131,9 +131,9 @@ TEST(ExternalReferenceEncoder) {
ExternalReference::address_of_real_stack_limit();
CHECK_EQ(make_code(UNCLASSIFIED, 5),
encoder.Encode(real_stack_limit_address.address()));
CHECK_EQ(make_code(UNCLASSIFIED, 12),
CHECK_EQ(make_code(UNCLASSIFIED, 15),
encoder.Encode(ExternalReference::debug_break().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 7),
CHECK_EQ(make_code(UNCLASSIFIED, 10),
encoder.Encode(ExternalReference::new_space_start().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 3),
encoder.Encode(ExternalReference::roots_address().address()));
@ -165,9 +165,9 @@ TEST(ExternalReferenceDecoder) {
CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(),
decoder.Decode(make_code(UNCLASSIFIED, 5)));
CHECK_EQ(ExternalReference::debug_break().address(),
decoder.Decode(make_code(UNCLASSIFIED, 12)));
decoder.Decode(make_code(UNCLASSIFIED, 15)));
CHECK_EQ(ExternalReference::new_space_start().address(),
decoder.Decode(make_code(UNCLASSIFIED, 7)));
decoder.Decode(make_code(UNCLASSIFIED, 10)));
}

17
deps/v8/test/mjsunit/keyed-call-generic.js

@ -94,3 +94,20 @@ testMany(fixed_array, first3num, first3num);
testMany(dict_array, first3num, first3num);
testMany(fast_prop, first3str, first3num);
testMany(normal_prop, first3str, first3num);
function testException(receiver, keys, exceptions) {
for (var i = 0; i != 10; i++) {
for (var k = 0; k != keys.length; k++) {
var thrown = false;
try {
var result = receiver[keys[k]]();
} catch (e) {
thrown = true;
}
assertEquals(exceptions[k], thrown);
}
}
}
testException([zero, one, /* hole */ ], [0, 1, 2], [false, false, true]);

153
deps/v8/test/mjsunit/object-define-property.js

@ -714,3 +714,156 @@ try {
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
var obj6 = {};
obj6[1] = 'foo';
obj6[2] = 'bar';
obj6[3] = '42';
obj6[4] = '43';
obj6[5] = '44';
var descElement = { value: 'foobar' };
var descElementNonConfigurable = { value: 'barfoo', configurable: false };
var descElementNonWritable = { value: 'foofoo', writable: false };
var descElementNonEnumerable = { value: 'barbar', enumerable: false };
var descElementAllFalse = { value: 'foofalse',
configurable: false,
writable: false,
enumerable: false };
// Redefine existing property.
Object.defineProperty(obj6, '1', descElement);
desc = Object.getOwnPropertyDescriptor(obj6, '1');
assertEquals(desc.value, 'foobar');
assertTrue(desc.writable);
assertTrue(desc.enumerable);
assertTrue(desc.configurable);
// Redefine existing property with configurable: false.
Object.defineProperty(obj6, '2', descElementNonConfigurable);
desc = Object.getOwnPropertyDescriptor(obj6, '2');
assertEquals(desc.value, 'barfoo');
assertTrue(desc.writable);
assertTrue(desc.enumerable);
assertFalse(desc.configurable);
// Ensure that we can't overwrite the non configurable element.
try {
Object.defineProperty(obj6, '2', descElement);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
Object.defineProperty(obj6, '3', descElementNonWritable);
desc = Object.getOwnPropertyDescriptor(obj6, '3');
assertEquals(desc.value, 'foofoo');
assertFalse(desc.writable);
assertTrue(desc.enumerable);
assertTrue(desc.configurable);
// Redefine existing property with enumerable: false.
Object.defineProperty(obj6, '4', descElementNonEnumerable);
desc = Object.getOwnPropertyDescriptor(obj6, '4');
assertEquals(desc.value, 'barbar');
assertTrue(desc.writable);
assertFalse(desc.enumerable);
assertTrue(desc.configurable);
// Redefine existing property with all attributes false.
Object.defineProperty(obj6, '5', descElementAllFalse);
desc = Object.getOwnPropertyDescriptor(obj6, '5');
assertEquals(desc.value, 'foofalse');
assertFalse(desc.writable);
assertFalse(desc.enumerable);
assertFalse(desc.configurable);
// Define a non-existing property - all attributes should default to false.
Object.defineProperty(obj6, '15', descElement);
desc = Object.getOwnPropertyDescriptor(obj6, '15');
assertEquals(desc.value, 'foobar');
assertFalse(desc.writable);
assertFalse(desc.enumerable);
assertFalse(desc.configurable);
// Make sure that we can't redefine using direct access.
obj6[15] = 'overwrite';
assertEquals(obj6[15], 'foobar');
// Repeat the above tests on an array.
var arr = new Array();
arr[1] = 'foo';
arr[2] = 'bar';
arr[3] = '42';
arr[4] = '43';
arr[5] = '44';
var descElement = { value: 'foobar' };
var descElementNonConfigurable = { value: 'barfoo', configurable: false };
var descElementNonWritable = { value: 'foofoo', writable: false };
var descElementNonEnumerable = { value: 'barbar', enumerable: false };
var descElementAllFalse = { value: 'foofalse',
configurable: false,
writable: false,
enumerable: false };
// Redefine existing property.
Object.defineProperty(arr, '1', descElement);
desc = Object.getOwnPropertyDescriptor(arr, '1');
assertEquals(desc.value, 'foobar');
assertTrue(desc.writable);
assertTrue(desc.enumerable);
assertTrue(desc.configurable);
// Redefine existing property with configurable: false.
Object.defineProperty(arr, '2', descElementNonConfigurable);
desc = Object.getOwnPropertyDescriptor(arr, '2');
assertEquals(desc.value, 'barfoo');
assertTrue(desc.writable);
assertTrue(desc.enumerable);
assertFalse(desc.configurable);
// Ensure that we can't overwrite the non configurable element.
try {
Object.defineProperty(arr, '2', descElement);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
Object.defineProperty(arr, '3', descElementNonWritable);
desc = Object.getOwnPropertyDescriptor(arr, '3');
assertEquals(desc.value, 'foofoo');
assertFalse(desc.writable);
assertTrue(desc.enumerable);
assertTrue(desc.configurable);
// Redefine existing property with enumerable: false.
Object.defineProperty(arr, '4', descElementNonEnumerable);
desc = Object.getOwnPropertyDescriptor(arr, '4');
assertEquals(desc.value, 'barbar');
assertTrue(desc.writable);
assertFalse(desc.enumerable);
assertTrue(desc.configurable);
// Redefine existing property with all attributes false.
Object.defineProperty(arr, '5', descElementAllFalse);
desc = Object.getOwnPropertyDescriptor(arr, '5');
assertEquals(desc.value, 'foofalse');
assertFalse(desc.writable);
assertFalse(desc.enumerable);
assertFalse(desc.configurable);
// Define a non-existing property - all attributes should default to false.
Object.defineProperty(arr, '15', descElement);
desc = Object.getOwnPropertyDescriptor(arr, '15');
assertEquals(desc.value, 'foobar');
assertFalse(desc.writable);
assertFalse(desc.enumerable);
assertFalse(desc.configurable);

5
deps/v8/test/mjsunit/bugs/bug-619.js → deps/v8/test/mjsunit/regress/regress-619.js

@ -25,9 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// When this bug is corrected move to object-define-property and add
// additional tests for configurable in the same manner as existing tests
// there.
// Tests that Object.defineProperty works correctly on array indices.
// Please see http://code.google.com/p/v8/issues/detail?id=619 for details.
var obj = {};
obj[1] = 42;