v8: upgrade to 3.17.16

v0.11.1-release
Ben Noordhuis, 12 years ago
Parent commit: 587e83c6d6
Changed files:

  1. deps/v8/.gitignore (5)
  2. deps/v8/ChangeLog (40)
  3. deps/v8/OWNERS (2)
  4. deps/v8/build/common.gypi (9)
  5. deps/v8/include/v8-profiler.h (119)
  6. deps/v8/include/v8.h (16)
  7. deps/v8/src/api.cc (181)
  8. deps/v8/src/api.h (7)
  9. deps/v8/src/apiutils.h (4)
  10. deps/v8/src/arm/builtins-arm.cc (21)
  11. deps/v8/src/arm/code-stubs-arm.cc (110)
  12. deps/v8/src/arm/codegen-arm.cc (6)
  13. deps/v8/src/arm/full-codegen-arm.cc (30)
  14. deps/v8/src/arm/lithium-arm.cc (43)
  15. deps/v8/src/arm/lithium-arm.h (16)
  16. deps/v8/src/arm/lithium-codegen-arm.cc (82)
  17. deps/v8/src/arm/lithium-codegen-arm.h (4)
  18. deps/v8/src/arm/macro-assembler-arm.cc (82)
  19. deps/v8/src/arm/macro-assembler-arm.h (16)
  20. deps/v8/src/arm/simulator-arm.cc (26)
  21. deps/v8/src/arm/simulator-arm.h (6)
  22. deps/v8/src/arm/stub-cache-arm.cc (45)
  23. deps/v8/src/array.js (210)
  24. deps/v8/src/assembler.cc (11)
  25. deps/v8/src/ast.cc (12)
  26. deps/v8/src/ast.h (83)
  27. deps/v8/src/bootstrapper.cc (36)
  28. deps/v8/src/code-stubs-hydrogen.cc (138)
  29. deps/v8/src/code-stubs.cc (11)
  30. deps/v8/src/code-stubs.h (69)
  31. deps/v8/src/compiler.cc (1)
  32. deps/v8/src/contexts.h (4)
  33. deps/v8/src/d8.cc (8)
  34. deps/v8/src/d8.h (5)
  35. deps/v8/src/d8.js (3)
  36. deps/v8/src/debug.cc (9)
  37. deps/v8/src/debug.h (1)
  38. deps/v8/src/deoptimizer.cc (106)
  39. deps/v8/src/deoptimizer.h (12)
  40. deps/v8/src/elements-kind.h (9)
  41. deps/v8/src/flag-definitions.h (9)
  42. deps/v8/src/frames.cc (23)
  43. deps/v8/src/full-codegen.cc (28)
  44. deps/v8/src/gdb-jit.cc (39)
  45. deps/v8/src/gdb-jit.h (4)
  46. deps/v8/src/global-handles.cc (7)
  47. deps/v8/src/handles-inl.h (2)
  48. deps/v8/src/handles.cc (29)
  49. deps/v8/src/handles.h (10)
  50. deps/v8/src/heap-inl.h (4)
  51. deps/v8/src/heap-profiler.cc (125)
  52. deps/v8/src/heap-profiler.h (46)
  53. deps/v8/src/heap-snapshot-generator-inl.h (1)
  54. deps/v8/src/heap-snapshot-generator.cc (12)
  55. deps/v8/src/heap-snapshot-generator.h (11)
  56. deps/v8/src/heap.cc (28)
  57. deps/v8/src/heap.h (9)
  58. deps/v8/src/hydrogen-instructions.cc (35)
  59. deps/v8/src/hydrogen-instructions.h (77)
  60. deps/v8/src/hydrogen.cc (601)
  61. deps/v8/src/hydrogen.h (80)
  62. deps/v8/src/ia32/builtins-ia32.cc (43)
  63. deps/v8/src/ia32/code-stubs-ia32.cc (108)
  64. deps/v8/src/ia32/codegen-ia32.cc (9)
  65. deps/v8/src/ia32/full-codegen-ia32.cc (31)
  66. deps/v8/src/ia32/lithium-codegen-ia32.cc (28)
  67. deps/v8/src/ia32/lithium-ia32.cc (41)
  68. deps/v8/src/ia32/lithium-ia32.h (14)
  69. deps/v8/src/ia32/macro-assembler-ia32.cc (88)
  70. deps/v8/src/ia32/macro-assembler-ia32.h (32)
  71. deps/v8/src/ia32/stub-cache-ia32.cc (44)
  72. deps/v8/src/ic-inl.h (7)
  73. deps/v8/src/ic.cc (271)
  74. deps/v8/src/ic.h (7)
  75. deps/v8/src/interpreter-irregexp.cc (10)
  76. deps/v8/src/isolate.cc (25)
  77. deps/v8/src/isolate.h (3)
  78. deps/v8/src/json-parser.h (55)
  79. deps/v8/src/json-stringifier.h (92)
  80. deps/v8/src/log.cc (91)
  81. deps/v8/src/log.h (1)
  82. deps/v8/src/macros.py (5)
  83. deps/v8/src/messages.js (104)
  84. deps/v8/src/mips/OWNERS (1)
  85. deps/v8/src/mips/assembler-mips.cc (10)
  86. deps/v8/src/mips/assembler-mips.h (1)
  87. deps/v8/src/mips/builtins-mips.cc (21)
  88. deps/v8/src/mips/code-stubs-mips.cc (141)
  89. deps/v8/src/mips/codegen-mips.cc (7)
  90. deps/v8/src/mips/full-codegen-mips.cc (30)
  91. deps/v8/src/mips/lithium-codegen-mips.cc (90)
  92. deps/v8/src/mips/lithium-codegen-mips.h (4)
  93. deps/v8/src/mips/lithium-mips.cc (43)
  94. deps/v8/src/mips/lithium-mips.h (17)
  95. deps/v8/src/mips/macro-assembler-mips.cc (322)
  96. deps/v8/src/mips/macro-assembler-mips.h (13)
  97. deps/v8/src/mips/regexp-macro-assembler-mips.cc (1)
  98. deps/v8/src/mips/simulator-mips.cc (28)
  99. deps/v8/src/mips/simulator-mips.h (6)
  100. deps/v8/src/mips/stub-cache-mips.cc (45)

deps/v8/.gitignore (5)

@@ -18,6 +18,7 @@
 #*#
 *~
 .cpplint-cache
+.d8_history
 d8
 d8_g
 shell
@@ -50,3 +51,7 @@ shell_g
 /xcodebuild
 TAGS
 *.Makefile
+GTAGS
+GRTAGS
+GSYMS
+GPATH

deps/v8/ChangeLog (40)

@@ -1,3 +1,43 @@
+2013-04-04: Version 3.17.16
+
+        Stack trace API: poison stack frames below the first strict mode frame.
+        (issue 2564)
+
+        Made Isolate::GetHeapStatistics robust against half-initialized
+        isolates (Chromium issue 2591).
+
+        Finished implementation of ES6 symbols aka. private names (issue 2158).
+
+        Performance and stability improvements on all platforms.
+
+
+2013-03-21: Version 3.17.15
+
+        Rolled back API changes to maintain compatibility with older
+        3.17.x versions of V8.
+
+        Disable zapping of global handles in release mode.
+
+        Always mark the entire valid prefix of the descriptor array.
+        (Chromium issue 196331)
+
+        Use internal memcpy for CopyWords and when copying code.
+        (Chromium issue 196330)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-03-20: Version 3.17.14
+
+        Use internal memcpy when initializing code objects.
+        (Chromium issue 196330)
+
+        Disabled weak embedded maps because of crashes.
+        (Chromium issues 172489, 217858)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-03-19: Version 3.17.13

         Turned Flags into a uint32_t typedef (Chromium issue 194749).

deps/v8/OWNERS (2)

@@ -1,4 +1,6 @@
 danno@chromium.org
+dslomov@chromium.org
+hpayer@chromium.org
 jkummerow@chromium.org
 mmassi@chromium.org
 mstarzinger@chromium.org

deps/v8/build/common.gypi (9)

@@ -409,6 +409,15 @@
         }],
         ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
            or OS=="android"', {
+          'cflags!': [
+            '-O2',
+            '-Os',
+          ],
+          'cflags': [
+            '-fdata-sections',
+            '-ffunction-sections',
+            '-O3',
+          ],
           'conditions': [
             [ 'gcc_version==44 and clang==0', {
               'cflags': [

deps/v8/include/v8-profiler.h (119)

@@ -135,7 +135,7 @@ class V8EXPORT CpuProfile {
   /**
    * Returns number of samples recorded. The samples are not recorded unless
-   * |record_samples| parameter of CpuProfiler::StartProfiling is true.
+   * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true.
    */
   int GetSamplesCount() const;

@@ -158,7 +158,8 @@ class V8EXPORT CpuProfile {
 /**
- * Interface for controlling CPU profiling.
+ * Interface for controlling CPU profiling. Instance of the
+ * profiler can be retrieved using v8::Isolate::GetCpuProfiler.
  */
 class V8EXPORT CpuProfiler {
  public:

@@ -171,22 +172,34 @@ class V8EXPORT CpuProfiler {
    * obtaining profiling results.
    */

+  /** Deprecated. Use GetProfileCount instead. */
+  static int GetProfilesCount();
   /**
    * Returns the number of profiles collected (doesn't include
    * profiles that are being collected at the moment of call.)
    */
-  static int GetProfilesCount();
+  int GetProfileCount();

-  /** Returns a profile by index. */
+  /** Deprecated. Use GetCpuProfile instead. */
   static const CpuProfile* GetProfile(
       int index,
       Handle<Value> security_token = Handle<Value>());
+  /** Returns a profile by index. */
+  const CpuProfile* GetCpuProfile(
+      int index,
+      Handle<Value> security_token = Handle<Value>());

-  /** Returns a profile by uid. */
+  /** Deprecated. Use FindProfile instead. */
   static const CpuProfile* FindProfile(
       unsigned uid,
       Handle<Value> security_token = Handle<Value>());
+  /** Returns a profile by uid. */
+  const CpuProfile* FindCpuProfile(
+      unsigned uid,
+      Handle<Value> security_token = Handle<Value>());

+  /** Deprecated. Use StartCpuProfiling instead. */
+  static void StartProfiling(Handle<String> title, bool record_samples = false);
   /**
    * Starts collecting CPU profile. Title may be an empty string. It
    * is allowed to have several profiles being collected at

@@ -198,22 +211,34 @@ class V8EXPORT CpuProfiler {
    * |record_samples| parameter controls whether individual samples should
    * be recorded in addition to the aggregated tree.
    */
-  static void StartProfiling(Handle<String> title, bool record_samples = false);
+  void StartCpuProfiling(Handle<String> title, bool record_samples = false);

+  /** Deprecated. Use StopCpuProfiling instead. */
+  static const CpuProfile* StopProfiling(
+      Handle<String> title,
+      Handle<Value> security_token = Handle<Value>());
   /**
    * Stops collecting CPU profile with a given title and returns it.
    * If the title given is empty, finishes the last profile started.
    */
-  static const CpuProfile* StopProfiling(
+  const CpuProfile* StopCpuProfiling(
       Handle<String> title,
       Handle<Value> security_token = Handle<Value>());

+  /** Deprecated. Use DeleteAllCpuProfiles instead. */
+  static void DeleteAllProfiles();
   /**
    * Deletes all existing profiles, also cancelling all profiling
    * activity. All previously returned pointers to profiles and their
    * contents become invalid after this call.
    */
-  static void DeleteAllProfiles();
+  void DeleteAllCpuProfiles();
+
+ private:
+  CpuProfiler();
+  ~CpuProfiler();
+  CpuProfiler(const CpuProfiler&);
+  CpuProfiler& operator=(const CpuProfiler&);
 };

@@ -321,8 +346,8 @@ class V8EXPORT HeapSnapshot {
     kJSON = 0  // See format description near 'Serialize' method.
   };

-  /** Returns heap snapshot type. */
-  Type GetType() const;
+  /** Deprecated. Returns kFull. */
+  V8_DEPRECATED(Type GetType() const);

   /** Returns heap snapshot UID (assigned by the profiler.) */
   unsigned GetUid() const;

@@ -385,7 +410,8 @@ class V8EXPORT HeapSnapshot {
 class RetainedObjectInfo;

 /**
- * Interface for controlling heap profiling.
+ * Interface for controlling heap profiling. Instance of the
+ * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
  */
 class V8EXPORT HeapProfiler {
  public:

@@ -398,20 +424,28 @@ class V8EXPORT HeapProfiler {
   typedef RetainedObjectInfo* (*WrapperInfoCallback)
       (uint16_t class_id, Handle<Value> wrapper);

-  /** Returns the number of snapshots taken. */
+  /** Deprecated. Use GetSnapshotCount instead. */
   static int GetSnapshotsCount();
+  /** Returns the number of snapshots taken. */
+  int GetSnapshotCount();

-  /** Returns a snapshot by index. */
+  /** Deprecated. Use GetHeapSnapshot instead. */
   static const HeapSnapshot* GetSnapshot(int index);
+  /** Returns a snapshot by index. */
+  const HeapSnapshot* GetHeapSnapshot(int index);

-  /** Returns a profile by uid. */
+  /** Deprecated. Use FindHeapSnapshot instead. */
   static const HeapSnapshot* FindSnapshot(unsigned uid);
+  /** Returns a profile by uid. */
+  const HeapSnapshot* FindHeapSnapshot(unsigned uid);

+  /** Deprecated. Use GetObjectId instead. */
+  static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
   /**
    * Returns SnapshotObjectId for a heap object referenced by |value| if
    * it has been seen by the heap profiler, kUnknownObjectId otherwise.
    */
-  static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
+  SnapshotObjectId GetObjectId(Handle<Value> value);

   /**
    * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return

@@ -424,33 +458,42 @@ class V8EXPORT HeapProfiler {
    * Callback interface for retrieving user friendly names of global objects.
    */
   class ObjectNameResolver {
    public:
     /**
      * Returns name to be used in the heap snapshot for given node. Returned
      * string must stay alive until snapshot collection is completed.
      */
     virtual const char* GetName(Handle<Object> object) = 0;
    protected:
     virtual ~ObjectNameResolver() {}
   };

+  /** Deprecated. Use TakeHeapSnapshot instead. */
+  static const HeapSnapshot* TakeSnapshot(
+      Handle<String> title,
+      HeapSnapshot::Type type = HeapSnapshot::kFull,
+      ActivityControl* control = NULL,
+      ObjectNameResolver* global_object_name_resolver = NULL);
   /**
    * Takes a heap snapshot and returns it. Title may be an empty string.
-   * See HeapSnapshot::Type for types description.
    */
-  static const HeapSnapshot* TakeSnapshot(
+  const HeapSnapshot* TakeHeapSnapshot(
       Handle<String> title,
-      HeapSnapshot::Type type = HeapSnapshot::kFull,
       ActivityControl* control = NULL,
       ObjectNameResolver* global_object_name_resolver = NULL);

+  /** Deprecated. Use StartTrackingHeapObjects instead. */
+  static void StartHeapObjectsTracking();
   /**
    * Starts tracking of heap objects population statistics. After calling
    * this method, all heap objects relocations done by the garbage collector
    * are being registered.
    */
-  static void StartHeapObjectsTracking();
+  void StartTrackingHeapObjects();

+  /** Deprecated. Use GetHeapStats instead. */
+  static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
   /**
    * Adds a new time interval entry to the aggregated statistics array. The
    * time interval entry contains information on the current heap objects

@@ -460,28 +503,36 @@ class V8EXPORT HeapProfiler {
    * HeapStatsUpdate structure instances.
    * The return value of the function is the last seen heap object Id.
    *
-   * StartHeapObjectsTracking must be called before the first call to this
+   * StartTrackingHeapObjects must be called before the first call to this
    * method.
    */
-  static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+  SnapshotObjectId GetHeapStats(OutputStream* stream);

+  /** Deprecated. Use StopTrackingHeapObjects instead. */
+  static void StopHeapObjectsTracking();
   /**
    * Stops tracking of heap objects population statistics, cleans up all
    * collected data. StartHeapObjectsTracking must be called again prior to
    * calling PushHeapObjectsStats next time.
    */
-  static void StopHeapObjectsTracking();
+  void StopTrackingHeapObjects();

+  /** Deprecated. Use DeleteAllHeapSnapshots instead. */
+  static void DeleteAllSnapshots();
   /**
    * Deletes all snapshots taken. All previously returned pointers to
    * snapshots and their contents become invalid after this call.
    */
-  static void DeleteAllSnapshots();
+  void DeleteAllHeapSnapshots();

-  /** Binds a callback to embedder's class ID. */
+  /** Deprecated. Use SetWrapperClassInfoProvider instead. */
   static void DefineWrapperClass(
       uint16_t class_id,
       WrapperInfoCallback callback);
+  /** Binds a callback to embedder's class ID. */
+  void SetWrapperClassInfoProvider(
+      uint16_t class_id,
+      WrapperInfoCallback callback);

   /**
    * Default value of persistent handle class ID. Must not be used to

@@ -490,11 +541,21 @@ class V8EXPORT HeapProfiler {
    */
   static const uint16_t kPersistentHandleNoClassId = 0;

-  /** Returns the number of currently existing persistent handles. */
+  /**
+   * Deprecated. Returns the number of currently existing persistent handles.
+   */
   static int GetPersistentHandleCount();

-  /** Returns memory used for profiler internal data and snapshots. */
+  /** Deprecated. Use GetHeapProfilerMemorySize instead. */
   static size_t GetMemorySizeUsedByProfiler();
+  /** Returns memory used for profiler internal data and snapshots. */
+  size_t GetProfilerMemorySize();
+
+ private:
+  HeapProfiler();
+  ~HeapProfiler();
+  HeapProfiler(const HeapProfiler&);
+  HeapProfiler& operator=(const HeapProfiler&);
 };

@@ -574,7 +635,7 @@ class V8EXPORT RetainedObjectInfo {  // NOLINT
 /**
  * A struct for exporting HeapStats data from V8, using "push" model.
- * See HeapProfiler::PushHeapObjectsStats.
+ * See HeapProfiler::GetHeapStats.
  */
 struct HeapStatsUpdate {
   HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
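
Every entry point above follows the same pattern: the old static method stays behind as a deprecated shim while a renamed instance method replaces it, reached through the new v8::Isolate::GetCpuProfiler accessor declared in the v8.h hunk below. A minimal migration sketch for the CPU side, assuming an embedder that already holds a live v8::Isolate* (ProfileRun and its surrounding setup are hypothetical; the method signatures come straight from this header):

    #include "v8.h"
    #include "v8-profiler.h"

    // Hypothetical embedder helper: profile one run of interest using the
    // new per-isolate CpuProfiler instead of the deprecated static API.
    void ProfileRun(v8::Isolate* isolate, v8::Handle<v8::String> title) {
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      if (profiler == NULL) return;  // NULL until the isolate is initialized.

      // Was: v8::CpuProfiler::StartProfiling(title, true);
      profiler->StartCpuProfiling(title, true /* record_samples */);

      // ... run the JavaScript being measured ...

      // Was: v8::CpuProfiler::StopProfiling(title);
      const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
      if (profile != NULL) {
        // Individual samples are available because record_samples was true.
        int sample_count = profile->GetSamplesCount();
        (void) sample_count;
      }

      // Was: v8::CpuProfiler::DeleteAllProfiles(); invalidates |profile|.
      profiler->DeleteAllCpuProfiles();
    }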

deps/v8/include/v8.h (16)

@@ -103,12 +103,14 @@ class Array;
 class Boolean;
 class BooleanObject;
 class Context;
+class CpuProfiler;
 class Data;
 class Date;
 class DeclaredAccessorDescriptor;
 class External;
 class Function;
 class FunctionTemplate;
+class HeapProfiler;
 class ImplementationUtilities;
 class Int32;
 class Integer;

@@ -3022,6 +3024,18 @@ class V8EXPORT Isolate {
    */
   intptr_t AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes);

+  /**
+   * Returns heap profiler for this isolate. Will return NULL until the isolate
+   * is initialized.
+   */
+  HeapProfiler* GetHeapProfiler();
+
+  /**
+   * Returns CPU profiler for this isolate. Will return NULL until the isolate
+   * is initialized.
+   */
+  CpuProfiler* GetCpuProfiler();
+
  private:
   Isolate();
   Isolate(const Isolate&);

@@ -4274,7 +4288,7 @@
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
   static const int kContextHeaderSize = 2 * kApiPointerSize;
-  static const int kContextEmbedderDataIndex = 54;
+  static const int kContextEmbedderDataIndex = 55;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kStringEncodingMask = 0x4;
   static const int kExternalTwoByteRepresentationTag = 0x02;
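
The heap profiler is reached the same way. A sketch of taking a snapshot through the new accessor, assuming an initialized isolate (TakeTitledSnapshot is a hypothetical helper; the defaulted control and resolver arguments come from the v8-profiler.h declarations above):

    #include "v8.h"
    #include "v8-profiler.h"

    // Hypothetical helper: take a heap snapshot via the per-isolate profiler.
    const v8::HeapSnapshot* TakeTitledSnapshot(v8::Isolate* isolate,
                                               v8::Handle<v8::String> title) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      if (profiler == NULL) return NULL;  // NULL until isolate initialization.
      // The new TakeHeapSnapshot drops the HeapSnapshot::Type parameter;
      // GetType() on the result is deprecated and always reports kFull.
      return profiler->TakeHeapSnapshot(title);
    }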

deps/v8/src/api.cc (181)

@@ -477,14 +477,6 @@ void V8::SetAllowCodeGenerationFromStringsCallback(
 }


-#ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(i::Object** begin,
-                                             i::Object** end) {
-  i::HandleScope::ZapRange(begin, end);
-}
-#endif
-
-
 void V8::SetFlagsFromString(const char* str, int length) {
   i::FlagList::SetFlagsFromString(str, length);
 }

@@ -706,7 +698,7 @@ void HandleScope::Leave() {
     i::HandleScope::DeleteExtensions(isolate_);
   }

-#ifdef DEBUG
+#ifdef ENABLE_EXTRA_CHECKS
   i::HandleScope::ZapRange(prev_next_, prev_limit_);
 #endif
 }

@@ -3197,7 +3189,7 @@ Local<String> v8::Object::ObjectProtoToString() {
   i::Handle<i::Object> name(self->class_name(), isolate);

   // Native implementation of Object.prototype.toString (v8natives.js):
-  //   var c = %ClassOf(this);
+  //   var c = %_ClassOf(this);
   //   if (c === 'Arguments') c = 'Object';
   //   return "[object " + c + "]";

@@ -5807,6 +5799,20 @@ intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
 }


+HeapProfiler* Isolate::GetHeapProfiler() {
+  i::HeapProfiler* heap_profiler =
+      reinterpret_cast<i::Isolate*>(this)->heap_profiler();
+  return reinterpret_cast<HeapProfiler*>(heap_profiler);
+}
+
+
+CpuProfiler* Isolate::GetCpuProfiler() {
+  i::CpuProfiler* cpu_profiler =
+      reinterpret_cast<i::Isolate*>(this)->cpu_profiler();
+  return reinterpret_cast<CpuProfiler*>(cpu_profiler);
+}
+
+
 void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;

@@ -5979,6 +5985,14 @@ void Isolate::Exit() {
 void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  if (!isolate->IsInitialized()) {
+    heap_statistics->total_heap_size_ = 0;
+    heap_statistics->total_heap_size_executable_ = 0;
+    heap_statistics->total_physical_size_ = 0;
+    heap_statistics->used_heap_size_ = 0;
+    heap_statistics->heap_size_limit_ = 0;
+    return;
+  }
   i::Heap* heap = isolate->heap();
   heap_statistics->total_heap_size_ = heap->CommittedMemory();
   heap_statistics->total_heap_size_executable_ =

@@ -6532,6 +6546,11 @@ int CpuProfiler::GetProfilesCount() {
 }


+int CpuProfiler::GetProfileCount() {
+  return reinterpret_cast<i::CpuProfiler*>(this)->GetProfilesCount();
+}
+
+
 const CpuProfile* CpuProfiler::GetProfile(int index,
                                           Handle<Value> security_token) {
   i::Isolate* isolate = i::Isolate::Current();

@@ -6545,6 +6564,15 @@ const CpuProfile* CpuProfiler::GetProfile(int index,
 }


+const CpuProfile* CpuProfiler::GetCpuProfile(int index,
+                                             Handle<Value> security_token) {
+  return reinterpret_cast<const CpuProfile*>(
+      reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(
+          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+          index));
+}
+
+
 const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
                                            Handle<Value> security_token) {
   i::Isolate* isolate = i::Isolate::Current();

@@ -6558,6 +6586,15 @@ const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
 }


+const CpuProfile* CpuProfiler::FindCpuProfile(unsigned uid,
+                                              Handle<Value> security_token) {
+  return reinterpret_cast<const CpuProfile*>(
+      reinterpret_cast<i::CpuProfiler*>(this)->FindProfile(
+          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+          uid));
+}
+
+
 void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");

@@ -6567,6 +6604,12 @@ void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
 }


+void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
+  reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+      *Utils::OpenHandle(*title), record_samples);
+}
+
+
 const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
                                              Handle<Value> security_token) {
   i::Isolate* isolate = i::Isolate::Current();

@@ -6580,6 +6623,15 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
 }


+const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title,
+                                                Handle<Value> security_token) {
+  return reinterpret_cast<const CpuProfile*>(
+      reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
+          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+          *Utils::OpenHandle(*title)));
+}
+
+
 void CpuProfiler::DeleteAllProfiles() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");

@@ -6589,6 +6641,11 @@ void CpuProfiler::DeleteAllProfiles() {
 }


+void CpuProfiler::DeleteAllCpuProfiles() {
+  reinterpret_cast<i::CpuProfiler*>(this)->DeleteAllProfiles();
+}
+
+
 static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
   return const_cast<i::HeapGraphEdge*>(
       reinterpret_cast<const i::HeapGraphEdge*>(edge));

@@ -6708,11 +6765,11 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
 void HeapSnapshot::Delete() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
-  if (i::HeapProfiler::GetSnapshotsCount() > 1) {
+  if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
     ToInternal(this)->Delete();
   } else {
     // If this is the last snapshot, clean up all accessory data as well.
-    i::HeapProfiler::DeleteAllSnapshots();
+    isolate->heap_profiler()->DeleteAllSnapshots();
   }
 }

@@ -6720,7 +6777,7 @@ void HeapSnapshot::Delete() {
 HeapSnapshot::Type HeapSnapshot::GetType() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
-  return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
+  return kFull;
 }

@@ -6797,7 +6854,12 @@ void HeapSnapshot::Serialize(OutputStream* stream,
 int HeapProfiler::GetSnapshotsCount() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
-  return i::HeapProfiler::GetSnapshotsCount();
+  return isolate->heap_profiler()->GetSnapshotsCount();
+}
+
+
+int HeapProfiler::GetSnapshotCount() {
+  return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotsCount();
 }

@@ -6805,7 +6867,13 @@ const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
   return reinterpret_cast<const HeapSnapshot*>(
-      i::HeapProfiler::GetSnapshot(index));
+      isolate->heap_profiler()->GetSnapshot(index));
+}
+
+
+const HeapSnapshot* HeapProfiler::GetHeapSnapshot(int index) {
+  return reinterpret_cast<const HeapSnapshot*>(
+      reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshot(index));
 }

@@ -6813,7 +6881,13 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
   return reinterpret_cast<const HeapSnapshot*>(
-      i::HeapProfiler::FindSnapshot(uid));
+      isolate->heap_profiler()->FindSnapshot(uid));
+}
+
+
+const HeapSnapshot* HeapProfiler::FindHeapSnapshot(unsigned uid) {
+  return reinterpret_cast<const HeapSnapshot*>(
+      reinterpret_cast<i::HeapProfiler*>(this)->FindSnapshot(uid));
 }

@@ -6821,7 +6895,13 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
   i::Handle<i::Object> obj = Utils::OpenHandle(*value);
-  return i::HeapProfiler::GetSnapshotObjectId(obj);
+  return isolate->heap_profiler()->GetSnapshotObjectId(obj);
+}
+
+
+SnapshotObjectId HeapProfiler::GetObjectId(Handle<Value> value) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+  return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
 }

@@ -6831,45 +6911,67 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
                                                ObjectNameResolver* resolver) {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
-  i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
-  switch (type) {
-    case HeapSnapshot::kFull:
-      internal_type = i::HeapSnapshot::kFull;
-      break;
-    default:
-      UNREACHABLE();
-  }
   return reinterpret_cast<const HeapSnapshot*>(
-      i::HeapProfiler::TakeSnapshot(
-          *Utils::OpenHandle(*title), internal_type, control, resolver));
+      isolate->heap_profiler()->TakeSnapshot(
+          *Utils::OpenHandle(*title), control, resolver));
+}
+
+
+const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
+    Handle<String> title,
+    ActivityControl* control,
+    ObjectNameResolver* resolver) {
+  return reinterpret_cast<const HeapSnapshot*>(
+      reinterpret_cast<i::HeapProfiler*>(this)->TakeSnapshot(
+          *Utils::OpenHandle(*title), control, resolver));
 }


 void HeapProfiler::StartHeapObjectsTracking() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
-  i::HeapProfiler::StartHeapObjectsTracking();
+  isolate->heap_profiler()->StartHeapObjectsTracking();
+}
+
+
+void HeapProfiler::StartTrackingHeapObjects() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking();
 }


 void HeapProfiler::StopHeapObjectsTracking() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
-  i::HeapProfiler::StopHeapObjectsTracking();
+  isolate->heap_profiler()->StopHeapObjectsTracking();
+}
+
+
+void HeapProfiler::StopTrackingHeapObjects() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StopHeapObjectsTracking();
 }


 SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
-  return i::HeapProfiler::PushHeapObjectsStats(stream);
+  return isolate->heap_profiler()->PushHeapObjectsStats(stream);
+}
+
+
+SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream) {
+  return reinterpret_cast<i::HeapProfiler*>(this)->PushHeapObjectsStats(stream);
 }


 void HeapProfiler::DeleteAllSnapshots() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
-  i::HeapProfiler::DeleteAllSnapshots();
+  isolate->heap_profiler()->DeleteAllSnapshots();
+}
+
+
+void HeapProfiler::DeleteAllHeapSnapshots() {
+  reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
 }

@@ -6880,6 +6982,13 @@ void HeapProfiler::DefineWrapperClass(uint16_t class_id,
 }


+void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
+                                               WrapperInfoCallback callback) {
+  reinterpret_cast<i::HeapProfiler*>(this)->DefineWrapperClass(class_id,
+                                                               callback);
+}
+
+
 int HeapProfiler::GetPersistentHandleCount() {
   i::Isolate* isolate = i::Isolate::Current();
   return isolate->global_handles()->NumberOfGlobalHandles();

@@ -6887,7 +6996,13 @@ int HeapProfiler::GetPersistentHandleCount() {
 size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
-  return i::HeapProfiler::GetMemorySizeUsedByProfiler();
+  return i::Isolate::Current()->heap_profiler()->GetMemorySizeUsedByProfiler();
+}
+
+
+size_t HeapProfiler::GetProfilerMemorySize() {
+  return reinterpret_cast<i::HeapProfiler*>(this)->
+      GetMemorySizeUsedByProfiler();
 }

@@ -7082,7 +7197,7 @@ DeferredHandles::~DeferredHandles() {
   isolate_->UnlinkDeferredHandles(this);

   for (int i = 0; i < blocks_.length(); i++) {
-#ifdef DEBUG
+#ifdef ENABLE_EXTRA_CHECKS
     HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
 #endif
     isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
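
The Isolate::GetHeapStatistics hunk is the fix the ChangeLog advertises: a half-initialized isolate now reports zeroed statistics instead of touching an uninitialized heap. A sketch of what an embedder can now safely do, even before the isolate has executed anything (PrintHeapUsage is hypothetical; the accessor names correspond to the HeapStatistics fields filled in above):

    #include <stdio.h>
    #include "v8.h"

    // Hypothetical diagnostic: safe on a fresh isolate, because
    // GetHeapStatistics now zero-fills when the isolate is uninitialized.
    void PrintHeapUsage(v8::Isolate* isolate) {
      v8::HeapStatistics stats;
      isolate->GetHeapStatistics(&stats);
      printf("used %zu of %zu committed bytes (limit %zu)\n",
             stats.used_heap_size(),
             stats.total_heap_size(),
             stats.heap_size_limit());
    }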

deps/v8/src/api.h (7)

@@ -294,6 +294,9 @@ MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
   v8::internal::Handle<v8::internal::To> Utils::OpenHandle(                 \
       const v8::From* that, bool allow_empty_handle) {                      \
     EXTRA_CHECK(allow_empty_handle || that != NULL);                        \
+    EXTRA_CHECK(that == NULL ||                                             \
+        !(*reinterpret_cast<v8::internal::To**>(                            \
+            const_cast<v8::From*>(that)))->IsFailure());                    \
     return v8::internal::Handle<v8::internal::To>(                          \
         reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
   }

@@ -569,8 +572,8 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
 #endif
       blocks_.RemoveLast();
-#ifdef DEBUG
-      v8::ImplementationUtilities::ZapHandleRange(block_start, block_limit);
+#ifdef ENABLE_EXTRA_CHECKS
+      internal::HandleScope::ZapRange(block_start, block_limit);
 #endif
       if (spare_ != NULL) {
         DeleteArray(spare_);

deps/v8/src/apiutils.h (4)

@@ -67,10 +67,6 @@ class ImplementationUtilities {
   // Introduce an alias for the handle scope data to allow non-friends
   // to access the HandleScope data.
   typedef v8::HandleScope::Data HandleScopeData;
-
-#ifdef DEBUG
-  static void ZapHandleRange(internal::Object** begin, internal::Object** end);
-#endif
 };

 }  // namespace v8

deps/v8/src/arm/builtins-arm.cc (21)

@@ -221,13 +221,12 @@ static void AllocateJSArray(MacroAssembler* masm,
   __ add(elements_array_end,
          elements_array_end,
          Operand(array_size, ASR, kSmiTagSize));
-  __ AllocateInNewSpace(
-      elements_array_end,
-      result,
-      scratch1,
-      scratch2,
-      gc_required,
-      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  __ Allocate(elements_array_end,
+              result,
+              scratch1,
+              scratch2,
+              gc_required,
+              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Allocated the JSArray. Now initialize the fields except for the elements
   // array.

@@ -874,7 +873,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // r1: constructor function
     // r2: initial map
     __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+    __ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);

     // Allocated the JSObject, now initialize the fields. Map is set to
     // initial map and properties and elements are set to empty fixed array.

@@ -949,7 +948,7 @@
     // r4: JSObject
     // r5: start of next object
     __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
+    __ Allocate(
         r0,
         r5,
         r6,

@@ -1105,10 +1104,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
     __ b(ge, &exit);

-    // Symbols are "objects".
-    __ CompareInstanceType(r1, r3, SYMBOL_TYPE);
-    __ b(eq, &exit);
-
     // Throw away the result of the constructor invocation and use the
     // on-stack receiver as the result.
     __ bind(&use_receiver);

deps/v8/src/arm/code-stubs-arm.cc (110)

@@ -61,6 +61,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
 }


+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r2, r1, r0 };
+  descriptor->register_param_count_ = 3;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {

@@ -84,7 +95,7 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate,
   // stack param count needs (constructor pointer, and single argument)
   descriptor->stack_parameter_count_ = &r0;
   descriptor->register_params_ = registers;
-  descriptor->extra_expression_stack_count_ = 1;
+  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
   descriptor->deoptimization_handler_ =
       FUNCTION_ADDR(ArrayConstructor_StubFailure);
 }

@@ -2623,8 +2634,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
                                       OverwriteMode mode) {
   Register left = r1;
   Register right = r0;
-  Register scratch1 = r7;
-  Register scratch2 = r9;
+  Register scratch1 = r6;
+  Register scratch2 = r7;
   Register scratch3 = r4;

   ASSERT(smi_operands || (not_numbers != NULL));

@@ -2639,7 +2650,7 @@
     __ JumpIfNotSmi(right, miss);
   }

-  Register heap_number_map = r6;
+  Register heap_number_map = r9;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

   switch (op) {

@@ -4477,35 +4488,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 }


-void ArrayLengthStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver;
-  if (kind() == Code::KEYED_LOAD_IC) {
-    // ----------- S t a t e -------------
-    //  -- lr    : return address
-    //  -- r0    : key
-    //  -- r1    : receiver
-    // -----------------------------------
-    __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
-    __ b(ne, &miss);
-    receiver = r1;
-  } else {
-    ASSERT(kind() == Code::LOAD_IC);
-    // ----------- S t a t e -------------
-    //  -- r2    : name
-    //  -- lr    : return address
-    //  -- r0    : receiver
-    //  -- sp[0] : receiver
-    // -----------------------------------
-    receiver = r0;
-  }
-
-  StubCompiler::GenerateLoadArrayLength(masm, receiver, r3, &miss);
-  __ bind(&miss);
-  StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
-}
-
-
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver;

@@ -4780,7 +4762,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));

   // Do the allocation of all three objects in one go.
-  __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
+  __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);

   // r0 = address of new object(s) (tagged)
   // r2 = argument count (tagged)

@@ -4956,13 +4938,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));

   // Do the allocation of both objects in one go.
-  __ AllocateInNewSpace(r1,
-                        r0,
-                        r2,
-                        r3,
-                        &runtime,
-                        static_cast<AllocationFlags>(TAG_OBJECT |
-                                                     SIZE_IN_WORDS));
+  __ Allocate(r1, r0, r2, r3, &runtime,
+              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Get the arguments boilerplate from the current native context.
   __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));

@@ -5466,7 +5443,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
       (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
   __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
   __ add(r2, r5, Operand(objects_size));
-  __ AllocateInNewSpace(
+  __ Allocate(
       r2,  // In: Size, in words.
       r0,  // Out: Start of allocation (tagged).
       r3,  // Scratch register.

@@ -7408,33 +7385,31 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
     __ cmp(entity_name, tmp);
     __ b(eq, done);

-    if (i != kInlinedProbes - 1) {
-      // Load the hole ready for use below:
-      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+    // Load the hole ready for use below:
+    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

-      // Stop if found the property.
-      __ cmp(entity_name, Operand(Handle<Name>(name)));
-      __ b(eq, miss);
+    // Stop if found the property.
+    __ cmp(entity_name, Operand(Handle<Name>(name)));
+    __ b(eq, miss);

-      Label good;
-      __ cmp(entity_name, tmp);
-      __ b(eq, &good);
+    Label good;
+    __ cmp(entity_name, tmp);
+    __ b(eq, &good);

-      // Check if the entry name is not a unique name.
-      __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
-      __ ldrb(entity_name,
-              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-      __ tst(entity_name, Operand(kIsInternalizedMask));
-      __ b(ne, &good);
-      __ cmp(entity_name, Operand(SYMBOL_TYPE));
-      __ b(ne, miss);
-
-      __ bind(&good);
-
-      // Restore the properties.
-      __ ldr(properties,
-             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-    }
+    // Check if the entry name is not a unique name.
+    __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+    __ ldrb(entity_name,
+            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+    __ tst(entity_name, Operand(kIsInternalizedMask));
+    __ b(ne, &good);
+    __ cmp(entity_name, Operand(SYMBOL_TYPE));
+    __ b(ne, miss);
+
+    __ bind(&good);
+
+    // Restore the properties.
+    __ ldr(properties,
+           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   }

@@ -7987,6 +7962,9 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ ldr(r1, MemOperand(fp, parameter_count_offset));
+  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+    __ add(r1, r1, Operand(1));
+  }
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
   __ add(sp, sp, r1);

deps/v8/src/arm/codegen-arm.cc (6)

@@ -207,7 +207,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // Use lr as a temporary register.
   __ mov(lr, Operand(r5, LSL, 2));
   __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
-  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
+  __ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
   // r6: destination FixedDoubleArray, not tagged as heap object.

   // Set destination FixedDoubleArray's length and map.

@@ -348,7 +348,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   // Allocate new FixedArray.
   __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
   __ add(r0, r0, Operand(r5, LSL, 1));
-  __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  __ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
   // r6: destination FixedArray, not tagged as heap object
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);

@@ -691,7 +691,7 @@ void Code::PatchPlatformCodeAge(byte* sequence,
   uint32_t young_length;
   byte* young_sequence = GetNoCodeAgeSequence(&young_length);
   if (age == kNoAge) {
-    memcpy(sequence, young_sequence, young_length);
+    CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(age, parity);

deps/v8/src/arm/full-codegen-arm.cc (30)

@@ -2764,28 +2764,6 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
 }


-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(r0, if_false);
-  __ CompareObjectType(r0, r1, r2, SYMBOL_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);

@@ -4292,6 +4270,10 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
     __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
     __ tst(r1, Operand(1 << Map::kIsUndetectable));
     Split(eq, if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->symbol_string())) {
+    __ JumpIfSmi(r0, if_false);
+    __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
+    Split(eq, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->boolean_string())) {
     __ CompareRoot(r0, Heap::kTrueValueRootIndex);
     __ b(eq, if_true);

@@ -4324,10 +4306,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
       __ CompareRoot(r0, Heap::kNullValueRootIndex);
       __ b(eq, if_true);
     }
-    if (FLAG_harmony_symbols) {
-      __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
-      __ b(eq, if_true);
-    }
     // Check for JS objects => true.
     __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
     __ b(lt, if_false);

deps/v8/src/arm/lithium-arm.cc (43)

@@ -871,6 +871,35 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
   LInstruction* instr = current->CompileToLithium(this);

   if (instr != NULL) {
+#if DEBUG
+    // Make sure that the lithium instruction has either no fixed register
+    // constraints in temps or the result OR no uses that are only used at
+    // start. If this invariant doesn't hold, the register allocator can decide
+    // to insert a split of a range immediately before the instruction due to an
+    // already allocated register needing to be used for the instruction's fixed
+    // register constraint. In this case, The register allocator won't see an
+    // interference between the split child and the use-at-start (it would if
+    // the it was just a plain use), so it is free to move the split child into
+    // the same register that is used for the use-at-start.
+    // See https://code.google.com/p/chromium/issues/detail?id=201590
+    if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+      int fixed = 0;
+      int used_at_start = 0;
+      for (UseIterator it(instr); !it.Done(); it.Advance()) {
+        LUnallocated* operand = LUnallocated::cast(it.Current());
+        if (operand->IsUsedAtStart()) ++used_at_start;
+      }
+      if (instr->Output() != NULL) {
+        if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+      }
+      for (TempIterator it(instr); !it.Done(); it.Advance()) {
+        LUnallocated* operand = LUnallocated::cast(it.Current());
+        if (operand->HasFixedPolicy()) ++fixed;
+      }
+      ASSERT(fixed == 0 || used_at_start == 0);
+    }
+#endif
+
     if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
       instr = AssignPointerMap(instr);
     }

@@ -1115,7 +1144,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
     LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
     return DefineFixedDouble(result, d2);
   } else {
-    LOperand* input = UseRegisterAtStart(instr->value());
+    LOperand* input = UseRegister(instr->value());
     LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
     LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);

@@ -1711,12 +1740,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
 }


-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());

@@ -1823,11 +1846,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = UseRegisterAtStart(instr->value());
+      LOperand* value = NULL;
       LInstruction* res = NULL;
       if (instr->value()->type().IsSmi()) {
+        value = UseRegisterAtStart(instr->value());
         res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
       } else {
+        value = UseRegister(instr->value());
         LOperand* temp1 = TempRegister();
         LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
                                                       : NULL;

@@ -2205,7 +2230,7 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
-    return DefineSameAsFirst(result);
+    return result;
   } else if (FLAG_compiled_transitions) {
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, NULL, NULL);

16
deps/v8/src/arm/lithium-arm.h

@@ -120,7 +120,6 @@ class LCodeGen;
   V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
-  V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
   V(LoadContextSlot)                            \
@@ -1165,19 +1164,6 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
 };

-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LJSArrayLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
 class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFixedArrayBaseLength(LOperand* value) {
@@ -2114,7 +2100,7 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
 };

-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* new_map_temp,

82
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -239,7 +239,7 @@ bool LCodeGen::GeneratePrologue() {
         __ str(r0, target);
         // Update the write barrier. This clobbers r3 and r0.
         __ RecordWriteContextSlot(
-            cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
+            cp, target.offset(), r0, r3, GetLinkRegisterState(), kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -1918,13 +1918,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
 }

-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->value());
-  __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}
-
 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
   Register result = ToRegister(instr->result());
   Register array = ToRegister(instr->value());
@@ -2301,6 +2294,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
         __ bind(&not_string);
       }

+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+        __ b(eq, true_label);
+      }
+
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
         CpuFeatureScope scope(masm(), VFP2);
         // heap number -> false iff +0, -0, or NaN.
@@ -3075,7 +3074,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
                               target.offset(),
                               value,
                               scratch,
-                              kLRHasBeenSaved,
+                              GetLinkRegisterState(),
                               kSaveFPRegs,
                               EMIT_REMEMBERED_SET,
                               check_needed);
@@ -4367,7 +4366,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
                         HeapObject::kMapOffset,
                         scratch,
                         temp,
-                        kLRHasBeenSaved,
+                        GetLinkRegisterState(),
                         kSaveFPRegs,
                         OMIT_REMEMBERED_SET,
                         OMIT_SMI_CHECK);
@@ -4386,7 +4385,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
                           offset,
                           value,
                           scratch,
-                          kLRHasBeenSaved,
+                          GetLinkRegisterState(),
                           kSaveFPRegs,
                           EMIT_REMEMBERED_SET,
                           check_needed);
@@ -4401,7 +4400,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
                           offset,
                           value,
                           object,
-                          kLRHasBeenSaved,
+                          GetLinkRegisterState(),
                           kSaveFPRegs,
                           EMIT_REMEMBERED_SET,
                           check_needed);
@@ -4602,7 +4601,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
     __ RecordWrite(elements,
                    key,
                    value,
-                   kLRHasBeenSaved,
+                   GetLinkRegisterState(),
                    kSaveFPRegs,
                    EMIT_REMEMBERED_SET,
                    check_needed);
@@ -4654,7 +4653,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
     __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
-                        scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+                        scratch, GetLinkRegisterState(), kDontSaveFPRegs);
   } else if (FLAG_compiled_transitions) {
     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
     __ Move(r0, object_reg);
@@ -4897,10 +4896,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
 // Convert unsigned integer with specified number of leading zeroes in binary
 // representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
+// Integer to convert is passed in register src.
 // Resulting double is returned in registers hiword:loword.
 // This functions does not work correctly for 0.
 static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register src,
                                 Register hiword,
                                 Register loword,
                                 Register scratch,
@@ -4914,13 +4914,13 @@ static void GenerateUInt2Double(MacroAssembler* masm,
       kBitsPerInt - mantissa_shift_for_hi_word;
   masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
   if (mantissa_shift_for_hi_word > 0) {
-    masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+    masm->mov(loword, Operand(src, LSL, mantissa_shift_for_lo_word));
     masm->orr(hiword, scratch,
-              Operand(hiword, LSR, mantissa_shift_for_hi_word));
+              Operand(src, LSR, mantissa_shift_for_hi_word));
   } else {
     masm->mov(loword, Operand::Zero());
     masm->orr(hiword, scratch,
-              Operand(hiword, LSL, -mantissa_shift_for_hi_word));
+              Operand(src, LSL, -mantissa_shift_for_hi_word));
   }

   // If least significant bit of biased exponent was not 1 it was corrupted
@@ -4969,17 +4969,17 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
     __ vmov(flt_scratch, src);
     __ vcvt_f64_u32(dbl_scratch, flt_scratch);
   } else {
-    Label no_leading_zero, done;
+    Label no_leading_zero, convert_done;
     __ tst(src, Operand(0x80000000));
     __ b(ne, &no_leading_zero);

     // Integer has one leading zeros.
-    GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
-    __ b(&done);
+    GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 1);
+    __ b(&convert_done);

     __ bind(&no_leading_zero);
-    GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
-    __ b(&done);
+    GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 0);
+    __ bind(&convert_done);
   }
 }
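
For reference, the bit manipulation this helper emits can be written as a standalone C++ sketch (illustrative names, not the V8 API): a 32-bit unsigned value whose exact count of leading zero bits is known becomes the hi:lo words of an IEEE 754 double. The source's leading 1 overlaps the exponent's least significant bit, which is why the helper clears that bit when the biased exponent is even, and why src == 0 is unsupported.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Standalone sketch; 'leading_zeroes' must be exact for 'src',
    // and src == 0 is not handled, matching the helper's comment.
    static double UInt2Double(uint32_t src, int leading_zeroes) {
      const int kBitsPerInt = 32;
      const int kExponentBias = 1023;
      const int kExponentShift = 20;  // exponent starts at hi-word bit 20
      const int kMantissaBitsInTopWord = 20;

      int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
      uint32_t biased_exponent = kExponentBias + meaningful_bits;

      int shift_hi = meaningful_bits - kMantissaBitsInTopWord;
      uint32_t hiword = biased_exponent << kExponentShift;
      uint32_t loword;
      if (shift_hi > 0) {
        loword = src << (kBitsPerInt - shift_hi);
        hiword |= src >> shift_hi;  // top mantissa bits
      } else {
        loword = 0;
        hiword |= src << -shift_hi;
      }
      // The leading 1 of src lands on the exponent's least significant bit;
      // clear it again when that exponent bit should be 0.
      if ((biased_exponent & 1) == 0) hiword &= ~(1u << kExponentShift);

      uint64_t bits = (static_cast<uint64_t>(hiword) << 32) | loword;
      double result;
      memcpy(&result, &bits, sizeof result);
      return result;
    }

    int main() {
      printf("%f\n", UInt2Double(5, 29));           // 5.000000
      printf("%f\n", UInt2Double(0x80000000u, 0));  // 2147483648.000000
    }
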
@@ -4996,10 +4996,18 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   // TODO(3095996): Put a valid pointer value in the stack slot where the result
   // register is stored, as this register is in the pointer map, but contains an
   // integer value.
+  if (!CpuFeatures::IsSupported(VFP2)) {
+    // Preserve sfpd_lo.
+    __ mov(r9, sfpd_lo);
+  }
   __ mov(ip, Operand::Zero());
   __ StoreToSafepointRegisterSlot(ip, dst);
   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   __ Move(dst, r0);
+  if (!CpuFeatures::IsSupported(VFP2)) {
+    // Restore sfpd_lo.
+    __ mov(sfpd_lo, r9);
+  }
   __ sub(dst, dst, Operand(kHeapObjectTag));

   // Done. Put the value in dbl_scratch into the value of the allocated heap
@@ -5671,12 +5679,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
-    __ AllocateInNewSpace(size,
-                          result,
-                          scratch,
-                          scratch2,
-                          deferred->entry(),
-                          flags);
+    __ Allocate(size,
+                result,
+                scratch,
+                scratch2,
+                deferred->entry(),
+                flags);
   }

   __ bind(deferred->exit());
@@ -6074,6 +6082,11 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
     __ tst(ip, Operand(1 << Map::kIsUndetectable));
     final_branch_condition = eq;

+  } else if (type_name->Equals(heap()->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
+    final_branch_condition = eq;
+
   } else if (type_name->Equals(heap()->boolean_string())) {
     __ CompareRoot(input, Heap::kTrueValueRootIndex);
     __ b(eq, true_label);
@@ -6108,15 +6121,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
       __ CompareRoot(input, Heap::kNullValueRootIndex);
       __ b(eq, true_label);
     }
-    if (FLAG_harmony_symbols) {
-      __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
-      __ b(eq, true_label);
-      __ CompareInstanceType(input, scratch,
-                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
-    } else {
-      __ CompareObjectType(input, input, scratch,
-                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
-    }
+    __ CompareObjectType(input, input, scratch,
+                         FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
     __ b(lt, false_label);
     __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
     __ b(gt, false_label);

4
deps/v8/src/arm/lithium-codegen-arm.h

@@ -87,6 +87,10 @@ class LCodeGen BASE_EMBEDDED {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
   }

+  LinkRegisterStatus GetLinkRegisterState() const {
+    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+  }
+
   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
   Register ToRegister(LOperand* op) const;
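
A minimal sketch of the pattern introduced here (illustrative types, not the V8 classes): rather than hard-coding kLRHasBeenSaved at every RecordWrite* call site, the code generator derives the link-register state from whether a stack frame has been built, so frameless deferred code correctly reports kLRHasNotBeenSaved.

    #include <cstdio>

    // Illustrative types only, not the V8 classes.
    enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

    class CodeGenSketch {
     public:
      explicit CodeGenSketch(bool frame_is_built)
          : frame_is_built_(frame_is_built) {}
      LinkRegisterStatus GetLinkRegisterState() const {
        return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
      }
      void EmitWriteBarrier() const {
        // The barrier only needs to preserve lr when it is not on the stack.
        printf(GetLinkRegisterState() == kLRHasBeenSaved
                   ? "lr already saved by the prologue\n"
                   : "must preserve lr around the barrier\n");
      }
     private:
      bool frame_is_built_;
    };

    int main() {
      CodeGenSketch(true).EmitWriteBarrier();   // regular compiled frame
      CodeGenSketch(false).EmitWriteBarrier();  // frameless deferred code
    }
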

82
deps/v8/src/arm/macro-assembler-arm.cc

@@ -708,15 +708,14 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
                           const MemOperand& src, Condition cond) {
   ASSERT(src.rm().is(no_reg));
   ASSERT(!dst1.is(lr));  // r14.
-  ASSERT_EQ(0, dst1.code() % 2);
-  ASSERT_EQ(dst1.code() + 1, dst2.code());

   // V8 does not use this addressing mode, so the fallback code
   // below doesn't support it yet.
   ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));

   // Generate two ldr instructions if ldrd is not available.
-  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
+      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
     CpuFeatureScope scope(this, ARMv7);
     ldrd(dst1, dst2, src, cond);
   } else {
@@ -750,15 +749,14 @@ void MacroAssembler::Strd(Register src1, Register src2,
                           const MemOperand& dst, Condition cond) {
   ASSERT(dst.rm().is(no_reg));
   ASSERT(!src1.is(lr));  // r14.
-  ASSERT_EQ(0, src1.code() % 2);
-  ASSERT_EQ(src1.code() + 1, src2.code());

   // V8 does not use this addressing mode, so the fallback code
   // below doesn't support it yet.
   ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

   // Generate two str instructions if strd is not available.
-  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
+      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
     CpuFeatureScope scope(this, ARMv7);
     strd(src1, src2, dst, cond);
   } else {
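
The effect of both changes, as a standalone sketch (hypothetical helper name): the even/odd register-pair constraint of ARM ldrd/strd moves from a debug assertion into the fast-path condition, so unpaired registers now take the two-instruction fallback instead of asserting.

    #include <cstdio>

    // Hypothetical helper mirroring the new fast-path test above.
    static bool CanUseLdrd(bool armv7, bool predictable_code_size,
                           int reg1_code, int reg2_code) {
      return armv7 && !predictable_code_size &&
             (reg1_code % 2 == 0) && (reg1_code + 1 == reg2_code);
    }

    int main() {
      printf("%d\n", CanUseLdrd(true, false, 0, 1));  // 1: r0/r1 is a valid pair
      printf("%d\n", CanUseLdrd(true, false, 1, 2));  // 0: first register is odd
      printf("%d\n", CanUseLdrd(true, false, 0, 2));  // 0: not consecutive
    }
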
@@ -1671,13 +1669,12 @@ void MacroAssembler::Allocate(int object_size,
 }

-void MacroAssembler::AllocateInNewSpace(Register object_size,
-                                        Register result,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Label* gc_required,
-                                        AllocationFlags flags) {
-  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+void MacroAssembler::Allocate(Register object_size,
+                              Register result,
+                              Register scratch1,
+                              Register scratch2,
+                              Label* gc_required,
+                              AllocationFlags flags) {
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1703,20 +1700,20 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
   // The values must be adjacent in memory to allow the use of LDM.
   // Also, assert that the registers are numbered such that the values
   // are loaded in the correct order.
-  ExternalReference new_space_allocation_top =
-      ExternalReference::new_space_allocation_top_address(isolate());
-  ExternalReference new_space_allocation_limit =
-      ExternalReference::new_space_allocation_limit_address(isolate());
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   intptr_t top =
-      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+      reinterpret_cast<intptr_t>(allocation_top.address());
   intptr_t limit =
-      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+      reinterpret_cast<intptr_t>(allocation_limit.address());
   ASSERT((limit - top) == kPointerSize);
   ASSERT(result.code() < ip.code());

   // Set up allocation top address.
   Register topaddr = scratch1;
-  mov(topaddr, Operand(new_space_allocation_top));
+  mov(topaddr, Operand(allocation_top));

   // This code stores a temporary value in ip. This is OK, as the code below
   // does not need ip for implicit literal generation.
@@ -1739,6 +1736,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     // Align the next allocation. Storing the filler map without checking top is
     // always safe because the limit of the heap is always aligned.
+    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
     Label aligned;
@@ -1809,12 +1807,12 @@ void MacroAssembler::AllocateTwoByteString(Register result,
   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

   // Allocate two-byte string in new space.
-  AllocateInNewSpace(scratch1,
-                     result,
-                     scratch2,
-                     scratch3,
-                     gc_required,
-                     TAG_OBJECT);
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);

   // Set the map, length and hash field.
   InitializeNewString(result,
@@ -1840,12 +1838,12 @@ void MacroAssembler::AllocateAsciiString(Register result,
   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

   // Allocate ASCII string in new space.
-  AllocateInNewSpace(scratch1,
-                     result,
-                     scratch2,
-                     scratch3,
-                     gc_required,
-                     TAG_OBJECT);
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);

   // Set the map, length and hash field.
   InitializeNewString(result,
@@ -2499,9 +2497,9 @@ void MacroAssembler::TryInt32Floor(Register result,

 void MacroAssembler::ECMAConvertNumberToInt32(Register source,
                                               Register result,
-                                              Register scratch,
-                                              Register input_high,
                                               Register input_low,
+                                              Register input_high,
+                                              Register scratch,
                                               DwVfpRegister double_scratch1,
                                               DwVfpRegister double_scratch2) {
   if (CpuFeatures::IsSupported(VFP2)) {
@@ -2578,24 +2576,26 @@ void MacroAssembler::ECMAToInt32NoVFP(Register result,
   Ubfx(scratch, input_high,
        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-  // Load scratch with exponent - 1. This is faster than loading
-  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
-  sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+  // Load scratch with exponent.
+  sub(scratch, scratch, Operand(HeapNumber::kExponentBias));
   // If exponent is negative, 0 < input < 1, the result is 0.
   // If exponent is greater than or equal to 84, the 32 less significant
   // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
   // the result is 0.
   // This test also catch Nan and infinities which also return 0.
-  // Compare exponent with 84 (compare exponent - 1 with 83).
-  cmp(scratch, Operand(83));
+  cmp(scratch, Operand(84));
   // We do an unsigned comparison so negative numbers are treated as big
   // positive number and the two tests above are done in one test.
   b(hs, &out_of_range);

-  // Load scratch with 20 - exponent (load with 19 - (exponent - 1)).
-  rsb(scratch, scratch, Operand(19), SetCC);
+  // Load scratch with 20 - exponent.
+  rsb(scratch, scratch, Operand(20), SetCC);
   b(mi, &both);

+  // Test 0 and -0.
+  bic(result, input_high, Operand(HeapNumber::kSignMask));
+  orr(result, result, Operand(input_low), SetCC);
+  b(eq, &done);
+
   // 0 <= exponent <= 20, shift only input_high.
   // Scratch contains: 20 - exponent.
   Ubfx(result, input_high,
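
A portable sketch of the algorithm the assembly above implements (illustrative, not the V8 helper): ES5 ToInt32 computed from the raw IEEE 754 bits, without floating-point instructions. The unsigned compare against 84 folds the "exponent negative" and "low 32 bits all zero" tests into one, exactly as in the ARM code; the ARM version works on the two 32-bit halves, while one 64-bit word suffices here.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static int32_t SoftToInt32(double input) {
      uint64_t bits;
      memcpy(&bits, &input, sizeof bits);
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      // exponent < 0: |input| < 1, result 0. exponent >= 84: the low 32 bits
      // of the integer part are zero (52 mantissa bits + 32 uncoded bits);
      // this also catches NaN and infinities. One unsigned compare does both.
      if (static_cast<uint32_t>(exponent) >= 84) return 0;
      uint64_t mantissa =
          (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      uint32_t result = exponent <= 52
          ? static_cast<uint32_t>(mantissa >> (52 - exponent))
          : static_cast<uint32_t>(mantissa << (exponent - 52));
      // Negate modulo 2^32, which is exactly what ToInt32 requires.
      if (bits >> 63) result = 0u - result;
      return static_cast<int32_t>(result);
    }

    int main() {
      printf("%d\n", SoftToInt32(1234.9));         // 1234
      printf("%d\n", SoftToInt32(-2147483648.0));  // -2147483648
      printf("%d\n", SoftToInt32(4294967303.0));   // 7 (i.e. 2^32 + 7, mod 2^32)
    }
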

16
deps/v8/src/arm/macro-assembler-arm.h

@@ -679,12 +679,12 @@ class MacroAssembler: public Assembler {
                 Label* gc_required,
                 AllocationFlags flags);

-  void AllocateInNewSpace(Register object_size,
-                          Register result,
-                          Register scratch1,
-                          Register scratch2,
-                          Label* gc_required,
-                          AllocationFlags flags);
+  void Allocate(Register object_size,
+                Register result,
+                Register scratch1,
+                Register scratch2,
+                Label* gc_required,
+                AllocationFlags flags);

   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. The caller must make sure that no pointers
@@ -960,9 +960,9 @@ class MacroAssembler: public Assembler {
   // Exits with 'result' holding the answer.
   void ECMAConvertNumberToInt32(Register source,
                                 Register result,
-                                Register scratch,
-                                Register input_high,
                                 Register input_low,
+                                Register input_high,
+                                Register scratch,
                                 DwVfpRegister double_scratch1,
                                 DwVfpRegister double_scratch2);

26
deps/v8/src/arm/simulator-arm.cc

@@ -112,8 +112,8 @@ void ArmDebugger::Stop(Instruction* instr) {
   ASSERT(msg != NULL);

   // Update this stop description.
-  if (isWatchedStop(code) && !watched_stops[code].desc) {
-    watched_stops[code].desc = msg;
+  if (isWatchedStop(code) && !watched_stops_[code].desc) {
+    watched_stops_[code].desc = msg;
   }

   if (strlen(msg) > 0) {
@@ -141,8 +141,8 @@ void ArmDebugger::Stop(Instruction* instr) {
   char* msg = *reinterpret_cast<char**>(sim_->get_pc()
                                         + Instruction::kInstrSize);
   // Update this stop description.
-  if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
-    sim_->watched_stops[code].desc = msg;
+  if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
   }
   // Print the stop message and code if it is not the default code.
   if (code != kMaxStopCode) {
@@ -1880,14 +1880,14 @@ bool Simulator::isEnabledStop(uint32_t code) {
   ASSERT(code <= kMaxStopCode);
   // Unwatched stops are always enabled.
   return !isWatchedStop(code) ||
-    !(watched_stops[code].count & kStopDisabledBit);
+    !(watched_stops_[code].count & kStopDisabledBit);
 }

 void Simulator::EnableStop(uint32_t code) {
   ASSERT(isWatchedStop(code));
   if (!isEnabledStop(code)) {
-    watched_stops[code].count &= ~kStopDisabledBit;
+    watched_stops_[code].count &= ~kStopDisabledBit;
   }
 }
@@ -1895,7 +1895,7 @@ void Simulator::EnableStop(uint32_t code) {
 void Simulator::DisableStop(uint32_t code) {
   ASSERT(isWatchedStop(code));
   if (isEnabledStop(code)) {
-    watched_stops[code].count |= kStopDisabledBit;
+    watched_stops_[code].count |= kStopDisabledBit;
   }
 }
@@ -1903,13 +1903,13 @@ void Simulator::DisableStop(uint32_t code) {
 void Simulator::IncreaseStopCounter(uint32_t code) {
   ASSERT(code <= kMaxStopCode);
   ASSERT(isWatchedStop(code));
-  if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
     PrintF("Stop counter for code %i has overflowed.\n"
            "Enabling this code and reseting the counter to 0.\n", code);
-    watched_stops[code].count = 0;
+    watched_stops_[code].count = 0;
     EnableStop(code);
   } else {
-    watched_stops[code].count++;
+    watched_stops_[code].count++;
   }
 }
@@ -1921,12 +1921,12 @@ void Simulator::PrintStopInfo(uint32_t code) {
     PrintF("Stop not watched.");
   } else {
     const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
-    int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+    int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
     // Don't print the state of unused breakpoints.
     if (count != 0) {
-      if (watched_stops[code].desc) {
+      if (watched_stops_[code].desc) {
         PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
-               code, code, state, count, watched_stops[code].desc);
+               code, code, state, count, watched_stops_[code].desc);
       } else {
         PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
                code, code, state, count);
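
The renamed watched_stops_ table packs two things into one word; a sketch of that bookkeeping (illustrative struct, not the simulator class): bit 31 is the disabled flag and bits 0..30 count hits, with the real code resetting the counter before it could overflow into the flag.

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kStopDisabledBit = 1u << 31;

    struct StopCountAndDesc { uint32_t count; const char* desc; };

    static bool IsEnabled(const StopCountAndDesc& s) {
      return (s.count & kStopDisabledBit) == 0;
    }
    static void Disable(StopCountAndDesc* s) { s->count |= kStopDisabledBit; }
    static void Hit(StopCountAndDesc* s) {
      // Counter occupies bits 0..30; the simulator resets it (and re-enables
      // the stop) just before these bits would overflow into the flag bit.
      s->count++;
    }

    int main() {
      StopCountAndDesc s = {0, "sketch"};
      Hit(&s); Hit(&s);
      Disable(&s);
      printf("hits=%u enabled=%d\n", s.count & ~kStopDisabledBit, IsEnabled(s));
    }
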

6
deps/v8/src/arm/simulator-arm.h

@@ -414,14 +414,14 @@ class Simulator {
   static const uint32_t kStopDisabledBit = 1 << 31;

   // A stop is enabled, meaning the simulator will stop when meeting the
-  // instruction, if bit 31 of watched_stops[code].count is unset.
-  // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+  // instruction, if bit 31 of watched_stops_[code].count is unset.
+  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
   // the breakpoint was hit or gone through.
   struct StopCountAndDesc {
     uint32_t count;
     char* desc;
   };
-  StopCountAndDesc watched_stops[kNumOfWatchedStops];
+  StopCountAndDesc watched_stops_[kNumOfWatchedStops];
 };

45
deps/v8/src/arm/stub-cache-arm.cc

@@ -423,7 +423,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
 // registers have their original values.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                       Handle<JSObject> object,
-                                      int index,
+                                      LookupResult* lookup,
                                       Handle<Map> transition,
                                       Handle<Name> name,
                                       Register receiver_reg,
@@ -436,16 +436,6 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   //  r0 : value
   Label exit;

-  LookupResult lookup(masm->isolate());
-  object->Lookup(*name, &lookup);
-  if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
-    // In sloppy mode, we could just return the value and be done. However, we
-    // might be in strict mode, where we have to throw. Since we cannot tell,
-    // go into slow case unconditionally.
-    __ jmp(miss_label);
-    return;
-  }
-
   // Check that the map of the object hasn't changed.
   CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                              : REQUIRE_EXACT_MAP;
@@ -460,8 +450,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   // Check that we are allowed to write this.
   if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
     JSObject* holder;
-    if (lookup.IsFound()) {
-      holder = lookup.holder();
+    // holder == object indicates that no property was found.
+    if (lookup->holder() != *object) {
+      holder = lookup->holder();
     } else {
       // Find the top object.
       holder = *object;
@@ -469,8 +460,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
         holder = JSObject::cast(holder->GetPrototype());
       } while (holder->GetPrototype()->IsJSObject());
     }
-    CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
-                    scratch1, scratch2, name, miss_restore_name);
+    Register holder_reg = CheckPrototypes(
+        object, receiver_reg, Handle<JSObject>(holder), name_reg,
+        scratch1, scratch2, name, miss_restore_name);
+    // If no property was found, and the holder (the last object in the
+    // prototype chain) is in slow mode, we need to do a negative lookup on the
+    // holder.
+    if (lookup->holder() == *object &&
+        !holder->HasFastProperties() &&
+        !holder->IsJSGlobalProxy() &&
+        !holder->IsJSGlobalObject()) {
+      GenerateDictionaryNegativeLookup(
+          masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+    }
   }
   // Stub never generated for non-global objects that require access
@@ -492,6 +494,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     return;
   }

+  int index;
   if (!transition.is_null()) {
     // Update the map of the object.
     __ mov(scratch1, Operand(transition));
@@ -507,6 +510,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                         kDontSaveFPRegs,
                         OMIT_REMEMBERED_SET,
                         OMIT_SMI_CHECK);
+    index = transition->instance_descriptors()->GetFieldIndex(
+        transition->LastAdded());
+  } else {
+    index = lookup->GetFieldIndex().field_index();
   }
   // Adjust for the number of properties stored in the object. Even in the
@@ -2391,6 +2398,12 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
       // Check that the object is a symbol.
       __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
       __ b(ne, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::SYMBOL_FUNCTION_INDEX, r0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+          r0, holder, r3, r1, r4, name, &miss);
       break;

     case NUMBER_CHECK: {
@@ -2982,7 +2995,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
   __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
   __ Check(eq, "Instance size of initial map changed.");
 #endif
-  __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
+  __ Allocate(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);

   // Allocated the JSObject, now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.

210
deps/v8/src/array.js

@@ -38,22 +38,21 @@ var visited_arrays = new InternalArray();
 // Gets a sorted array of array keys. Useful for operations on sparse
 // arrays. Dupes have not been removed.
-function GetSortedArrayKeys(array, intervals) {
-  var length = intervals.length;
-  var keys = [];
-  for (var k = 0; k < length; k++) {
-    var key = intervals[k];
-    if (key < 0) {
-      var j = -1 - key;
-      var limit = j + intervals[++k];
-      for (; j < limit; j++) {
-        var e = array[j];
-        if (!IS_UNDEFINED(e) || j in array) {
-          keys.push(j);
-        }
+function GetSortedArrayKeys(array, indices) {
+  var keys = new InternalArray();
+  if (IS_NUMBER(indices)) {
+    // It's an interval
+    var limit = indices;
+    for (var i = 0; i < limit; ++i) {
+      var e = array[i];
+      if (!IS_UNDEFINED(e) || i in array) {
+        keys.push(i);
       }
-    } else {
-      // The case where key is undefined also ends here.
+    }
+  } else {
+    var length = indices.length;
+    for (var k = 0; k < length; ++k) {
+      var key = indices[k];
       if (!IS_UNDEFINED(key)) {
         var e = array[key];
         if (!IS_UNDEFINED(e) || key in array) {
@@ -61,8 +60,8 @@ function GetSortedArrayKeys(array, intervals) {
         }
       }
     }
+    %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
   }
-  %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
   return keys;
 }
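
All the JS callers in this file now branch on IS_NUMBER because %GetArrayKeys returns one of two shapes. A C++ stand-in for that contract (illustrative only; the real boundary is runtime-to-JS): a plain number means "every index below this limit", otherwise the actual keys of a sparse array are returned.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for the two shapes %GetArrayKeys can return.
    struct ArrayKeys {
      bool is_interval;            // IS_NUMBER(indices) on the JS side
      uint32_t interval_limit;     // valid when is_interval
      std::vector<uint32_t> keys;  // valid otherwise
    };

    static void VisitKeys(const ArrayKeys& indices) {
      if (indices.is_interval) {
        // Dense prefix: iterate every index below the limit.
        for (uint32_t i = 0; i < indices.interval_limit; ++i) printf("%u ", i);
      } else {
        // Sparse array: iterate only the keys that exist.
        for (uint32_t key : indices.keys) printf("%u ", key);
      }
      printf("\n");
    }

    int main() {
      VisitKeys({true, 4, {}});          // 0 1 2 3
      VisitKeys({false, 0, {0, 1000}});  // 0 1000
    }
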
@@ -217,34 +216,21 @@ function ConvertToLocaleString(e) {
 // special array operations to handle sparse arrays in a sensible fashion.
 function SmartSlice(array, start_i, del_count, len, deleted_elements) {
   // Move deleted elements to a new array (the return value from splice).
-  // Intervals array can contain keys and intervals. See comment in Concat.
-  var intervals = %GetArrayKeys(array, start_i + del_count);
-  var length = intervals.length;
-  for (var k = 0; k < length; k++) {
-    var key = intervals[k];
-    if (key < 0) {
-      var j = -1 - key;
-      var interval_limit = j + intervals[++k];
-      if (j < start_i) {
-        j = start_i;
-      }
-      for (; j < interval_limit; j++) {
-        // ECMA-262 15.4.4.12 line 10. The spec could also be
-        // interpreted such that %HasLocalProperty would be the
-        // appropriate test. We follow KJS in consulting the
-        // prototype.
-        var current = array[j];
-        if (!IS_UNDEFINED(current) || j in array) {
-          deleted_elements[j - start_i] = current;
-        }
-      }
-    } else {
+  var indices = %GetArrayKeys(array, start_i + del_count);
+  if (IS_NUMBER(indices)) {
+    var limit = indices;
+    for (var i = start_i; i < limit; ++i) {
+      var current = array[i];
+      if (!IS_UNDEFINED(current) || i in array) {
+        deleted_elements[i - start_i] = current;
+      }
+    }
+  } else {
+    var length = indices.length;
+    for (var k = 0; k < length; ++k) {
+      var key = indices[k];
       if (!IS_UNDEFINED(key)) {
         if (key >= start_i) {
-          // ECMA-262 15.4.4.12 line 10. The spec could also be
-          // interpreted such that %HasLocalProperty would be the
-          // appropriate test. We follow KJS in consulting the
-          // prototype.
           var current = array[key];
           if (!IS_UNDEFINED(current) || key in array) {
             deleted_elements[key - start_i] = current;
@@ -261,50 +247,32 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
 function SmartMove(array, start_i, del_count, len, num_additional_args) {
   // Move data to new array.
   var new_array = new InternalArray(len - del_count + num_additional_args);
-  var intervals = %GetArrayKeys(array, len);
-  var length = intervals.length;
-  for (var k = 0; k < length; k++) {
-    var key = intervals[k];
-    if (key < 0) {
-      var j = -1 - key;
-      var interval_limit = j + intervals[++k];
-      while (j < start_i && j < interval_limit) {
-        // The spec could also be interpreted such that
-        // %HasLocalProperty would be the appropriate test. We follow
-        // KJS in consulting the prototype.
-        var current = array[j];
-        if (!IS_UNDEFINED(current) || j in array) {
-          new_array[j] = current;
-        }
-        j++;
-      }
-      j = start_i + del_count;
-      while (j < interval_limit) {
-        // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also be
-        // interpreted such that %HasLocalProperty would be the
-        // appropriate test. We follow KJS in consulting the
-        // prototype.
-        var current = array[j];
-        if (!IS_UNDEFINED(current) || j in array) {
-          new_array[j - del_count + num_additional_args] = current;
-        }
-        j++;
-      }
-    } else {
+  var indices = %GetArrayKeys(array, len);
+  if (IS_NUMBER(indices)) {
+    var limit = indices;
+    for (var i = 0; i < start_i && i < limit; ++i) {
+      var current = array[i];
+      if (!IS_UNDEFINED(current) || i in array) {
+        new_array[i] = current;
+      }
+    }
+    for (var i = start_i + del_count; i < limit; ++i) {
+      var current = array[i];
+      if (!IS_UNDEFINED(current) || i in array) {
+        new_array[i - del_count + num_additional_args] = current;
+      }
+    }
+  } else {
+    var length = indices.length;
+    for (var k = 0; k < length; ++k) {
+      var key = indices[k];
       if (!IS_UNDEFINED(key)) {
         if (key < start_i) {
-          // The spec could also be interpreted such that
-          // %HasLocalProperty would be the appropriate test. We follow
-          // KJS in consulting the prototype.
           var current = array[key];
           if (!IS_UNDEFINED(current) || key in array) {
             new_array[key] = current;
           }
         } else if (key >= start_i + del_count) {
-          // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also
-          // be interpreted such that %HasLocalProperty would be the
-          // appropriate test. We follow KJS in consulting the
-          // prototype.
           var current = array[key];
           if (!IS_UNDEFINED(current) || key in array) {
             new_array[key - del_count + num_additional_args] = current;
@@ -887,24 +855,22 @@ function ArraySort(comparefn) {
     var max = 0;
     for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
       var indices = %GetArrayKeys(proto, length);
-      if (indices.length > 0) {
-        if (indices[0] == -1) {
-          // It's an interval.
-          var proto_length = indices[1];
-          for (var i = 0; i < proto_length; i++) {
-            if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
-              obj[i] = proto[i];
-              if (i >= max) { max = i + 1; }
-            }
-          }
-        } else {
-          for (var i = 0; i < indices.length; i++) {
-            var index = indices[i];
-            if (!IS_UNDEFINED(index) &&
-                !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
-              obj[index] = proto[index];
-              if (index >= max) { max = index + 1; }
-            }
+      if (IS_NUMBER(indices)) {
+        // It's an interval.
+        var proto_length = indices;
+        for (var i = 0; i < proto_length; i++) {
+          if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
+            obj[i] = proto[i];
+            if (i >= max) { max = i + 1; }
+          }
+        }
+      } else {
+        for (var i = 0; i < indices.length; i++) {
+          var index = indices[i];
+          if (!IS_UNDEFINED(index) &&
+              !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
+            obj[index] = proto[index];
+            if (index >= max) { max = index + 1; }
           }
         }
       }
@@ -918,22 +884,20 @@ function ArraySort(comparefn) {
   var ShadowPrototypeElements = function(obj, from, to) {
     for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
       var indices = %GetArrayKeys(proto, to);
-      if (indices.length > 0) {
-        if (indices[0] == -1) {
-          // It's an interval.
-          var proto_length = indices[1];
-          for (var i = from; i < proto_length; i++) {
-            if (proto.hasOwnProperty(i)) {
-              obj[i] = void 0;
-            }
-          }
-        } else {
-          for (var i = 0; i < indices.length; i++) {
-            var index = indices[i];
-            if (!IS_UNDEFINED(index) && from <= index &&
-                proto.hasOwnProperty(index)) {
-              obj[index] = void 0;
-            }
+      if (IS_NUMBER(indices)) {
+        // It's an interval.
+        var proto_length = indices;
+        for (var i = from; i < proto_length; i++) {
+          if (proto.hasOwnProperty(i)) {
+            obj[i] = void 0;
+          }
+        }
+      } else {
+        for (var i = 0; i < indices.length; i++) {
+          var index = indices[i];
+          if (!IS_UNDEFINED(index) && from <= index &&
+              proto.hasOwnProperty(index)) {
+            obj[index] = void 0;
           }
         }
       }
@@ -1284,18 +1248,15 @@ function ArrayIndexOf(element, index) {
   var min = index;
   var max = length;
   if (UseSparseVariant(this, length, IS_ARRAY(this))) {
-    var intervals = %GetArrayKeys(this, length);
-    if (intervals.length == 2 && intervals[0] < 0) {
-      // A single interval.
-      var intervalMin = -(intervals[0] + 1);
-      var intervalMax = intervalMin + intervals[1];
-      if (min < intervalMin) min = intervalMin;
-      max = intervalMax;  // Capped by length already.
+    var indices = %GetArrayKeys(this, length);
+    if (IS_NUMBER(indices)) {
+      // It's an interval.
+      max = indices;  // Capped by length already.
       // Fall through to loop below.
     } else {
-      if (intervals.length == 0) return -1;
+      if (indices.length == 0) return -1;
       // Get all the keys in sorted order.
-      var sortedKeys = GetSortedArrayKeys(this, intervals);
+      var sortedKeys = GetSortedArrayKeys(this, indices);
       var n = sortedKeys.length;
       var i = 0;
       while (i < n && sortedKeys[i] < index) i++;
@@ -1345,18 +1306,15 @@ function ArrayLastIndexOf(element, index) {
   var min = 0;
   var max = index;
   if (UseSparseVariant(this, length, IS_ARRAY(this))) {
-    var intervals = %GetArrayKeys(this, index + 1);
-    if (intervals.length == 2 && intervals[0] < 0) {
-      // A single interval.
-      var intervalMin = -(intervals[0] + 1);
-      var intervalMax = intervalMin + intervals[1];
-      if (min < intervalMin) min = intervalMin;
-      max = intervalMax;  // Capped by index already.
+    var indices = %GetArrayKeys(this, index + 1);
+    if (IS_NUMBER(indices)) {
+      // It's an interval.
+      max = indices;  // Capped by index already.
       // Fall through to loop below.
     } else {
-      if (intervals.length == 0) return -1;
+      if (indices.length == 0) return -1;
      // Get all the keys in sorted order.
-      var sortedKeys = GetSortedArrayKeys(this, intervals);
+      var sortedKeys = GetSortedArrayKeys(this, indices);
       var i = sortedKeys.length - 1;
       while (i >= 0) {
         var key = sortedKeys[i];

11
deps/v8/src/assembler.cc

@@ -927,20 +927,9 @@ void ExternalReference::InitializeMathExpData() {
     math_exp_log_table_array = new double[kTableSize];
     for (int i = 0; i < kTableSize; i++) {
       double value = pow(2, i / kTableSizeDouble);
       uint64_t bits = BitCast<uint64_t, double>(value);
       bits &= (static_cast<uint64_t>(1) << 52) - 1;
       double mantissa = BitCast<double, uint64_t>(bits);
-      // <just testing>
-      uint64_t doublebits;
-      memcpy(&doublebits, &value, sizeof doublebits);
-      doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
-      double mantissa2;
-      memcpy(&mantissa2, &doublebits, sizeof mantissa2);
-      CHECK_EQ(mantissa, mantissa2);
-      // </just testing>
       math_exp_log_table_array[i] = mantissa;
     }
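
What the surviving loop body computes, as a standalone sketch: mask a double down to its 52 mantissa bits, leaving sign and exponent zero. The removed "<just testing>" block verified exactly this memcpy round-trip against BitCast; kTableSize below is illustrative, not V8's constant.

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Portable equivalent of BitCast: copy bits, mask, copy back.
    static double MantissaBits(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof bits);
      bits &= (static_cast<uint64_t>(1) << 52) - 1;  // keep mantissa only
      double mantissa;
      memcpy(&mantissa, &bits, sizeof mantissa);
      return mantissa;
    }

    int main() {
      const int kTableSize = 2048;  // illustrative size
      for (int i = 0; i < 3; i++) {
        printf("%a\n", MantissaBits(pow(2, i / static_cast<double>(kTableSize))));
      }
    }
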

12
deps/v8/src/ast.cc

@@ -128,7 +128,8 @@ Assignment::Assignment(Isolate* isolate,
       pos_(pos),
       binary_operation_(NULL),
       assignment_id_(GetNextId(isolate)),
-      is_monomorphic_(false) { }
+      is_monomorphic_(false),
+      store_mode_(STANDARD_STORE) { }
 Token::Value Assignment::binary_op() const {
@@ -413,12 +414,9 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
   is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
   receiver_types_.Clear();
   if (key()->IsPropertyName()) {
-    ArrayLengthStub array_stub(Code::LOAD_IC);
     FunctionPrototypeStub proto_stub(Code::LOAD_IC);
     StringLengthStub string_stub(Code::LOAD_IC, false);
-    if (oracle->LoadIsStub(this, &array_stub)) {
-      is_array_length_ = true;
-    } else if (oracle->LoadIsStub(this, &string_stub)) {
+    if (oracle->LoadIsStub(this, &string_stub)) {
       is_string_length_ = true;
     } else if (oracle->LoadIsStub(this, &proto_stub)) {
       is_function_prototype_ = true;
@@ -455,9 +453,11 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
   } else if (is_monomorphic_) {
     // Record receiver type for monomorphic keyed stores.
     receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
+    store_mode_ = oracle->GetStoreMode(id);
   } else if (oracle->StoreIsPolymorphic(id)) {
     receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
     oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
+    store_mode_ = oracle->GetStoreMode(id);
   }
 }
@@ -475,6 +475,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
     receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
     oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
   }
+  store_mode_ = oracle->GetStoreMode(id);
 }
@@ -1061,6 +1062,7 @@ DONT_OPTIMIZE_NODE(ModuleVariable)
 DONT_OPTIMIZE_NODE(ModulePath)
 DONT_OPTIMIZE_NODE(ModuleUrl)
 DONT_OPTIMIZE_NODE(ModuleStatement)
+DONT_OPTIMIZE_NODE(Yield)
 DONT_OPTIMIZE_NODE(WithStatement)
 DONT_OPTIMIZE_NODE(TryCatchStatement)
 DONT_OPTIMIZE_NODE(TryFinallyStatement)

83
deps/v8/src/ast.h

@@ -102,6 +102,7 @@ namespace internal {
   V(ObjectLiteral)    \
   V(ArrayLiteral)     \
   V(Assignment)       \
+  V(Yield)            \
   V(Throw)            \
   V(Property)         \
   V(Call)             \
@@ -119,6 +120,10 @@ namespace internal {
   STATEMENT_NODE_LIST(V)                        \
   EXPRESSION_NODE_LIST(V)

+#ifdef WIN32
+#undef Yield
+#endif
+
 // Forward declarations
 class AstConstructionVisitor;
 template<class> class AstNodeFactory;
@@ -349,6 +354,10 @@ class Expression: public AstNode {
     ASSERT(types != NULL && types->length() == 1);
     return types->at(0);
   }
+  virtual KeyedAccessStoreMode GetStoreMode() {
+    UNREACHABLE();
+    return STANDARD_STORE;
+  }

   BailoutId id() const { return id_; }
   TypeFeedbackId test_id() const { return test_id_; }
@@ -1481,7 +1490,9 @@ class Property: public Expression {
   void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
-  bool IsArrayLength() { return is_array_length_; }
+  virtual KeyedAccessStoreMode GetStoreMode() {
+    return STANDARD_STORE;
+  }
   bool IsUninitialized() { return is_uninitialized_; }

   TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
@@ -1497,7 +1508,6 @@ class Property: public Expression {
         load_id_(GetNextId(isolate)),
         is_monomorphic_(false),
         is_uninitialized_(false),
-        is_array_length_(false),
         is_string_length_(false),
         is_string_access_(false),
         is_function_prototype_(false) { }
@@ -1511,7 +1521,6 @@ class Property: public Expression {
   SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
   bool is_uninitialized_ : 1;
-  bool is_array_length_ : 1;
   bool is_string_length_ : 1;
   bool is_string_access_ : 1;
   bool is_function_prototype_ : 1;
@@ -1773,6 +1782,9 @@ class CountOperation: public Expression {
   void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* znoe);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
+  virtual KeyedAccessStoreMode GetStoreMode() {
+    return store_mode_;
+  }

   BailoutId AssignmentId() const { return assignment_id_; }
@@ -1788,6 +1800,8 @@ class CountOperation: public Expression {
       : Expression(isolate),
         op_(op),
         is_prefix_(is_prefix),
+        is_monomorphic_(false),
+        store_mode_(STANDARD_STORE),
         expression_(expr),
         pos_(pos),
         assignment_id_(GetNextId(isolate)),
@@ -1795,8 +1809,9 @@ class CountOperation: public Expression {

  private:
   Token::Value op_;
-  bool is_prefix_;
-  bool is_monomorphic_;
+  bool is_prefix_ : 1;
+  bool is_monomorphic_ : 1;
+  KeyedAccessStoreMode store_mode_: 4;
   Expression* expression_;
   int pos_;
   const BailoutId assignment_id_;
@@ -1909,6 +1924,9 @@ class Assignment: public Expression {
   void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
+  virtual KeyedAccessStoreMode GetStoreMode() {
+    return store_mode_;
+  }

  protected:
   Assignment(Isolate* isolate,
Assignment(Isolate* isolate, Assignment(Isolate* isolate,
@ -1934,11 +1952,37 @@ class Assignment: public Expression {
BinaryOperation* binary_operation_; BinaryOperation* binary_operation_;
const BailoutId assignment_id_; const BailoutId assignment_id_;
bool is_monomorphic_; bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 4;
SmallMapList receiver_types_; SmallMapList receiver_types_;
}; };
+class Yield: public Expression {
+ public:
+  DECLARE_NODE_TYPE(Yield)
+
+  Expression* expression() const { return expression_; }
+  bool is_delegating_yield() const { return is_delegating_yield_; }
+  virtual int position() const { return pos_; }
+
+ protected:
+  Yield(Isolate* isolate,
+        Expression* expression,
+        bool is_delegating_yield,
+        int pos)
+      : Expression(isolate),
+        expression_(expression),
+        is_delegating_yield_(is_delegating_yield),
+        pos_(pos) { }
+
+ private:
+  Expression* expression_;
+  bool is_delegating_yield_;
+  int pos_;
+};
+
 class Throw: public Expression {
  public:
   DECLARE_NODE_TYPE(Throw)
@@ -1979,6 +2023,11 @@ class FunctionLiteral: public Expression {
     kNotParenthesized
   };

+  enum IsGeneratorFlag {
+    kIsGenerator,
+    kNotGenerator
+  };
+
   DECLARE_NODE_TYPE(FunctionLiteral)

   Handle<String> name() const { return name_; }
Handle<String> name() const { return name_; } Handle<String> name() const { return name_; }
@ -2039,6 +2088,10 @@ class FunctionLiteral: public Expression {
bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized); bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized);
} }
bool is_generator() {
return IsGenerator::decode(bitfield_) == kIsGenerator;
}
int ast_node_count() { return ast_properties_.node_count(); } int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); } AstProperties::Flags* flags() { return ast_properties_.flags(); }
void set_ast_properties(AstProperties* ast_properties) { void set_ast_properties(AstProperties* ast_properties) {
@@ -2059,7 +2112,8 @@ class FunctionLiteral: public Expression {
                   Type type,
                   ParameterFlag has_duplicate_parameters,
                   IsFunctionFlag is_function,
-                  IsParenthesizedFlag is_parenthesized)
+                  IsParenthesizedFlag is_parenthesized,
+                  IsGeneratorFlag is_generator)
       : Expression(isolate),
         name_(name),
         scope_(scope),
@@ -2079,7 +2133,8 @@ class FunctionLiteral: public Expression {
                 Pretenure::encode(false) |
                 HasDuplicateParameters::encode(has_duplicate_parameters) |
                 IsFunction::encode(is_function) |
-                IsParenthesized::encode(is_parenthesized);
+                IsParenthesized::encode(is_parenthesized) |
+                IsGenerator::encode(is_generator);
   }

  private:
@@ -2104,6 +2159,7 @@ class FunctionLiteral: public Expression {
   class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
   class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
   class IsParenthesized: public BitField<IsParenthesizedFlag, 6, 1> {};
+  class IsGenerator: public BitField<IsGeneratorFlag, 7, 1> {};
 };
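
IsGenerator follows the same BitField scheme as its neighbours; a self-contained sketch of that template (illustrative, simplified from the V8 header): each flag owns a <shift, size> slice of the single bitfield_ word.

    #include <cstdio>

    // Simplified BitField sketch, not the V8 header.
    template <class T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1) << shift;
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << shift;
      }
      static T decode(unsigned field) {
        return static_cast<T>((field & kMask) >> shift);
      }
      static unsigned update(unsigned field, T value) {
        return (field & ~kMask) | encode(value);
      }
    };

    enum IsGeneratorFlag { kIsGenerator, kNotGenerator };
    typedef BitField<IsGeneratorFlag, 7, 1> IsGenerator;

    int main() {
      unsigned bitfield = IsGenerator::encode(kIsGenerator);  // bit 7 slice
      printf("%d\n", IsGenerator::decode(bitfield) == kIsGenerator);  // 1
    }
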
@@ -2902,6 +2958,12 @@ class AstNodeFactory BASE_EMBEDDED {
     VISIT_AND_RETURN(Assignment, assign)
   }

+  Yield* NewYield(Expression* expression, bool is_delegating_yield, int pos) {
+    Yield* yield =
+        new(zone_) Yield(isolate_, expression, is_delegating_yield, pos);
+    VISIT_AND_RETURN(Yield, yield)
+  }
+
   Throw* NewThrow(Expression* exception, int pos) {
     Throw* t = new(zone_) Throw(isolate_, exception, pos);
     VISIT_AND_RETURN(Throw, t)
@ -2920,13 +2982,14 @@ class AstNodeFactory BASE_EMBEDDED {
FunctionLiteral::ParameterFlag has_duplicate_parameters, FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::Type type, FunctionLiteral::Type type,
FunctionLiteral::IsFunctionFlag is_function, FunctionLiteral::IsFunctionFlag is_function,
FunctionLiteral::IsParenthesizedFlag is_parenthesized) { FunctionLiteral::IsParenthesizedFlag is_parenthesized,
FunctionLiteral::IsGeneratorFlag is_generator) {
FunctionLiteral* lit = new(zone_) FunctionLiteral( FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body, isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count, materialized_literal_count, expected_property_count, handler_count,
has_only_simple_this_property_assignments, this_property_assignments, has_only_simple_this_property_assignments, this_property_assignments,
parameter_count, type, has_duplicate_parameters, is_function, parameter_count, type, has_duplicate_parameters, is_function,
is_parenthesized); is_parenthesized, is_generator);
// Top-level literal doesn't count for the AST's properties. // Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) { if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit); visitor_.VisitFunctionLiteral(lit);
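
The new IsGenerator flag is packed into the same bitfield_ word as the other FunctionLiteral flags via v8's BitField template (bit 7, width 1). The following standalone sketch, a simplified reimplementation rather than v8's actual header, shows how that encode/decode/update round-trip works:

    // Simplified BitField sketch (illustrative, not v8 source).
    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1u) << shift; }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & mask()) >> shift);
      }
      static uint32_t update(uint32_t field, T value) {
        return (field & ~mask()) | encode(value);
      }
    };

    enum IsGeneratorFlag { kNotGenerator = 0, kIsGenerator = 1 };
    typedef BitField<IsGeneratorFlag, 7, 1> IsGenerator;  // bit 7, as in ast.h

    int main() {
      uint32_t bitfield = IsGenerator::encode(kIsGenerator);
      assert(IsGenerator::decode(bitfield) == kIsGenerator);
      bitfield = IsGenerator::update(bitfield, kNotGenerator);
      assert(IsGenerator::decode(bitfield) == kNotGenerator);
      return 0;
    }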

36
deps/v8/src/bootstrapper.cc

@@ -303,9 +303,9 @@ Handle<Context> Bootstrapper::CreateEnvironment(
     v8::ExtensionConfiguration* extensions) {
   HandleScope scope(isolate_);
   Genesis genesis(isolate_, global_object, global_template, extensions);
-  Handle<Object> context(isolate_->global_handles()->Create(*genesis.result()));
-  Handle<Context> env = Handle<Context>::cast(context);
-  if (!env.is_null()) {
+  if (!genesis.result().is_null()) {
+    Handle<Object> ctx(isolate_->global_handles()->Create(*genesis.result()));
+    Handle<Context> env = Handle<Context>::cast(ctx);
     if (InstallExtensions(env, extensions)) {
       return env;
     }
@@ -1280,7 +1280,17 @@ void Genesis::InitializeExperimentalGlobal() {
   Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
 
   // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
-  // longer need to live behind a flag, so functions get added to the snapshot.
+  // longer need to live behind flags, so functions get added to the snapshot.
+
+  if (FLAG_harmony_symbols) {
+    // --- S y m b o l ---
+    Handle<JSFunction> symbol_fun =
+        InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+                        isolate()->initial_object_prototype(),
+                        Builtins::kIllegal, true);
+    native_context()->set_symbol_function(*symbol_fun);
+  }
+
   if (FLAG_harmony_collections) {
     {   // -- S e t
       Handle<JSObject> prototype =
@@ -1301,6 +1311,16 @@ void Genesis::InitializeExperimentalGlobal() {
                       prototype, Builtins::kIllegal, true);
     }
   }
+
+  if (FLAG_harmony_typed_arrays) {
+    {   // -- A r r a y B u f f e r
+      Handle<JSObject> prototype =
+          factory()->NewJSObject(isolate()->object_function(), TENURED);
+      InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+                      JSArrayBuffer::kSize, prototype,
+                      Builtins::kIllegal, true);
+    }
+  }
 }
 
@@ -1436,9 +1456,6 @@ void Genesis::InstallNativeFunctions() {
 }
 
 void Genesis::InstallExperimentalNativeFunctions() {
-  if (FLAG_harmony_symbols) {
-    INSTALL_NATIVE(JSObject, "SymbolDelegate", symbol_delegate);
-  }
   if (FLAG_harmony_proxies) {
     INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
     INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
@@ -1911,6 +1928,11 @@ bool Genesis::InstallExperimentalNatives() {
                  "native object-observe.js") == 0) {
         if (!CompileExperimentalBuiltin(isolate(), i)) return false;
       }
+      if (FLAG_harmony_typed_arrays &&
+          strcmp(ExperimentalNatives::GetScriptName(i).start(),
+                 "native typedarray.js") == 0) {
+        if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+      }
     }
 
     InstallExperimentalNativeFunctions();

138
deps/v8/src/code-stubs-hydrogen.cc

@@ -106,7 +106,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
   Zone* zone = this->zone();
   int param_count = descriptor_->register_param_count_;
   HEnvironment* start_environment = graph()->start_environment();
-  HBasicBlock* next_block = CreateBasicBlock(start_environment);
+  HBasicBlock* next_block =
+      CreateBasicBlock(start_environment, BailoutId::StubEntry());
   current_block()->Goto(next_block);
   next_block->SetJoinId(BailoutId::StubEntry());
   set_current_block(next_block);
@@ -128,7 +129,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
   if (descriptor_->stack_parameter_count_ != NULL) {
     ASSERT(descriptor_->environment_length() == (param_count + 1));
     stack_parameter_count = new(zone) HParameter(param_count,
-                                                 HParameter::REGISTER_PARAMETER);
+                                                 HParameter::REGISTER_PARAMETER,
+                                                 Representation::Integer32());
     // it's essential to bind this value to the environment in case of deopt
     start_environment->Bind(param_count, stack_parameter_count);
     AddInstruction(stack_parameter_count);
@@ -146,13 +148,26 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
   AddSimulate(BailoutId::StubEntry());
 
   HValue* return_value = BuildCodeStub();
+
+  // We might have extra expressions to pop from the stack in addition to the
+  // arguments above
+  HInstruction* stack_pop_count = stack_parameter_count;
+  if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
+    HInstruction* amount = graph()->GetConstant1();
+    stack_pop_count = AddInstruction(
+        HAdd::New(zone, context_, stack_parameter_count, amount));
+    stack_pop_count->ChangeRepresentation(Representation::Integer32());
+    stack_pop_count->ClearFlag(HValue::kCanOverflow);
+  }
+
   HReturn* hreturn_instruction = new(zone) HReturn(return_value,
                                                    context_,
-                                                   stack_parameter_count);
+                                                   stack_pop_count);
   current_block()->Finish(hreturn_instruction);
   return true;
 }
 
 
 template <class Stub>
 class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
  public:
@@ -165,6 +180,14 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
 };
 
 
+template <class Stub>
+static Handle<Code> DoGenerateCode(Stub* stub) {
+  CodeStubGraphBuilder<Stub> builder(stub);
+  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+  return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
 template <>
 HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
   Zone* zone = this->zone();
@@ -176,7 +199,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
                                                          NULL,
                                                          FAST_ELEMENTS));
 
-  CheckBuilder builder(this, BailoutId::StubEntry());
+  CheckBuilder builder(this);
   builder.CheckNotUndefined(boilerplate);
 
   int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
@@ -216,9 +239,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
 
 Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
-  CodeStubGraphBuilder<FastCloneShallowObjectStub> builder(this);
-  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  return chunk->Codegen(Code::COMPILED_STUB);
+  return DoGenerateCode(this);
 }
 
@@ -227,16 +248,30 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
   HInstruction* load = BuildUncheckedMonomorphicElementAccess(
       GetParameter(0), GetParameter(1), NULL, NULL,
       casted_stub()->is_js_array(), casted_stub()->elements_kind(),
-      false, Representation::Tagged());
-  AddInstruction(load);
+      false, STANDARD_STORE, Representation::Tagged());
   return load;
 }
 
 
 Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
-  CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
-  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  return chunk->Codegen(Code::COMPILED_STUB);
+  return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
+  BuildUncheckedMonomorphicElementAccess(
+      GetParameter(0), GetParameter(1), GetParameter(2), NULL,
+      casted_stub()->is_js_array(), casted_stub()->elements_kind(),
+      true, casted_stub()->store_mode(), Representation::Tagged());
+  AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
+  return GetParameter(2);
+}
+
+
+Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
+  return DoGenerateCode(this);
 }
 
@@ -252,31 +287,19 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
   AddInstruction(new(zone) HTrapAllocationMemento(js_array));
 
   HInstruction* array_length =
-      AddInstruction(new(zone) HJSArrayLength(js_array,
-                                              js_array,
-                                              HType::Smi()));
+      AddInstruction(HLoadNamedField::NewArrayLength(
+          zone, js_array, js_array, HType::Smi()));
 
-  Heap* heap = isolate()->heap();
-  const int kMinFreeNewSpaceAfterGC =
-      ((heap->InitialSemiSpaceSize() - sizeof(FixedArrayBase)) / 2) /
-      kDoubleSize;
-
-  HConstant* max_alloc_size =
-      new(zone) HConstant(kMinFreeNewSpaceAfterGC, Representation::Integer32());
-  AddInstruction(max_alloc_size);
-  // Since we're forcing Integer32 representation for this HBoundsCheck,
-  // there's no need to Smi-check the index.
-  AddInstruction(
-      new(zone) HBoundsCheck(array_length, max_alloc_size,
-                             DONT_ALLOW_SMI_KEY, Representation::Integer32()));
+  ElementsKind to_kind = casted_stub()->to_kind();
+  BuildNewSpaceArrayCheck(array_length, to_kind);
 
-  IfBuilder if_builder(this, BailoutId::StubEntry());
+  IfBuilder if_builder(this);
 
-  if_builder.BeginTrue(array_length, graph()->GetConstant0(), Token::EQ);
+  if_builder.BeginIf(array_length, graph()->GetConstant0(), Token::EQ);
 
   // Nothing to do, just change the map.
 
-  if_builder.BeginFalse();
+  if_builder.BeginElse();
 
   HInstruction* elements =
       AddInstruction(new(zone) HLoadElements(js_array, js_array));
@@ -284,37 +307,14 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
   HInstruction* elements_length =
       AddInstruction(new(zone) HFixedArrayBaseLength(elements));
 
-  ElementsKind to_kind = casted_stub()->to_kind();
   HValue* new_elements =
       BuildAllocateElements(context(), to_kind, elements_length);
 
-  // Fast elements kinds need to be initialized in case statements below cause a
-  // garbage collection.
-  Factory* factory = isolate()->factory();
-
-  ASSERT(!IsFastSmiElementsKind(to_kind));
-  double nan_double = FixedDoubleArray::hole_nan_as_double();
-  HValue* hole = IsFastObjectElementsKind(to_kind)
-      ? AddInstruction(new(zone) HConstant(factory->the_hole_value(),
-                                           Representation::Tagged()))
-      : AddInstruction(new(zone) HConstant(nan_double,
-                                           Representation::Double()));
-
-  LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
-                      BailoutId::StubEntry());
-
-  HValue* zero = graph()->GetConstant0();
-  HValue* start = IsFastElementsKind(to_kind) ? zero : array_length;
-  HValue* key = builder.BeginBody(start, elements_length, Token::LT);
-  AddInstruction(new(zone) HStoreKeyed(new_elements, key, hole, to_kind));
-  AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
-  builder.EndBody();
-
   BuildCopyElements(context(), elements,
                     casted_stub()->from_kind(), new_elements,
-                    to_kind, array_length);
+                    to_kind, array_length, elements_length);
 
+  Factory* factory = isolate()->factory();
   AddInstruction(new(zone) HStoreNamedField(js_array,
                                             factory->elements_field_string(),
@@ -331,6 +331,11 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
 }
 
 
+Handle<Code> TransitionElementsKindStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
 template <>
 HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
   HInstruction* deopt = new(zone()) HSoftDeoptimize();
@@ -341,9 +346,7 @@ HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
 
 Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
-  CodeStubGraphBuilder<ArrayNoArgumentConstructorStub> builder(this);
-  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  return chunk->Codegen(Code::COMPILED_STUB);
+  return DoGenerateCode(this);
 }
 
@@ -357,17 +360,8 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
 }
 
 
-Handle<Code> TransitionElementsKindStub::GenerateCode() {
-  CodeStubGraphBuilder<TransitionElementsKindStub> builder(this);
-  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  return chunk->Codegen(Code::COMPILED_STUB);
-}
-
-
 Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
-  CodeStubGraphBuilder<ArraySingleArgumentConstructorStub> builder(this);
-  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  return chunk->Codegen(Code::COMPILED_STUB);
+  return DoGenerateCode(this);
 }
 
@@ -381,9 +375,7 @@ HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
 
 Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
-  CodeStubGraphBuilder<ArrayNArgumentsConstructorStub> builder(this);
-  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  return chunk->Codegen(Code::COMPILED_STUB);
+  return DoGenerateCode(this);
 }
 
 } }  // namespace v8::internal
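
The recurring change in this file is the new DoGenerateCode<Stub>() template, which collapses the per-stub GenerateCode() bodies (build graph, optimize, codegen) into one function parameterized on the stub type. A minimal standalone sketch of that deduplication pattern, using hypothetical stub types:

    // Standalone sketch (not v8 source): one template replaces N nearly
    // identical GenerateCode() bodies that differed only in the stub type.
    #include <iostream>
    #include <string>

    struct FooStub { std::string name() const { return "FooStub"; } };
    struct BarStub { std::string name() const { return "BarStub"; } };

    template <class Stub>
    std::string DoGenerateCode(Stub* stub) {
      // build graph -> optimize -> codegen elided; only the type varies.
      return "code for " + stub->name();
    }

    int main() {
      FooStub foo;
      BarStub bar;
      std::cout << DoGenerateCode(&foo) << "\n"
                << DoGenerateCode(&bar) << "\n";
      return 0;
    }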

11
deps/v8/src/code-stubs.cc

@@ -510,6 +510,7 @@ void ToBooleanStub::Types::Print(StringStream* stream) const {
   if (Contains(SMI)) stream->Add("Smi");
   if (Contains(SPEC_OBJECT)) stream->Add("SpecObject");
   if (Contains(STRING)) stream->Add("String");
+  if (Contains(SYMBOL)) stream->Add("Symbol");
   if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber");
 }
 
@@ -549,6 +550,9 @@ bool ToBooleanStub::Types::Record(Handle<Object> object) {
     Add(STRING);
     return !object->IsUndetectableObject() &&
         String::cast(*object)->length() != 0;
+  } else if (object->IsSymbol()) {
+    Add(SYMBOL);
+    return true;
   } else if (object->IsHeapNumber()) {
     ASSERT(!object->IsUndetectableObject());
     Add(HEAP_NUMBER);
@@ -565,6 +569,7 @@ bool ToBooleanStub::Types::Record(Handle<Object> object) {
 bool ToBooleanStub::Types::NeedsMap() const {
   return Contains(ToBooleanStub::SPEC_OBJECT)
       || Contains(ToBooleanStub::STRING)
      || Contains(ToBooleanStub::SYMBOL)
       || Contains(ToBooleanStub::HEAP_NUMBER);
 }
 
@@ -614,10 +619,8 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
 
 void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
-  int i = 0;
-  for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
-    StubFailureTrampolineStub(i).GetCode(isolate);
-  }
+  StubFailureTrampolineStub(NOT_JS_FUNCTION_STUB_MODE).GetCode(isolate);
+  StubFailureTrampolineStub(JS_FUNCTION_STUB_MODE).GetCode(isolate);
 }
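
The SYMBOL additions teach ToBooleanStub about ES6 symbols, which are always truthy. A standalone sketch of the bitset-style type recording used above, with illustrative names rather than v8 source:

    // Standalone sketch: recording observed ToBoolean input types in a
    // bitset; a symbol both joins the seen-types set and reports "true".
    #include <cassert>
    #include <cstdint>

    enum Type { SMI, SPEC_OBJECT, STRING, SYMBOL, HEAP_NUMBER };

    struct Types {
      uint16_t bits = 0;
      void Add(Type t) { bits |= 1u << t; }
      bool Contains(Type t) const { return (bits & (1u << t)) != 0; }
    };

    // A symbol is always truthy under ES6 ToBoolean.
    bool RecordSymbol(Types* types) {
      types->Add(SYMBOL);
      return true;
    }

    int main() {
      Types types;
      assert(!types.Contains(SYMBOL));
      assert(RecordSymbol(&types));
      assert(types.Contains(SYMBOL));
      return 0;
    }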

69
deps/v8/src/code-stubs.h

@@ -47,7 +47,6 @@ namespace internal {
   V(Compare)                             \
   V(CompareIC)                           \
   V(MathPow)                             \
-  V(ArrayLength)                         \
   V(StringLength)                        \
   V(FunctionPrototype)                   \
   V(StoreArrayLength)                    \
@@ -259,15 +258,17 @@ class PlatformCodeStub : public CodeStub {
 };
 
 
+enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
+
 struct CodeStubInterfaceDescriptor {
   CodeStubInterfaceDescriptor()
       : register_param_count_(-1),
         stack_parameter_count_(NULL),
-        extra_expression_stack_count_(0),
+        function_mode_(NOT_JS_FUNCTION_STUB_MODE),
        register_params_(NULL) { }
   int register_param_count_;
   const Register* stack_parameter_count_;
-  int extra_expression_stack_count_;
+  StubFunctionMode function_mode_;
   Register* register_params_;
   Address deoptimization_handler_;
 
@@ -597,16 +598,6 @@ class ICStub: public PlatformCodeStub {
 };
 
 
-class ArrayLengthStub: public ICStub {
- public:
-  explicit ArrayLengthStub(Code::Kind kind) : ICStub(kind) { }
-  virtual void Generate(MacroAssembler* masm);
-
- private:
-  virtual CodeStub::Major MajorKey() { return ArrayLength; }
-};
-
-
 class FunctionPrototypeStub: public ICStub {
  public:
   explicit FunctionPrototypeStub(Code::Kind kind) : ICStub(kind) { }
@@ -1312,6 +1303,47 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
 };
 
 
+class KeyedStoreFastElementStub : public HydrogenCodeStub {
+ public:
+  KeyedStoreFastElementStub(bool is_js_array,
+                            ElementsKind elements_kind,
+                            KeyedAccessStoreMode mode) {
+    bit_field_ = ElementsKindBits::encode(elements_kind) |
+        IsJSArrayBits::encode(is_js_array) |
+        StoreModeBits::encode(mode);
+  }
+
+  Major MajorKey() { return KeyedStoreElement; }
+  int MinorKey() { return bit_field_; }
+
+  bool is_js_array() const {
+    return IsJSArrayBits::decode(bit_field_);
+  }
+
+  ElementsKind elements_kind() const {
+    return ElementsKindBits::decode(bit_field_);
+  }
+
+  KeyedAccessStoreMode store_mode() const {
+    return StoreModeBits::decode(bit_field_);
+  }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+  class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
+  class IsJSArrayBits: public BitField<bool, 12, 1> {};
+  uint32_t bit_field_;
+
+  DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub);
+};
+
+
 class TransitionElementsKindStub : public HydrogenCodeStub {
  public:
   TransitionElementsKindStub(ElementsKind from_kind,
@@ -1447,6 +1479,7 @@ class ToBooleanStub: public PlatformCodeStub {
     SMI,
     SPEC_OBJECT,
     STRING,
+    SYMBOL,
     HEAP_NUMBER,
     NUMBER_OF_TYPES
   };
@@ -1570,10 +1603,8 @@ class StoreArrayLiteralElementStub : public PlatformCodeStub {
 
 class StubFailureTrampolineStub : public PlatformCodeStub {
  public:
-  static const int kMaxExtraExpressionStackCount = 1;
-
-  explicit StubFailureTrampolineStub(int extra_expression_stack_count)
-      : extra_expression_stack_count_(extra_expression_stack_count) {}
+  explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
+      : function_mode_(function_mode) {}
 
   virtual bool IsPregenerated() { return true; }
 
@@ -1581,11 +1612,11 @@ class StubFailureTrampolineStub : public PlatformCodeStub {
  private:
   Major MajorKey() { return StubFailureTrampoline; }
-  int MinorKey() { return extra_expression_stack_count_; }
+  int MinorKey() { return static_cast<int>(function_mode_); }
 
   void Generate(MacroAssembler* masm);
 
-  int extra_expression_stack_count_;
+  StubFunctionMode function_mode_;
 
   DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
 };
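
StubFailureTrampolineStub's key space shrinks from a 0..kMaxExtraExpressionStackCount loop to the two-valued StubFunctionMode enum, which doubles as the stub's minor key. A standalone sketch of that pregenerate-then-lookup scheme; the cache type here is illustrative:

    // Standalone sketch (not v8 source): exactly two trampolines are
    // pregenerated, keyed by the enum value used as the minor key.
    #include <cassert>
    #include <map>

    enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };

    static std::map<int, const char*> cache;  // stands in for the code cache

    void Pregenerate() {
      cache[NOT_JS_FUNCTION_STUB_MODE] = "trampoline (no extra pop)";
      cache[JS_FUNCTION_STUB_MODE] = "trampoline (pops the function too)";
    }

    const char* FindCodeInCache(StubFunctionMode mode) {
      return cache.at(static_cast<int>(mode));  // minor key is the enum value
    }

    int main() {
      Pregenerate();
      assert(FindCodeInCache(JS_FUNCTION_STUB_MODE) != nullptr);
      return 0;
    }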

1
deps/v8/src/compiler.cc

@@ -1121,6 +1121,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
   function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
   function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
   function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
+  function_info->set_is_generator(lit->is_generator());
 }

4
deps/v8/src/contexts.h

@@ -103,6 +103,7 @@ enum BindingFlags {
   V(NUMBER_FUNCTION_INDEX, JSFunction, number_function)                       \
   V(STRING_FUNCTION_INDEX, JSFunction, string_function)                       \
   V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map)  \
+  V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function)                       \
   V(OBJECT_FUNCTION_INDEX, JSFunction, object_function)                       \
   V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function)       \
   V(ARRAY_FUNCTION_INDEX, JSFunction, array_function)                         \
@@ -156,7 +157,6 @@ enum BindingFlags {
   V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings)   \
   V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object,                    \
     error_message_for_code_gen_from_strings)                                  \
-  V(SYMBOL_DELEGATE_INDEX, JSObject, symbol_delegate)                         \
   V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction,                        \
     to_complete_property_descriptor)                                          \
   V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap)                     \
@@ -251,6 +251,7 @@ class Context: public FixedArray {
     NUMBER_FUNCTION_INDEX,
     STRING_FUNCTION_INDEX,
     STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
+    SYMBOL_FUNCTION_INDEX,
     OBJECT_FUNCTION_INDEX,
     INTERNAL_ARRAY_FUNCTION_INDEX,
     ARRAY_FUNCTION_INDEX,
@@ -287,7 +288,6 @@ class Context: public FixedArray {
     EMBEDDER_DATA_INDEX,
     ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
     ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
-    SYMBOL_DELEGATE_INDEX,
     TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
     DERIVED_HAS_TRAP_INDEX,
     DERIVED_GET_TRAP_INDEX,

8
deps/v8/src/d8.cc

@@ -227,11 +227,13 @@ bool Shell::ExecuteString(Isolate* isolate,
       }
 #if !defined(V8_SHARED)
     } else {
+      v8::TryCatch try_catch;
       Context::Scope context_scope(utility_context_);
       Handle<Object> global = utility_context_->Global();
       Handle<Value> fun = global->Get(String::New("Stringify"));
       Handle<Value> argv[1] = { result };
       Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
+      if (try_catch.HasCaught()) return true;
       v8::String::Utf8Value str(s);
       fwrite(*str, sizeof(**str), str.length(), stdout);
       printf("\n");
@@ -905,12 +907,6 @@ Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
 }
 
 
-Handle<Value> Shell::Yield(const Arguments& args) {
-  v8::Unlocker unlocker(args.GetIsolate());
-  return Undefined(args.GetIsolate());
-}
-
-
 Handle<Value> Shell::Quit(const Arguments& args) {
   int exit_code = args[0]->Int32Value();
   OnExit();

5
deps/v8/src/d8.h

@@ -298,13 +298,8 @@ class Shell : public i::AllStatic {
 #endif  // ENABLE_DEBUGGER_SUPPORT
 #endif  // V8_SHARED
 
-#ifdef WIN32
-#undef Yield
-#endif
-
   static Handle<Value> Print(const Arguments& args);
   static Handle<Value> Write(const Arguments& args);
-  static Handle<Value> Yield(const Arguments& args);
   static Handle<Value> Quit(const Arguments& args);
   static Handle<Value> Version(const Arguments& args);
   static Handle<Value> EnableProfiler(const Arguments& args);
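
Removing Shell::Yield also lets the WIN32 #undef go: on Windows, <windows.h> pulls in a legacy function-like Yield() macro (from winbase.h) that would otherwise mangle any method of that name. A hypothetical illustration of the clash, assuming that macro is in effect:

    // Hypothetical illustration: why "#undef Yield" was needed on Windows.
    // winbase.h defines a function-like macro Yield(), so a member named
    // Yield does not even parse unless the macro is removed first.
    #if defined(_WIN32)
    #include <windows.h>
    #undef Yield  // without this, the declaration below fails to compile
    #endif

    struct Shell {
      static int Yield() { return 0; }  // clashes with the macro otherwise
    };

    int main() { return Shell::Yield(); }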

3
deps/v8/src/d8.js

@@ -2214,7 +2214,8 @@ function Stringify(x, depth) {
       return x.toString();
     case "string":
       return "\"" + x.toString() + "\"";
-    // TODO(rossberg): add symbol case
+    case "symbol":
+      return "Symbol(" + (x.name ? Stringify(x.name, depth) : "") + ")"
     case "object":
       if (x === null) return "null";
       if (x.constructor && x.constructor.name === "Array") {

9
deps/v8/src/debug.cc

@@ -3761,8 +3761,8 @@ void LockingCommandMessageQueue::Clear() {
 
 MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
     : Thread("v8:MsgDispHelpr"),
-      sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
-      already_signalled_(false) {
+      isolate_(isolate), sem_(OS::CreateSemaphore(0)),
+      mutex_(OS::CreateMutex()), already_signalled_(false) {
 }
 
@@ -3785,7 +3785,6 @@ void MessageDispatchHelperThread::Schedule() {
 
 void MessageDispatchHelperThread::Run() {
-  Isolate* isolate = Isolate::Current();
   while (true) {
     sem_->Wait();
     {
@@ -3793,8 +3792,8 @@ void MessageDispatchHelperThread::Run() {
       already_signalled_ = false;
     }
     {
-      Locker locker(reinterpret_cast<v8::Isolate*>(isolate));
-      isolate->debugger()->CallMessageDispatchHandler();
+      Locker locker(reinterpret_cast<v8::Isolate*>(isolate_));
+      isolate_->debugger()->CallMessageDispatchHandler();
     }
   }
 }

1
deps/v8/src/debug.h

@@ -1041,6 +1041,7 @@ class MessageDispatchHelperThread: public Thread {
  private:
   void Run();
 
+  Isolate* isolate_;
   Semaphore* const sem_;
   Mutex* const mutex_;
   bool already_signalled_;

106
deps/v8/src/deoptimizer.cc

@@ -1280,29 +1280,37 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   }
 
   intptr_t caller_arg_count = 0;
-  if (descriptor->stack_parameter_count_ != NULL) {
-    caller_arg_count =
-        input_->GetRegister(descriptor->stack_parameter_count_->code());
-  }
+  bool arg_count_known = descriptor->stack_parameter_count_ == NULL;
 
   // Build the Arguments object for the caller's parameters and a pointer to it.
   output_frame_offset -= kPointerSize;
-  value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
-      (caller_arg_count - 1) * kPointerSize;
-  output_frame->SetFrameSlot(output_frame_offset, value);
+  int args_arguments_offset = output_frame_offset;
+  intptr_t the_hole = reinterpret_cast<intptr_t>(
+      isolate_->heap()->the_hole_value());
+  if (arg_count_known) {
+    value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+        (caller_arg_count - 1) * kPointerSize;
+  } else {
+    value = the_hole;
+  }
+
+  output_frame->SetFrameSlot(args_arguments_offset, value);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; args.arguments\n",
-           top_address + output_frame_offset, output_frame_offset, value);
+           V8PRIxPTR " ; args.arguments %s\n",
+           top_address + args_arguments_offset, args_arguments_offset, value,
+           arg_count_known ? "" : "(the hole)");
   }
 
   output_frame_offset -= kPointerSize;
-  value = caller_arg_count;
-  output_frame->SetFrameSlot(output_frame_offset, value);
+  int length_frame_offset = output_frame_offset;
+  value = arg_count_known ? caller_arg_count : the_hole;
+  output_frame->SetFrameSlot(length_frame_offset, value);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; args.length\n",
-           top_address + output_frame_offset, output_frame_offset, value);
+           V8PRIxPTR " ; args.length %s\n",
+           top_address + length_frame_offset, length_frame_offset, value,
+           arg_count_known ? "" : "(the hole)");
   }
 
   output_frame_offset -= kPointerSize;
@@ -1321,6 +1329,20 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
     DoTranslateCommand(iterator, 0, output_frame_offset);
   }
 
+  if (!arg_count_known) {
+    DoTranslateCommand(iterator, 0, length_frame_offset,
+                       TRANSLATED_VALUE_IS_NATIVE);
+    caller_arg_count = output_frame->GetFrameSlot(length_frame_offset);
+    value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+        (caller_arg_count - 1) * kPointerSize;
+    output_frame->SetFrameSlot(args_arguments_offset, value);
+    if (trace_) {
+      PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+             V8PRIxPTR " ; args.arguments\n",
+             top_address + args_arguments_offset, args_arguments_offset, value);
+    }
+  }
+
   ASSERT(0 == output_frame_offset);
 
   // Copy the double registers from the input into the output frame.
@@ -1331,8 +1353,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
 
   // Compute this frame's PC, state, and continuation.
   Code* trampoline = NULL;
-  int extra = descriptor->extra_expression_stack_count_;
-  StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
+  StubFunctionMode function_mode = descriptor->function_mode_;
+  StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
+                                                           isolate_);
   ASSERT(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
@@ -1476,12 +1499,25 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
 #endif
 
 
+static const char* TraceValueType(bool is_smi, bool is_native) {
+  if (is_native) {
+    return "native";
+  } else if (is_smi) {
+    return "smi";
+  }
+
+  return "heap number";
+}
+
+
 void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                                      int frame_index,
-                                     unsigned output_offset) {
+                                     unsigned output_offset,
+    DeoptimizerTranslatedValueType value_type) {
   disasm::NameConverter converter;
   // A GC-safe temporary placeholder that we can put in the output frame.
   const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+  bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;
 
   // Ignore commands marked as duplicate and act on the first non-duplicate.
   Translation::Opcode opcode =
@@ -1524,7 +1560,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     case Translation::INT32_REGISTER: {
       int input_reg = iterator->Next();
       intptr_t value = input_->GetRegister(input_reg);
-      bool is_smi = Smi::IsValid(value);
+      bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+          Smi::IsValid(value);
+
       if (trace_) {
         PrintF(
             "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
@@ -1532,15 +1570,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
             output_offset,
             value,
            converter.NameOfCPURegister(input_reg),
-            is_smi ? "smi" : "heap number");
+            TraceValueType(is_smi, is_native));
       }
       if (is_smi) {
         intptr_t tagged_value =
             reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+        output_[frame_index]->SetFrameSlot(output_offset, value);
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
+        ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
         AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                        static_cast<double>(static_cast<int32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -1551,7 +1592,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     case Translation::UINT32_REGISTER: {
       int input_reg = iterator->Next();
       uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
-      bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+      bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+          (value <= static_cast<uintptr_t>(Smi::kMaxValue));
       if (trace_) {
         PrintF(
             "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
@@ -1560,15 +1602,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
             output_offset,
             value,
             converter.NameOfCPURegister(input_reg),
-            is_smi ? "smi" : "heap number");
+            TraceValueType(is_smi, is_native));
       }
       if (is_smi) {
         intptr_t tagged_value =
             reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+        output_[frame_index]->SetFrameSlot(output_offset, value);
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
+        ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
         AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                        static_cast<double>(static_cast<uint32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -1617,7 +1662,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
       unsigned input_offset =
           input_->GetOffsetFromSlotIndex(input_slot_index);
       intptr_t value = input_->GetFrameSlot(input_offset);
-      bool is_smi = Smi::IsValid(value);
+      bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+          Smi::IsValid(value);
       if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
@@ -1625,15 +1671,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                output_offset,
                value,
               input_offset,
-               is_smi ? "smi" : "heap number");
+               TraceValueType(is_smi, is_native));
       }
       if (is_smi) {
         intptr_t tagged_value =
             reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+        output_[frame_index]->SetFrameSlot(output_offset, value);
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
+        ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
         AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                        static_cast<double>(static_cast<int32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -1647,7 +1696,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
           input_->GetOffsetFromSlotIndex(input_slot_index);
       uintptr_t value =
           static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
-      bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+      bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+          (value <= static_cast<uintptr_t>(Smi::kMaxValue));
       if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
@@ -1655,15 +1705,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                output_offset,
               value,
               input_offset,
-               is_smi ? "smi" : "heap number");
+               TraceValueType(is_smi, is_native));
       }
       if (is_smi) {
         intptr_t tagged_value =
             reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+        output_[frame_index]->SetFrameSlot(output_offset, value);
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
+        ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
         AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                        static_cast<double>(static_cast<uint32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2130,7 +2183,8 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
   ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
          desc.instr_size);
   chunk->CommitArea(desc.instr_size);
-  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CopyBytes(chunk->area_start(), desc.buffer,
+            static_cast<size_t>(desc.instr_size));
   CPU::FlushICache(chunk->area_start(), desc.instr_size);
 
   if (type == EAGER) {
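
The new TRANSLATED_VALUE_IS_NATIVE path stores raw machine words (such as the stub frame's argument count) without smi-tagging them, while the default tagged path still smi-encodes small integers. A standalone sketch of that decision, using a simplified 32-bit smi scheme rather than v8's actual constants:

    // Standalone sketch (not v8 source): tagged slots smi-encode values that
    // fit; native slots keep the raw word untouched.
    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagSize = 1;
    const intptr_t kSmiMax = (static_cast<intptr_t>(1) << 30) - 1;

    bool SmiIsValid(intptr_t v) { return v >= -kSmiMax - 1 && v <= kSmiMax; }
    intptr_t SmiFromInt(intptr_t v) { return v << kSmiTagSize; }

    intptr_t StoreSlot(intptr_t value, bool is_native) {
      if (is_native) return value;   // raw word; the GC never scans it
      assert(SmiIsValid(value));     // (placeholder/heap-number path elided)
      return SmiFromInt(value);      // low bit 0 marks a smi
    }

    int main() {
      assert(StoreSlot(21, false) == 42);  // tagged: 21 encoded as a smi
      assert(StoreSlot(21, true) == 21);   // native: stored untouched
      return 0;
    }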

12
deps/v8/src/deoptimizer.h

@@ -356,9 +356,17 @@ class Deoptimizer : public Malloced {
                                    bool is_setter_stub_frame);
   void DoComputeCompiledStubFrame(TranslationIterator* iterator,
                                   int frame_index);
+
+  enum DeoptimizerTranslatedValueType {
+    TRANSLATED_VALUE_IS_NATIVE,
+    TRANSLATED_VALUE_IS_TAGGED
+  };
+
   void DoTranslateCommand(TranslationIterator* iterator,
                           int frame_index,
-                          unsigned output_offset);
+                          unsigned output_offset,
+      DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);
+
   // Translate a command for OSR.  Updates the input offset to be used for
   // the next command.  Returns false if translation of the command failed
   // (e.g., a number conversion failed) and may or may not have updated the

9
deps/v8/src/elements-kind.h

@@ -110,10 +110,15 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
 }
 
 
+inline bool IsExternalFloatOrDoubleElementsKind(ElementsKind kind) {
+  return kind == EXTERNAL_DOUBLE_ELEMENTS ||
+      kind == EXTERNAL_FLOAT_ELEMENTS;
+}
+
+
 inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
   return IsFastDoubleElementsKind(kind) ||
-      kind == EXTERNAL_DOUBLE_ELEMENTS ||
-      kind == EXTERNAL_FLOAT_ELEMENTS;
+      IsExternalFloatOrDoubleElementsKind(kind);
 }

9
deps/v8/src/flag-definitions.h

@@ -148,6 +148,9 @@ DEFINE_bool(harmony_collections, false,
             "enable harmony collections (sets, maps, and weak maps)")
 DEFINE_bool(harmony_observation, false,
             "enable harmony object observation (implies harmony collections")
+DEFINE_bool(harmony_typed_arrays, false,
+            "enable harmony typed arrays")
+DEFINE_bool(harmony_generators, false, "enable harmony generators")
 DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
 DEFINE_implication(harmony, harmony_scoping)
 DEFINE_implication(harmony, harmony_modules)
@@ -155,14 +158,18 @@ DEFINE_implication(harmony, harmony_symbols)
 DEFINE_implication(harmony, harmony_proxies)
 DEFINE_implication(harmony, harmony_collections)
 DEFINE_implication(harmony, harmony_observation)
+DEFINE_implication(harmony, harmony_generators)
 DEFINE_implication(harmony_modules, harmony_scoping)
 DEFINE_implication(harmony_observation, harmony_collections)
+DEFINE_implication(harmony, harmony_typed_arrays)
 
 // Flags for experimental implementation features.
 DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
 DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
 DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
             "generate array elements transition stubs")
+DEFINE_bool(compiled_keyed_stores, false, "use optimizing compiler to "
+            "generate keyed store stubs")
 DEFINE_bool(clever_optimizations,
             true,
             "Optimize object size, Array shift, DOM strings and string +")
@@ -424,7 +431,7 @@ DEFINE_bool(trace_external_memory, false,
             "it is adjusted.")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
-DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
+DEFINE_bool(weak_embedded_maps_in_optimized_code, false,
             "make maps embedded in optimized code weak")
 DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again (during full gc)")

23
deps/v8/src/frames.cc

@@ -1311,18 +1311,19 @@ Address StubFailureTrampolineFrame::GetCallerStackPointer() const {
 
 Code* StubFailureTrampolineFrame::unchecked_code() const {
-  int i = 0;
-  for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
-    Code* trampoline;
-    StubFailureTrampolineStub(i).FindCodeInCache(&trampoline, isolate());
-    ASSERT(trampoline != NULL);
-    Address current_pc = pc();
-    Address code_start = trampoline->instruction_start();
-    Address code_end = code_start + trampoline->instruction_size();
-    if (code_start <= current_pc && current_pc < code_end) {
-      return trampoline;
-    }
+  Code* trampoline;
+  StubFailureTrampolineStub(NOT_JS_FUNCTION_STUB_MODE).
+      FindCodeInCache(&trampoline, isolate());
+  if (trampoline->contains(pc())) {
+    return trampoline;
   }
+
+  StubFailureTrampolineStub(JS_FUNCTION_STUB_MODE).
+      FindCodeInCache(&trampoline, isolate());
+  if (trampoline->contains(pc())) {
+    return trampoline;
+  }
+
   UNREACHABLE();
   return NULL;
 }
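
The rewritten unchecked_code() leans on Code::contains(pc) instead of comparing instruction_start/size by hand. The check itself is a plain half-open range test, sketched standalone here:

    // Standalone sketch (not v8 source): the range test behind contains(pc).
    #include <cassert>
    #include <cstdint>

    typedef uint8_t* Address;

    struct Code {
      Address start;
      int size;
      bool contains(Address pc) const {
        // Half-open interval [start, start + size).
        return start <= pc && pc < start + size;
      }
    };

    int main() {
      uint8_t buffer[16];
      Code code = { buffer, sizeof(buffer) };
      assert(code.contains(buffer));
      assert(code.contains(buffer + 15));
      assert(!code.contains(buffer + 16));
      return 0;
    }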

28
deps/v8/src/full-codegen.cc

@@ -232,6 +232,12 @@ void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
 }
 
 
+void BreakableStatementChecker::VisitYield(Yield* expr) {
+  // Yield is breakable if the expression is.
+  Visit(expr->expression());
+}
+
+
 void BreakableStatementChecker::VisitThrow(Throw* expr) {
   // Throw is breakable if the expression is.
   Visit(expr->exception());
@@ -1538,6 +1544,28 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
 }
 
 
+void FullCodeGenerator::VisitYield(Yield* expr) {
+  if (expr->is_delegating_yield())
+    UNIMPLEMENTED();
+
+  Comment cmnt(masm_, "[ Yield");
+  VisitForAccumulatorValue(expr->expression());
+
+  // TODO(wingo): Assert that the operand stack depth is 0, at least while
+  // general yield expressions are unimplemented.
+
+  // TODO(wingo): What follows is as in VisitReturnStatement. Replace it with a
+  // call to a builtin that will resume the generator.
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  int context_length = 0;
+  while (current != NULL) {
+    current = current->Exit(&stack_depth, &context_length);
+  }
+  __ Drop(stack_depth);
+  EmitReturnSequence();
+}
+
+
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
   VisitForStackValue(expr->exception());
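
The unwind loop in VisitYield walks the chain of enclosing constructs, accumulating how many operand-stack slots must be dropped before the return sequence runs, the same bookkeeping VisitReturnStatement does. A standalone sketch with illustrative types:

    // Standalone sketch (not v8 source): accumulating the drop count while
    // exiting a chain of nested statements.
    #include <cassert>

    struct NestedStatement {
      NestedStatement* outer;
      int slots;  // operand-stack slots this construct holds
      NestedStatement* Exit(int* stack_depth, int* context_length) {
        *stack_depth += slots;
        (void)context_length;  // context handling elided in this sketch
        return outer;
      }
    };

    int main() {
      NestedStatement outer_stmt = { nullptr, 2 };
      NestedStatement inner_stmt = { &outer_stmt, 3 };
      int stack_depth = 0, context_length = 0;
      for (NestedStatement* cur = &inner_stmt; cur != nullptr; )
        cur = cur->Exit(&stack_depth, &context_length);
      assert(stack_depth == 5);  // __ Drop(stack_depth) would pop these
      return 0;
    }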

39
deps/v8/src/gdb-jit.cc

@@ -187,7 +187,7 @@ class Writer BASE_EMBEDDED {
   byte* buffer_;
 };
 
-class StringTable;
+class ELFStringTable;
 
 template<typename THeader>
 class DebugSectionBase : public ZoneObject {
@@ -338,7 +338,7 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
 
   virtual ~ELFSection() { }
 
-  void PopulateHeader(Writer::Slot<Header> header, StringTable* strtab);
+  void PopulateHeader(Writer::Slot<Header> header, ELFStringTable* strtab);
 
   virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
     uintptr_t start = w->position();
@@ -438,9 +438,9 @@ class FullHeaderELFSection : public ELFSection {
 };
 
 
-class StringTable : public ELFSection {
+class ELFStringTable : public ELFSection {
  public:
-  explicit StringTable(const char* name)
+  explicit ELFStringTable(const char* name)
       : ELFSection(name, TYPE_STRTAB, 1), writer_(NULL), offset_(0), size_(0) {
   }
 
@@ -488,7 +488,7 @@
 
 void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
-                                StringTable* strtab) {
+                                ELFStringTable* strtab) {
   header->name = strtab->Add(name_);
   header->type = type_;
   header->alignment = align_;
@@ -631,7 +631,7 @@ class ELF BASE_EMBEDDED {
  public:
   ELF(Zone* zone) : sections_(6, zone) {
     sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
-    sections_.Add(new(zone) StringTable(".shstrtab"), zone);
+    sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
   }
 
   void Write(Writer* w) {
@@ -718,7 +718,7 @@ class ELF BASE_EMBEDDED {
     w->CreateSlotsHere<ELFSection::Header>(sections_.length());
 
     // String table for section table is the first section.
-    StringTable* strtab = static_cast<StringTable*>(SectionAt(1));
+    ELFStringTable* strtab = static_cast<ELFStringTable*>(SectionAt(1));
     strtab->AttachWriter(w);
     for (int i = 0, length = sections_.length();
          i < length;
@@ -832,7 +832,7 @@ class ELFSymbol BASE_EMBEDDED {
   };
 #endif
 
-  void Write(Writer::Slot<SerializedLayout> s, StringTable* t) {
+  void Write(Writer::Slot<SerializedLayout> s, ELFStringTable* t) {
     // Convert symbol names from strings to indexes in the string table.
     s->name = t->Add(name);
     s->value = value;
@@ -871,8 +871,8 @@ class ELFSymbolTable : public ELFSection {
     header->size = w->position() - header->offset;
 
     // String table for this symbol table should follow it in the section table.
-    StringTable* strtab =
-        static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
+    ELFStringTable* strtab =
+        static_cast<ELFStringTable*>(w->debug_object()->SectionAt(index() + 1));
     strtab->AttachWriter(w);
     symbols.at(0).set(ELFSymbol::SerializedLayout(0,
                                                   0,
@@ -905,7 +905,7 @@ class ELFSymbolTable : public ELFSection {
  private:
   void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
                         Writer::Slot<ELFSymbol::SerializedLayout> dst,
-                        StringTable* strtab) {
+                        ELFStringTable* strtab) {
     for (int i = 0, len = src->length();
          i < len;
         i++) {
@@ -1023,7 +1023,7 @@ static void CreateSymbolsTable(CodeDescription* desc,
                                int text_section_index) {
   Zone* zone = desc->info()->zone();
   ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
-  StringTable* strtab = new(zone) StringTable(".strtab");
+  ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");
 
   // Symbol table should be followed by the linked string table.
   elf->AddSection(symtab, zone);
@@ -1996,7 +1996,7 @@ static GDBJITLineInfo* UntagLineInfo(void* ptr) {
 }
 
 
-void GDBJITInterface::AddCode(Handle<String> name,
+void GDBJITInterface::AddCode(Handle<Name> name,
                               Handle<Script> script,
                               Handle<Code> code,
                               CompilationInfo* info) {
@@ -2005,8 +2005,9 @@ void GDBJITInterface::AddCode(Handle<String> name,
   // Force initialization of line_ends array.
   GetScriptLineNumber(script, 0);
 
-  if (!name.is_null()) {
-    SmartArrayPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
+  if (!name.is_null() && name->IsString()) {
+    SmartArrayPointer<char> name_cstring =
+        Handle<String>::cast(name)->ToCString(DISALLOW_NULLS);
     AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
   } else {
     AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
@@ -2124,10 +2125,14 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
 
 void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
-                              String* name,
+                              Name* name,
                               Code* code) {
   if (!FLAG_gdbjit) return;
-  AddCode(tag, name != NULL ? *name->ToCString(DISALLOW_NULLS) : NULL, code);
+  if (name != NULL && name->IsString()) {
+    AddCode(tag, *String::cast(name)->ToCString(DISALLOW_NULLS), code);
+  } else {
+    AddCode(tag, "", code);
+  }
 }

4
deps/v8/src/gdb-jit.h

@@ -118,12 +118,12 @@ class GDBJITInterface: public AllStatic {
                       Script* script,
                       CompilationInfo* info);
 
-  static void AddCode(Handle<String> name,
+  static void AddCode(Handle<Name> name,
                       Handle<Script> script,
                       Handle<Code> code,
                       CompilationInfo* info);
 
-  static void AddCode(CodeTag tag, String* name, Code* code);
+  static void AddCode(CodeTag tag, Name* name, Code* code);
 
   static void AddCode(CodeTag tag, const char* name, Code* code);

7 deps/v8/src/global-handles.cc

@@ -72,7 +72,7 @@ class GlobalHandles::Node {
               Internals::kNodeIsPartiallyDependentShift);
   }

-#ifdef DEBUG
+#ifdef ENABLE_EXTRA_CHECKS
   ~Node() {
     // TODO(1428): if it's a weak handle we should have invoked its callback.
     // Zap the values for eager trapping.
@@ -111,10 +111,9 @@ class GlobalHandles::Node {
   void Release(GlobalHandles* global_handles) {
     ASSERT(state() != FREE);
     set_state(FREE);
-    // TODO(mstarzinger): Put behind debug flag once embedders are stabilized.
-    object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
-#ifdef DEBUG
+#ifdef ENABLE_EXTRA_CHECKS
     // Zap the values for eager trapping.
+    object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
     set_independent(false);
     set_partially_dependent(false);
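
The zapping moves from `#ifdef DEBUG` to the new `ENABLE_EXTRA_CHECKS` guard. Zapping overwrites a freed slot with a recognizable poison constant so that stale dereferences trap early instead of silently reading reused memory. A small sketch of the idea, with an illustrative zap constant (V8's real `kGlobalHandleZapValue` differs):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative poison value; compile with -DENABLE_EXTRA_CHECKS to enable.
static const uintptr_t kZapValue = 0xbaffedull;

struct Slot {
  void* object;
  bool free;
};

// Release a slot and "zap" it: any later use of the stale pointer hits an
// obviously bogus address instead of whatever next reuses the memory.
void Release(Slot* slot) {
  assert(!slot->free);
  slot->free = true;
#ifdef ENABLE_EXTRA_CHECKS
  slot->object = reinterpret_cast<void*>(kZapValue);
#endif
}

int main() {
  int x = 42;
  Slot s = { &x, false };
  Release(&s);
  std::printf("slot now holds %p\n", s.object);  // kZapValue if checks are on
}
```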

2 deps/v8/src/handles-inl.h

@@ -107,7 +107,7 @@ void HandleScope::CloseScope() {
     current->limit = prev_limit_;
     DeleteExtensions(isolate_);
   }
-#ifdef DEBUG
+#ifdef ENABLE_EXTRA_CHECKS
   ZapRange(prev_next_, prev_limit_);
 #endif
 }

29 deps/v8/src/handles.cc

@@ -101,12 +101,14 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
 }

+#ifdef ENABLE_EXTRA_CHECKS
 void HandleScope::ZapRange(Object** start, Object** end) {
   ASSERT(end - start <= kHandleBlockSize);
   for (Object** p = start; p != end; p++) {
     *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
   }
 }
+#endif

 Address HandleScope::current_level_address(Isolate* isolate) {
@@ -259,20 +261,6 @@ Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
 }

-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
-                                          Handle<Name> key,
-                                          Handle<Object> value,
-                                          PropertyAttributes attributes,
-                                          StrictModeFlag strict_mode) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetPropertyWithInterceptor(*key,
-                                                        *value,
-                                                        attributes,
-                                                        strict_mode),
-                     Object);
-}
-
 Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            const char* name) {
   Isolate* isolate = obj->GetIsolate();
@@ -289,19 +277,6 @@ Handle<Object> GetProperty(Isolate* isolate,
 }

-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
-                                          Handle<JSObject> holder,
-                                          Handle<Name> name,
-                                          PropertyAttributes* attributes) {
-  Isolate* isolate = receiver->GetIsolate();
-  CALL_HEAP_FUNCTION(isolate,
-                     holder->GetPropertyWithInterceptor(*receiver,
-                                                        *name,
-                                                        attributes),
-                     Object);
-}
-
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
   const bool skip_hidden_prototypes = false;
   CALL_HEAP_FUNCTION(obj->GetIsolate(),
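
`ZapRange` poisons the half-open interval [start, end): the first pointer is included, the last excluded, so `start == end` is a no-op and adjacent ranges never overlap. A self-contained sketch, with a made-up zap constant:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative handle-zap constant modeled on V8's kHandleZapValue.
static const uintptr_t kHandleZap = 0xbaddeadull;

// Poison every slot in the half-open interval [start, end).
void ZapRange(uintptr_t* start, uintptr_t* end) {
  for (uintptr_t* p = start; p != end; p++) {
    *p = kHandleZap;
  }
}

int main() {
  uintptr_t block[4] = {1, 2, 3, 4};
  ZapRange(block + 1, block + 3);  // zaps block[1] and block[2] only
  for (uintptr_t v : block) std::printf("%#lx\n", (unsigned long)v);
}
```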

10 deps/v8/src/handles.h

@@ -160,13 +160,14 @@ class HandleScope {
   // Extend the handle scope making room for more handles.
   static internal::Object** Extend(Isolate* isolate);

+#ifdef ENABLE_EXTRA_CHECKS
   // Zaps the handles in the half-open interval [start, end).
   static void ZapRange(internal::Object** start, internal::Object** end);
+#endif

-  friend class v8::internal::DeferredHandles;
   friend class v8::HandleScope;
+  friend class v8::internal::DeferredHandles;
   friend class v8::internal::HandleScopeImplementer;
-  friend class v8::ImplementationUtilities;
   friend class v8::internal::Isolate;
 };

@@ -232,11 +233,6 @@ Handle<Object> GetProperty(Isolate* isolate,
                            Handle<Object> obj,
                            Handle<Object> key);

-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
-                                          Handle<JSObject> holder,
-                                          Handle<String> name,
-                                          PropertyAttributes* attributes);
-
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);

 Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,

4 deps/v8/src/heap-inl.h

@@ -399,7 +399,9 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
   ASSERT(type != ODDBALL_TYPE);
   ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);

-  if (type < FIRST_NONSTRING_TYPE) {
+  if (type <= LAST_NAME_TYPE) {
+    if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
+    ASSERT(type < FIRST_NONSTRING_TYPE);
     // There are four string representations: sequential strings, external
     // strings, cons strings, and sliced strings.
     // Only the latter two contain non-map-word pointers to heap objects.
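
Symbols now land in old pointer space because, unlike data-only string representations, they hold a heap pointer (the `name` slot set later in this commit). A toy version of the routing decision, with illustrative enum values rather than V8's generated ones:

```cpp
#include <cassert>

// Illustrative instance-type layout; the real values come from the
// InstanceType enum in objects.h. Cons/sliced strings (which do hold
// pointers) are omitted here to keep the sketch minimal.
enum InstanceType {
  SEQ_STRING_TYPE,   // a data-only string representation
  SYMBOL_TYPE,       // symbols carry a pointer field (their name slot)
  LAST_NAME_TYPE = SYMBOL_TYPE,
  FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
  JS_OBJECT_TYPE,
};

enum AllocationSpace { OLD_POINTER_SPACE, OLD_DATA_SPACE };

// Mirrors the shape of the Heap::TargetSpaceId change: names are handled
// first, and symbols are singled out for pointer space.
AllocationSpace TargetSpace(InstanceType type) {
  if (type <= LAST_NAME_TYPE) {
    if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
    assert(type < FIRST_NONSTRING_TYPE);
    return OLD_DATA_SPACE;
  }
  return OLD_POINTER_SPACE;
}

int main() {
  assert(TargetSpace(SYMBOL_TYPE) == OLD_POINTER_SPACE);
  assert(TargetSpace(SEQ_STRING_TYPE) == OLD_DATA_SPACE);
  return 0;
}
```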

125 deps/v8/src/heap-profiler.cc

@@ -44,72 +44,13 @@ HeapProfiler::~HeapProfiler() {
 }

-void HeapProfiler::ResetSnapshots() {
+void HeapProfiler::DeleteAllSnapshots() {
   Heap* the_heap = heap();
   delete snapshots_;
   snapshots_ = new HeapSnapshotsCollection(the_heap);
 }

-void HeapProfiler::SetUp() {
-  Isolate* isolate = Isolate::Current();
-  if (isolate->heap_profiler() == NULL) {
-    isolate->set_heap_profiler(new HeapProfiler(isolate->heap()));
-  }
-}
-
-void HeapProfiler::TearDown() {
-  Isolate* isolate = Isolate::Current();
-  delete isolate->heap_profiler();
-  isolate->set_heap_profiler(NULL);
-}
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(
-    const char* name,
-    int type,
-    v8::ActivityControl* control,
-    v8::HeapProfiler::ObjectNameResolver* resolver) {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
-                                                               type,
-                                                               control,
-                                                               resolver);
-}
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(
-    String* name,
-    int type,
-    v8::ActivityControl* control,
-    v8::HeapProfiler::ObjectNameResolver* resolver) {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
-                                                               type,
-                                                               control,
-                                                               resolver);
-}
-
-void HeapProfiler::StartHeapObjectsTracking() {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
-}
-
-void HeapProfiler::StopHeapObjectsTracking() {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
-}
-
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
-}
-
 void HeapProfiler::DefineWrapperClass(
     uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
   ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
@@ -129,99 +70,69 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
 }

-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+HeapSnapshot* HeapProfiler::TakeSnapshot(
     const char* name,
-    int type,
     v8::ActivityControl* control,
     v8::HeapProfiler::ObjectNameResolver* resolver) {
-  HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
-  HeapSnapshot* result =
-      snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
-  bool generation_completed = true;
-  switch (s_type) {
-    case HeapSnapshot::kFull: {
-      HeapSnapshotGenerator generator(result, control, resolver, heap());
-      generation_completed = generator.GenerateSnapshot();
-      break;
+  HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
+  {
+    HeapSnapshotGenerator generator(result, control, resolver, heap());
+    if (!generator.GenerateSnapshot()) {
+      delete result;
+      result = NULL;
     }
-    default:
-      UNREACHABLE();
-  }
-  if (!generation_completed) {
-    delete result;
-    result = NULL;
   }
   snapshots_->SnapshotGenerationFinished(result);
   return result;
 }

-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+HeapSnapshot* HeapProfiler::TakeSnapshot(
     String* name,
-    int type,
     v8::ActivityControl* control,
     v8::HeapProfiler::ObjectNameResolver* resolver) {
-  return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
-                          resolver);
+  return TakeSnapshot(snapshots_->names()->GetName(name), control, resolver);
 }

-void HeapProfiler::StartHeapObjectsTrackingImpl() {
+void HeapProfiler::StartHeapObjectsTracking() {
   snapshots_->StartHeapObjectsTracking();
 }

-SnapshotObjectId HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
+SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
   return snapshots_->PushHeapObjectsStats(stream);
 }

-void HeapProfiler::StopHeapObjectsTrackingImpl() {
+void HeapProfiler::StopHeapObjectsTracking() {
   snapshots_->StopHeapObjectsTracking();
 }

 size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
-  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
-  ASSERT(profiler != NULL);
-  size_t size = profiler->snapshots_->GetUsedMemorySize();
-  return size;
+  return snapshots_->GetUsedMemorySize();
 }

 int HeapProfiler::GetSnapshotsCount() {
-  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
-  ASSERT(profiler != NULL);
-  return profiler->snapshots_->snapshots()->length();
+  return snapshots_->snapshots()->length();
 }

 HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
-  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
-  ASSERT(profiler != NULL);
-  return profiler->snapshots_->snapshots()->at(index);
+  return snapshots_->snapshots()->at(index);
 }

 HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
-  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
-  ASSERT(profiler != NULL);
-  return profiler->snapshots_->GetSnapshot(uid);
+  return snapshots_->GetSnapshot(uid);
 }

 SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
   if (!obj->IsHeapObject())
     return v8::HeapProfiler::kUnknownObjectId;
-  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
-  ASSERT(profiler != NULL);
-  return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
-}
-
-void HeapProfiler::DeleteAllSnapshots() {
-  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
-  ASSERT(profiler != NULL);
-  profiler->ResetSnapshots();
+  return snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
 }
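
The profiler drops its `Isolate::Current()` lookups: each isolate owns a `HeapProfiler` and callers invoke instance methods directly. A toy model of that static-to-instance refactor (all class names are stand-ins, not V8's):

```cpp
#include <cstdio>
#include <vector>

// Before: reached through a process-global lookup. After: plain per-isolate
// state with instance methods and no hidden singleton.
class Profiler {
 public:
  unsigned TakeSnapshot() {
    snapshot_uids_.push_back(next_uid_);
    return next_uid_++;
  }
  int GetSnapshotsCount() const {
    return static_cast<int>(snapshot_uids_.size());
  }
  void DeleteAllSnapshots() { snapshot_uids_.clear(); }

 private:
  std::vector<unsigned> snapshot_uids_;
  unsigned next_uid_ = 1;
};

class Isolate {
 public:
  Profiler* heap_profiler() { return &profiler_; }
 private:
  Profiler profiler_;  // owned by the isolate, like the new HeapProfiler
};

int main() {
  Isolate a, b;
  a.heap_profiler()->TakeSnapshot();
  // Two isolates no longer share hidden global state:
  std::printf("a=%d b=%d\n",
              a.heap_profiler()->GetSnapshotsCount(),
              b.heap_profiler()->GetSnapshotsCount());  // prints a=1 b=0
}
```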

46 deps/v8/src/heap-profiler.h

@@ -46,30 +46,28 @@ class HeapSnapshotsCollection;
 class HeapProfiler {
  public:
-  static void SetUp();
-  static void TearDown();
+  explicit HeapProfiler(Heap* heap);
+  ~HeapProfiler();

-  static size_t GetMemorySizeUsedByProfiler();
+  size_t GetMemorySizeUsedByProfiler();

-  static HeapSnapshot* TakeSnapshot(
+  HeapSnapshot* TakeSnapshot(
       const char* name,
-      int type,
       v8::ActivityControl* control,
       v8::HeapProfiler::ObjectNameResolver* resolver);
-  static HeapSnapshot* TakeSnapshot(
+  HeapSnapshot* TakeSnapshot(
       String* name,
-      int type,
       v8::ActivityControl* control,
       v8::HeapProfiler::ObjectNameResolver* resolver);
-  static void StartHeapObjectsTracking();
-  static void StopHeapObjectsTracking();
-  static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
-  static int GetSnapshotsCount();
-  static HeapSnapshot* GetSnapshot(int index);
-  static HeapSnapshot* FindSnapshot(unsigned uid);
-  static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
-  static void DeleteAllSnapshots();
+  void StartHeapObjectsTracking();
+  void StopHeapObjectsTracking();
+  SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+  int GetSnapshotsCount();
+  HeapSnapshot* GetSnapshot(int index);
+  HeapSnapshot* FindSnapshot(unsigned uid);
+  SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
+  void DeleteAllSnapshots();

   void ObjectMoveEvent(Address from, Address to);
@@ -83,24 +81,6 @@ class HeapProfiler {
   }

  private:
-  explicit HeapProfiler(Heap* heap);
-  ~HeapProfiler();
-  HeapSnapshot* TakeSnapshotImpl(
-      const char* name,
-      int type,
-      v8::ActivityControl* control,
-      v8::HeapProfiler::ObjectNameResolver* resolver);
-  HeapSnapshot* TakeSnapshotImpl(
-      String* name,
-      int type,
-      v8::ActivityControl* control,
-      v8::HeapProfiler::ObjectNameResolver* resolver);
-  void ResetSnapshots();
-
-  void StartHeapObjectsTrackingImpl();
-  void StopHeapObjectsTrackingImpl();
-  SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
-
   Heap* heap() const { return snapshots_->heap(); }

   HeapSnapshotsCollection* snapshots_;

1 deps/v8/src/heap-snapshot-generator-inl.h

@@ -85,3 +85,4 @@ int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
 } }  // namespace v8::internal

 #endif  // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+

12 deps/v8/src/heap-snapshot-generator.cc

@@ -189,7 +189,7 @@ template <> struct SnapshotSizeConstants<4> {
   static const int kExpectedHeapGraphEdgeSize = 12;
   static const int kExpectedHeapEntrySize = 24;
   static const int kExpectedHeapSnapshotsCollectionSize = 100;
-  static const int kExpectedHeapSnapshotSize = 136;
+  static const int kExpectedHeapSnapshotSize = 132;
   static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
 };

@@ -197,7 +197,7 @@ template <> struct SnapshotSizeConstants<8> {
   static const int kExpectedHeapGraphEdgeSize = 24;
   static const int kExpectedHeapEntrySize = 32;
   static const int kExpectedHeapSnapshotsCollectionSize = 152;
-  static const int kExpectedHeapSnapshotSize = 168;
+  static const int kExpectedHeapSnapshotSize = 160;
   static const uint64_t kMaxSerializableSnapshotRawSize =
       static_cast<uint64_t>(6000) * MB;
 };
@@ -205,11 +205,9 @@ template <> struct SnapshotSizeConstants<8> {
 }  // namespace

 HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
-                           HeapSnapshot::Type type,
                            const char* title,
                            unsigned uid)
     : collection_(collection),
-      type_(type),
       title_(title),
       uid_(uid),
       root_index_(HeapEntry::kNoEntry),
@@ -599,11 +597,10 @@ HeapSnapshotsCollection::~HeapSnapshotsCollection() {
 }

-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
-                                                   const char* name,
+HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
                                                    unsigned uid) {
   is_tracking_objects_ = true;  // Start watching for heap objects moves.
-  return new HeapSnapshot(this, type, name, uid);
+  return new HeapSnapshot(this, name, uid);
 }

@@ -2410,7 +2407,6 @@ void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
 HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
   HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
-                                          HeapSnapshot::kFull,
                                           snapshot_->title(),
                                           snapshot_->uid());
   result->AddRootEntry();

11 deps/v8/src/heap-snapshot-generator.h

@@ -157,18 +157,12 @@ class HeapSnapshotsCollection;
 // HeapSnapshotGenerator fills in a HeapSnapshot.
 class HeapSnapshot {
  public:
-  enum Type {
-    kFull = v8::HeapSnapshot::kFull
-  };
-
   HeapSnapshot(HeapSnapshotsCollection* collection,
-               Type type,
                const char* title,
                unsigned uid);
   void Delete();

   HeapSnapshotsCollection* collection() { return collection_; }
-  Type type() { return type_; }
   const char* title() { return title_; }
   unsigned uid() { return uid_; }
   size_t RawSnapshotSize() const;
@@ -203,7 +197,6 @@ class HeapSnapshot {
  private:
   HeapSnapshotsCollection* collection_;
-  Type type_;
   const char* title_;
   unsigned uid_;
   int root_index_;
@@ -305,8 +298,7 @@ class HeapSnapshotsCollection {
   void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
   void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }

-  HeapSnapshot* NewSnapshot(
-      HeapSnapshot::Type type, const char* name, unsigned uid);
+  HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
   void SnapshotGenerationFinished(HeapSnapshot* snapshot);
   List<HeapSnapshot*>* snapshots() { return &snapshots_; }
   HeapSnapshot* GetSnapshot(unsigned uid);
@@ -695,3 +687,4 @@ class HeapSnapshotJSONSerializer {
 } }  // namespace v8::internal

 #endif  // V8_HEAP_SNAPSHOT_GENERATOR_H_
+

28 deps/v8/src/heap.cc

@@ -1779,6 +1779,10 @@ class ScavengingVisitor : public StaticVisitorBase {
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<SlicedString::kSize>);

+    table_.Register(kVisitSymbol,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                        template VisitSpecialized<Symbol::kSize>);
+
     table_.Register(kVisitSharedFunctionInfo,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<SharedFunctionInfo::kSize>);
@@ -3785,11 +3789,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   ASSERT(!isolate_->code_range()->exists() ||
       isolate_->code_range()->contains(code->address()));
   code->set_instruction_size(desc.instr_size);
-  // TODO(mstarzinger): Remove once we found the bug.
-  CHECK(reloc_info->IsByteArray());
   code->set_relocation_info(reloc_info);
-  // TODO(mstarzinger): Remove once we found the bug.
-  CHECK(code->relocation_info()->IsByteArray());
   code->set_flags(flags);
   if (code->is_call_stub() || code->is_keyed_call_stub()) {
     code->set_check_type(RECEIVER_MAP_CHECK);
@@ -3805,8 +3805,6 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   }
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
-  CHECK(code->IsCode());
-  CHECK(code->relocation_info()->IsByteArray());
   if (!self_reference.is_null()) {
     *(self_reference.location()) = code;
   }
@@ -3815,8 +3813,6 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   // that are dereferenced during the copy to point directly to the actual heap
   // objects. These pointers can include references to the code object itself,
   // through the self_reference parameter.
-  CHECK(code->IsCode());
-  CHECK(code->relocation_info()->IsByteArray());
   code->CopyFrom(desc);

 #ifdef VERIFY_HEAP
@@ -3888,13 +3884,15 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

   // Copy header and instructions.
-  memcpy(new_addr, old_addr, relocation_offset);
+  CopyBytes(new_addr, old_addr, relocation_offset);

   Code* new_code = Code::cast(result);
   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

   // Copy patched rinfo.
-  memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
+  CopyBytes(new_code->relocation_start(),
+            reloc_info.start(),
+            static_cast<size_t>(reloc_info.length()));

   // Relocate the copy.
   ASSERT(!isolate_->code_range()->exists() ||
@@ -5430,13 +5428,13 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
 }

-MaybeObject* Heap::AllocateSymbol(PretenureFlag pretenure) {
+MaybeObject* Heap::AllocateSymbol() {
   // Statically ensure that it is safe to allocate symbols in paged spaces.
   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
-  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

   Object* result;
-  MaybeObject* maybe = AllocateRaw(Symbol::kSize, space, OLD_DATA_SPACE);
+  MaybeObject* maybe =
+      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
   if (!maybe->ToObject(&result)) return maybe;

   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
@@ -5452,6 +5450,7 @@ MaybeObject* Heap::AllocateSymbol(PretenureFlag pretenure) {
   Symbol::cast(result)->set_hash_field(
       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
+  Symbol::cast(result)->set_name(undefined_value());

   ASSERT(result->IsSymbol());
   return result;
@@ -7471,6 +7470,9 @@ void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
     }
     name = internalized_string;
   }
+  // This cache is cleared only between mark compact passes, so we expect the
+  // cache to only contain old space names.
+  ASSERT(!HEAP->InNewSpace(name));

   int index = (Hash(map, name) & kHashMask);

   // After a GC there will be free slots, so we use them in order (this may
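
The `memcpy` calls are replaced by `CopyBytes`, plausibly to funnel every raw copy through one checked helper with an explicit unsigned size; the diff itself does not state the motivation. A sketch of such a wrapper under that assumption (the real helper lives in deps/v8/src/v8utils.h and may differ):

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>

// A checked byte-copy helper in the spirit of v8::internal::CopyBytes.
inline void CopyBytes(unsigned char* dst, const unsigned char* src,
                      size_t num_bytes) {
  // Ranges must not overlap; memcpy has undefined behavior otherwise.
  assert(dst + num_bytes <= src || src + num_bytes <= dst);
  memcpy(dst, src, num_bytes);
}

int main() {
  unsigned char src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  unsigned char dst[8] = {0};
  // Call sites cast signed lengths explicitly, as the hunk above does with
  // static_cast<size_t>(reloc_info.length()).
  int length = 8;
  CopyBytes(dst, src, static_cast<size_t>(length));
  return dst[7] == 8 ? 0 : 1;
}
```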

9 deps/v8/src/heap.h

@@ -213,6 +213,8 @@ namespace internal {
   V(prototype_string, "prototype")                                     \
   V(string_string, "string")                                           \
   V(String_string, "String")                                           \
+  V(symbol_string, "symbol")                                           \
+  V(Symbol_string, "Symbol")                                           \
   V(Date_string, "Date")                                               \
   V(this_string, "this")                                               \
   V(to_string_string, "toString")                                      \
@@ -220,6 +222,7 @@ namespace internal {
   V(undefined_string, "undefined")                                     \
   V(value_of_string, "valueOf")                                        \
   V(stack_string, "stack")                                             \
+  V(toJSON_string, "toJSON")                                           \
   V(InitializeVarGlobal_string, "InitializeVarGlobal")                 \
   V(InitializeConstGlobal_string, "InitializeConstGlobal")             \
   V(KeyedLoadElementMonomorphic_string,                                \
@@ -520,6 +523,7 @@ class Heap {
   int InitialSemiSpaceSize() { return initial_semispace_size_; }
   intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
   intptr_t MaxExecutableSize() { return max_executable_size_; }
+  int MaxNewSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }

   // Returns the capacity of the heap in bytes w/o growing. Heap grows when
   // more spaces are needed until it reaches the limit.
@@ -878,12 +882,11 @@ class Heap {
                                             void* external_pointer,
                                             PretenureFlag pretenure);

-  // Allocate a symbol.
+  // Allocate a symbol in old space.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateSymbol(
-      PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT MaybeObject* AllocateSymbol();

   // Allocate a tenured JS global property cell.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
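
The new `MaxNewSpaceAllocationSize()` caps a single new-space allocation at three quarters of a semispace. Note that the integer expression `InitialSemiSpaceSize() * 3/4` multiplies before dividing; parenthesizing the fraction would truncate it to zero:

```cpp
#include <cstdio>

// Why `x * 3/4` and not `x * (3/4)`: in C++, 3/4 is integer division and
// evaluates to 0, while x * 3 / 4 multiplies first and then divides.
int main() {
  int semispace = 512 * 1024;           // hypothetical 512 KB semispace
  int cap_ok    = semispace * 3 / 4;    // 393216, what the header computes
  int cap_wrong = semispace * (3 / 4);  // 0, the classic truncation bug
  std::printf("%d %d\n", cap_ok, cap_wrong);
}
```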

35 deps/v8/src/hydrogen-instructions.cc

@@ -1277,7 +1277,8 @@ Representation HBranch::observed_input_representation(int index) {
       ToBooleanStub::UNDEFINED |
       ToBooleanStub::NULL_TYPE |
       ToBooleanStub::SPEC_OBJECT |
-      ToBooleanStub::STRING);
+      ToBooleanStub::STRING |
+      ToBooleanStub::SYMBOL);
   if (expected_input_types_.ContainsAnyOf(tagged_types)) {
     return Representation::Tagged();
   } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -1467,15 +1468,6 @@ void HChange::PrintDataTo(StringStream* stream) {
 }

-void HJSArrayLength::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  if (HasTypeCheck()) {
-    stream->Add(" ");
-    typecheck()->PrintNameTo(stream);
-  }
-}
-
 HValue* HUnaryMathOperation::Canonicalize() {
   if (op() == kMathFloor) {
     // If the input is integer32 then we replace the floor instruction
@@ -2415,6 +2407,10 @@ void HParameter::PrintDataTo(StringStream* stream) {
 void HLoadNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
+  if (HasTypeCheck()) {
+    stream->Add(" ");
+    typecheck()->PrintNameTo(stream);
+  }
 }
@@ -2589,6 +2585,10 @@ bool HLoadKeyed::UsesMustHandleHole() const {
     return false;
   }

+  if (IsExternalArrayElementsKind(elements_kind())) {
+    return false;
+  }
+
   if (hole_mode() == ALLOW_RETURN_HOLE) return true;

   if (IsFastDoubleElementsKind(elements_kind())) {
@@ -2611,6 +2611,10 @@ bool HLoadKeyed::RequiresHoleCheck() const {
     return false;
   }

+  if (IsExternalArrayElementsKind(elements_kind())) {
+    return false;
+  }
+
   return !UsesMustHandleHole();
 }
@@ -3036,10 +3040,19 @@ bool HStoreKeyed::NeedsCanonicalization() {
   // If value is an integer or smi or comes from the result of a keyed load or
   // constant then it is either a non-hole value or in the case of a constant
   // the hole is only being stored explicitly: no need for canonicalization.
-  if (value()->IsLoadKeyed() || value()->IsConstant()) {
+  //
+  // The exception to that is keyed loads from external float or double arrays:
+  // these can load arbitrary representation of NaN.
+
+  if (value()->IsConstant()) {
     return false;
   }
+
+  if (value()->IsLoadKeyed()) {
+    return IsExternalFloatOrDoubleElementsKind(
+        HLoadKeyed::cast(value())->elements_kind());
+  }
+
   if (value()->IsChange()) {
     if (HChange::cast(value())->from().IsInteger32()) {
       return false;
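
The canonicalization change hinges on fast double arrays encoding the hole as one specific NaN bit pattern: values loaded from external float or double arrays can carry arbitrary NaN payloads, so only those loads still force canonicalization on store. A standalone illustration (the hole constant below is made up, not V8's `kHoleNanInt64`):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

static const uint64_t kHoleBits = 0xFFF7FFFFFFFFFFFFull;  // illustrative only

uint64_t Bits(double d) {
  uint64_t b;
  std::memcpy(&b, &d, sizeof b);
  return b;
}

// Canonicalize: collapse every NaN payload onto the standard quiet NaN,
// so no stored value can collide with the hole pattern.
double Canonicalize(double d) {
  return std::isnan(d) ? std::numeric_limits<double>::quiet_NaN() : d;
}

int main() {
  // Fabricate a NaN with an unusual payload, as an external typed array can.
  uint64_t weird = 0x7FF0000000000001ull;
  double d;
  std::memcpy(&d, &weird, sizeof d);
  std::printf("before: %016llx\n", (unsigned long long)Bits(d));
  std::printf("after:  %016llx\n", (unsigned long long)Bits(Canonicalize(d)));
  // After canonicalization the stored bits can never equal kHoleBits.
  return Bits(Canonicalize(d)) == kHoleBits ? 1 : 0;
}
```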

77 deps/v8/src/hydrogen-instructions.h

@@ -134,7 +134,6 @@ class LChunkBuilder;
   V(IsStringAndBranch)                         \
   V(IsSmiAndBranch)                            \
   V(IsUndetectableAndBranch)                   \
-  V(JSArrayLength)                             \
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
   V(LoadElements)                              \
@@ -2392,45 +2391,6 @@ class HCallRuntime: public HCall<1> {
 };

-class HJSArrayLength: public HTemplateInstruction<2> {
- public:
-  HJSArrayLength(HValue* value, HValue* typecheck,
-                 HType type = HType::Tagged()) {
-    set_type(type);
-    // The length of an array is stored as a tagged value in the array
-    // object. It is guaranteed to be 32 bit integer, but it can be
-    // represented as either a smi or heap number.
-    SetOperandAt(0, value);
-    SetOperandAt(1, typecheck != NULL ? typecheck : value);
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnArrayLengths);
-    SetGVNFlag(kDependsOnMaps);
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  HValue* value() { return OperandAt(0); }
-  HValue* typecheck() {
-    ASSERT(HasTypeCheck());
-    return OperandAt(1);
-  }
-  bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
-
-  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
-
- protected:
-  virtual bool DataEquals(HValue* other_raw) { return true; }
-
- private:
-  virtual bool IsDeletable() const { return true; }
-};
-
 class HFixedArrayBaseLength: public HUnaryOperation {
  public:
   explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
@@ -4693,6 +4653,14 @@ class HParameter: public HTemplateInstruction<0> {
     set_representation(Representation::Tagged());
   }

+  explicit HParameter(unsigned index,
+                      ParameterKind kind,
+                      Representation r)
+      : index_(index),
+        kind_(kind) {
+    set_representation(r);
+  }
+
   unsigned index() const { return index_; }
   ParameterKind kind() const { return kind_; }
@@ -5184,12 +5152,16 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
 };

-class HLoadNamedField: public HUnaryOperation {
+class HLoadNamedField: public HTemplateInstruction<2> {
  public:
-  HLoadNamedField(HValue* object, bool is_in_object, int offset)
-      : HUnaryOperation(object),
-        is_in_object_(is_in_object),
+  HLoadNamedField(HValue* object, bool is_in_object, int offset,
+                  HValue* typecheck = NULL)
+      : is_in_object_(is_in_object),
         offset_(offset) {
+    ASSERT(object != NULL);
+    SetOperandAt(0, object);
+    SetOperandAt(1, typecheck != NULL ? typecheck : object);
+
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnMaps);
@@ -5200,7 +5172,24 @@ class HLoadNamedField: public HUnaryOperation {
     }
   }

+  static HLoadNamedField* NewArrayLength(Zone* zone, HValue* object,
+                                         HValue* typecheck,
+                                         HType type = HType::Tagged()) {
+    HLoadNamedField* result = new(zone) HLoadNamedField(
+        object, true, JSArray::kLengthOffset, typecheck);
+    result->set_type(type);
+    result->SetGVNFlag(kDependsOnArrayLengths);
+    result->ClearGVNFlag(kDependsOnInobjectFields);
+    return result;
+  }
+
   HValue* object() { return OperandAt(0); }
+  HValue* typecheck() {
+    ASSERT(HasTypeCheck());
+    return OperandAt(1);
+  }
+  bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
+
   bool is_in_object() const { return is_in_object_; }
   int offset() const { return offset_; }
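
`HJSArrayLength` is gone: array length becomes an ordinary `HLoadNamedField` of `JSArray::kLengthOffset`, built through a static factory that layers on the extra GVN dependencies. A toy sketch of that "special node becomes configured general node" refactor (names here are illustrative, not Crankshaft's real API):

```cpp
#include <cstdio>
#include <memory>

// Instead of a special-purpose node type, a general field-load node gains a
// named factory that pre-configures it.
struct FieldLoad {
  int offset = 0;
  bool depends_on_array_lengths = false;

  // Stands in for HLoadNamedField::NewArrayLength: same node class, but
  // configured with the array-length offset and dependency.
  static std::unique_ptr<FieldLoad> NewArrayLength(int length_offset) {
    auto load = std::make_unique<FieldLoad>();
    load->offset = length_offset;
    load->depends_on_array_lengths = true;  // extra GVN-style dependency
    return load;
  }
};

int main() {
  auto len = FieldLoad::NewArrayLength(/*hypothetical kLengthOffset=*/12);
  std::printf("offset=%d array-length-dep=%d\n",
              len->offset, len->depends_on_array_lengths);
}
```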

601 deps/v8/src/hydrogen.cc

File diff suppressed because it is too large

80 deps/v8/src/hydrogen.h

@@ -108,7 +108,7 @@ class HBasicBlock: public ZoneObject {
   bool Dominates(HBasicBlock* other) const;
   int LoopNestingDepth() const;

-  void SetInitialEnvironment(HEnvironment* env);
+  void SetInitialEnvironment(HEnvironment* env, BailoutId previous_id);
   void ClearEnvironment() { last_environment_ = NULL; }
   bool HasEnvironment() const { return last_environment_ != NULL; }
   void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
@@ -483,6 +483,8 @@ class HEnvironment: public ZoneObject {
   BailoutId ast_id() const { return ast_id_; }
   void set_ast_id(BailoutId id) { ast_id_ = id; }
+  BailoutId previous_ast_id() const { return previous_ast_id_; }
+  void set_previous_ast_id(BailoutId id) { previous_ast_id_ = id; }

   HEnterInlined* entry() const { return entry_; }
   void set_entry(HEnterInlined* entry) { entry_ = entry; }
@@ -644,6 +646,7 @@ class HEnvironment: public ZoneObject {
   int pop_count_;
   int push_count_;
   BailoutId ast_id_;
+  BailoutId previous_ast_id_;
   Zone* zone_;
 };
@@ -891,8 +894,9 @@ class HGraphBuilder {
  protected:
   virtual bool BuildGraph() = 0;

-  HBasicBlock* CreateBasicBlock(HEnvironment* env);
-  HBasicBlock* CreateLoopHeaderBlock();
+  HBasicBlock* CreateBasicBlock(HEnvironment* envy,
+                                BailoutId previous_ast_id);
+  HBasicBlock* CreateLoopHeaderBlock(BailoutId previous_ast_id);

   // Building common constructs
   HInstruction* BuildExternalArrayElementAccess(
@@ -909,7 +913,20 @@ class HGraphBuilder {
       HValue* val,
       HValue* dependency,
       ElementsKind elements_kind,
-      bool is_store);
+      bool is_store,
+      KeyedAccessStoreMode store_mode);
+
+  HValue* BuildCheckForCapacityGrow(HValue* object,
+                                    HValue* elements,
+                                    ElementsKind kind,
+                                    HValue* length,
+                                    HValue* key,
+                                    bool is_js_array);
+
+  HValue* BuildCopyElementsOnWrite(HValue* object,
+                                   HValue* elements,
+                                   ElementsKind kind,
+                                   HValue* length);

   HInstruction* BuildUncheckedMonomorphicElementAccess(
       HValue* object,
@@ -919,6 +936,7 @@ class HGraphBuilder {
       bool is_js_array,
       ElementsKind elements_kind,
       bool is_store,
+      KeyedAccessStoreMode store_mode,
       Representation checked_index_representation = Representation::None());

   HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
@@ -926,13 +944,14 @@ class HGraphBuilder {
   class CheckBuilder {
    public:
-    CheckBuilder(HGraphBuilder* builder, BailoutId id);
+    explicit CheckBuilder(HGraphBuilder* builder);
     ~CheckBuilder() {
       if (!finished_) End();
     }

-    void CheckNotUndefined(HValue* value);
-    void CheckIntegerEq(HValue* left, HValue* right);
+    HValue* CheckNotUndefined(HValue* value);
+    HValue* CheckIntegerCompare(HValue* left, HValue* right, Token::Value op);
+    HValue* CheckIntegerEq(HValue* left, HValue* right);
     void End();

    private:
@@ -947,17 +966,19 @@ class HGraphBuilder {
   class IfBuilder {
    public:
-    IfBuilder(HGraphBuilder* builder, BailoutId id);
+    explicit IfBuilder(HGraphBuilder* builder);
     ~IfBuilder() {
       if (!finished_) End();
     }

-    HInstruction* BeginTrue(
+    HInstruction* BeginIf(
        HValue* left,
        HValue* right,
        Token::Value token,
        Representation input_representation = Representation::Integer32());
-    void BeginFalse();
+    HInstruction* BeginIfObjectsEqual(HValue* left, HValue* right);
+    HInstruction* BeginIfMapEquals(HValue* value, Handle<Map> map);
+    void BeginElse();
     void End();

    private:
@@ -965,6 +986,7 @@ class HGraphBuilder {
     HGraphBuilder* builder_;
     bool finished_;
+    bool did_else_;
     HBasicBlock* first_true_block_;
     HBasicBlock* last_true_block_;
     HBasicBlock* first_false_block_;
@@ -983,8 +1005,7 @@ class HGraphBuilder {
     LoopBuilder(HGraphBuilder* builder,
                 HValue* context,
-                Direction direction,
-                BailoutId id);
+                Direction direction);
     ~LoopBuilder() {
       ASSERT(finished_);
     }
@@ -1011,16 +1032,35 @@ class HGraphBuilder {
     bool finished_;
   };

-  HValue* BuildAllocateElements(HContext* context,
+  HValue* BuildNewElementsCapacity(HValue* context,
+                                   HValue* old_capacity);
+
+  void BuildNewSpaceArrayCheck(HValue* length,
+                               ElementsKind kind);
+
+  HValue* BuildAllocateElements(HValue* context,
                                 ElementsKind kind,
                                 HValue* capacity);

-  void BuildCopyElements(HContext* context,
+  HValue* BuildGrowElementsCapacity(HValue* object,
+                                    HValue* elements,
+                                    ElementsKind kind,
+                                    HValue* length,
+                                    HValue* new_capacity);
+
+  void BuildFillElementsWithHole(HValue* context,
+                                 HValue* elements,
+                                 ElementsKind elements_kind,
+                                 HValue* from,
+                                 HValue* to);
+
+  void BuildCopyElements(HValue* context,
                          HValue* from_elements,
                          ElementsKind from_elements_kind,
                          HValue* to_elements,
                          ElementsKind to_elements_kind,
-                         HValue* length);
+                         HValue* length,
+                         HValue* capacity);

  private:
   HGraphBuilder();
@@ -1311,6 +1351,10 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
                                HValue* object,
                                SmallMapList* types,
                                Handle<String> name);
+  bool HandlePolymorphicArrayLengthLoad(Property* expr,
+                                        HValue* object,
+                                        SmallMapList* types,
+                                        Handle<String> name);
   void HandlePolymorphicStoreNamedField(Assignment* expr,
                                         HValue* object,
                                         HValue* value,
@@ -1348,7 +1392,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
       HValue* val,
       HValue* dependency,
      Handle<Map> map,
-      bool is_store);
+      bool is_store,
+      KeyedAccessStoreMode store_mode);

   HValue* HandlePolymorphicElementAccess(HValue* object,
                                          HValue* key,
@@ -1357,6 +1402,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
                                          BailoutId ast_id,
                                          int position,
                                          bool is_store,
+                                         KeyedAccessStoreMode store_mode,
                                          bool* has_side_effects);

   HValue* HandleKeyedElementAccess(HValue* obj,
@@ -1383,6 +1429,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
                                    Property* expr,
                                    Handle<Map> map);

+  void AddCheckMap(HValue* object, Handle<Map> map);
+
   void AddCheckMapsWithTransitions(HValue* object,
                                    Handle<Map> map);
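
`IfBuilder` gains an explicit `BeginElse()` and a `did_else_` flag. Builders of this shape let straight-line C++ emit diamond control flow, with the destructor closing the region if the caller forgets. A toy RAII sketch, not Crankshaft's real emission:

```cpp
#include <cstdio>
#include <string>

class IfBuilder {
 public:
  explicit IfBuilder(std::string* out) : out_(out) {}
  ~IfBuilder() { if (!finished_) End(); }  // auto-close, like the real one

  void BeginIf(const char* cond) { *out_ += "if(" + std::string(cond) + "){"; }
  void BeginElse() { did_else_ = true; *out_ += "}else{"; }
  void End() { finished_ = true; *out_ += "}"; }

 private:
  std::string* out_;
  bool finished_ = false;
  bool did_else_ = false;  // remembers whether an else-arm was opened
};

int main() {
  std::string code;
  {
    IfBuilder builder(&code);
    builder.BeginIf("x<n");
    code += "grow();";
    builder.BeginElse();
    code += "store();";
  }  // destructor emits the closing brace
  std::printf("%s\n", code.c_str());  // if(x<n){grow();}else{store();}
}
```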

43 deps/v8/src/ia32/builtins-ia32.cc

@@ -216,8 +216,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       // eax: initial map
       __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
       __ shl(edi, kPointerSizeLog2);
-      __ AllocateInNewSpace(
-          edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+      __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
       // Allocated the JSObject, now initialize the fields.
       // eax: initial map
       // ebx: JSObject
@@ -280,15 +279,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       // ebx: JSObject
       // edi: start of next object (will be start of FixedArray)
       // edx: number of elements in properties array
-      __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                            times_pointer_size,
-                            edx,
-                            REGISTER_VALUE_IS_INT32,
-                            edi,
-                            ecx,
-                            no_reg,
-                            &undo_allocation,
-                            RESULT_CONTAINS_TOP);
+      __ Allocate(FixedArray::kHeaderSize,
+                  times_pointer_size,
+                  edx,
+                  REGISTER_VALUE_IS_INT32,
+                  edi,
+                  ecx,
+                  no_reg,
+                  &undo_allocation,
+                  RESULT_CONTAINS_TOP);

       // Initialize the FixedArray.
       // ebx: JSObject
@@ -409,10 +408,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
     __ j(above_equal, &exit);

-    // Symbols are "objects".
-    __ CmpInstanceType(ecx, SYMBOL_TYPE);
-    __ j(equal, &exit);
-
     // Throw away the result of the constructor invocation and use the
     // on-stack receiver as the result.
     __ bind(&use_receiver);
@@ -1129,15 +1124,15 @@ static void AllocateJSArray(MacroAssembler* masm,
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
-                        times_pointer_size,
-                        array_size,
-                        REGISTER_VALUE_IS_SMI,
-                        result,
-                        elements_array_end,
-                        scratch,
-                        gc_required,
-                        TAG_OBJECT);
+  __ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
+              times_pointer_size,
+              array_size,
+              REGISTER_VALUE_IS_SMI,
+              result,
+              elements_array_end,
+              scratch,
+              gc_required,
+              TAG_OBJECT);

   // Allocated the JSArray. Now initialize the fields except for the elements
   // array.

108 deps/v8/src/ia32/code-stubs-ia32.cc

@@ -67,6 +67,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
 }

+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { edx, ecx, eax };
+  descriptor->register_param_count_ = 3;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -89,7 +100,7 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate,
   // stack param count needs (constructor pointer, and single argument)
   descriptor->stack_parameter_count_ = &eax;
   descriptor->register_params_ = registers;
-  descriptor->extra_expression_stack_count_ = 1;
+  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
   descriptor->deoptimization_handler_ =
       FUNCTION_ADDR(ArrayConstructor_StubFailure);
 }
@@ -621,6 +632,14 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
     __ bind(&not_string);
   }

+  if (types_.Contains(SYMBOL)) {
+    // Symbol value -> true.
+    Label not_symbol;
+    __ CmpInstanceType(map, SYMBOL_TYPE);
+    __ j(not_equal, &not_symbol, Label::kNear);
+    __ bind(&not_symbol);
+  }
+
   if (types_.Contains(HEAP_NUMBER)) {
     // heap number -> false iff +0, -0, or NaN.
     Label not_heap_number, false_result;
@@ -3285,25 +3304,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
 }

-void ArrayLengthStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss;
-
-  if (kind() == Code::KEYED_LOAD_IC) {
-    __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
-    __ j(not_equal, &miss);
-  }
-
-  StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
-  __ bind(&miss);
-  StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
-}
-
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- ecx    : name
@@ -3558,7 +3558,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   __ add(ebx, Immediate(Heap::kArgumentsObjectSize));

   // Do the allocation of all three objects in one go.
-  __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+  __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);

   // eax = address of new object(s) (tagged)
   // ecx = argument count (tagged)
@@ -3756,7 +3756,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));

   // Do the allocation of both objects in one go.
-  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+  __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);

   // Get the arguments boilerplate from the current native context.
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -4280,15 +4280,15 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   // Allocate RegExpResult followed by FixedArray with size in ebx.
   // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
   // Elements:  [Map][Length][..elements..]
-  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                        times_pointer_size,
-                        ebx,  // In: Number of elements as a smi
-                        REGISTER_VALUE_IS_SMI,
-                        eax,  // Out: Start of allocation (tagged).
-                        ecx,  // Out: End of allocation.
-                        edx,  // Scratch register
-                        &slowcase,
-                        TAG_OBJECT);
+  __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+              times_pointer_size,
+              ebx,  // In: Number of elements as a smi
+              REGISTER_VALUE_IS_SMI,
+              eax,  // Out: Start of allocation (tagged).
+              ecx,  // Out: End of allocation.
+              edx,  // Scratch register
+              &slowcase,
+              TAG_OBJECT);

   // eax: Start of allocated area, object-tagged.
   // Set JSArray map to global.regexp_result_map().
@@ -4525,6 +4525,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   // Identical objects can be compared fast, but there are some tricky cases
   // for NaN and undefined.
+  Label generic_heap_number_comparison;
   {
     Label not_identical;
     __ cmp(eax, edx);
@@ -4541,12 +4542,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
       __ bind(&check_for_nan);
     }

-    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
-    // so we do the second best thing - test it ourselves.
-    Label heap_number;
+    // Test for NaN. Compare heap numbers in a general way,
+    // to handle NaNs correctly.
     __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
            Immediate(masm->isolate()->factory()->heap_number_map()));
-    __ j(equal, &heap_number, Label::kNear);
+    __ j(equal, &generic_heap_number_comparison, Label::kNear);
     if (cc != equal) {
       // Call runtime on identical JSObjects.  Otherwise return equal.
       __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
@@ -4555,37 +4555,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
     __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
     __ ret(0);

-    __ bind(&heap_number);
-    // It is a heap number, so return non-equal if it's NaN and equal if
-    // it's not NaN.
-    // The representation of NaN values has all exponent bits (52..62) set,
-    // and not all mantissa bits (0..51) clear.
-    // We only accept QNaNs, which have bit 51 set.
-    // Read top bits of double representation (second word of value).
-    // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
-    // all bits in the mask are set. We only need to check the word
-    // that contains the exponent and high bit of the mantissa.
-    STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
-    __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
-    __ Set(eax, Immediate(0));
-    // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
-    // bits.
-    __ add(edx, edx);
-    __ cmp(edx, kQuietNaNHighBitsMask << 1);
-    if (cc == equal) {
-      STATIC_ASSERT(EQUAL != 1);
-      __ setcc(above_equal, eax);
-      __ ret(0);
-    } else {
-      Label nan;
-      __ j(above_equal, &nan, Label::kNear);
-      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
-      __ ret(0);
-      __ bind(&nan);
-      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
-      __ ret(0);
-    }
-
     __ bind(&not_identical);
   }
@@ -4665,6 +4634,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   // Generate the number comparison code.
   Label non_number_comparison;
   Label unordered;
+  __ bind(&generic_heap_number_comparison);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope use_sse2(masm, SSE2);
     CpuFeatureScope use_cmov(masm, CMOV);
@@ -7825,8 +7795,10 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   __ mov(ebx, MemOperand(ebp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ pop(ecx);
-  __ lea(esp, MemOperand(esp, ebx, times_pointer_size,
-                         extra_expression_stack_count_ * kPointerSize));
+  int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
+      ? kPointerSize
+      : 0;
+  __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
   __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
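
The deleted hand-rolled NaN test relied on IEEE-754 layout: a double is NaN when all exponent bits (52..62) are set and the mantissa is non-zero, and a quiet NaN additionally has mantissa bit 51 set. This standalone check confirms those bit positions:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  std::memcpy(&bits, &qnan, sizeof bits);

  uint64_t exponent = (bits >> 52) & 0x7FF;  // bits 52..62
  uint64_t quiet_bit = (bits >> 51) & 1;     // top mantissa bit

  std::printf("exponent=%#llx quiet=%llu\n",
              (unsigned long long)exponent, (unsigned long long)quiet_bit);
  // Expect exponent == 0x7ff and quiet == 1 on IEEE-754 hardware.
  return (exponent == 0x7FF && quiet_bit == 1) ? 0 : 1;
}
```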

9 deps/v8/src/ia32/codegen-ia32.cc

@@ -450,9 +450,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // edi: length of source FixedArray (smi-tagged)
   AllocationFlags flags =
       static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
-  __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
-                        edi, REGISTER_VALUE_IS_SMI,
-                        eax, ebx, no_reg, &gc_required, flags);
+  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
+              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

   // eax: destination FixedDoubleArray
   // edi: number of elements
@@ -589,7 +588,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   // Allocate new FixedArray.
   // ebx: length of source FixedDoubleArray (smi-tagged)
   __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
-  __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

   // eax: destination FixedArray
   // ebx: number of elements
@@ -952,7 +951,7 @@ void Code::PatchPlatformCodeAge(byte* sequence,
   uint32_t young_length;
   byte* young_sequence = GetNoCodeAgeSequence(&young_length);
   if (age == kNoAge) {
-    memcpy(sequence, young_sequence, young_length);
+    CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(age, parity);

31
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -2567,7 +2567,6 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
 
 void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
-  // TODO(rossberg): incorporate symbols.
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
@@ -2703,28 +2702,6 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
 }
 
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, SYMBOL_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
@@ -4275,6 +4252,10 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
     Split(zero, if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->symbol_string())) {
+    __ JumpIfSmi(eax, if_false);
+    __ CmpObjectType(eax, SYMBOL_TYPE, edx);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->boolean_string())) {
     __ cmp(eax, isolate()->factory()->true_value());
     __ j(equal, if_true);
@@ -4306,10 +4287,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
       __ cmp(eax, isolate()->factory()->null_value());
       __ j(equal, if_true);
     }
-    if (FLAG_harmony_symbols) {
-      __ CmpObjectType(eax, SYMBOL_TYPE, edx);
-      __ j(equal, if_true);
-    }
     __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
     __ j(below, if_false);
     __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
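
The two hunks above are two halves of one change: typeof now answers "symbol" through a dedicated branch instead of the FLAG_harmony_symbols special case buried in the object branch. The emitted checks, restated as portable C++ (a sketch, not V8 code):

// --- illustrative sketch, not part of the diff ---
enum InstanceType { HEAP_NUMBER_TYPE, SYMBOL_TYPE, JS_OBJECT_TYPE };

struct Value {
  bool is_smi;                 // what JumpIfSmi(eax, if_false) tests
  InstanceType instance_type;  // what CmpObjectType(eax, SYMBOL_TYPE, edx) tests
};

bool TypeofIsSymbol(const Value& v) {
  if (v.is_smi) return false;  // small integers are never symbols
  return v.instance_type == SYMBOL_TYPE;
}
// --- end sketch ---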

28
deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -1740,13 +1740,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
 }
 
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->value());
-  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
-}
-
 void LCodeGen::DoFixedArrayBaseLength(
     LFixedArrayBaseLength* instr) {
   Register result = ToRegister(instr->result());
@@ -2116,6 +2109,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
         __ bind(&not_string);
       }
 
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CmpInstanceType(map, SYMBOL_TYPE);
+        __ j(equal, true_label);
+      }
+
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
@@ -5566,7 +5565,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
-    __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
+    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   }
 
   __ bind(deferred->exit());
@@ -5979,6 +5978,11 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
               1 << Map::kIsUndetectable);
     final_branch_condition = zero;
 
+  } else if (type_name->Equals(heap()->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CmpObjectType(input, SYMBOL_TYPE, input);
+    final_branch_condition = equal;
+
   } else if (type_name->Equals(heap()->boolean_string())) {
     __ cmp(input, factory()->true_value());
     __ j(equal, true_label);
@@ -6013,13 +6017,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
       __ cmp(input, factory()->null_value());
       __ j(equal, true_label);
     }
-    if (FLAG_harmony_symbols) {
-      __ CmpObjectType(input, SYMBOL_TYPE, input);
-      __ j(equal, true_label);
-      __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
-    } else {
-      __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
-    }
+    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
     __ j(below, false_label);
     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
     __ j(above, false_label);

41
deps/v8/src/ia32/lithium-ia32.cc

@@ -923,6 +923,35 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
     LInstruction* instr = current->CompileToLithium(this);
 
     if (instr != NULL) {
+#if DEBUG
+      // Make sure that the lithium instruction has either no fixed register
+      // constraints in temps or the result OR no uses that are only used at
+      // start. If this invariant doesn't hold, the register allocator can
+      // decide to insert a split of a range immediately before the
+      // instruction due to an already allocated register needing to be used
+      // for the instruction's fixed register constraint. In this case, the
+      // register allocator won't see an interference between the split child
+      // and the use-at-start (it would if it was just a plain use), so it is
+      // free to move the split child into the same register that is used for
+      // the use-at-start.
+      // See https://code.google.com/p/chromium/issues/detail?id=201590
+      if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+        int fixed = 0;
+        int used_at_start = 0;
+        for (UseIterator it(instr); !it.Done(); it.Advance()) {
+          LUnallocated* operand = LUnallocated::cast(it.Current());
+          if (operand->IsUsedAtStart()) ++used_at_start;
+        }
+        if (instr->Output() != NULL) {
+          if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+        }
+        for (TempIterator it(instr); !it.Done(); it.Advance()) {
+          LUnallocated* operand = LUnallocated::cast(it.Current());
+          if (operand->HasFixedPolicy()) ++fixed;
+        }
+        ASSERT(fixed == 0 || used_at_start == 0);
+      }
+#endif
+
       if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
         instr = AssignPointerMap(instr);
       }
@@ -1182,16 +1211,20 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
                                                              input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
-    LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* context = UseAny(instr->context());  // Deferred use by MathAbs.
+    LOperand* input = NULL;
     if (op == kMathPowHalf) {
+      input = UseRegisterAtStart(instr->value());
       LOperand* temp = TempRegister();
       LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
       return DefineSameAsFirst(result);
     } else if (op == kMathRound) {
+      input = UseRegister(instr->value());
      LOperand* temp = FixedTemp(xmm4);
       LMathRound* result = new(zone()) LMathRound(context, input, temp);
       return AssignEnvironment(DefineAsRegister(result));
+    } else {
+      input = UseRegisterAtStart(instr->value());
     }
     LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
                                                                   input);
@@ -1716,12 +1749,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
 }
 
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
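
The #if DEBUG block is the heart of this file's change, and its counting is easy to misread in diff form. Here is the same invariant over a toy operand list (a sketch; ToyOperand stands in for LUnallocated and the iterators):

// --- illustrative sketch, not part of the diff ---
#include <cassert>
#include <vector>

struct ToyOperand {
  bool has_fixed_policy;  // stands in for LUnallocated::HasFixedPolicy()
  bool used_at_start;     // stands in for LUnallocated::IsUsedAtStart()
};

// An instruction may have fixed-register temps/outputs or uses-at-start,
// but never both at once, or the allocator may alias a split child with a
// use-at-start register (crbug.com/201590).
void CheckFixedVersusUseAtStart(const std::vector<ToyOperand>& operands) {
  int fixed = 0;
  int used_at_start = 0;
  for (const ToyOperand& op : operands) {
    if (op.has_fixed_policy) ++fixed;
    if (op.used_at_start) ++used_at_start;
  }
  assert(fixed == 0 || used_at_start == 0);
}
// --- end sketch ---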

14
deps/v8/src/ia32/lithium-ia32.h

@@ -114,7 +114,6 @@ class LCodeGen;
   V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
-  V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
   V(LoadContextSlot)                            \
@@ -1147,19 +1146,6 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
 };
 
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LJSArrayLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
 class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFixedArrayBaseLength(LOperand* value) {

88
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -1332,18 +1332,16 @@ void MacroAssembler::Allocate(int object_size,
 }
 
-void MacroAssembler::AllocateInNewSpace(
-    int header_size,
-    ScaleFactor element_size,
-    Register element_count,
-    RegisterValueType element_count_type,
-    Register result,
-    Register result_end,
-    Register scratch,
-    Label* gc_required,
-    AllocationFlags flags) {
+void MacroAssembler::Allocate(int header_size,
+                              ScaleFactor element_size,
+                              Register element_count,
+                              RegisterValueType element_count_type,
+                              Register result,
+                              Register result_end,
+                              Register scratch,
+                              Label* gc_required,
+                              AllocationFlags flags) {
   ASSERT((flags & SIZE_IN_WORDS) == 0);
-  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1365,6 +1363,7 @@ void MacroAssembler::AllocateInNewSpace(
   // Align the next allocation. Storing the filler map without checking top is
   // always safe because the limit of the heap is always aligned.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
@@ -1375,9 +1374,9 @@ void MacroAssembler::AllocateInNewSpace(
     bind(&aligned);
   }
 
-  // Calculate new top and bail out if new space is exhausted.
-  ExternalReference new_space_allocation_limit =
-      ExternalReference::new_space_allocation_limit_address(isolate());
+  // Calculate new top and bail out if space is exhausted.
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
 
   // We assume that element_count*element_size + header_size does not
   // overflow.
@@ -1394,7 +1393,7 @@ void MacroAssembler::AllocateInNewSpace(
   lea(result_end, Operand(element_count, element_size, header_size));
   add(result_end, result);
   j(carry, gc_required);
-  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  cmp(result_end, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
   if ((flags & TAG_OBJECT) != 0) {
@@ -1407,14 +1406,13 @@ void MacroAssembler::AllocateInNewSpace(
 }
 
-void MacroAssembler::AllocateInNewSpace(Register object_size,
-                                        Register result,
-                                        Register result_end,
-                                        Register scratch,
-                                        Label* gc_required,
-                                        AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+                              Register result,
+                              Register result_end,
+                              Register scratch,
+                              Label* gc_required,
+                              AllocationFlags flags) {
   ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
-  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1436,6 +1434,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
   // Align the next allocation. Storing the filler map without checking top is
   // always safe because the limit of the heap is always aligned.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
@@ -1446,15 +1445,16 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
     bind(&aligned);
   }
 
-  // Calculate new top and bail out if new space is exhausted.
-  ExternalReference new_space_allocation_limit =
-      ExternalReference::new_space_allocation_limit_address(isolate());
+  // Calculate new top and bail out if space is exhausted.
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
 
   if (!object_size.is(result_end)) {
     mov(result_end, object_size);
   }
   add(result_end, result);
   j(carry, gc_required);
-  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  cmp(result_end, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
   // Tag result if requested.
@@ -1511,15 +1511,15 @@ void MacroAssembler::AllocateTwoByteString(Register result,
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
-  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
-                     times_1,
-                     scratch1,
-                     REGISTER_VALUE_IS_INT32,
-                     result,
-                     scratch2,
-                     scratch3,
-                     gc_required,
-                     TAG_OBJECT);
+  Allocate(SeqTwoByteString::kHeaderSize,
+           times_1,
+           scratch1,
+           REGISTER_VALUE_IS_INT32,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1547,15 +1547,15 @@ void MacroAssembler::AllocateAsciiString(Register result,
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate ASCII string in new space.
-  AllocateInNewSpace(SeqOneByteString::kHeaderSize,
-                     times_1,
-                     scratch1,
-                     REGISTER_VALUE_IS_INT32,
-                     result,
-                     scratch2,
-                     scratch3,
-                     gc_required,
-                     TAG_OBJECT);
+  Allocate(SeqOneByteString::kHeaderSize,
+           times_1,
+           scratch1,
+           REGISTER_VALUE_IS_INT32,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
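
Both AllocateInNewSpace overloads become Allocate, and the only semantic change is where the limit comes from: AllocationUtils::GetAllocationLimitReference picks the space based on the flags. A bump-allocator model of what the emitted code checks (behavior inferred from the diff; the flag's bit value here is illustrative):

// --- illustrative sketch, not part of the diff ---
#include <cstdint>

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  PRETENURE_OLD_POINTER_SPACE = 1 << 0  // bit value is illustrative
};

struct Space { uintptr_t top; uintptr_t limit; };

Space new_space = {0x1000, 0x2000};
Space old_pointer_space = {0x8000, 0x9000};

// Stand-in for AllocationUtils::GetAllocationLimitReference().
Space& SpaceFor(int flags) {
  return (flags & PRETENURE_OLD_POINTER_SPACE) ? old_pointer_space : new_space;
}

// Mirrors the emitted sequence: compute result_end = top + size, then
// j(carry, gc_required) / j(above, gc_required) against the limit.
bool TryAllocate(int flags, uintptr_t size, uintptr_t* result) {
  Space& space = SpaceFor(flags);
  uintptr_t result_end = space.top + size;
  if (result_end < space.top || result_end > space.limit) return false;
  *result = space.top;
  space.top = result_end;
  return true;
}
// --- end sketch ---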

32
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -582,22 +582,22 @@ class MacroAssembler: public Assembler {
                 Label* gc_required,
                 AllocationFlags flags);
 
-  void AllocateInNewSpace(int header_size,
-                          ScaleFactor element_size,
-                          Register element_count,
-                          RegisterValueType element_count_type,
-                          Register result,
-                          Register result_end,
-                          Register scratch,
-                          Label* gc_required,
-                          AllocationFlags flags);
+  void Allocate(int header_size,
+                ScaleFactor element_size,
+                Register element_count,
+                RegisterValueType element_count_type,
+                Register result,
+                Register result_end,
+                Register scratch,
+                Label* gc_required,
+                AllocationFlags flags);
 
-  void AllocateInNewSpace(Register object_size,
-                          Register result,
-                          Register result_end,
-                          Register scratch,
-                          Label* gc_required,
-                          AllocationFlags flags);
+  void Allocate(Register object_size,
+                Register result,
+                Register result_end,
+                Register scratch,
+                Label* gc_required,
+                AllocationFlags flags);
 
   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. Make sure that no pointers are left to the

44
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -730,7 +730,7 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                       Handle<JSObject> object,
-                                      int index,
+                                      LookupResult* lookup,
                                       Handle<Map> transition,
                                       Handle<Name> name,
                                       Register receiver_reg,
@@ -740,16 +740,6 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                       Register scratch2,
                                       Label* miss_label,
                                       Label* miss_restore_name) {
-  LookupResult lookup(masm->isolate());
-  object->Lookup(*name, &lookup);
-  if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
-    // In sloppy mode, we could just return the value and be done. However, we
-    // might be in strict mode, where we have to throw. Since we cannot tell,
-    // go into slow case unconditionally.
-    __ jmp(miss_label);
-    return;
-  }
-
   // Check that the map of the object hasn't changed.
   CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                              : REQUIRE_EXACT_MAP;
@@ -764,8 +754,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   // Check that we are allowed to write this.
   if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
     JSObject* holder;
-    if (lookup.IsFound()) {
-      holder = lookup.holder();
+    // holder == object indicates that no property was found.
+    if (lookup->holder() != *object) {
+      holder = lookup->holder();
     } else {
       // Find the top object.
       holder = *object;
@@ -774,8 +765,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
       } while (holder->GetPrototype()->IsJSObject());
     }
     // We need an extra register, push
-    CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
-                    scratch1, scratch2, name, miss_restore_name);
+    Register holder_reg = CheckPrototypes(
+        object, receiver_reg, Handle<JSObject>(holder), name_reg,
+        scratch1, scratch2, name, miss_restore_name);
+    // If no property was found, and the holder (the last object in the
+    // prototype chain) is in slow mode, we need to do a negative lookup on the
+    // holder.
+    if (lookup->holder() == *object &&
+        !holder->HasFastProperties() &&
+        !holder->IsJSGlobalProxy() &&
+        !holder->IsJSGlobalObject()) {
+      GenerateDictionaryNegativeLookup(
+          masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+    }
   }
 
   // Stub never generated for non-global objects that require access
@@ -799,6 +801,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     return;
   }
 
+  int index;
   if (!transition.is_null()) {
     // Update the map of the object.
     __ mov(scratch1, Immediate(transition));
@@ -813,8 +816,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                         kDontSaveFPRegs,
                         OMIT_REMEMBERED_SET,
                         OMIT_SMI_CHECK);
+    index = transition->instance_descriptors()->GetFieldIndex(
+        transition->LastAdded());
+  } else {
+    index = lookup->GetFieldIndex().field_index();
   }
 
   // Adjust for the number of properties stored in the object. Even in the
   // face of a transition we can use the old map here because the size of the
   // object and the number of in-object properties is not going to change.
@@ -2350,6 +2358,12 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
       // Check that the object is a symbol.
       __ CmpObjectType(edx, SYMBOL_TYPE, eax);
       __ j(not_equal, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::SYMBOL_FUNCTION_INDEX, eax, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+          eax, holder, ebx, edx, edi, name, &miss);
       break;
 
     case NUMBER_CHECK: {

7
deps/v8/src/ic-inl.h

@@ -109,7 +109,7 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
   // If the object is a value, we use the prototype map for the cache.
   ASSERT(object->IsString() || object->IsSymbol() ||
          object->IsNumber() || object->IsBoolean());
-  return DELEGATE_MAP;
+  return PROTOTYPE_MAP;
 }
@@ -124,7 +124,7 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
       !object->HasFastProperties() &&
       !object->IsJSGlobalProxy() &&
       !object->IsJSGlobalObject()) {
-    return DELEGATE_MAP;
+    return PROTOTYPE_MAP;
   }
   return OWN_MAP;
 }
@@ -133,7 +133,8 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
 JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
                                  Object* object,
                                  InlineCacheHolderFlag holder) {
-  Object* map_owner = holder == OWN_MAP ? object : object->GetDelegate(isolate);
+  Object* map_owner =
+      holder == OWN_MAP ? object : object->GetPrototype(isolate);
   ASSERT(map_owner->IsJSObject());
   return JSObject::cast(map_owner);
 }
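
DELEGATE_MAP becomes PROTOTYPE_MAP and GetDelegate becomes GetPrototype; the selection logic itself does not change. Boiled down (a sketch, not the real types):

// --- illustrative sketch, not part of the diff ---
enum InlineCacheHolderFlag { OWN_MAP, PROTOTYPE_MAP };

struct ToyObject { ToyObject* prototype; };

// Value-like receivers (strings, symbols, numbers, booleans) and slow-mode
// objects cache their handlers on the prototype's map, not their own.
ToyObject* GetCodeCacheHolder(ToyObject* object, InlineCacheHolderFlag holder) {
  return holder == OWN_MAP ? object : object->prototype;
}
// --- end sketch ---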

271
deps/v8/src/ic.cc

@@ -160,7 +160,7 @@ Address IC::OriginalCodeAddress() const {
   // Find the function on the stack and both the active code for the
   // function and the original code.
   JSFunction* function = JSFunction::cast(frame->function());
-  Handle<SharedFunctionInfo> shared(function->shared());
+  Handle<SharedFunctionInfo> shared(function->shared(), isolate());
   Code* code = shared->code();
   ASSERT(Debug::HasDebugInfo(shared));
   Code* original_code = Debug::GetDebugInfo(shared)->original_code();
@@ -190,7 +190,7 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
     // The stub was generated for JSObject but called for non-JSObject.
     // IC::GetCodeCacheHolder is not applicable.
     return false;
-  } else if (cache_holder == DELEGATE_MAP &&
+  } else if (cache_holder == PROTOTYPE_MAP &&
              receiver->GetPrototype(isolate)->IsNull()) {
     // IC::GetCodeCacheHolder is not applicable.
     return false;
@@ -435,7 +435,7 @@ static void LookupForRead(Handle<Object> object,
       return;
     }
 
-    Handle<JSObject> holder(lookup->holder());
+    Handle<JSObject> holder(lookup->holder(), lookup->isolate());
     if (HasInterceptorGetter(*holder)) {
       return;
     }
@@ -446,7 +446,7 @@ static void LookupForRead(Handle<Object> object,
       return;
     }
 
-    Handle<Object> proto(holder->GetPrototype(), name->GetIsolate());
+    Handle<Object> proto(holder->GetPrototype(), lookup->isolate());
    if (proto->IsNull()) {
       ASSERT(!lookup->IsFound());
       return;
@@ -636,7 +636,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
                                                 Handle<Object> object,
                                                 Handle<String> name) {
   int argc = target()->arguments_count();
-  Handle<JSObject> holder(lookup->holder());
+  Handle<JSObject> holder(lookup->holder(), isolate());
   switch (lookup->type()) {
     case FIELD: {
       PropertyIndex index = lookup->GetFieldIndex();
@@ -647,7 +647,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
       // Get the constant function and compute the code stub for this
       // call; used for rewriting to monomorphic state and making sure
       // that the code stub is in the stub cache.
-      Handle<JSFunction> function(lookup->GetConstantFunction());
+      Handle<JSFunction> function(lookup->GetConstantFunction(), isolate());
       return isolate()->stub_cache()->ComputeCallConstant(
           argc, kind_, extra_state, name, object, holder, function);
     }
@@ -658,7 +658,8 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
       if (holder->IsGlobalObject()) {
         Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
-        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+        Handle<JSGlobalPropertyCell> cell(
+            global->GetPropertyCell(lookup), isolate());
         if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
         Handle<JSFunction> function(JSFunction::cast(cell->value()));
         return isolate()->stub_cache()->ComputeCallGlobal(
@@ -746,7 +747,8 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
       // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
       Handle<JSObject> cache_object = object->IsJSObject()
           ? Handle<JSObject>::cast(object)
-          : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())));
+          : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
+                             isolate());
       // Update the stub cache.
       UpdateMegamorphicCache(cache_object->map(), *name, *code);
       break;
@@ -855,28 +857,6 @@ MaybeObject* LoadIC::Load(State state,
       return Smi::FromInt(String::cast(*string)->length());
     }
 
-    // Use specialized code for getting the length of arrays.
-    if (object->IsJSArray() &&
-        name->Equals(isolate()->heap()->length_string())) {
-      Handle<Code> stub;
-      if (state == UNINITIALIZED) {
-        stub = pre_monomorphic_stub();
-      } else if (state == PREMONOMORPHIC) {
-        ArrayLengthStub array_length_stub(kind());
-        stub = array_length_stub.GetCode(isolate());
-      } else if (state != MEGAMORPHIC) {
-        ASSERT(state != GENERIC);
-        stub = megamorphic_stub();
-      }
-      if (!stub.is_null()) {
-        set_target(*stub);
-#ifdef DEBUG
-        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
-#endif
-      }
-      return JSArray::cast(*object)->length();
-    }
-
     // Use specialized code for getting prototype of functions.
     if (object->IsJSFunction() &&
         name->Equals(isolate()->heap()->prototype_string()) &&
@@ -1035,6 +1015,22 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) {
 }
 
+bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
+  AssertNoAllocation no_allocation;
+
+  Map* current_map = target()->FindFirstMap();
+  ElementsKind receiver_elements_kind = receiver_map->elements_kind();
+  bool more_general_transition =
+      IsMoreGeneralElementsKindTransition(
+          current_map->elements_kind(), receiver_elements_kind);
+  Map* transitioned_map = more_general_transition
+      ? current_map->LookupElementsTransitionMap(receiver_elements_kind)
+      : NULL;
+
+  return transitioned_map == receiver_map;
+}
+
 // Since GC may have been invoked, by the time PatchCache is called, |state| is
 // not necessarily equal to target()->state().
 void IC::PatchCache(State state,
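
The helper added above lets two call sites (PatchCache here, StoreElementStub further down) share one question: is the incoming receiver map just a more general elements-kind transition of the map the monomorphic IC already targets? A toy rendition (with a simplified ordering assumption; the real kinds form a partial lattice):

// --- illustrative sketch, not part of the diff ---
#include <map>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

struct ToyMap {
  ElementsKind kind;
  std::map<ElementsKind, const ToyMap*> transitions;
};

// Simplification: treat a larger enum value as "more general".
bool IsMoreGeneral(ElementsKind from, ElementsKind to) { return to > from; }

bool IsTransitionedMapOfTarget(const ToyMap* current, const ToyMap* receiver) {
  if (!IsMoreGeneral(current->kind, receiver->kind)) return false;
  auto it = current->transitions.find(receiver->kind);
  const ToyMap* transitioned =
      (it == current->transitions.end()) ? nullptr : it->second;
  return transitioned == receiver;  // same elements family, keep monomorphic
}
// --- end sketch ---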
@@ -1052,6 +1048,17 @@ void IC::PatchCache(State state,
       // Only move to megamorphic if the target changes.
       if (target() != *code) {
         if (target()->is_load_stub()) {
+          bool is_same_handler = false;
+          {
+            AssertNoAllocation no_allocation;
+            Code* old_handler = target()->FindFirstCode();
+            is_same_handler = old_handler == *code;
+          }
+          if (is_same_handler
+              && IsTransitionedMapOfMonomorphicTarget(receiver->map())) {
+            UpdateMonomorphicIC(receiver, code, name);
+            break;
+          }
           if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
             break;
           }
@@ -1196,7 +1203,8 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
     case NORMAL:
       if (holder->IsGlobalObject()) {
         Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
-        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+        Handle<JSGlobalPropertyCell> cell(
+            global->GetPropertyCell(lookup), isolate());
         return isolate()->stub_cache()->ComputeLoadGlobal(
             name, receiver, global, cell, lookup->IsDontDelete());
       }
@@ -1223,6 +1231,12 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
         if (!holder->HasFastProperties()) break;
         return isolate()->stub_cache()->ComputeLoadViaGetter(
             name, receiver, holder, Handle<JSFunction>::cast(getter));
+      } else if (receiver->IsJSArray() &&
+                 name->Equals(isolate()->heap()->length_string())) {
+        PropertyIndex lengthIndex =
+            PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
+        return isolate()->stub_cache()->ComputeLoadField(
+            name, receiver, holder, lengthIndex);
       }
       // TODO(dcarney): Handle correctly.
       if (callback->IsDeclaredAccessorInfo()) break;
@@ -1272,7 +1286,7 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
     return generic_stub();
   }
 
-  Handle<Map> receiver_map(receiver->map());
+  Handle<Map> receiver_map(receiver->map(), isolate());
   MapHandleList target_receiver_maps;
   if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
     // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
@@ -1283,7 +1297,8 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
   if (target() == *string_stub()) {
     target_receiver_maps.Add(isolate()->factory()->string_map());
   } else {
-    GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+    GetReceiverMapsForStub(Handle<Code>(target(), isolate()),
+                           &target_receiver_maps);
     if (target_receiver_maps.length() == 0) {
       return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
     }
@@ -1355,7 +1370,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
         stub = non_strict_arguments_stub();
       } else if (receiver->HasIndexedInterceptor()) {
         stub = indexed_interceptor_stub();
-      } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+      } else if (!key->ToSmi()->IsFailure() &&
+                 (target() != *non_strict_arguments_stub())) {
         stub = LoadElementStub(receiver);
       }
     }
@@ -1379,13 +1395,13 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
   if (!lookup->IsProperty()) return Handle<Code>::null();
 
   // Compute a monomorphic stub.
-  Handle<JSObject> holder(lookup->holder());
+  Handle<JSObject> holder(lookup->holder(), isolate());
   switch (lookup->type()) {
     case FIELD:
      return isolate()->stub_cache()->ComputeKeyedLoadField(
           name, receiver, holder, lookup->GetFieldIndex());
     case CONSTANT_FUNCTION: {
-      Handle<JSFunction> constant(lookup->GetConstantFunction());
+      Handle<JSFunction> constant(lookup->GetConstantFunction(), isolate());
       return isolate()->stub_cache()->ComputeKeyedLoadConstant(
           name, receiver, holder, constant);
     }
@@ -1413,41 +1429,42 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
 }
 
-static bool StoreICableLookup(LookupResult* lookup) {
-  // Bail out if we didn't find a result.
-  if (!lookup->IsFound()) return false;
-
-  // Bail out if inline caching is not allowed.
-  if (!lookup->IsCacheable()) return false;
-
-  // If the property is read-only, we leave the IC in its current state.
-  if (lookup->IsTransition()) {
-    return !lookup->GetTransitionDetails().IsReadOnly();
-  }
-  return !lookup->IsReadOnly();
-}
-
 static bool LookupForWrite(Handle<JSObject> receiver,
                            Handle<String> name,
                            LookupResult* lookup) {
-  receiver->LocalLookup(*name, lookup);
-  if (!lookup->IsFound()) {
-    receiver->map()->LookupTransition(*receiver, *name, lookup);
-  }
-  if (!StoreICableLookup(lookup)) {
-    // 2nd chance: There can be accessors somewhere in the prototype chain.
-    receiver->Lookup(*name, lookup);
-    return lookup->IsPropertyCallbacks() && StoreICableLookup(lookup);
-  }
-
-  if (lookup->IsInterceptor() &&
-      receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
-    receiver->LocalLookupRealNamedProperty(*name, lookup);
-    return StoreICableLookup(lookup);
-  }
-
-  return true;
+  Handle<JSObject> holder = receiver;
+  receiver->Lookup(*name, lookup);
+  if (lookup->IsFound()) {
+    if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+
+    if (lookup->holder() == *receiver) {
+      if (lookup->IsInterceptor() &&
+          receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
+        receiver->LocalLookupRealNamedProperty(*name, lookup);
+        return lookup->IsFound() &&
+            !lookup->IsReadOnly() &&
+            lookup->IsCacheable();
+      }
+      return true;
+    }
+
+    if (lookup->IsPropertyCallbacks()) return true;
+
+    // Currently normal holders in the prototype chain are not supported. They
+    // would require a runtime positive lookup and verification that the
+    // details have not changed.
+    if (lookup->IsInterceptor() || lookup->IsNormal()) return false;
+    holder = Handle<JSObject>(lookup->holder(), lookup->isolate());
+  }
+
+  // While normally LookupTransition gets passed the receiver, in this case we
+  // pass the holder of the property that we overwrite. This keeps the holder
+  // in the LookupResult intact so we can later use it to generate a prototype
+  // chain check. This avoids a double lookup, but requires us to pass in the
+  // receiver when trying to fetch extra information from the transition.
+  receiver->map()->LookupTransition(*holder, *name, lookup);
+  return lookup->IsTransition() &&
+      !lookup->GetTransitionDetails(receiver->map()).IsReadOnly();
 }
 
@@ -1547,7 +1564,6 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
                            Handle<String> name,
                            Handle<Object> value) {
   ASSERT(!receiver->IsJSGlobalProxy());
-  ASSERT(StoreICableLookup(lookup));
   ASSERT(lookup->IsFound());
 
   // These are not cacheable, so we never see such LookupResults here.
@@ -1570,19 +1586,19 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
   switch (lookup->type()) {
     case FIELD:
       return isolate()->stub_cache()->ComputeStoreField(
-          name, receiver, lookup->GetFieldIndex().field_index(),
-          Handle<Map>::null(), strict_mode);
+          name, receiver, lookup, Handle<Map>::null(), strict_mode);
     case NORMAL:
       if (receiver->IsGlobalObject()) {
         // The stub generated for the global object picks the value directly
         // from the property cell. So the property must be directly on the
         // global object.
         Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
-        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+        Handle<JSGlobalPropertyCell> cell(
+            global->GetPropertyCell(lookup), isolate());
         return isolate()->stub_cache()->ComputeStoreGlobal(
             name, global, cell, strict_mode);
       }
-      if (!holder.is_identical_to(receiver)) break;
+      ASSERT(holder.is_identical_to(receiver));
       return isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
     case CALLBACKS: {
       Handle<Object> callback(lookup->GetCallbackObject(), isolate());
@@ -1595,8 +1611,8 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
         return isolate()->stub_cache()->ComputeStoreCallback(
             name, receiver, holder, info, strict_mode);
       } else if (callback->IsAccessorPair()) {
-        Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter(),
-                              isolate());
+        Handle<Object> setter(
+            Handle<AccessorPair>::cast(callback)->setter(), isolate());
         if (!setter->IsJSFunction()) break;
         if (holder->IsGlobalObject()) break;
         if (!holder->HasFastProperties()) break;
@@ -1617,7 +1633,10 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
     case CONSTANT_FUNCTION:
       break;
     case TRANSITION: {
-      Handle<Map> transition(lookup->GetTransitionTarget());
+      // Explicitly pass in the receiver map since LookupForWrite may have
+      // stored something else than the receiver in the holder.
+      Handle<Map> transition(
+          lookup->GetTransitionTarget(receiver->map()), isolate());
      int descriptor = transition->LastAdded();
 
       DescriptorArray* target_descriptors = transition->instance_descriptors();
@@ -1625,9 +1644,8 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
       if (details.type() != FIELD || details.attributes() != NONE) break;
 
-      int field_index = target_descriptors->GetFieldIndex(descriptor);
       return isolate()->stub_cache()->ComputeStoreField(
-          name, receiver, field_index, transition, strict_mode);
+          name, receiver, lookup, transition, strict_mode);
     }
     case NONEXISTENT:
     case HANDLER:
@@ -1649,7 +1667,8 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
     return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
   }
 
-  if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
+  if (!FLAG_compiled_keyed_stores &&
+      (store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
        store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS)) {
     // TODO(danno): We'll soon handle MONOMORPHIC ICs that also support
     // copying COW arrays and silently ignoring some OOB stores into external
@@ -1661,7 +1680,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
   }
 
   State ic_state = target()->ic_state();
-  Handle<Map> receiver_map(receiver->map());
+  Handle<Map> receiver_map(receiver->map(), isolate());
   if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
     // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
     // yet will do so and stay there.
@@ -1697,24 +1716,18 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
       transitioned_receiver_map =
           ComputeTransitionedMap(receiver, store_mode);
     }
-    ElementsKind transitioned_kind =
-        transitioned_receiver_map->elements_kind();
-    bool more_general_transition =
-        IsMoreGeneralElementsKindTransition(
-            previous_receiver_map->elements_kind(),
-            transitioned_kind);
-    Map* transitioned_previous_map = more_general_transition
-        ? previous_receiver_map->LookupElementsTransitionMap(transitioned_kind)
-        : NULL;
-    if (transitioned_previous_map == *transitioned_receiver_map) {
+    if (IsTransitionedMapOfMonomorphicTarget(*transitioned_receiver_map)) {
       // Element family is the same, use the "worst" case map.
       store_mode = GetNonTransitioningStoreMode(store_mode);
       return isolate()->stub_cache()->ComputeKeyedStoreElement(
          transitioned_receiver_map, strict_mode, store_mode);
     } else if (*previous_receiver_map == receiver->map()) {
-      if (IsGrowStoreMode(store_mode)) {
+      if (IsGrowStoreMode(store_mode) ||
+          store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+          store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
         // A "normal" IC that handles stores can switch to a version that can
-        // grow at the end of the array and still stay MONOMORPHIC.
+        // grow at the end of the array, handle OOB accesses or copy COW arrays
+        // and still stay MONOMORPHIC.
         return isolate()->stub_cache()->ComputeKeyedStoreElement(
             receiver_map, strict_mode, store_mode);
       }
@@ -1761,6 +1774,26 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
     }
   }
 
+  // If the store mode isn't the standard mode, make sure that all polymorphic
+  // receivers are either external arrays, or all "normal" arrays. Otherwise,
+  // use the generic stub.
+  if (store_mode != STANDARD_STORE) {
+    int external_arrays = 0;
+    for (int i = 0; i < target_receiver_maps.length(); ++i) {
+      if (target_receiver_maps[i]->has_external_array_elements()) {
+        external_arrays++;
+      }
+    }
+    if (external_arrays != 0 &&
+        external_arrays != target_receiver_maps.length()) {
+      TRACE_GENERIC_IC(isolate(), "KeyedIC",
+          "unsupported combination of external and normal arrays");
+      return strict_mode == kStrictMode
+          ? generic_stub_strict()
+          : generic_stub();
+    }
+  }
+
   return isolate()->stub_cache()->ComputeStoreElementPolymorphic(
       &target_receiver_maps, store_mode, strict_mode);
 }
@@ -1794,7 +1827,7 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
     case STORE_NO_TRANSITION_HANDLE_COW:
     case STANDARD_STORE:
     case STORE_AND_GROW_NO_TRANSITION:
-      return Handle<Map>(receiver->map());
+      return Handle<Map>(receiver->map(), isolate());
   }
   return Handle<Map>::null();
 }
@@ -1813,8 +1846,10 @@ bool IsOutOfBoundsAccess(Handle<JSObject> receiver,
 KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
                                                 Handle<Object> key,
                                                 Handle<Object> value) {
-  ASSERT(key->IsSmi());
-  int index = Smi::cast(*key)->value();
+  ASSERT(!key->ToSmi()->IsFailure());
+  Smi* smi_key = NULL;
+  key->ToSmi()->To(&smi_key);
+  int index = smi_key->value();
   bool oob_access = IsOutOfBoundsAccess(receiver, index);
   bool allow_growth = receiver->IsJSArray() && oob_access;
   if (allow_growth) {
@@ -1872,6 +1907,10 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
     if (!FLAG_trace_external_array_abuse &&
         receiver->map()->has_external_array_elements() && oob_access) {
       return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
+    }
+    Heap* heap = receiver->GetHeap();
+    if (receiver->elements()->map() == heap->fixed_cow_array_map()) {
+      return STORE_NO_TRANSITION_HANDLE_COW;
     } else {
      return STANDARD_STORE;
     }
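
GetStoreMode now asserts key->ToSmi() rather than key->IsSmi(), and learns to answer STORE_NO_TRANSITION_HANDLE_COW. The ToSmi relaxation means a heap number holding a small integer also qualifies as an element index; roughly (an illustrative stand-in, with the 31-bit ia32 smi range):

// --- illustrative sketch, not part of the diff ---
#include <cmath>

// Stand-in for key->ToSmi(): succeeds for any number whose value is an
// integer in smi range, not just values already tagged as smis.
bool ToSmi(double key, int* out) {
  if (std::floor(key) != key) return false;                     // not an integer
  if (key < -1073741824.0 || key > 1073741823.0) return false;  // 31-bit range
  *out = static_cast<int>(key);
  return true;
}
// --- end sketch ---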
@@ -1910,13 +1949,20 @@ MaybeObject* KeyedStoreIC::Store(State state,
   if (miss_mode != MISS_FORCE_GENERIC) {
     if (object->IsJSObject()) {
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      bool key_is_smi_like = key->IsSmi() ||
+          (FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
       if (receiver->elements()->map() ==
          isolate()->heap()->non_strict_arguments_elements_map()) {
         stub = non_strict_arguments_stub();
-      } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+      } else if (key_is_smi_like &&
+                 (target() != *non_strict_arguments_stub())) {
         KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
         stub = StoreElementStub(receiver, store_mode, strict_mode);
+      } else {
+        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "key not a number");
       }
+    } else {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "not an object");
     }
   } else {
     TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic");
@@ -1941,19 +1987,20 @@ Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
   switch (lookup->type()) {
     case FIELD:
       return isolate()->stub_cache()->ComputeKeyedStoreField(
-          name, receiver, lookup->GetFieldIndex().field_index(),
-          Handle<Map>::null(), strict_mode);
+          name, receiver, lookup, Handle<Map>::null(), strict_mode);
     case TRANSITION: {
-      Handle<Map> transition(lookup->GetTransitionTarget());
+      // Explicitly pass in the receiver map since LookupForWrite may have
+      // stored something else than the receiver in the holder.
+      Handle<Map> transition(
+          lookup->GetTransitionTarget(receiver->map()), isolate());
       int descriptor = transition->LastAdded();
 
       DescriptorArray* target_descriptors = transition->instance_descriptors();
       PropertyDetails details = target_descriptors->GetDetails(descriptor);
 
       if (details.type() == FIELD && details.attributes() == NONE) {
-        int field_index = target_descriptors->GetFieldIndex(descriptor);
         return isolate()->stub_cache()->ComputeKeyedStoreField(
-            name, receiver, field_index, transition, strict_mode);
+            name, receiver, lookup, transition, strict_mode);
       }
       // fall through.
     }
@@ -2023,7 +2070,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
   if (raw_function->is_compiled()) return raw_function;
 
-  Handle<JSFunction> function(raw_function);
+  Handle<JSFunction> function(raw_function, isolate);
   JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
   return *function;
 }
@@ -2074,7 +2121,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
 RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  StoreIC ic(isolate);
+  StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
@@ -2150,7 +2197,22 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  KeyedStoreIC ic(isolate);
+  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+  return ic.Store(state,
+                  Code::GetStrictMode(extra_ic_state),
+                  args.at<Object>(0),
+                  args.at<Object>(1),
+                  args.at<Object>(2),
+                  MISS);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
@@ -2165,7 +2227,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
   NoHandleAllocation na(isolate);
   ASSERT(args.length() == 3);
-  KeyedStoreIC ic(isolate);
+  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   Handle<Object> object = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
@@ -2183,7 +2245,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  KeyedStoreIC ic(isolate);
+  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
@@ -2668,7 +2730,8 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
                                 HasInlinedSmiCode(address()), x, y);
   ICCompareStub stub(op_, new_left, new_right, state);
   if (state == KNOWN_OBJECT) {
-    stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    stub.set_known_map(
+        Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
   }
   set_target(*stub.GetCode(isolate()));

7
deps/v8/src/ic.h

@@ -176,6 +176,7 @@ class IC {
                           Handle<String> name,
                           Handle<Code> code);
   void CopyICToMegamorphicCache(Handle<String> name);
+  bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
   void PatchCache(State state,
                   StrictModeFlag strict_mode,
                   Handle<JSObject> receiver,
@@ -496,7 +497,7 @@ class KeyedLoadIC: public LoadIC {
 class StoreIC: public IC {
  public:
-  explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+  StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
     ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub());
   }
@@ -585,7 +586,8 @@ enum KeyedStoreIncrementLength {
 class KeyedStoreIC: public StoreIC {
  public:
-  explicit KeyedStoreIC(Isolate* isolate) : StoreIC(isolate) {
+  KeyedStoreIC(FrameDepth depth, Isolate* isolate)
+      : StoreIC(depth, isolate) {
     ASSERT(target()->is_keyed_store_stub());
   }
@@ -786,6 +788,7 @@ enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);

 DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);

 } }  // namespace v8::internal
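
The ic.cc and ic.h hunks above make the frame depth an explicit constructor argument, which is what lets the new KeyedStoreIC_MissFromStubFailure entry point report an extra call frame on the stack. A self-contained sketch of the pattern (a toy model, not V8 code; the real classes also carry target/state checks):

#include <cassert>

// Toy model: FrameDepth moves from a value hard-coded inside each
// constructor to an explicit parameter threaded down to the base class.
enum FrameDepth { NO_EXTRA_FRAME = 0, EXTRA_CALL_FRAME = 1 };

struct IC {
  explicit IC(FrameDepth depth) : depth_(depth) {}
  FrameDepth depth_;
};

struct StoreIC : IC {
  explicit StoreIC(FrameDepth depth) : IC(depth) {}
};

struct KeyedStoreIC : StoreIC {
  explicit KeyedStoreIC(FrameDepth depth) : StoreIC(depth) {}
};

int main() {
  KeyedStoreIC miss(NO_EXTRA_FRAME);         // as in KeyedStoreIC_Miss
  KeyedStoreIC from_stub(EXTRA_CALL_FRAME);  // as in KeyedStoreIC_MissFromStubFailure
  assert(miss.depth_ == NO_EXTRA_FRAME);
  assert(from_stub.depth_ == EXTRA_CALL_FRAME);
  return 0;
}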

10
deps/v8/src/interpreter-irregexp.cc

@@ -73,9 +73,15 @@ static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
     unsigned int old_char = subject[from++];
     unsigned int new_char = subject[current++];
     if (old_char == new_char) continue;
-    if (old_char - 'A' <= 'Z' - 'A') old_char |= 0x20;
-    if (new_char - 'A' <= 'Z' - 'A') new_char |= 0x20;
+    // Convert both characters to lower case.
+    old_char |= 0x20;
+    new_char |= 0x20;
     if (old_char != new_char) return false;
+    // Not letters in the ASCII range and Latin-1 range.
+    if (!(old_char - 'a' <= 'z' - 'a') &&
+        !(old_char - 224 <= 254 - 224 && old_char != 247)) {
+      return false;
+    }
   }
   return true;
 }
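
The new matcher folds case with `| 0x20` unconditionally and then must verify the characters really are letters, because that bit trick also maps unrelated code points (e.g. '@' and '`') onto each other. A standalone sketch of the check, using the same Latin-1 letter range 0xE0..0xFE with the division sign 0xF7 excluded (illustrative, not V8 code):

#include <cassert>

static bool CharsMatchNoCase(unsigned old_char, unsigned new_char) {
  if (old_char == new_char) return true;
  old_char |= 0x20;  // Fold case for ASCII and Latin-1 letters.
  new_char |= 0x20;
  if (old_char != new_char) return false;
  // Only accept the fold if the character is actually a letter.
  bool ascii_letter = old_char - 'a' <= static_cast<unsigned>('z' - 'a');
  bool latin1_letter = old_char - 224 <= 254u - 224 && old_char != 247;
  return ascii_letter || latin1_letter;
}

int main() {
  assert(CharsMatchNoCase('A', 'a'));     // ASCII letters fold.
  assert(CharsMatchNoCase(0xC4, 0xE4));   // Latin-1 'Ä' vs 'ä'.
  assert(!CharsMatchNoCase('@', '`'));    // Differ by 0x20 but not letters.
  assert(!CharsMatchNoCase(0xD7, 0xF7));  // '×' vs '÷' are not letters.
  return 0;
}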

25
deps/v8/src/isolate.cc

@@ -612,13 +612,16 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
   limit = Max(limit, 0);  // Ensure that limit is not negative.
   int initial_size = Min(limit, 10);
   Handle<FixedArray> elements =
-      factory()->NewFixedArrayWithHoles(initial_size * 4);
+      factory()->NewFixedArrayWithHoles(initial_size * 4 + 1);

   // If the caller parameter is a function we skip frames until we're
   // under it before starting to collect.
   bool seen_caller = !caller->IsJSFunction();
-  int cursor = 0;
+  // First element is reserved to store the number of non-strict frames.
+  int cursor = 1;
   int frames_seen = 0;
+  int non_strict_frames = 0;
+  bool encountered_strict_function = false;
   for (StackFrameIterator iter(this);
        !iter.done() && frames_seen < limit;
        iter.Advance()) {
@@ -646,6 +649,17 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
         Handle<JSFunction> fun = frames[i].function();
         Handle<Code> code = frames[i].code();
         Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
+        // The stack trace API should not expose receivers and function
+        // objects on frames deeper than the top-most one with a strict
+        // mode function. The number of non-strict frames is stored as
+        // first element in the result array.
+        if (!encountered_strict_function) {
+          if (!fun->shared()->is_classic_mode()) {
+            encountered_strict_function = true;
+          } else {
+            non_strict_frames++;
+          }
+        }
         elements->set(cursor++, *recv);
         elements->set(cursor++, *fun);
         elements->set(cursor++, *code);
@@ -653,6 +667,7 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
       }
     }
   }
+  elements->set(0, Smi::FromInt(non_strict_frames));
   Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
   result->set_length(Smi::FromInt(cursor));
   return result;
@@ -1679,6 +1694,7 @@ Isolate::Isolate()
       code_stub_interface_descriptors_(NULL),
       context_exit_happened_(false),
       cpu_profiler_(NULL),
+      heap_profiler_(NULL),
       deferred_handles_head_(NULL),
       optimizing_compiler_thread_(this),
       marking_thread_(NULL),
@@ -1810,7 +1826,8 @@ void Isolate::Deinit() {
     preallocated_message_space_ = NULL;
     PreallocatedMemoryThreadStop();

-    HeapProfiler::TearDown();
+    delete heap_profiler_;
+    heap_profiler_ = NULL;
     delete cpu_profiler_;
     cpu_profiler_ = NULL;
@@ -2043,7 +2060,7 @@ bool Isolate::Init(Deserializer* des) {
   logger_->SetUp();

   cpu_profiler_ = new CpuProfiler(this);
-  HeapProfiler::SetUp();
+  heap_profiler_ = new HeapProfiler(heap());

   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
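
The CaptureSimpleStackTrace hunks change the raw stack-trace array so that slot 0 carries the count of leading non-strict frames; GetStackFrames in messages.js (further down in this diff) decrements that count and starts hiding receiver/function once it goes negative. A minimal model of the producer side (illustrative only; ints stand in for the V8 handles):

#include <cassert>
#include <vector>

// Toy model of the new layout: slot 0 = count of leading non-strict frames,
// then four slots per frame (receiver, function, code, offset).
struct FrameInfo { bool is_strict; int recv, fun, code, offset; };

std::vector<int> CaptureSimple(const std::vector<FrameInfo>& frames) {
  std::vector<int> elements(1 + 4 * frames.size(), 0);
  int cursor = 1;  // Slot 0 is reserved for the non-strict frame count.
  int non_strict_frames = 0;
  bool encountered_strict_function = false;
  for (const FrameInfo& f : frames) {
    if (!encountered_strict_function) {
      if (f.is_strict) {
        encountered_strict_function = true;
      } else {
        non_strict_frames++;
      }
    }
    elements[cursor++] = f.recv;
    elements[cursor++] = f.fun;
    elements[cursor++] = f.code;
    elements[cursor++] = f.offset;
  }
  elements[0] = non_strict_frames;
  return elements;
}

int main() {
  // Two sloppy frames on top of one strict frame: only the first two may
  // expose receiver/function to the stack trace API.
  std::vector<FrameInfo> frames = {{false, 1, 2, 3, 4},
                                   {false, 5, 6, 7, 8},
                                   {true, 9, 10, 11, 12}};
  std::vector<int> elements = CaptureSimple(frames);
  assert(elements[0] == 2);
  assert(elements.size() == 13);
  return 0;
}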

3
deps/v8/src/isolate.h

@@ -368,7 +368,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
   V(unsigned, ast_node_count, 0)                                              \
   /* SafeStackFrameIterator activations count. */                             \
   V(int, safe_stack_iterator_counter, 0)                                      \
-  V(HeapProfiler*, heap_profiler, NULL)                                       \
   V(bool, observer_delivery_pending, false)                                   \
   V(HStatistics*, hstatistics, NULL)                                          \
   V(HTracer*, htracer, NULL)                                                  \
@@ -976,6 +975,7 @@ class Isolate {
   inline bool DebuggerHasBreakPoints();

   CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
+  HeapProfiler* heap_profiler() const { return heap_profiler_; }

 #ifdef DEBUG
   HistogramInfo* heap_histograms() { return heap_histograms_; }
@@ -1313,6 +1313,7 @@ class Isolate {
   Debug* debug_;
 #endif
   CpuProfiler* cpu_profiler_;
+  HeapProfiler* heap_profiler_;

 #define GLOBAL_BACKING_STORE(type, name, initialvalue)                        \
   type name##_;

55
deps/v8/src/json-parser.h

@@ -291,6 +291,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
 // Parse a JSON object. Position must be right at '{'.
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
+  HandleScope scope(isolate());
   Handle<JSObject> json_object =
       factory()->NewJSObject(object_constructor(), pretenure_);
   ASSERT_EQ(c0_, '{');
@@ -358,12 +359,13 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
     }
   }
   AdvanceSkipWhitespace();
-  return json_object;
+  return scope.CloseAndEscape(json_object);
 }

 // Parse a JSON array. Position must be right at '['.
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
+  HandleScope scope(isolate());
   ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
   ZoneList<Handle<Object> > elements(4, zone());
   ASSERT_EQ(c0_, '[');
@@ -386,8 +388,9 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
   for (int i = 0, n = elements.length(); i < n; i++) {
     fast_elements->set(i, *elements[i]);
   }
-  return factory()->NewJSArrayWithElements(
+  Handle<Object> json_array = factory()->NewJSArrayWithElements(
       fast_elements, FAST_ELEMENTS, pretenure_);
+  return scope.CloseAndEscape(json_array);
 }
@@ -501,10 +504,10 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
   int count = end - start;
   int max_length = count + source_length_ - position_;
   int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
-  Handle<StringType> seq_str =
+  Handle<StringType> seq_string =
       NewRawString<StringType>(factory(), length, pretenure_);
   // Copy prefix into seq_str.
-  SinkChar* dest = seq_str->GetChars();
+  SinkChar* dest = seq_string->GetChars();
   String::WriteToFlat(*prefix, dest, start, end);

   while (c0_ != '"') {
@@ -512,7 +515,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
     if (c0_ < 0x20) return Handle<String>::null();
     if (count >= length) {
       // We need to create a longer sequential string for the result.
-      return SlowScanJsonString<StringType, SinkChar>(seq_str, 0, count);
+      return SlowScanJsonString<StringType, SinkChar>(seq_string, 0, count);
     }
     if (c0_ != '\\') {
       // If the sink can contain UC16 characters, or source_ contains only
@@ -522,11 +525,11 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
       if (sizeof(SinkChar) == kUC16Size ||
           seq_ascii ||
           c0_ <= String::kMaxOneByteCharCode) {
-        SeqStringSet(seq_str, count++, c0_);
+        SeqStringSet(seq_string, count++, c0_);
         Advance();
       } else {
         // StringType is SeqOneByteString and we just read a non-ASCII char.
-        return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
+        return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
       }
     } else {
       Advance();  // Advance past the \.
@@ -534,22 +537,22 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
         case '"':
         case '\\':
         case '/':
-          SeqStringSet(seq_str, count++, c0_);
+          SeqStringSet(seq_string, count++, c0_);
           break;
         case 'b':
-          SeqStringSet(seq_str, count++, '\x08');
+          SeqStringSet(seq_string, count++, '\x08');
           break;
         case 'f':
-          SeqStringSet(seq_str, count++, '\x0c');
+          SeqStringSet(seq_string, count++, '\x0c');
           break;
         case 'n':
-          SeqStringSet(seq_str, count++, '\x0a');
+          SeqStringSet(seq_string, count++, '\x0a');
           break;
         case 'r':
-          SeqStringSet(seq_str, count++, '\x0d');
+          SeqStringSet(seq_string, count++, '\x0d');
           break;
         case 't':
-          SeqStringSet(seq_str, count++, '\x09');
+          SeqStringSet(seq_string, count++, '\x09');
           break;
         case 'u': {
           uc32 value = 0;
@@ -563,13 +566,13 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
           }
           if (sizeof(SinkChar) == kUC16Size ||
               value <= String::kMaxOneByteCharCode) {
-            SeqStringSet(seq_str, count++, value);
+            SeqStringSet(seq_string, count++, value);
             break;
           } else {
             // StringType is SeqOneByteString and we just read a non-ASCII char.
             position_ -= 6;  // Rewind position_ to \ in \uxxxx.
             Advance();
-            return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
+            return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string,
                                                               0,
                                                               count);
           }
@@ -580,23 +583,13 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
       Advance();
     }
   }
-  // Shrink seq_string length to count.
-  if (isolate()->heap()->InNewSpace(*seq_str)) {
-    isolate()->heap()->new_space()->
-        template ShrinkStringAtAllocationBoundary<StringType>(
-            *seq_str, count);
-  } else {
-    int string_size = StringType::SizeFor(count);
-    int allocated_string_size = StringType::SizeFor(length);
-    int delta = allocated_string_size - string_size;
-    Address start_filler_object = seq_str->address() + string_size;
-    seq_str->set_length(count);
-    isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
-  }
   ASSERT_EQ('"', c0_);
   // Advance past the last '"'.
   AdvanceSkipWhitespace();
-  return seq_str;
+  // Shrink seq_string length to count and return.
+  return SeqString::Truncate(seq_string, count);
 }
@@ -623,8 +616,8 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
       int beg_pos = position_;
       position_ = position;
       return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
                                                            beg_pos,
                                                            position_);
     }
     if (c0 < 0x20) return Handle<String>::null();
     if (static_cast<uint32_t>(c0) >
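
SlowScanJsonString now allocates for the worst case and shrinks once at the end via SeqString::Truncate, instead of hand-rolling the new-space and filler-object shrink paths at the call site. A toy illustration of the same allocate-then-truncate pattern, with std::string standing in for SeqString:

#include <cassert>
#include <string>

// Allocate for the worst case, write, then shrink once at the end --
// the same shape as NewRawString(..., length) + SeqString::Truncate(count).
std::string Unescape(const std::string& src) {
  std::string out(src.size(), '\0');  // Worst case: nothing collapses.
  size_t count = 0;
  for (size_t i = 0; i < src.size(); ++i) {
    if (src[i] == '\\' && i + 1 < src.size()) ++i;  // Skip the backslash.
    out[count++] = src[i];
  }
  out.resize(count);  // Single truncation to the real length.
  return out;
}

int main() {
  assert(Unescape("a\\\"b") == "a\"b");  // Four chars shrink to three.
  assert(Unescape("plain") == "plain");
  return 0;
}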

92
deps/v8/src/json-stringifier.h

@@ -41,6 +41,9 @@ class BasicJsonStringifier BASE_EMBEDDED {
   MaybeObject* Stringify(Handle<Object> object);

+  INLINE(static MaybeObject* StringifyString(Isolate* isolate,
+                                             Handle<String> object));
+
  private:
   static const int kInitialPartLength = 32;
   static const int kMaxPartLength = 16 * 1024;
@@ -52,7 +55,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
   void ChangeEncoding();

-  void ShrinkCurrentPart();
+  INLINE(void ShrinkCurrentPart());

   template <bool is_ascii, typename Char>
   INLINE(void Append_(Char c));
@@ -84,6 +87,11 @@ class BasicJsonStringifier BASE_EMBEDDED {
                                     bool deferred_comma,
                                     bool deferred_key);

+  template <typename ResultType, typename Char>
+  INLINE(static MaybeObject* StringifyString_(Isolate* isolate,
+                                              Vector<Char> vector,
+                                              Handle<String> result));
+
   // Entry point to serialize the object.
   INLINE(Result SerializeObject(Handle<Object> obj)) {
     return Serialize_<false>(obj, false, factory_->empty_string());
@@ -135,18 +143,18 @@ class BasicJsonStringifier BASE_EMBEDDED {
   void SerializeString(Handle<String> object);

   template <typename SrcChar, typename DestChar>
-  INLINE(void SerializeStringUnchecked_(const SrcChar* src,
-                                        DestChar* dest,
-                                        int length));
+  INLINE(static int SerializeStringUnchecked_(const SrcChar* src,
+                                              DestChar* dest,
+                                              int length));

   template <bool is_ascii, typename Char>
   INLINE(void SerializeString_(Handle<String> string));

   template <typename Char>
-  INLINE(bool DoNotEscape(Char c));
+  INLINE(static bool DoNotEscape(Char c));

   template <typename Char>
-  INLINE(Vector<const Char> GetCharVector(Handle<String> string));
+  INLINE(static Vector<const Char> GetCharVector(Handle<String> string));

   Result StackPush(Handle<Object> object);
   void StackPop();
@@ -244,15 +252,15 @@ const char* const BasicJsonStringifier::JsonEscapeTable =
     "\370\0 \371\0 \372\0 \373\0 "
     "\374\0 \375\0 \376\0 \377\0 ";

 BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
     : isolate_(isolate), current_index_(0), is_ascii_(true) {
   factory_ = isolate_->factory();
   accumulator_store_ = Handle<JSValue>::cast(
       factory_->ToObject(factory_->empty_string()));
   part_length_ = kInitialPartLength;
-  current_part_ = factory_->NewRawOneByteString(kInitialPartLength);
-  tojson_string_ =
-      factory_->InternalizeOneByteString(STATIC_ASCII_VECTOR("toJSON"));
+  current_part_ = factory_->NewRawOneByteString(part_length_);
+  tojson_string_ = factory_->toJSON_string();
   stack_ = factory_->NewJSArray(8);
 }
@@ -275,6 +283,51 @@ MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
 }

+MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
+                                                   Handle<String> object) {
+  static const int kJsonQuoteWorstCaseBlowup = 6;
+  static const int kSpaceForQuotes = 2;
+  int worst_case_length =
+      object->length() * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
+  if (worst_case_length > 32 * KB) {  // Slow path if too large.
+    BasicJsonStringifier stringifier(isolate);
+    return stringifier.Stringify(object);
+  }
+
+  FlattenString(object);
+  String::FlatContent flat = object->GetFlatContent();
+  if (flat.IsAscii()) {
+    return StringifyString_<SeqOneByteString>(
+        isolate,
+        flat.ToOneByteVector(),
+        isolate->factory()->NewRawOneByteString(worst_case_length));
+  } else {
+    ASSERT(flat.IsTwoByte());
+    return StringifyString_<SeqTwoByteString>(
+        isolate,
+        flat.ToUC16Vector(),
+        isolate->factory()->NewRawTwoByteString(worst_case_length));
+  }
+}
+
+
+template <typename ResultType, typename Char>
+MaybeObject* BasicJsonStringifier::StringifyString_(Isolate* isolate,
+                                                    Vector<Char> vector,
+                                                    Handle<String> result) {
+  AssertNoAllocation no_allocation;
+  int final_size = 0;
+  ResultType* dest = ResultType::cast(*result);
+  dest->Set(final_size++, '\"');
+  final_size += SerializeStringUnchecked_(vector.start(),
+                                          dest->GetChars() + 1,
+                                          vector.length());
+  dest->Set(final_size++, '\"');
+  return *SeqString::Truncate(Handle<SeqString>::cast(result), final_size);
+}
+
+
 template <bool is_ascii, typename Char>
 void BasicJsonStringifier::Append_(Char c) {
   if (is_ascii) {
@@ -638,8 +691,8 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
 void BasicJsonStringifier::ShrinkCurrentPart() {
   ASSERT(current_index_ < part_length_);
-  current_part_ = Handle<String>(
-      SeqString::cast(*current_part_)->Truncate(current_index_), isolate_);
+  current_part_ = SeqString::Truncate(Handle<SeqString>::cast(current_part_),
+                                      current_index_);
 }
@@ -667,10 +720,9 @@ void BasicJsonStringifier::ChangeEncoding() {
 template <typename SrcChar, typename DestChar>
-void BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
-                                                     DestChar* dest,
-                                                     int length) {
-  dest += current_index_;
+int BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
+                                                    DestChar* dest,
+                                                    int length) {
   DestChar* dest_start = dest;

   // Assert that uc16 character is not truncated down to 8 bit.
@@ -688,7 +740,7 @@ void BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
     }
   }
-  current_index_ += static_cast<int>(dest - dest_start);
+  return static_cast<int>(dest - dest_start);
 }
@@ -705,14 +757,14 @@ void BasicJsonStringifier::SerializeString_(Handle<String> string) {
     AssertNoAllocation no_allocation;
     Vector<const Char> vector = GetCharVector<Char>(string);
     if (is_ascii) {
-      SerializeStringUnchecked_(
+      current_index_ += SerializeStringUnchecked_(
           vector.start(),
-          SeqOneByteString::cast(*current_part_)->GetChars(),
+          SeqOneByteString::cast(*current_part_)->GetChars() + current_index_,
           length);
     } else {
-      SerializeStringUnchecked_(
+      current_index_ += SerializeStringUnchecked_(
          vector.start(),
-          SeqTwoByteString::cast(*current_part_)->GetChars(),
+          SeqTwoByteString::cast(*current_part_)->GetChars() + current_index_,
          length);
    }
  } else {
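
The new StringifyString fast path sizes its output from the constants above: each character can expand to at most a six-character escape such as \u0000, plus two bytes for the quotes, and any input whose worst case exceeds 32 KB falls back to the general stringifier. Restated as a small runnable check:

#include <cassert>

// The fast path's buffer sizing, restated with the constants from the diff.
int WorstCaseJsonLength(int string_length) {
  const int kJsonQuoteWorstCaseBlowup = 6;  // "\u0000" per character.
  const int kSpaceForQuotes = 2;            // Surrounding quotes.
  return string_length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
}

int main() {
  assert(WorstCaseJsonLength(5) == 32);
  // Inputs whose worst case exceeds 32 KB take the general slow path
  // rather than preallocating a large raw string.
  assert(WorstCaseJsonLength(6000) > 32 * 1024);
  return 0;
}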

91
deps/v8/src/log.cc

@@ -644,7 +644,17 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
         String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     ApiEvent("api,check-security,\"%s\"\n", *str);
   } else if (key->IsSymbol()) {
-    ApiEvent("api,check-security,symbol(hash %x)\n", Symbol::cast(key)->Hash());
+    Symbol* symbol = Symbol::cast(key);
+    if (symbol->name()->IsUndefined()) {
+      ApiEvent("api,check-security,symbol(hash %x)\n",
+               Symbol::cast(key)->Hash());
+    } else {
+      SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
+          DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+      ApiEvent("api,check-security,symbol(\"%s\" hash %x)\n",
+               *str,
+               Symbol::cast(key)->Hash());
+    }
   } else if (key->IsUndefined()) {
     ApiEvent("api,check-security,undefined\n");
   } else {
@@ -833,8 +843,16 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
   } else {
-    uint32_t hash = Symbol::cast(name)->Hash();
-    ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, *class_name, hash);
+    Symbol* symbol = Symbol::cast(name);
+    uint32_t hash = symbol->Hash();
+    if (symbol->name()->IsUndefined()) {
+      ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, *class_name, hash);
+    } else {
+      SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
+          DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+      ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)\n",
+               tag, *class_name, *str, hash);
+    }
   }
 }
@@ -902,7 +920,14 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     msg.Append(",1,\"%s%s\"", prefix, *str);
   } else {
-    msg.Append(",1,symbol(hash %x)", prefix, Name::cast(name)->Hash());
+    Symbol* symbol = Symbol::cast(name);
+    if (symbol->name()->IsUndefined()) {
+      msg.Append(",1,symbol(hash %x)", prefix, symbol->Hash());
+    } else {
+      SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
+          DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+      msg.Append(",1,symbol(\"%s\" hash %x)", prefix, *str, symbol->Hash());
+    }
   }
   msg.Append('\n');
   msg.WriteToLogFile();
@@ -978,8 +1003,15 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
     if (name->IsString()) {
       name_buffer_->AppendString(String::cast(name));
     } else {
-      name_buffer_->AppendBytes("symbol(hash ");
-      name_buffer_->AppendHex(Name::cast(name)->Hash());
+      Symbol* symbol = Symbol::cast(name);
+      name_buffer_->AppendBytes("symbol(");
+      if (!symbol->name()->IsUndefined()) {
+        name_buffer_->AppendBytes("\"");
+        name_buffer_->AppendString(String::cast(symbol->name()));
+        name_buffer_->AppendBytes("\" ");
+      }
+      name_buffer_->AppendBytes("hash ");
+      name_buffer_->AppendHex(symbol->Hash());
       name_buffer_->AppendByte(')');
     }
   }
@@ -1006,7 +1038,14 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
       msg.AppendDetailed(String::cast(name), false);
       msg.Append('"');
     } else {
-      msg.Append("symbol(hash %x)", Name::cast(name)->Hash());
+      Symbol* symbol = Symbol::cast(name);
+      msg.Append("symbol(");
+      if (!symbol->name()->IsUndefined()) {
+        msg.Append("\"");
+        msg.AppendDetailed(String::cast(symbol->name()), false);
+        msg.Append("\" ");
+      }
+      msg.Append("hash %x)", symbol->Hash());
     }
     msg.Append('\n');
     msg.WriteToLogFile();
@@ -1036,8 +1075,15 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
     if (name->IsString()) {
       name_buffer_->AppendString(String::cast(name));
     } else {
-      name_buffer_->AppendBytes("symbol(hash ");
-      name_buffer_->AppendHex(Name::cast(name)->Hash());
+      Symbol* symbol = Symbol::cast(name);
+      name_buffer_->AppendBytes("symbol(");
+      if (!symbol->name()->IsUndefined()) {
+        name_buffer_->AppendBytes("\"");
+        name_buffer_->AppendString(String::cast(symbol->name()));
+        name_buffer_->AppendBytes("\" ");
+      }
+      name_buffer_->AppendBytes("hash ");
+      name_buffer_->AppendHex(symbol->Hash());
       name_buffer_->AppendByte(')');
     }
   }
@@ -1073,7 +1119,14 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     msg.Append("\"%s\"", *str);
   } else {
-    msg.Append("symbol(hash %x)", Name::cast(name)->Hash());
+    Symbol* symbol = Symbol::cast(name);
+    msg.Append("symbol(");
+    if (!symbol->name()->IsUndefined()) {
+      msg.Append("\"");
+      msg.AppendDetailed(String::cast(symbol->name()), false);
+      msg.Append("\" ");
+    }
+    msg.Append("hash %x)", symbol->Hash());
   }
   msg.Append(',');
   msg.AppendAddress(shared->address());
@@ -1138,7 +1191,14 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
         String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     msg.Append("%s", *sourcestr);
   } else {
-    msg.Append("symbol(hash %x)", Name::cast(source)->Hash());
+    Symbol* symbol = Symbol::cast(source);
+    msg.Append("symbol(");
+    if (!symbol->name()->IsUndefined()) {
+      msg.Append("\"");
+      msg.AppendDetailed(String::cast(symbol->name()), false);
+      msg.Append("\" ");
+    }
+    msg.Append("hash %x)", symbol->Hash());
   }
   msg.Append(":%d\",", line);
   msg.AppendAddress(shared->address());
@@ -1358,7 +1418,14 @@ void Logger::SuspectReadEvent(Name* name, Object* obj) {
     msg.Append(String::cast(name));
     msg.Append('"');
   } else {
-    msg.Append("symbol(hash %x)", Name::cast(name)->Hash());
+    Symbol* symbol = Symbol::cast(name);
+    msg.Append("symbol(");
+    if (!symbol->name()->IsUndefined()) {
+      msg.Append("\"");
+      msg.AppendDetailed(String::cast(symbol->name()), false);
+      msg.Append("\" ");
+    }
+    msg.Append("hash %x)", symbol->Hash());
   }
   msg.Append('\n');
   msg.WriteToLogFile();
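
The net effect of the log.cc hunks, modeled outside V8: a symbol with a name now logs its description next to the hash, while anonymous symbols keep the old hash-only form. In this sketch a null name stands in for symbol->name()->IsUndefined():

#include <cstdio>

void LogSymbolCheck(const char* name, unsigned hash) {
  if (name == nullptr) {
    std::printf("api,check-security,symbol(hash %x)\n", hash);
  } else {
    std::printf("api,check-security,symbol(\"%s\" hash %x)\n", name, hash);
  }
}

int main() {
  LogSymbolCheck(nullptr, 0x1a2b);   // api,check-security,symbol(hash 1a2b)
  LogSymbolCheck("secret", 0x1a2b);  // api,check-security,symbol("secret" hash 1a2b)
  return 0;
}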

1
deps/v8/src/log.h

@@ -77,6 +77,7 @@ class Semaphore;
 class Ticker;
 class Isolate;
 class PositionsRecorder;
+class CpuProfiler;

 #undef LOG
 #define LOG(isolate, Call) \

5
deps/v8/src/macros.py

@@ -99,7 +99,7 @@ macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
 macro IS_NUMBER(arg) = (typeof(arg) === 'number');
 macro IS_STRING(arg) = (typeof(arg) === 'string');
 macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_SYMBOL(arg) = (%_IsSymbol(arg));
+macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
 macro IS_OBJECT(arg) = (%_IsObject(arg));
 macro IS_ARRAY(arg) = (%_IsArray(arg));
 macro IS_FUNCTION(arg) = (%_IsFunction(arg));
@@ -110,11 +110,13 @@ macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
 macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
 macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
 macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
+macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
 macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
 macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
 macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
 macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
 macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
+macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === '__ArrayBuffer');
 macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
 macro FLOOR(arg) = $floor(arg);
@@ -141,6 +143,7 @@ const kBoundArgumentsStartIndex = 2;
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
 macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
 macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
+macro TO_POSITIVE_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? (arg > 0 ? arg : 0) : %NumberToPositiveInteger(ToNumber(arg)));
 macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
 macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
 macro TO_UINT32(arg) = (arg >>> 0);

104
deps/v8/src/messages.js

@@ -100,6 +100,7 @@ var kMessages = {
   observe_notify_non_notifier: ["notify called on non-notifier object"],
   // RangeError
   invalid_array_length: ["Invalid array length"],
+  invalid_array_buffer_length: ["Invalid array buffer length"],
   stack_overflow: ["Maximum call stack size exceeded"],
   invalid_time_value: ["Invalid time value"],
   // SyntaxError
@@ -150,6 +151,7 @@ var kMessages = {
   cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
   redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
   harmony_const_assign: ["Assignment to constant variable."],
+  symbol_to_string: ["Conversion from symbol to string"],
   invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
   module_type_error: ["Module '", "%0", "' used improperly"],
   module_export_undefined: ["Export '", "%0", "' is not defined in module"],
@@ -745,64 +747,70 @@ function GetPositionInLine(message) {
 function GetStackTraceLine(recv, fun, pos, isGlobal) {
-  return new CallSite(recv, fun, pos).toString();
+  return new CallSite(recv, fun, pos, false).toString();
 }

 // ----------------------------------------------------------------------------
 // Error implementation

-function CallSite(receiver, fun, pos) {
-  this.receiver = receiver;
-  this.fun = fun;
-  this.pos = pos;
+var CallSiteReceiverKey = %CreateSymbol("receiver");
+var CallSiteFunctionKey = %CreateSymbol("function");
+var CallSitePositionKey = %CreateSymbol("position");
+var CallSiteStrictModeKey = %CreateSymbol("strict mode");
+
+function CallSite(receiver, fun, pos, strict_mode) {
+  this[CallSiteReceiverKey] = receiver;
+  this[CallSiteFunctionKey] = fun;
+  this[CallSitePositionKey] = pos;
+  this[CallSiteStrictModeKey] = strict_mode;
 }

 function CallSiteGetThis() {
-  return this.receiver;
+  return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteReceiverKey];
 }

 function CallSiteGetTypeName() {
-  return GetTypeName(this, false);
+  return GetTypeName(this[CallSiteReceiverKey], false);
 }

 function CallSiteIsToplevel() {
-  if (this.receiver == null) {
+  if (this[CallSiteReceiverKey] == null) {
     return true;
   }
-  return IS_GLOBAL(this.receiver);
+  return IS_GLOBAL(this[CallSiteReceiverKey]);
 }

 function CallSiteIsEval() {
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   return script && script.compilation_type == COMPILATION_TYPE_EVAL;
 }

 function CallSiteGetEvalOrigin() {
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   return FormatEvalOrigin(script);
 }

 function CallSiteGetScriptNameOrSourceURL() {
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   return script ? script.nameOrSourceURL() : null;
 }

 function CallSiteGetFunction() {
-  return this.fun;
+  return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteFunctionKey];
 }

 function CallSiteGetFunctionName() {
   // See if the function knows its own name
-  var name = this.fun.name;
+  var name = this[CallSiteFunctionKey].name;
   if (name) {
     return name;
   }
-  name = %FunctionGetInferredName(this.fun);
+  name = %FunctionGetInferredName(this[CallSiteFunctionKey]);
   if (name) {
     return name;
   }
   // Maybe this is an evaluation?
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
     return "eval";
   }
@@ -812,26 +820,22 @@ function CallSiteGetFunctionName() {
 function CallSiteGetMethodName() {
   // See if we can find a unique property on the receiver that holds
   // this function.
-  var ownName = this.fun.name;
-  if (ownName && this.receiver &&
-      (%_CallFunction(this.receiver,
-                      ownName,
-                      ObjectLookupGetter) === this.fun ||
-       %_CallFunction(this.receiver,
-                      ownName,
-                      ObjectLookupSetter) === this.fun ||
-       (IS_OBJECT(this.receiver) &&
-        %GetDataProperty(this.receiver, ownName) === this.fun))) {
+  var receiver = this[CallSiteReceiverKey];
+  var fun = this[CallSiteFunctionKey];
+  var ownName = fun.name;
+  if (ownName && receiver &&
+      (%_CallFunction(receiver, ownName, ObjectLookupGetter) === fun ||
+       %_CallFunction(receiver, ownName, ObjectLookupSetter) === fun ||
+       (IS_OBJECT(receiver) && %GetDataProperty(receiver, ownName) === fun))) {
     // To handle DontEnum properties we guess that the method has
     // the same name as the function.
     return ownName;
   }
   var name = null;
-  for (var prop in this.receiver) {
-    if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun ||
-        %_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun ||
-        (IS_OBJECT(this.receiver) &&
-         %GetDataProperty(this.receiver, prop) === this.fun)) {
+  for (var prop in receiver) {
+    if (%_CallFunction(receiver, prop, ObjectLookupGetter) === fun ||
+        %_CallFunction(receiver, prop, ObjectLookupSetter) === fun ||
+        (IS_OBJECT(receiver) && %GetDataProperty(receiver, prop) === fun)) {
      // If we find more than one match bail out to avoid confusion.
      if (name) {
        return null;
@@ -846,49 +850,49 @@ function CallSiteGetMethodName() {
 }

 function CallSiteGetFileName() {
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   return script ? script.name : null;
 }

 function CallSiteGetLineNumber() {
-  if (this.pos == -1) {
+  if (this[CallSitePositionKey] == -1) {
     return null;
   }
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   var location = null;
   if (script) {
-    location = script.locationFromPosition(this.pos, true);
+    location = script.locationFromPosition(this[CallSitePositionKey], true);
   }
   return location ? location.line + 1 : null;
 }

 function CallSiteGetColumnNumber() {
-  if (this.pos == -1) {
+  if (this[CallSitePositionKey] == -1) {
     return null;
   }
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   var location = null;
   if (script) {
-    location = script.locationFromPosition(this.pos, true);
+    location = script.locationFromPosition(this[CallSitePositionKey], true);
   }
   return location ? location.column + 1: null;
 }

 function CallSiteIsNative() {
-  var script = %FunctionGetScript(this.fun);
+  var script = %FunctionGetScript(this[CallSiteFunctionKey]);
   return script ? (script.type == TYPE_NATIVE) : false;
 }

 function CallSiteGetPosition() {
-  return this.pos;
+  return this[CallSitePositionKey];
 }

 function CallSiteIsConstructor() {
-  var receiver = this.receiver;
+  var receiver = this[CallSiteReceiverKey];
   var constructor =
       IS_OBJECT(receiver) ? %GetDataProperty(receiver, "constructor") : null;
   if (!constructor) return false;
-  return this.fun === constructor;
+  return this[CallSiteFunctionKey] === constructor;
 }

 function CallSiteToString() {
@@ -931,7 +935,7 @@ function CallSiteToString() {
   var isConstructor = this.isConstructor();
   var isMethodCall = !(this.isToplevel() || isConstructor);
   if (isMethodCall) {
-    var typeName = GetTypeName(this, true);
+    var typeName = GetTypeName(this[CallSiteReceiverKey], true);
     var methodName = this.getMethodName();
     if (functionName) {
       if (typeName &&
@@ -1035,13 +1039,15 @@ function FormatErrorString(error) {
 function GetStackFrames(raw_stack) {
   var frames = new InternalArray();
-  for (var i = 0; i < raw_stack.length; i += 4) {
+  var non_strict_frames = raw_stack[0];
+  for (var i = 1; i < raw_stack.length; i += 4) {
     var recv = raw_stack[i];
     var fun = raw_stack[i + 1];
     var code = raw_stack[i + 2];
     var pc = raw_stack[i + 3];
     var pos = %FunctionGetPositionForOffset(code, pc);
-    frames.push(new CallSite(recv, fun, pos));
+    non_strict_frames--;
+    frames.push(new CallSite(recv, fun, pos, (non_strict_frames < 0)));
   }
   return frames;
 }
@@ -1069,16 +1075,16 @@ function FormatStackTrace(error_string, frames) {
 }

-function GetTypeName(obj, requireConstructor) {
-  var constructor = obj.receiver.constructor;
+function GetTypeName(receiver, requireConstructor) {
+  var constructor = receiver.constructor;
   if (!constructor) {
     return requireConstructor ? null :
-        %_CallFunction(obj.receiver, ObjectToString);
+        %_CallFunction(receiver, ObjectToString);
   }
   var constructorName = constructor.name;
   if (!constructorName) {
     return requireConstructor ? null :
-        %_CallFunction(obj.receiver, ObjectToString);
+        %_CallFunction(receiver, ObjectToString);
   }
   return constructorName;
 }

1
deps/v8/src/mips/OWNERS

@@ -0,0 +1 @@
+plind44@gmail.com

10
deps/v8/src/mips/assembler-mips.cc

@@ -490,7 +490,6 @@ bool Assembler::IsBranch(Instr instr) {
   uint32_t opcode = GetOpcodeField(instr);
   uint32_t rt_field = GetRtField(instr);
   uint32_t rs_field = GetRsField(instr);
-  uint32_t label_constant = GetLabelConst(instr);
   // Checks if the instruction is a branch.
   return opcode == BEQ ||
       opcode == BNE ||
@@ -502,10 +501,13 @@ bool Assembler::IsBranch(Instr instr) {
       opcode == BGTZL ||
       (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                             rt_field == BLTZAL || rt_field == BGEZAL)) ||
-      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
-      label_constant == 0;  // Emitted label const in reg-exp engine.
+      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
 }

+bool Assembler::IsEmittedConstant(Instr instr) {
+  uint32_t label_constant = GetLabelConst(instr);
+  return label_constant == 0;  // Emitted label const in reg-exp engine.
+}

 bool Assembler::IsBeq(Instr instr) {
   return GetOpcodeField(instr) == BEQ;
@@ -796,7 +798,7 @@ void Assembler::bind_to(Label* L, int pos) {
       }
       target_at_put(fixup_pos, pos);
     } else {
-      ASSERT(IsJ(instr) || IsLui(instr));
+      ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
       target_at_put(fixup_pos, pos);
     }
   }

1
deps/v8/src/mips/assembler-mips.h

@@ -942,6 +942,7 @@ class Assembler : public AssemblerBase {
   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
   static bool IsAndImmediate(Instr instr);
+  static bool IsEmittedConstant(Instr instr);

   void CheckTrampolinePool();

21
deps/v8/src/mips/builtins-mips.cc

@@ -227,13 +227,12 @@ static void AllocateJSArray(MacroAssembler* masm,
               (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
   __ sra(scratch1, array_size, kSmiTagSize);
   __ Addu(elements_array_end, elements_array_end, scratch1);
-  __ AllocateInNewSpace(
-      elements_array_end,
-      result,
-      scratch1,
-      scratch2,
-      gc_required,
-      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  __ Allocate(elements_array_end,
+              result,
+              scratch1,
+              scratch2,
+              gc_required,
+              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Allocated the JSArray. Now initialize the fields except for the elements
   // array.
@@ -895,7 +894,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // a1: constructor function
     // a2: initial map
     __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+    __ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);

     // Allocated the JSObject, now initialize the fields. Map is set to
     // initial map and properties and elements are set to empty fixed array.
@@ -974,7 +973,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // t4: JSObject
     // t5: start of next object
     __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
+    __ Allocate(
         a0,
         t5,
         t6,
@@ -1130,10 +1129,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ GetObjectType(v0, a1, a3);
     __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

-    // Symbols are "objects".
-    __ lbu(a3, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-    __ Branch(&exit, eq, a3, Operand(SYMBOL_TYPE));
-
     // Throw away the result of the constructor invocation and use the
     // on-stack receiver as the result.
     __ bind(&use_receiver);

141
deps/v8/src/mips/code-stubs-mips.cc

@@ -62,6 +62,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
 }

+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a2, a1, a0 };
+  descriptor->register_param_count_ = 3;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -85,7 +96,7 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate,
   // stack param count needs (constructor pointer, and single argument)
   descriptor->stack_parameter_count_ = &a0;
   descriptor->register_params_ = registers;
-  descriptor->extra_expression_stack_count_ = 1;
+  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
   descriptor->deoptimization_handler_ =
       FUNCTION_ADDR(ArrayConstructor_StubFailure);
 }
@@ -673,6 +684,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
+  ASSERT(!object.is(dst1) && !object.is(dst2));
   __ AssertRootValue(heap_number_map,
                      Heap::kHeapNumberMapRootIndex,
                      "HeapNumberMap register clobbered.");
@@ -867,6 +880,10 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
   ASSERT(!heap_number_map.is(object) &&
          !heap_number_map.is(scratch1) &&
          !heap_number_map.is(scratch2));
+  // ARM uses pop/push and Ldlr to save dst_* and probably object registers in
+  // softfloat path. On MIPS there is no ldlr, 1st lw instruction may overwrite
+  // object register making the 2nd lw invalid.
+  ASSERT(!object.is(dst_mantissa) && !object.is(dst_exponent));

   Label done, obj_is_not_smi;
@@ -903,49 +920,24 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
     if (destination == kCoreRegisters) {
       __ Move(dst_mantissa, dst_exponent, double_dst);
     }
   } else {
+    ASSERT(!scratch1.is(object) && !scratch2.is(object));
     // Load the double value in the destination registers.
-    bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
-    if (save_registers) {
-      // Save both output registers, because the other one probably holds
-      // an important value too.
-      __ Push(dst_exponent, dst_mantissa);
-    }
     __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
     __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));

     // Check for 0 and -0.
-    Label zero;
     __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
     __ Or(scratch1, scratch1, Operand(dst_mantissa));
-    __ Branch(&zero, eq, scratch1, Operand(zero_reg));
+    __ Branch(&done, eq, scratch1, Operand(zero_reg));

     // Check that the value can be exactly represented by a 32-bit integer.
     // Jump to not_int32 if that's not the case.
-    Label restore_input_and_miss;
     DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
-                         &restore_input_and_miss);
+                         not_int32);

     // dst_* were trashed. Reload the double value.
-    if (save_registers) {
-      __ Pop(dst_exponent, dst_mantissa);
-    }
     __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
     __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-    __ Branch(&done);
-
-    __ bind(&restore_input_and_miss);
-    if (save_registers) {
-      __ Pop(dst_exponent, dst_mantissa);
-    }
-    __ Branch(not_int32);
-
-    __ bind(&zero);
-    if (save_registers) {
-      __ Drop(2);
-    }
   }
   __ bind(&done);
@@ -2696,16 +2688,20 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
           masm, destination, right, f14, a2, a3, heap_number_map,
           scratch1, scratch2, fail);
     }
+    // Use scratch3 as left in LoadNumber functions to avoid overwriting of
+    // left (a0) register.
+    __ mov(scratch3, left);
+
     // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
     // jumps to |miss|.
     if (left_type == BinaryOpIC::INT32) {
       FloatingPointHelper::LoadNumberAsInt32Double(
-          masm, left, destination, f12, f16, a0, a1, heap_number_map,
+          masm, scratch3, destination, f12, f16, a0, a1, heap_number_map,
           scratch1, scratch2, f2, miss);
     } else {
       Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
       FloatingPointHelper::LoadNumber(
-          masm, destination, left, f12, a0, a1, heap_number_map,
+          masm, destination, scratch3, f12, a0, a1, heap_number_map,
           scratch1, scratch2, fail);
     }
   }
@@ -4544,35 +4540,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 }

-void ArrayLengthStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver;
-  if (kind() == Code::KEYED_LOAD_IC) {
-    // ----------- S t a t e -------------
-    //  -- ra    : return address
-    //  -- a0    : key
-    //  -- a1    : receiver
-    // -----------------------------------
-    __ Branch(&miss, ne, a0,
-        Operand(masm->isolate()->factory()->length_string()));
-    receiver = a1;
-  } else {
-    ASSERT(kind() == Code::LOAD_IC);
-    // ----------- S t a t e -------------
-    //  -- a2    : name
-    //  -- ra    : return address
-    //  -- a0    : receiver
-    //  -- sp[0] : receiver
-    // -----------------------------------
-    receiver = a0;
-  }
-
-  StubCompiler::GenerateLoadArrayLength(masm, receiver, a3, &miss);
-  __ bind(&miss);
-  StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
-}
-
-
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver;
@@ -4861,7 +4828,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));

   // Do the allocation of all three objects in one go.
-  __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+  __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);

   // v0 = address of new object(s) (tagged)
   // a2 = argument count (tagged)
@@ -5052,13 +5019,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));

   // Do the allocation of both objects in one go.
-  __ AllocateInNewSpace(a1,
-                        v0,
-                        a2,
-                        a3,
-                        &runtime,
-                        static_cast<AllocationFlags>(TAG_OBJECT |
-                                                     SIZE_IN_WORDS));
+  __ Allocate(a1, v0, a2, a3, &runtime,
+              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Get the arguments boilerplate from the current native context.
   __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -5578,7 +5540,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
       (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
   __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
   __ Addu(a2, t1, Operand(objects_size));
-  __ AllocateInNewSpace(
+  __ Allocate(
       a2,  // In: Size, in words.
       v0,  // Out: Start of allocation (tagged).
      a3,  // Scratch register.
@@ -7543,30 +7505,28 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ Branch(done, eq, entity_name, Operand(tmp));

-    if (i != kInlinedProbes - 1) {
-      // Load the hole ready for use below:
-      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
-      // Stop if found the property.
-      __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
-
-      Label good;
-      __ Branch(&good, eq, entity_name, Operand(tmp));
-
-      // Check if the entry name is not a unique name.
-      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
-      __ lbu(entity_name,
-             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-      __ And(scratch0, entity_name, Operand(kIsInternalizedMask));
-      __ Branch(&good, ne, scratch0, Operand(zero_reg));
-      __ Branch(miss, ne, entity_name, Operand(SYMBOL_TYPE));
-
-      __ bind(&good);
-
-      // Restore the properties.
-      __ lw(properties,
-            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-    }
+    // Load the hole ready for use below:
+    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
+    // Stop if found the property.
+    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
+
+    Label good;
+    __ Branch(&good, eq, entity_name, Operand(tmp));
+
+    // Check if the entry name is not a unique name.
+    __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+    __ lbu(entity_name,
+           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+    __ And(scratch0, entity_name, Operand(kIsInternalizedMask));
+    __ Branch(&good, ne, scratch0, Operand(zero_reg));
+    __ Branch(miss, ne, entity_name, Operand(SYMBOL_TYPE));
+
+    __ bind(&good);
+
+    // Restore the properties.
+    __ lw(properties,
+          FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  }

  const int spill_mask =

@@ -8127,6 +8087,9 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ lw(a1, MemOperand(fp, parameter_count_offset));
+  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+    __ Addu(a1, a1, Operand(1));
+  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ sll(a1, a1, kPointerSizeLog2);
  __ Addu(sp, sp, a1);
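
The three added lines account for the implicit receiver: in
JS_FUNCTION_STUB_MODE the caller pushed argc + 1 stack slots, so the
trampoline bumps a1 before converting the count to bytes and popping. A
host-side restatement of that arithmetic (illustration only; on 32-bit
MIPS kPointerSizeLog2 is 2):

    #include <stdint.h>

    static uint32_t BytesToPop(uint32_t argc, int js_function_stub_mode) {
      uint32_t slots = argc + (js_function_stub_mode ? 1 : 0);  // + receiver
      return slots << 2;  // sll(a1, a1, kPointerSizeLog2); Addu(sp, sp, a1)
    }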

7
deps/v8/src/mips/codegen-mips.cc

@@ -206,8 +206,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
  // Allocate new FixedDoubleArray.
  __ sll(scratch, t1, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  __ Allocate(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedDoubleArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));

@@ -351,7 +352,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
  // Allocate new FixedArray.
  __ sll(a0, t1, 1);
  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
-  __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);

@@ -702,7 +703,7 @@ void Code::PatchPlatformCodeAge(byte* sequence,
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAge) {
-    memcpy(sequence, young_sequence, young_length);
+    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(age, parity);

30
deps/v8/src/mips/full-codegen-mips.cc

@@ -2780,28 +2780,6 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}

-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a2);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a2, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}

void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
  ZoneList<Expression*>* args = expr->arguments();
  ASSERT(args->length() == 1);

@@ -4320,6 +4298,10 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
    Split(eq, a1, Operand(zero_reg),
          if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->symbol_string())) {
+    __ JumpIfSmi(v0, if_false);
+    __ GetObjectType(v0, v0, a1);
+    Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
  } else if (check->Equals(isolate()->heap()->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(if_true, eq, v0, Operand(at));

@@ -4351,10 +4333,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(if_true, eq, v0, Operand(at));
  }
-  if (FLAG_harmony_symbols) {
-    __ GetObjectType(v0, v0, a1);
-    __ Branch(if_true, eq, a1, Operand(SYMBOL_TYPE));
-  }
  // Check for JS objects => true.
  __ GetObjectType(v0, v0, a1);
  __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

90
deps/v8/src/mips/lithium-codegen-mips.cc

@@ -237,7 +237,7 @@ bool LCodeGen::GeneratePrologue() {
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
-            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
+            cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");

@@ -1492,13 +1492,6 @@
}

-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->value());
-  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}

void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->value());

@@ -1901,6 +1894,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
        __ bind(&not_string);
      }

+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        const Register scratch = scratch1();
+        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(true_label, eq, scratch, Operand(SYMBOL_TYPE));
+      }
+
      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        CpuFeatureScope scope(masm(), FPU);
        // heap number -> false iff +0, -0, or NaN.

@@ -2687,7 +2687,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
                              target.offset(),
                              value,
                              scratch0(),
-                              kRAHasBeenSaved,
+                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);

@@ -4030,7 +4030,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
                          HeapObject::kMapOffset,
                          scratch,
                          temp,
-                          kRAHasBeenSaved,
+                          GetRAState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);

@@ -4049,7 +4049,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
                        offset,
                        value,
                        scratch,
-                        kRAHasBeenSaved,
+                        GetRAState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);

@@ -4064,7 +4064,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
                        offset,
                        value,
                        object,
-                        kRAHasBeenSaved,
+                        GetRAState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);

@@ -4275,7 +4275,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
    __ RecordWrite(elements,
                   key,
                   value,
-                   kRAHasBeenSaved,
+                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);

@@ -4326,7 +4326,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
-                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+                        scratch, GetRAState(), kDontSaveFPRegs);
  } else if (FLAG_compiled_transitions) {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ mov(a0, object_reg);

@@ -4571,10 +4571,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
+// Integer to convert is passed in register src.
// Resulting double is returned in registers hiword:loword.
// This functions does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register src,
                                Register hiword,
                                Register loword,
                                Register scratch,

@@ -4588,12 +4589,12 @@ static void GenerateUInt2Double(MacroAssembler* masm,
      kBitsPerInt - mantissa_shift_for_hi_word;
  masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
  if (mantissa_shift_for_hi_word > 0) {
-    masm->sll(loword, hiword, mantissa_shift_for_lo_word);
-    masm->srl(hiword, hiword, mantissa_shift_for_hi_word);
+    masm->sll(loword, src, mantissa_shift_for_lo_word);
+    masm->srl(hiword, src, mantissa_shift_for_hi_word);
    masm->Or(hiword, scratch, hiword);
  } else {
    masm->mov(loword, zero_reg);
-    masm->sll(hiword, hiword, mantissa_shift_for_hi_word);
+    masm->sll(hiword, src, mantissa_shift_for_hi_word);
    masm->Or(hiword, scratch, hiword);
  }

@@ -4644,17 +4645,17 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  } else {
-    Label no_leading_zero, done;
+    Label no_leading_zero, convert_done;
    __ And(at, src, Operand(0x80000000));
    __ Branch(&no_leading_zero, ne, at, Operand(zero_reg));

    // Integer has one leading zeros.
-    GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1);
-    __ Branch(&done);
+    GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, t0, 1);
+    __ Branch(&convert_done);

    __ bind(&no_leading_zero);
-    GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0);
-    __ Branch(&done);
+    GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, t0, 0);
+    __ bind(&convert_done);
  }
}
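
GenerateUInt2Double now takes the integer in a dedicated src register, so
hiword is purely an output. What the emitted code computes follows from the
IEEE 754 layout: for a nonzero uint32 whose highest set bit is bit
(31 - leading_zeroes), that bit becomes the implicit 1, the biased exponent
is 1023 + (31 - leading_zeroes), and the remaining bits are left-aligned
into the 52-bit mantissa. A host-side model of the result (illustration
only, not V8 code):

    #include <stdint.h>

    // Returns the raw bit pattern of (double)v, given that v has exactly
    // |leading_zeroes| leading zero bits (v != 0; sign bit is 0).
    static uint64_t UInt2DoubleBits(uint32_t v, int leading_zeroes) {
      int top = 31 - leading_zeroes;        // position of the implicit 1
      uint64_t exponent = 1023 + top;       // IEEE 754 double exponent bias
      uint64_t mantissa =
          ((uint64_t)(v & ~(1u << top))) << (52 - top);  // drop implicit 1
      return (exponent << 52) | mantissa;
    }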
@@ -5344,12 +5345,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
-    __ AllocateInNewSpace(size,
-                          result,
-                          scratch,
-                          scratch2,
-                          deferred->entry(),
-                          flags);
+    __ Allocate(size,
+                result,
+                scratch,
+                scratch2,
+                deferred->entry(),
+                flags);
  }

  __ bind(deferred->exit());

@@ -5771,6 +5772,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

+  } else if (type_name->Equals(heap()->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    cmp1 = scratch;
+    cmp2 = Operand(SYMBOL_TYPE);
+    final_branch_condition = eq;
+
  } else if (type_name->Equals(heap()->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));

@@ -5814,27 +5822,15 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
-    if (FLAG_harmony_symbols) {
-      // input is an object, it is safe to use GetObjectType in the delay slot.
-      __ GetObjectType(input, input, scratch);
-      __ Branch(USE_DELAY_SLOT, true_label, eq, scratch, Operand(SYMBOL_TYPE));
-      // Still an object, so the InstanceType can be loaded.
-      __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
-      __ Branch(USE_DELAY_SLOT, false_label,
-                lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    } else {
-      // input is an object, it is safe to use GetObjectType in the delay slot.
-      __ GetObjectType(input, input, scratch);
-      __ Branch(USE_DELAY_SLOT, false_label,
-                lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    }
-    // Still an object, so the InstanceType can be loaded.
-    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+    Register map = input;
+    __ GetObjectType(input, map, scratch);
+    __ Branch(false_label,
+              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    // Still an object, so the BitField can be loaded.
+    // map is still valid, so the BitField can be loaded in delay slot.
    // Check for undetectable objects => false.
-    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);

4
deps/v8/src/mips/lithium-codegen-mips.h

@@ -87,6 +87,10 @@ class LCodeGen BASE_EMBEDDED {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

+  RAStatus GetRAState() const {
+    return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
+  }
+
  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;

43
deps/v8/src/mips/lithium-mips.cc

@@ -871,6 +871,35 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
  LInstruction* instr = current->CompileToLithium(this);

  if (instr != NULL) {
+#if DEBUG
+    // Make sure that the lithium instruction has either no fixed register
+    // constraints in temps or the result OR no uses that are only used at
+    // start. If this invariant doesn't hold, the register allocator can decide
+    // to insert a split of a range immediately before the instruction due to an
+    // already allocated register needing to be used for the instruction's fixed
+    // register constraint. In this case, the register allocator won't see an
+    // interference between the split child and the use-at-start (it would if
+    // it was just a plain use), so it is free to move the split child into
+    // the same register that is used for the use-at-start.
+    // See https://code.google.com/p/chromium/issues/detail?id=201590
+    if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+      int fixed = 0;
+      int used_at_start = 0;
+      for (UseIterator it(instr); !it.Done(); it.Advance()) {
+        LUnallocated* operand = LUnallocated::cast(it.Current());
+        if (operand->IsUsedAtStart()) ++used_at_start;
+      }
+      if (instr->Output() != NULL) {
+        if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+      }
+      for (TempIterator it(instr); !it.Done(); it.Advance()) {
+        LUnallocated* operand = LUnallocated::cast(it.Current());
+        if (operand->HasFixedPolicy()) ++fixed;
+      }
+      ASSERT(fixed == 0 || used_at_start == 0);
+    }
+#endif
+
    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
      instr = AssignPointerMap(instr);
    }
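
In short, the new debug check walks the instruction's operands and enforces
"no fixed-register outputs or temps on an instruction that also has
use-at-start operands". A one-line restatement of the asserted predicate
(illustration only):

    // Holds iff the instruction cannot trigger the bad split described in
    // the comment above (crbug.com/201590).
    static bool FixedAndUsedAtStartDontMix(int fixed, int used_at_start) {
      return fixed == 0 || used_at_start == 0;
    }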
@@ -1116,7 +1145,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
    return DefineFixedDouble(result, f4);
  } else {
-    LOperand* input = UseRegisterAtStart(instr->value());
+    LOperand* input = UseRegister(instr->value());
    LOperand* temp = (op == kMathRound) ? FixedTemp(f6) :
        (op == kMathFloor) ? TempRegister() : NULL;

@@ -1585,12 +1614,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}

-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}

LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
    HFixedArrayBaseLength* instr) {
  LOperand* array = UseRegisterAtStart(instr->value());

@@ -1697,11 +1720,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
      return AssignEnvironment(DefineAsRegister(res));
    } else {
      ASSERT(to.IsInteger32());
-      LOperand* value = UseRegisterAtStart(instr->value());
+      LOperand* value = NULL;
      LInstruction* res = NULL;
      if (instr->value()->type().IsSmi()) {
+        value = UseRegisterAtStart(instr->value());
        res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
      } else {
+        value = UseRegister(instr->value());
        LOperand* temp1 = TempRegister();
        LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
                                                      : NULL;

@@ -2081,7 +2106,7 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
    LOperand* new_map_reg = TempRegister();
    LTransitionElementsKind* result =
        new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
-    return DefineSameAsFirst(result);
+    return result;
  } else if (FLAG_compiled_transitions) {
    LTransitionElementsKind* result =
        new(zone()) LTransitionElementsKind(object, NULL, NULL);

17
deps/v8/src/mips/lithium-mips.h

@@ -120,7 +120,6 @@ class LCodeGen;
  V(IsStringAndBranch)                          \
  V(IsSmiAndBranch)                             \
  V(IsUndetectableAndBranch)                    \
-  V(JSArrayLength)                              \
  V(Label)                                      \
  V(LazyBailout)                                \
  V(LoadContextSlot)                            \

@@ -291,7 +290,6 @@ class LInstruction: public ZoneObject {
  SetOncePointer<LPointerMap> pointer_map_;
  HValue* hydrogen_value_;
  bool is_call_;
-  bool is_save_doubles_;
};

@@ -1111,19 +1109,6 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
};

-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LJSArrayLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};

class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LFixedArrayBaseLength(LOperand* value) {

@@ -2060,7 +2045,7 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
};

-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
 public:
  LTransitionElementsKind(LOperand* object,
                          LOperand* new_map_temp,

322
deps/v8/src/mips/macro-assembler-mips.cc

@@ -1108,6 +1108,7 @@ void MacroAssembler::BranchF(Label* target,
                             FPURegister cmp1,
                             FPURegister cmp2,
                             BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cc == al) {
    Branch(bd, target);
    return;

@@ -1700,6 +1701,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
  if (rt.is_reg()) {
    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
    // rt.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:

@@ -1785,6 +1787,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remember for patching the
    // target.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        b(offset);

@@ -1925,10 +1928,11 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

-  int32_t offset;
+  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remember for patching the

@@ -2035,6 +2039,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remember for patching the
    // target.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
@@ -2271,67 +2276,70 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
    li(r2, rt);
  }

-  switch (cond) {
-    case cc_always:
-      bal(offset);
-      break;
-    case eq:
-      bne(rs, r2, 2);
-      nop();
-      bal(offset);
-      break;
-    case ne:
-      beq(rs, r2, 2);
-      nop();
-      bal(offset);
-      break;
-
-    // Signed comparison.
-    case greater:
-      slt(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case greater_equal:
-      slt(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-    case less:
-      slt(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case less_equal:
-      slt(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-
-    // Unsigned comparison.
-    case Ugreater:
-      sltu(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case Ugreater_equal:
-      sltu(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-    case Uless:
-      sltu(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case Uless_equal:
-      sltu(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    switch (cond) {
+      case cc_always:
+        bal(offset);
+        break;
+      case eq:
+        bne(rs, r2, 2);
+        nop();
+        bal(offset);
+        break;
+      case ne:
+        beq(rs, r2, 2);
+        nop();
+        bal(offset);
+        break;
+
+      // Signed comparison.
+      case greater:
+        slt(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        bgezal(scratch, offset);
+        break;
+      case greater_equal:
+        slt(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        bltzal(scratch, offset);
+        break;
+      case less:
+        slt(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        bgezal(scratch, offset);
+        break;
+      case less_equal:
+        slt(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        bltzal(scratch, offset);
+        break;
+
+      // Unsigned comparison.
+      case Ugreater:
+        sltu(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        bgezal(scratch, offset);
+        break;
+      case Ugreater_equal:
+        sltu(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        bltzal(scratch, offset);
+        break;
+      case Uless:
+        sltu(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        bgezal(scratch, offset);
+        break;
+      case Uless_equal:
+        sltu(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        bltzal(scratch, offset);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
@@ -2353,7 +2361,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

-  int32_t offset;
+  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
@@ -2363,80 +2371,82 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
    li(r2, rt);
  }

-  switch (cond) {
-    case cc_always:
-      offset = shifted_branch_offset(L, false);
-      bal(offset);
-      break;
-    case eq:
-      bne(rs, r2, 2);
-      nop();
-      offset = shifted_branch_offset(L, false);
-      bal(offset);
-      break;
-    case ne:
-      beq(rs, r2, 2);
-      nop();
-      offset = shifted_branch_offset(L, false);
-      bal(offset);
-      break;
-
-    // Signed comparison.
-    case greater:
-      slt(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bgezal(scratch, offset);
-      break;
-    case greater_equal:
-      slt(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bltzal(scratch, offset);
-      break;
-    case less:
-      slt(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bgezal(scratch, offset);
-      break;
-    case less_equal:
-      slt(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bltzal(scratch, offset);
-      break;
-
-    // Unsigned comparison.
-    case Ugreater:
-      sltu(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bgezal(scratch, offset);
-      break;
-    case Ugreater_equal:
-      sltu(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bltzal(scratch, offset);
-      break;
-    case Uless:
-      sltu(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bgezal(scratch, offset);
-      break;
-    case Uless_equal:
-      sltu(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      offset = shifted_branch_offset(L, false);
-      bltzal(scratch, offset);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case eq:
+        bne(rs, r2, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case ne:
+        beq(rs, r2, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+
+      // Signed comparison.
+      case greater:
+        slt(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bgezal(scratch, offset);
+        break;
+      case greater_equal:
+        slt(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bltzal(scratch, offset);
+        break;
+      case less:
+        slt(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bgezal(scratch, offset);
+        break;
+      case less_equal:
+        slt(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bltzal(scratch, offset);
+        break;
+
+      // Unsigned comparison.
+      case Ugreater:
+        sltu(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bgezal(scratch, offset);
+        break;
+      case Ugreater_equal:
+        sltu(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bltzal(scratch, offset);
+        break;
+      case Uless:
+        sltu(scratch, rs, r2);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bgezal(scratch, offset);
+        break;
+      case Uless_equal:
+        sltu(scratch, r2, rs);
+        addiu(scratch, scratch, -1);
+        offset = shifted_branch_offset(L, false);
+        bltzal(scratch, offset);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }

  // Check that offset could actually hold on an int16_t.
  ASSERT(is_int16(offset));
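
Both BranchAndLinkShort variants now emit their compare-and-link sequences
inside a BlockTrampolinePoolScope, so a trampoline pool cannot be inserted
between the compare and the linking branch. The sequence itself is
unchanged; a host-side model of the slt/addiu/bgezal trick (illustration
only, assuming MIPS semantics):

    #include <stdint.h>

    // slt leaves 0 or 1 in scratch; subtracting 1 maps that to -1 or 0.
    // bgezal (branch-and-link if >= 0) then fires exactly when the compare
    // produced 1, and bltzal (if < 0) exactly when it produced 0.
    static int GreaterLinks(int32_t rs, int32_t r2) {
      int32_t scratch = (r2 < rs) ? 1 : 0;  // slt(scratch, r2, rs)
      scratch -= 1;                         // addiu(scratch, scratch, -1)
      return scratch >= 0;                  // bgezal takes the branch
    }

    static int GreaterEqualLinks(int32_t rs, int32_t r2) {
      int32_t scratch = (rs < r2) ? 1 : 0;  // slt(scratch, rs, r2)
      scratch -= 1;                         // addiu(scratch, scratch, -1)
      return scratch < 0;                   // bltzal takes the branch
    }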
@@ -2992,13 +3002,12 @@ void MacroAssembler::Allocate(int object_size,
}

-void MacroAssembler::AllocateInNewSpace(Register object_size,
-                                        Register result,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Label* gc_required,
-                                        AllocationFlags flags) {
-  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+void MacroAssembler::Allocate(Register object_size,
+                              Register result,
+                              Register scratch1,
+                              Register scratch2,
+                              Label* gc_required,
+                              AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.

@@ -3019,19 +3028,19 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
  // Check relative positions of allocation top and limit addresses.
  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm so we don't need additional checks either.
-  ExternalReference new_space_allocation_top =
-      ExternalReference::new_space_allocation_top_address(isolate());
-  ExternalReference new_space_allocation_limit =
-      ExternalReference::new_space_allocation_limit_address(isolate());
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top =
-      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+      reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit =
-      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+      reinterpret_cast<intptr_t>(allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
-  li(topaddr, Operand(new_space_allocation_top));
+  li(topaddr, Operand(allocation_top));

  // This code stores a temporary value in t9.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {

@@ -3110,12 +3119,12 @@ void MacroAssembler::AllocateTwoByteString(Register result,
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
-  AllocateInNewSpace(scratch1,
-                     result,
-                     scratch2,
-                     scratch3,
-                     gc_required,
-                     TAG_OBJECT);
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,

@@ -3140,12 +3149,12 @@ void MacroAssembler::AllocateAsciiString(Register result,
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
-  AllocateInNewSpace(scratch1,
-                     result,
-                     scratch2,
-                     scratch3,
-                     gc_required,
-                     TAG_OBJECT);
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,

@@ -5505,7 +5514,6 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
-      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.

13
deps/v8/src/mips/macro-assembler-mips.h

@@ -491,12 +491,12 @@ class MacroAssembler: public Assembler {
                Label* gc_required,
                AllocationFlags flags);

-  void AllocateInNewSpace(Register object_size,
-                          Register result,
-                          Register scratch1,
-                          Register scratch2,
-                          Label* gc_required,
-                          AllocationFlags flags);
+  void Allocate(Register object_size,
+                Register result,
+                Register scratch1,
+                Register scratch2,
+                Label* gc_required,
+                AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers

@@ -1565,7 +1565,6 @@ class CodePatcher {
 private:
  byte* address_;  // The address of the code being patched.
-  int instructions_;  // Number of instructions of the expected patch size.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};

1
deps/v8/src/mips/regexp-macro-assembler-mips.cc

@@ -1005,6 +1005,7 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
    int target = label->pos();
    __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
  } else {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Label after_constant;
    __ Branch(&after_constant);
    int offset = masm_->pc_offset();

28
deps/v8/src/mips/simulator-mips.cc

@@ -132,8 +132,8 @@ void MipsDebugger::Stop(Instruction* instr) {
  ASSERT(msg != NULL);

  // Update this stop description.
-  if (!watched_stops[code].desc) {
-    watched_stops[code].desc = msg;
+  if (!watched_stops_[code].desc) {
+    watched_stops_[code].desc = msg;
  }

  if (strlen(msg) > 0) {

@@ -163,8 +163,8 @@ void MipsDebugger::Stop(Instruction* instr) {
      char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
                                            Instruction::kInstrSize);
      // Update this stop description.
-      if (!sim_->watched_stops[code].desc) {
-        sim_->watched_stops[code].desc = msg;
+      if (!sim_->watched_stops_[code].desc) {
+        sim_->watched_stops_[code].desc = msg;
      }
      PrintF("Simulator hit %s (%u)\n", msg, code);
      sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);

@@ -513,7 +513,7 @@ void MipsDebugger::Debug() {
        int32_t words;
        if (argc == next_arg) {
          words = 10;
-        } else if (argc == next_arg + 1) {
+        } else {
          if (!GetValue(argv[next_arg], &words)) {
            words = 10;
          }

@@ -1636,33 +1636,33 @@ bool Simulator::IsStopInstruction(Instruction* instr) {
bool Simulator::IsEnabledStop(uint32_t code) {
  ASSERT(code <= kMaxStopCode);
  ASSERT(code > kMaxWatchpointCode);
-  return !(watched_stops[code].count & kStopDisabledBit);
+  return !(watched_stops_[code].count & kStopDisabledBit);
}

void Simulator::EnableStop(uint32_t code) {
  if (!IsEnabledStop(code)) {
-    watched_stops[code].count &= ~kStopDisabledBit;
+    watched_stops_[code].count &= ~kStopDisabledBit;
  }
}

void Simulator::DisableStop(uint32_t code) {
  if (IsEnabledStop(code)) {
-    watched_stops[code].count |= kStopDisabledBit;
+    watched_stops_[code].count |= kStopDisabledBit;
  }
}

void Simulator::IncreaseStopCounter(uint32_t code) {
  ASSERT(code <= kMaxStopCode);
-  if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
    PrintF("Stop counter for code %i has overflowed.\n"
           "Enabling this code and reseting the counter to 0.\n", code);
-    watched_stops[code].count = 0;
+    watched_stops_[code].count = 0;
    EnableStop(code);
  } else {
-    watched_stops[code].count++;
+    watched_stops_[code].count++;
  }
}

@@ -1677,12 +1677,12 @@ void Simulator::PrintStopInfo(uint32_t code) {
    return;
  }
  const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
-  int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+  int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
  // Don't print the state of unused breakpoints.
  if (count != 0) {
-    if (watched_stops[code].desc) {
+    if (watched_stops_[code].desc) {
      PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
-             code, code, state, count, watched_stops[code].desc);
+             code, code, state, count, watched_stops_[code].desc);
    } else {
      PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
             code, code, state, count);

6
deps/v8/src/mips/simulator-mips.h

@@ -391,14 +391,14 @@ class Simulator {
  static const uint32_t kStopDisabledBit = 1 << 31;

  // A stop is enabled, meaning the simulator will stop when meeting the
-  // instruction, if bit 31 of watched_stops[code].count is unset.
-  // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+  // instruction, if bit 31 of watched_stops_[code].count is unset.
+  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
  // the breakpoint was hit or gone through.
  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };
-  StopCountAndDesc watched_stops[kMaxStopCode + 1];
+  StopCountAndDesc watched_stops_[kMaxStopCode + 1];
};
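
The rename to watched_stops_ follows V8's trailing-underscore convention for
data members; the packing itself is unchanged: bit 31 of count is the
disable flag, bits 0..30 the hit counter. A host-side model (illustration
only):

    #include <stdint.h>

    static const uint32_t kStopDisabledBit = 1u << 31;
    struct StopCountAndDesc { uint32_t count; char* desc; };

    static int IsEnabledStop(const struct StopCountAndDesc* s) {
      return !(s->count & kStopDisabledBit);  // flag unset => enabled
    }
    static uint32_t StopHitCount(const struct StopCountAndDesc* s) {
      return s->count & ~kStopDisabledBit;    // mask off the flag bit
    }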

45
deps/v8/src/mips/stub-cache-mips.cc

@@ -415,7 +415,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                      Handle<JSObject> object,
-                                      int index,
+                                      LookupResult* lookup,
                                      Handle<Map> transition,
                                      Handle<Name> name,
                                      Register receiver_reg,

@@ -428,16 +428,6 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
  //  -- a0    : value.
  Label exit;

-  LookupResult lookup(masm->isolate());
-  object->Lookup(*name, &lookup);
-  if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
-    // In sloppy mode, we could just return the value and be done. However, we
-    // might be in strict mode, where we have to throw. Since we cannot tell,
-    // go into slow case unconditionally.
-    __ jmp(miss_label);
-    return;
-  }
-
  // Check that the map of the object hasn't changed.
  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                             : REQUIRE_EXACT_MAP;

@@ -452,8 +442,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
  // Check that we are allowed to write this.
  if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
    JSObject* holder;
-    if (lookup.IsFound()) {
-      holder = lookup.holder();
+    // holder == object indicates that no property was found.
+    if (lookup->holder() != *object) {
+      holder = lookup->holder();
    } else {
      // Find the top object.
      holder = *object;
@@ -461,8 +452,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
        holder = JSObject::cast(holder->GetPrototype());
      } while (holder->GetPrototype()->IsJSObject());
    }
-    CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
-                    scratch1, scratch2, name, miss_restore_name);
+    Register holder_reg = CheckPrototypes(
+        object, receiver_reg, Handle<JSObject>(holder), name_reg,
+        scratch1, scratch2, name, miss_restore_name);
+    // If no property was found, and the holder (the last object in the
+    // prototype chain) is in slow mode, we need to do a negative lookup on the
+    // holder.
+    if (lookup->holder() == *object &&
+        !holder->HasFastProperties() &&
+        !holder->IsJSGlobalProxy() &&
+        !holder->IsJSGlobalObject()) {
+      GenerateDictionaryNegativeLookup(
+          masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+    }
  }

  // Stub never generated for non-global objects that require access
@@ -483,6 +485,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
    return;
  }

+  int index;
  if (!transition.is_null()) {
    // Update the map of the object.
    __ li(scratch1, Operand(transition));
@@ -498,6 +501,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                        kDontSaveFPRegs,
                        OMIT_REMEMBERED_SET,
                        OMIT_SMI_CHECK);
+    index = transition->instance_descriptors()->GetFieldIndex(
+        transition->LastAdded());
+  } else {
+    index = lookup->GetFieldIndex().field_index();
  }

  // Adjust for the number of properties stored in the object. Even in the
@@ -2424,6 +2431,12 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
      // Check that the object is a symbol.
      __ GetObjectType(a1, a1, a3);
      __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE));
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::SYMBOL_FUNCTION_INDEX, a0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+          a0, holder, a3, a1, t0, name, &miss);
      break;

    case NUMBER_CHECK: {
@@ -3010,7 +3023,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
    __ Check(eq, "Instance size of initial map changed.",
        a3, Operand(instance_size >> kPointerSizeLog2));
#endif
-  __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
+  __ Allocate(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);

  // Allocated the JSObject, now initialize the fields. Map is set to initial
  // map and properties and elements are set to empty fixed array.

Some files were not shown because too many files changed in this diff
