
Upgrade V8 to 1.3.9

v0.7.4-release
Ryan 16 years ago
commit 97ce138621
  1. deps/v8/ChangeLog (27)
  2. deps/v8/SConstruct (64)
  3. deps/v8/include/v8.h (352)
  4. deps/v8/src/SConscript (42)
  5. deps/v8/src/api.cc (185)
  6. deps/v8/src/api.h (24)
  7. deps/v8/src/apiutils.h (2)
  8. deps/v8/src/arm/assembler-arm-inl.h (2)
  9. deps/v8/src/arm/assembler-arm.cc (100)
  10. deps/v8/src/arm/assembler-arm.h (17)
  11. deps/v8/src/arm/builtins-arm.cc (2)
  12. deps/v8/src/arm/codegen-arm.cc (82)
  13. deps/v8/src/arm/disasm-arm.cc (64)
  14. deps/v8/src/arm/macro-assembler-arm.cc (43)
  15. deps/v8/src/arm/macro-assembler-arm.h (16)
  16. deps/v8/src/arm/regexp-macro-assembler-arm.cc (1192)
  17. deps/v8/src/arm/regexp-macro-assembler-arm.h (226)
  18. deps/v8/src/arm/simulator-arm.cc (173)
  19. deps/v8/src/arm/simulator-arm.h (27)
  20. deps/v8/src/arm/stub-cache-arm.cc (14)
  21. deps/v8/src/arm/virtual-frame-arm.cc (26)
  22. deps/v8/src/assembler.cc (42)
  23. deps/v8/src/assembler.h (13)
  24. deps/v8/src/builtins.cc (81)
  25. deps/v8/src/checks.h (32)
  26. deps/v8/src/code-stubs.h (1)
  27. deps/v8/src/compiler.cc (7)
  28. deps/v8/src/d8.js (2)
  29. deps/v8/src/debug-delay.js (9)
  30. deps/v8/src/debug.cc (6)
  31. deps/v8/src/execution.cc (16)
  32. deps/v8/src/execution.h (22)
  33. deps/v8/src/frames-inl.h (5)
  34. deps/v8/src/frames.h (9)
  35. deps/v8/src/globals.h (23)
  36. deps/v8/src/handles.cc (12)
  37. deps/v8/src/handles.h (6)
  38. deps/v8/src/heap.cc (99)
  39. deps/v8/src/heap.h (164)
  40. deps/v8/src/ia32/builtins-ia32.cc (37)
  41. deps/v8/src/ia32/codegen-ia32.cc (42)
  42. deps/v8/src/ia32/ic-ia32.cc (2)
  43. deps/v8/src/ia32/macro-assembler-ia32.cc (140)
  44. deps/v8/src/ia32/macro-assembler-ia32.h (49)
  45. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (41)
  46. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (23)
  47. deps/v8/src/ia32/simulator-ia32.h (5)
  48. deps/v8/src/ia32/stub-cache-ia32.cc (129)
  49. deps/v8/src/jsregexp.cc (50)
  50. deps/v8/src/mark-compact.cc (2)
  51. deps/v8/src/messages.js (2)
  52. deps/v8/src/objects-debug.cc (2)
  53. deps/v8/src/objects-inl.h (12)
  54. deps/v8/src/objects.cc (53)
  55. deps/v8/src/objects.h (18)
  56. deps/v8/src/parser.cc (9)
  57. deps/v8/src/platform-win32.cc (7)
  58. deps/v8/src/regexp-macro-assembler-irregexp-inl.h (2)
  59. deps/v8/src/regexp-macro-assembler-irregexp.cc (2)
  60. deps/v8/src/regexp-macro-assembler-irregexp.h (3)
  61. deps/v8/src/regexp-macro-assembler-tracer.h (2)
  62. deps/v8/src/regexp-macro-assembler.cc (57)
  63. deps/v8/src/regexp-macro-assembler.h (27)
  64. deps/v8/src/runtime.cc (63)
  65. deps/v8/src/runtime.h (3)
  66. deps/v8/src/serialize.cc (26)
  67. deps/v8/src/stub-cache.cc (12)
  68. deps/v8/src/stub-cache.h (11)
  69. deps/v8/src/top.cc (13)
  70. deps/v8/src/top.h (4)
  71. deps/v8/src/v8-counters.h (1)
  72. deps/v8/src/v8.cc (4)
  73. deps/v8/src/version.cc (2)
  74. deps/v8/src/x64/assembler-x64.cc (18)
  75. deps/v8/src/x64/assembler-x64.h (7)
  76. deps/v8/src/x64/builtins-x64.cc (72)
  77. deps/v8/src/x64/cfg-x64.cc (3)
  78. deps/v8/src/x64/codegen-x64.cc (481)
  79. deps/v8/src/x64/codegen-x64.h (8)
  80. deps/v8/src/x64/frames-x64.h (3)
  81. deps/v8/src/x64/ic-x64.cc (82)
  82. deps/v8/src/x64/macro-assembler-x64.cc (200)
  83. deps/v8/src/x64/macro-assembler-x64.h (53)
  84. deps/v8/src/x64/regexp-macro-assembler-x64.cc (78)
  85. deps/v8/src/x64/regexp-macro-assembler-x64.h (27)
  86. deps/v8/src/x64/register-allocator-x64-inl.h (20)
  87. deps/v8/src/x64/register-allocator-x64.h (4)
  88. deps/v8/src/x64/simulator-x64.h (5)
  89. deps/v8/src/x64/stub-cache-x64.cc (142)
  90. deps/v8/src/x64/virtual-frame-x64.cc (8)
  91. deps/v8/src/x64/virtual-frame-x64.h (1)
  92. deps/v8/test/cctest/cctest.status (4)
  93. deps/v8/test/cctest/test-api.cc (61)
  94. deps/v8/test/cctest/test-assembler-arm.cc (4)
  95. deps/v8/test/cctest/test-heap.cc (2)
  96. deps/v8/test/cctest/test-log-stack-tracer.cc (6)
  97. deps/v8/test/cctest/test-regexp.cc (39)
  98. deps/v8/test/cctest/test-thread-termination.cc (60)
  99. deps/v8/test/cctest/test-utils.cc (2)
  100. deps/v8/test/mjsunit/debug-stepin-constructor.js (4)

deps/v8/ChangeLog (27)

@@ -1,3 +1,30 @@
+2009-09-02: Version 1.3.9
+
+        Optimized stack guard checks on ARM.
+
+        Optimized API operations by inlining more in the API.
+
+        Optimized creation of objects from simple constructor functions.
+
+        Enabled a number of missing optimizations in the 64-bit port.
+
+        Implemented native-code support for regular expressions on ARM.
+
+        Stopped using the 'sahf' instruction on 64-bit machines that do
+        not support it.
+
+        Fixed a bug in the support for forceful termination of JavaScript
+        execution.
+
+
+2009-08-26: Version 1.3.8
+
+        Changed the handling of idle notifications to allow idle
+        notifications when V8 has not yet been initialized.
+
+        Fixed ARM simulator compilation problem on Windows.
+
+
 2009-08-25: Version 1.3.7
 
         Reduced the size of generated code on ARM platforms by reducing
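
One entry above is worth a concrete gloss: 'sahf' exists everywhere in 32-bit mode, but early AMD64 processors dropped LAHF/SAHF in long mode, so a 64-bit code generator has to probe for it. A minimal stand-alone probe, assuming a GCC/Clang-style compiler on x86 (illustrative only; V8's actual feature detection lives in its platform code, which is not part of this excerpt):

// Hypothetical probe: CPUID leaf 0x80000001, ECX bit 0 reports whether
// LAHF/SAHF work in 64-bit mode; early AMD64 parts clear this bit.
#include <cpuid.h>
#include <cstdio>

static bool CpuSupportsSahfIn64BitMode() {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) return false;
  return (ecx & 1) != 0;
}

int main() {
  std::printf("sahf usable in 64-bit mode: %s\n",
              CpuSupportsSahfIn64BitMode() ? "yes" : "no");
  return 0;
}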

deps/v8/SConstruct (64)

@@ -99,12 +99,10 @@ LIBRARY_FLAGS = {
     'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
     'CPPPATH': [join(root_dir, 'src')],
     'regexp:native': {
-      'arch:ia32' : {
-        'CPPDEFINES': ['V8_NATIVE_REGEXP']
-      },
-      'arch:x64' : {
-        'CPPDEFINES': ['V8_NATIVE_REGEXP']
-      }
+      'CPPDEFINES': ['V8_NATIVE_REGEXP']
+    },
+    'mode:debug': {
+      'CPPDEFINES': ['V8_ENABLE_CHECKS']
     }
   },
   'gcc': {
@@ -178,17 +176,25 @@ LIBRARY_FLAGS = {
   },
   'msvc': {
     'all': {
-      'DIALECTFLAGS': ['/nologo'],
       'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
       'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
-      'CPPDEFINES': ['WIN32', '_USE_32BIT_TIME_T'],
-      'LINKFLAGS': ['/NOLOGO', '/MACHINE:X86', '/INCREMENTAL:NO',
-                    '/NXCOMPAT', '/IGNORE:4221'],
-      'ARFLAGS': ['/NOLOGO'],
+      'CPPDEFINES': ['WIN32'],
+      'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
      'CCPDBFLAGS': ['/Zi']
     },
+    'verbose:off': {
+      'DIALECTFLAGS': ['/nologo'],
+      'ARFLAGS': ['/NOLOGO']
+    },
     'arch:ia32': {
-      'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
+      'CPPDEFINES': ['V8_TARGET_ARCH_IA32', '_USE_32BIT_TIME_T'],
+      'LINKFLAGS': ['/MACHINE:X86'],
+      'ARFLAGS': ['/MACHINE:X86']
+    },
+    'arch:x64': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+      'LINKFLAGS': ['/MACHINE:X64'],
+      'ARFLAGS': ['/MACHINE:X64']
     },
     'mode:debug': {
       'CCFLAGS': ['/Od', '/Gm'],
@@ -250,11 +256,13 @@ V8_EXTRA_FLAGS = {
   },
   'msvc': {
     'all': {
-      'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
+      'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800']
     },
-    'library:shared': {
-      'CPPDEFINES': ['BUILDING_V8_SHARED'],
-      'LIBS': ['winmm', 'ws2_32']
+    'arch:ia32': {
+      'WARNINGFLAGS': ['/W3']
+    },
+    'arch:x64': {
+      'WARNINGFLAGS': ['/W2']
     },
     'arch:arm': {
       'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
@@ -352,7 +360,10 @@ CCTEST_EXTRA_FLAGS = {
     },
     'arch:ia32': {
       'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
-    }
+    },
+    'arch:x64': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_X64']
+    },
   }
 }
@@ -417,10 +428,15 @@ SAMPLE_FLAGS = {
   },
   'msvc': {
     'all': {
-      'CCFLAGS': ['/nologo'],
-      'LINKFLAGS': ['/nologo'],
       'LIBS': ['winmm', 'ws2_32']
     },
+    'verbose:off': {
+      'CCFLAGS': ['/nologo'],
+      'LINKFLAGS': ['/NOLOGO']
+    },
+    'verbose:on': {
+      'LINKFLAGS': ['/VERBOSE']
+    },
     'library:shared': {
       'CPPDEFINES': ['USING_V8_SHARED']
     },
@@ -442,7 +458,12 @@ SAMPLE_FLAGS = {
       }
     },
     'arch:ia32': {
-      'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
+      'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
+      'LINKFLAGS': ['/MACHINE:X86']
+    },
+    'arch:x64': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+      'LINKFLAGS': ['/MACHINE:X64']
     },
     'mode:debug': {
       'CCFLAGS': ['/Od'],
@@ -585,6 +606,11 @@ SIMPLE_OPTIONS = {
     'values': ['dumb', 'readline'],
     'default': 'dumb',
     'help': 'the console to use for the d8 shell'
+  },
+  'verbose': {
+    'values': ['on', 'off'],
+    'default': 'off',
+    'help': 'more output from compiler and linker'
   }
 }

deps/v8/include/v8.h (352)

@@ -127,6 +127,12 @@ class FunctionTemplate;
 class ObjectTemplate;
 class Data;
 
+namespace internal {
+
+class Object;
+
+}
+
 
 // --- W e a k  H a n d l e s ---
@@ -227,8 +233,8 @@ template <class T> class V8EXPORT_INLINE Handle {
    * The handles' references are not checked.
    */
  template <class S> bool operator==(Handle<S> that) const {
-    void** a = reinterpret_cast<void**>(**this);
-    void** b = reinterpret_cast<void**>(*that);
+    internal::Object** a = reinterpret_cast<internal::Object**>(**this);
+    internal::Object** b = reinterpret_cast<internal::Object**>(*that);
     if (a == 0) return b == 0;
     if (b == 0) return false;
     return *a == *b;
@@ -245,7 +251,11 @@ template <class T> class V8EXPORT_INLINE Handle {
   }
 
   template <class S> static inline Handle<T> Cast(Handle<S> that) {
+#ifdef V8_ENABLE_CHECKS
+    // If we're going to perform the type check then we have to check
+    // that the handle isn't empty before doing the checked cast.
     if (that.IsEmpty()) return Handle<T>();
+#endif
     return Handle<T>(T::Cast(*that));
   }
@@ -275,7 +285,11 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
   }
   template <class S> inline Local(S* that) : Handle<T>(that) { }
   template <class S> static inline Local<T> Cast(Local<S> that) {
+#ifdef V8_ENABLE_CHECKS
+    // If we're going to perform the type check then we have to check
+    // that the handle isn't empty before doing the checked cast.
     if (that.IsEmpty()) return Local<T>();
+#endif
     return Local<T>(T::Cast(*that));
   }
@@ -344,7 +358,11 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
       : Handle<T>(*that) { }
 
   template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
+#ifdef V8_ENABLE_CHECKS
+    // If we're going to perform the type check then we have to check
+    // that the handle isn't empty before doing the checked cast.
     if (that.IsEmpty()) return Persistent<T>();
+#endif
     return Persistent<T>(T::Cast(*that));
   }
@@ -423,7 +441,7 @@ class V8EXPORT HandleScope {
   /**
    * Creates a new handle with the given value.
    */
-  static void** CreateHandle(void* value);
+  static internal::Object** CreateHandle(internal::Object* value);
 
  private:
   // Make it impossible to create heap-allocated or illegal handle
@@ -438,8 +456,8 @@ class V8EXPORT HandleScope {
   class V8EXPORT Data {
    public:
     int extensions;
-    void** next;
-    void** limit;
+    internal::Object** next;
+    internal::Object** limit;
     inline void Initialize() {
       extensions = -1;
       next = limit = NULL;
@@ -451,7 +469,7 @@ class V8EXPORT HandleScope {
   // Allow for the active closing of HandleScopes which allows to pass a handle
   // from the HandleScope being closed to the next top most HandleScope.
   bool is_closed_;
-  void** RawClose(void** value);
+  internal::Object** RawClose(internal::Object** value);
 
   friend class ImplementationUtilities;
 };
@@ -671,7 +689,7 @@ class V8EXPORT Value : public Data {
    * Returns true if this value is an instance of the String type.
    * See ECMA-262 8.4.
    */
-  bool IsString() const;
+  inline bool IsString() const;
 
   /**
    * Returns true if this value is a function.
@@ -737,6 +755,10 @@ class V8EXPORT Value : public Data {
 
   /** JS == */
   bool Equals(Handle<Value> that) const;
   bool StrictEquals(Handle<Value> that) const;
+
+ private:
+  inline bool QuickIsString() const;
+  bool FullIsString() const;
 };
@@ -868,7 +890,7 @@ class V8EXPORT String : public Primitive {
    * Get the ExternalStringResource for an external string.  Returns
    * NULL if IsExternal() doesn't return true.
    */
-  ExternalStringResource* GetExternalStringResource() const;
+  inline ExternalStringResource* GetExternalStringResource() const;
 
   /**
    * Get the ExternalAsciiStringResource for an external ascii string.
@@ -876,7 +898,7 @@ class V8EXPORT String : public Primitive {
    */
   ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
 
-  static String* Cast(v8::Value* obj);
+  static inline String* Cast(v8::Value* obj);
 
   /**
    * Allocates a new string from either utf-8 encoded or ascii data.
@@ -1010,6 +1032,10 @@ class V8EXPORT String : public Primitive {
     Value(const Value&);
     void operator=(const Value&);
   };
+
+ private:
+  void VerifyExternalStringResource(ExternalStringResource* val) const;
+  static void CheckCast(v8::Value* obj);
 };
@@ -1020,9 +1046,10 @@ class V8EXPORT Number : public Primitive {
  public:
   double Value() const;
   static Local<Number> New(double value);
-  static Number* Cast(v8::Value* obj);
+  static inline Number* Cast(v8::Value* obj);
  private:
   Number();
+  static void CheckCast(v8::Value* obj);
 };
@@ -1033,9 +1060,10 @@ class V8EXPORT Integer : public Number {
  public:
   static Local<Integer> New(int32_t value);
   int64_t Value() const;
-  static Integer* Cast(v8::Value* obj);
+  static inline Integer* Cast(v8::Value* obj);
  private:
   Integer();
+  static void CheckCast(v8::Value* obj);
 };
@@ -1074,7 +1102,9 @@ class V8EXPORT Date : public Value {
    */
   double NumberValue() const;
 
-  static Date* Cast(v8::Value* obj);
+  static inline Date* Cast(v8::Value* obj);
+ private:
+  static void CheckCast(v8::Value* obj);
 };
@@ -1153,14 +1183,13 @@ class V8EXPORT Object : public Value {
   /** Gets the number of internal fields for this Object. */
   int InternalFieldCount();
   /** Gets the value in an internal field. */
-  Local<Value> GetInternalField(int index);
+  inline Local<Value> GetInternalField(int index);
   /** Sets the value in an internal field. */
   void SetInternalField(int index, Handle<Value> value);
 
-  // The two functions below do not perform index bounds checks and
-  // they do not check that the VM is still running. Use with caution.
   /** Gets a native pointer from an internal field. */
-  void* GetPointerFromInternalField(int index);
+  inline void* GetPointerFromInternalField(int index);
   /** Sets a native pointer in an internal field. */
   void SetPointerInInternalField(int index, void* value);
@@ -1223,9 +1252,17 @@ class V8EXPORT Object : public Value {
   void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
 
   static Local<Object> New();
-  static Object* Cast(Value* obj);
+  static inline Object* Cast(Value* obj);
  private:
   Object();
+  static void CheckCast(Value* obj);
+  Local<Value> CheckedGetInternalField(int index);
+
+  /**
+   * If quick access to the internal field is possible this method
+   * returns the value.  Otherwise an empty handle is returned.
+   */
+  inline Local<Value> UncheckedGetInternalField(int index);
 };
@@ -1243,9 +1280,10 @@ class V8EXPORT Array : public Object {
   Local<Object> CloneElementAt(uint32_t index);
 
   static Local<Array> New(int length = 0);
-  static Array* Cast(Value* obj);
+  static inline Array* Cast(Value* obj);
  private:
   Array();
+  static void CheckCast(Value* obj);
 };
@@ -1259,9 +1297,10 @@ class V8EXPORT Function : public Object {
   Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
   void SetName(Handle<String> name);
   Handle<Value> GetName() const;
-  static Function* Cast(Value* obj);
+  static inline Function* Cast(Value* obj);
  private:
   Function();
+  static void CheckCast(Value* obj);
 };
@@ -1279,13 +1318,16 @@ class V8EXPORT Function : public Object {
 class V8EXPORT External : public Value {
  public:
   static Local<Value> Wrap(void* data);
-  static void* Unwrap(Handle<Value> obj);
+  static inline void* Unwrap(Handle<Value> obj);
   static Local<External> New(void* value);
-  static External* Cast(Value* obj);
+  static inline External* Cast(Value* obj);
   void* Value() const;
  private:
   External();
+  static void CheckCast(v8::Value* obj);
+  static inline void* QuickUnwrap(Handle<v8::Value> obj);
+  static void* FullUnwrap(Handle<v8::Value> obj);
 };
@@ -2297,12 +2339,14 @@ class V8EXPORT V8 {
 
  private:
   V8();
 
-  static void** GlobalizeReference(void** handle);
-  static void DisposeGlobal(void** global_handle);
-  static void MakeWeak(void** global_handle, void* data, WeakReferenceCallback);
-  static void ClearWeak(void** global_handle);
-  static bool IsGlobalNearDeath(void** global_handle);
-  static bool IsGlobalWeak(void** global_handle);
+  static internal::Object** GlobalizeReference(internal::Object** handle);
+  static void DisposeGlobal(internal::Object** global_handle);
+  static void MakeWeak(internal::Object** global_handle,
+                       void* data,
+                       WeakReferenceCallback);
+  static void ClearWeak(internal::Object** global_handle);
+  static bool IsGlobalNearDeath(internal::Object** global_handle);
+  static bool IsGlobalWeak(internal::Object** global_handle);
 
   template <class T> friend class Handle;
   template <class T> friend class Local;
@@ -2641,6 +2685,76 @@ class V8EXPORT Locker {
 
 // --- I m p l e m e n t a t i o n ---
 
+
+namespace internal {
+
+// Tag information for HeapObject.
+const int kHeapObjectTag = 1;
+const int kHeapObjectTagSize = 2;
+const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+
+// Tag information for Smi.
+const int kSmiTag = 0;
+const int kSmiTagSize = 1;
+const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
+
+/**
+ * This class exports constants and functionality from within v8 that
+ * is necessary to implement inline functions in the v8 api.  Don't
+ * depend on functions and constants defined here.
+ */
+class Internals {
+ public:
+
+  // These values match non-compiler-dependent values defined within
+  // the implementation of v8.
+  static const int kHeapObjectMapOffset = 0;
+  static const int kMapInstanceTypeOffset = sizeof(void*) + sizeof(int);
+  static const int kStringResourceOffset = 2 * sizeof(void*);
+  static const int kProxyProxyOffset = sizeof(void*);
+  static const int kJSObjectHeaderSize = 3 * sizeof(void*);
+  static const int kFullStringRepresentationMask = 0x07;
+  static const int kExternalTwoByteRepresentationTag = 0x03;
+  static const int kAlignedPointerShift = 2;
+
+  // These constants are compiler dependent so their values must be
+  // defined within the implementation.
+  static int kJSObjectType;
+  static int kFirstNonstringType;
+  static int kProxyType;
+
+  static inline bool HasHeapObjectTag(internal::Object* value) {
+    return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
+            kHeapObjectTag);
+  }
+
+  static inline bool HasSmiTag(internal::Object* value) {
+    return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
+  }
+
+  static inline int SmiValue(internal::Object* value) {
+    return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
+  }
+
+  static inline bool IsExternalTwoByteString(int instance_type) {
+    int representation = (instance_type & kFullStringRepresentationMask);
+    return representation == kExternalTwoByteRepresentationTag;
+  }
+
+  template <typename T>
+  static inline T ReadField(Object* ptr, int offset) {
+    uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
+    return *reinterpret_cast<T*>(addr);
+  }
+
+};
+
+}
+
 
 template <class T>
 Handle<T>::Handle() : val_(0) { }
@@ -2652,7 +2766,7 @@ Local<T>::Local() : Handle<T>() { }
 template <class T>
 Local<T> Local<T>::New(Handle<T> that) {
   if (that.IsEmpty()) return Local<T>();
-  void** p = reinterpret_cast<void**>(*that);
+  internal::Object** p = reinterpret_cast<internal::Object**>(*that);
   return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
 }
@@ -2660,7 +2774,7 @@ Local<T> Local<T>::New(Handle<T> that) {
 template <class T>
 Persistent<T> Persistent<T>::New(Handle<T> that) {
   if (that.IsEmpty()) return Persistent<T>();
-  void** p = reinterpret_cast<void**>(*that);
+  internal::Object** p = reinterpret_cast<internal::Object**>(*that);
   return Persistent<T>(reinterpret_cast<T*>(V8::GlobalizeReference(p)));
 }
@@ -2668,21 +2782,21 @@ Persistent<T> Persistent<T>::New(Handle<T> that) {
 template <class T>
 bool Persistent<T>::IsNearDeath() const {
   if (this->IsEmpty()) return false;
-  return V8::IsGlobalNearDeath(reinterpret_cast<void**>(**this));
+  return V8::IsGlobalNearDeath(reinterpret_cast<internal::Object**>(**this));
 }
 
 
 template <class T>
 bool Persistent<T>::IsWeak() const {
   if (this->IsEmpty()) return false;
-  return V8::IsGlobalWeak(reinterpret_cast<void**>(**this));
+  return V8::IsGlobalWeak(reinterpret_cast<internal::Object**>(**this));
 }
 
 
 template <class T>
 void Persistent<T>::Dispose() {
   if (this->IsEmpty()) return;
-  V8::DisposeGlobal(reinterpret_cast<void**>(**this));
+  V8::DisposeGlobal(reinterpret_cast<internal::Object**>(**this));
 }
@@ -2691,12 +2805,14 @@ Persistent<T>::Persistent() : Handle<T>() { }
 
 template <class T>
 void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
-  V8::MakeWeak(reinterpret_cast<void**>(**this), parameters, callback);
+  V8::MakeWeak(reinterpret_cast<internal::Object**>(**this),
+               parameters,
+               callback);
 }
 
 template <class T>
 void Persistent<T>::ClearWeak() {
-  V8::ClearWeak(reinterpret_cast<void**>(**this));
+  V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
 }
 
 Local<Value> Arguments::operator[](int i) const {
@@ -2752,7 +2868,8 @@ Local<Object> AccessorInfo::Holder() const {
 
 template <class T>
 Local<T> HandleScope::Close(Handle<T> value) {
-  void** after = RawClose(reinterpret_cast<void**>(*value));
+  internal::Object** before = reinterpret_cast<internal::Object**>(*value);
+  internal::Object** after = RawClose(before);
   return Local<T>(reinterpret_cast<T*>(after));
 }
@@ -2781,6 +2898,171 @@ void Template::Set(const char* name, v8::Handle<Data> value) {
 }
 
+
+Local<Value> Object::GetInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
+  Local<Value> quick_result = UncheckedGetInternalField(index);
+  if (!quick_result.IsEmpty()) return quick_result;
+#endif
+  return CheckedGetInternalField(index);
+}
+
+
+Local<Value> Object::UncheckedGetInternalField(int index) {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(this);
+  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+  if (instance_type == I::kJSObjectType) {
+    // If the object is a plain JSObject, which is the common case,
+    // we know where to find the internal fields and can return the
+    // value directly.
+    int offset = I::kJSObjectHeaderSize + (sizeof(void*) * index);
+    O* value = I::ReadField<O*>(obj, offset);
+    O** result = HandleScope::CreateHandle(value);
+    return Local<Value>(reinterpret_cast<Value*>(result));
+  } else {
+    return Local<Value>();
+  }
+}
+
+
+void* External::Unwrap(Handle<v8::Value> obj) {
+#ifdef V8_ENABLE_CHECKS
+  return FullUnwrap(obj);
+#else
+  return QuickUnwrap(obj);
+#endif
+}
+
+
+void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
+  if (I::HasSmiTag(obj)) {
+    int value = I::SmiValue(obj) << I::kAlignedPointerShift;
+    return reinterpret_cast<void*>(value);
+  } else {
+    O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+    int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+    if (instance_type == I::kProxyType) {
+      return I::ReadField<void*>(obj, I::kProxyProxyOffset);
+    } else {
+      return NULL;
+    }
+  }
+}
+
+
+void* Object::GetPointerFromInternalField(int index) {
+  return External::Unwrap(GetInternalField(index));
+}
+
+
+String* String::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<String*>(value);
+}
+
+
+String::ExternalStringResource* String::GetExternalStringResource() const {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
+  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+  String::ExternalStringResource* result;
+  if (I::IsExternalTwoByteString(instance_type)) {
+    void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
+    result = reinterpret_cast<String::ExternalStringResource*>(value);
+  } else {
+    result = NULL;
+  }
+#ifdef V8_ENABLE_CHECKS
+  VerifyExternalStringResource(result);
+#endif
+  return result;
+}
+
+
+bool Value::IsString() const {
+#ifdef V8_ENABLE_CHECKS
+  return FullIsString();
+#else
+  return QuickIsString();
+#endif
+}
+
+bool Value::QuickIsString() const {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  if (!I::HasHeapObjectTag(obj)) return false;
+  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+  return (instance_type < I::kFirstNonstringType);
+}
+
+
+Number* Number::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<Number*>(value);
+}
+
+
+Integer* Integer::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<Integer*>(value);
+}
+
+
+Date* Date::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<Date*>(value);
+}
+
+
+Object* Object::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<Object*>(value);
+}
+
+
+Array* Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<Array*>(value);
+}
+
+
+Function* Function::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<Function*>(value);
+}
+
+
+External* External::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<External*>(value);
+}
+
+
 /**
  * \example shell.cc
 * A simple shell that takes a list of expressions on the
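
The inline fast paths above all follow one recipe: read the object's map pointer, read the map's instance-type byte, and branch on it, falling back to the checked out-of-line path only when V8_ENABLE_CHECKS is defined. The recipe works because of the pointer tagging that the new internal namespace exports. A minimal sketch of that tagging scheme, restated outside v8.h with simplified integer types (illustrative only; 32-bit Smi layout assumed):

#include <cassert>
#include <cstdint>

// Restatement of the tag constants exported by v8::internal above:
// Smis carry a 0 in the low bit; heap object pointers carry 01 in the
// low two bits, so the two encodings are never confused.
const intptr_t kSmiTagMask = 1;         // (1 << kSmiTagSize) - 1
const intptr_t kHeapObjectTagMask = 3;  // (1 << kHeapObjectTagSize) - 1
const intptr_t kHeapObjectTag = 1;

static bool HasSmiTag(intptr_t value) {
  return (value & kSmiTagMask) == 0;
}

static bool HasHeapObjectTag(intptr_t value) {
  return (value & kHeapObjectTagMask) == kHeapObjectTag;
}

static int SmiValue(intptr_t value) {
  return static_cast<int>(value >> 1);  // undo the one-bit Smi shift
}

int main() {
  intptr_t smi_42 = static_cast<intptr_t>(42) << 1;  // encode Smi 42
  assert(HasSmiTag(smi_42));
  assert(!HasHeapObjectTag(smi_42));
  assert(SmiValue(smi_42) == 42);
  return 0;
}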

deps/v8/src/SConscript (42)

@@ -63,32 +63,22 @@ SOURCES = {
     'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
     'arm/virtual-frame-arm.cc'
   ],
-  'arch:ia32': {
-    'all': [
-      'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
-      'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
-      'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
-      'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
-      'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
-      'ia32/virtual-frame-ia32.cc'
-    ],
-    'regexp:native': [
-      'ia32/regexp-macro-assembler-ia32.cc',
-    ]
-  },
-  'arch:x64': {
-    'all': [
-      'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
-      'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
-      'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
-      'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
-      'x64/register-allocator-x64.cc',
-      'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
-    ],
-    'regexp:native': [
-      'x64/regexp-macro-assembler-x64.cc'
-    ]
-  },
+  'arch:ia32': [
+    'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+    'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
+    'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
+    'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
+    'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc',
+    'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc'
+  ],
+  'arch:x64': [
+    'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
+    'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
+    'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
+    'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
+    'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc',
+    'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+  ],
   'simulator:arm': ['arm/simulator-arm.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
   'os:linux': ['platform-linux.cc', 'platform-posix.cc'],

deps/v8/src/api.cc (185)

@@ -75,7 +75,7 @@ namespace v8 {
       i::V8::FatalProcessOutOfMemory(NULL);                                   \
     }                                                                         \
     bool call_depth_is_zero = thread_local.CallDepthIsZero();                 \
-    i::Top::OptionalRescheduleException(call_depth_is_zero, false);           \
+    i::Top::OptionalRescheduleException(call_depth_is_zero);                  \
     return value;                                                             \
   }                                                                           \
 } while (false)
@@ -100,7 +100,9 @@ static i::HandleScopeImplementer thread_local;
 static FatalErrorCallback exception_behavior = NULL;
 
+int i::Internals::kJSObjectType = JS_OBJECT_TYPE;
+int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE;
+int i::Internals::kProxyType = PROXY_TYPE;
 
 static void DefaultFatalErrorHandler(const char* location,
                                      const char* message) {
@@ -223,7 +225,8 @@ ImplementationUtilities::HandleScopeData*
 
 #ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(void** begin, void** end) {
+void ImplementationUtilities::ZapHandleRange(i::Object** begin,
+                                             i::Object** end) {
   i::HandleScope::ZapRange(begin, end);
 }
 #endif
@@ -349,49 +352,47 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
 }
 
 
-void** V8::GlobalizeReference(void** obj) {
+i::Object** V8::GlobalizeReference(i::Object** obj) {
   if (IsDeadCheck("V8::Persistent::New")) return NULL;
   LOG_API("Persistent::New");
-  i::Handle<i::Object> result =
-      i::GlobalHandles::Create(*reinterpret_cast<i::Object**>(obj));
-  return reinterpret_cast<void**>(result.location());
+  i::Handle<i::Object> result = i::GlobalHandles::Create(*obj);
+  return result.location();
 }
 
 
-void V8::MakeWeak(void** object, void* parameters,
+void V8::MakeWeak(i::Object** object, void* parameters,
                   WeakReferenceCallback callback) {
   LOG_API("MakeWeak");
-  i::GlobalHandles::MakeWeak(reinterpret_cast<i::Object**>(object), parameters,
-                             callback);
+  i::GlobalHandles::MakeWeak(object, parameters, callback);
 }
 
 
-void V8::ClearWeak(void** obj) {
+void V8::ClearWeak(i::Object** obj) {
   LOG_API("ClearWeak");
-  i::GlobalHandles::ClearWeakness(reinterpret_cast<i::Object**>(obj));
+  i::GlobalHandles::ClearWeakness(obj);
 }
 
 
-bool V8::IsGlobalNearDeath(void** obj) {
+bool V8::IsGlobalNearDeath(i::Object** obj) {
   LOG_API("IsGlobalNearDeath");
   if (!i::V8::IsRunning()) return false;
-  return i::GlobalHandles::IsNearDeath(reinterpret_cast<i::Object**>(obj));
+  return i::GlobalHandles::IsNearDeath(obj);
 }
 
 
-bool V8::IsGlobalWeak(void** obj) {
+bool V8::IsGlobalWeak(i::Object** obj) {
   LOG_API("IsGlobalWeak");
   if (!i::V8::IsRunning()) return false;
-  return i::GlobalHandles::IsWeak(reinterpret_cast<i::Object**>(obj));
+  return i::GlobalHandles::IsWeak(obj);
 }
 
 
-void V8::DisposeGlobal(void** obj) {
+void V8::DisposeGlobal(i::Object** obj) {
   LOG_API("DisposeGlobal");
   if (!i::V8::IsRunning()) return;
-  i::Object** ptr = reinterpret_cast<i::Object**>(obj);
-  if ((*ptr)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
-  i::GlobalHandles::Destroy(ptr);
+  if ((*obj)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
+  i::GlobalHandles::Destroy(obj);
 }
 
 // --- H a n d l e s ---
@@ -415,9 +416,8 @@ int HandleScope::NumberOfHandles() {
 }
 
 
-void** v8::HandleScope::CreateHandle(void* value) {
-  return reinterpret_cast<void**>(
-      i::HandleScope::CreateHandle(reinterpret_cast<i::Object*>(value)));
+i::Object** v8::HandleScope::CreateHandle(i::Object* value) {
+  return i::HandleScope::CreateHandle(value);
 }
@@ -481,7 +481,7 @@ v8::Local<v8::Value> Context::GetData() {
 }
 
 
-void** v8::HandleScope::RawClose(void** value) {
+i::Object** v8::HandleScope::RawClose(i::Object** value) {
   if (!ApiCheck(!is_closed_,
                 "v8::HandleScope::Close()",
                 "Local scope has already been closed")) {
@@ -490,13 +490,13 @@ void** v8::HandleScope::RawClose(void** value) {
   LOG_API("CloseHandleScope");
 
   // Read the result before popping the handle block.
-  i::Object* result = reinterpret_cast<i::Object*>(*value);
+  i::Object* result = *value;
   is_closed_ = true;
   i::HandleScope::Leave(&previous_);
 
   // Allocate a new handle on the previous handle block.
   i::Handle<i::Object> handle(result);
-  return reinterpret_cast<void**>(handle.location());
+  return handle.location();
 }
@@ -1459,9 +1459,11 @@ bool Value::IsFunction() const {
 }
 
 
-bool Value::IsString() const {
+bool Value::FullIsString() const {
   if (IsDeadCheck("v8::Value::IsString()")) return false;
-  return Utils::OpenHandle(this)->IsString();
+  bool result = Utils::OpenHandle(this)->IsString();
+  ASSERT_EQ(result, QuickIsString());
+  return result;
 }
@@ -1613,83 +1615,75 @@ Local<Integer> Value::ToInteger() const {
 }
 
 
-External* External::Cast(v8::Value* that) {
-  if (IsDeadCheck("v8::External::Cast()")) return 0;
+void External::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::External::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsProxy(),
            "v8::External::Cast()",
            "Could not convert to external");
-  return static_cast<External*>(that);
 }
 
 
-v8::Object* v8::Object::Cast(Value* that) {
-  if (IsDeadCheck("v8::Object::Cast()")) return 0;
+void v8::Object::CheckCast(Value* that) {
+  if (IsDeadCheck("v8::Object::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSObject(),
           "v8::Object::Cast()",
           "Could not convert to object");
-  return static_cast<v8::Object*>(that);
 }
 
 
-v8::Function* v8::Function::Cast(Value* that) {
-  if (IsDeadCheck("v8::Function::Cast()")) return 0;
+void v8::Function::CheckCast(Value* that) {
  if (IsDeadCheck("v8::Function::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSFunction(),
            "v8::Function::Cast()",
            "Could not convert to function");
-  return static_cast<v8::Function*>(that);
 }
 
 
-v8::String* v8::String::Cast(v8::Value* that) {
-  if (IsDeadCheck("v8::String::Cast()")) return 0;
+void v8::String::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::String::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsString(),
            "v8::String::Cast()",
            "Could not convert to string");
-  return static_cast<v8::String*>(that);
 }
 
 
-v8::Number* v8::Number::Cast(v8::Value* that) {
-  if (IsDeadCheck("v8::Number::Cast()")) return 0;
+void v8::Number::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::Number::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsNumber(),
            "v8::Number::Cast()",
            "Could not convert to number");
-  return static_cast<v8::Number*>(that);
 }
 
 
-v8::Integer* v8::Integer::Cast(v8::Value* that) {
-  if (IsDeadCheck("v8::Integer::Cast()")) return 0;
+void v8::Integer::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::Integer::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsNumber(),
            "v8::Integer::Cast()",
           "Could not convert to number");
-  return static_cast<v8::Integer*>(that);
 }
 
 
-v8::Array* v8::Array::Cast(Value* that) {
-  if (IsDeadCheck("v8::Array::Cast()")) return 0;
+void v8::Array::CheckCast(Value* that) {
+  if (IsDeadCheck("v8::Array::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSArray(),
            "v8::Array::Cast()",
           "Could not convert to array");
-  return static_cast<v8::Array*>(that);
 }
 
 
-v8::Date* v8::Date::Cast(v8::Value* that) {
-  if (IsDeadCheck("v8::Date::Cast()")) return 0;
+void v8::Date::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::Date::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->HasSpecificClassOf(i::Heap::Date_symbol()),
            "v8::Date::Cast()",
            "Could not convert to date");
-  return static_cast<v8::Date*>(that);
 }
@@ -2450,16 +2444,17 @@ bool v8::String::IsExternalAscii() const {
 }
 
 
-v8::String::ExternalStringResource*
-v8::String::GetExternalStringResource() const {
-  EnsureInitialized("v8::String::GetExternalStringResource()");
+void v8::String::VerifyExternalStringResource(
+    v8::String::ExternalStringResource* value) const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  v8::String::ExternalStringResource* expected;
   if (i::StringShape(*str).IsExternalTwoByte()) {
     void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
-    return reinterpret_cast<ExternalStringResource*>(resource);
+    expected = reinterpret_cast<ExternalStringResource*>(resource);
   } else {
-    return NULL;
+    expected = NULL;
   }
+  CHECK_EQ(expected, value);
 }
@@ -2519,7 +2514,7 @@ int v8::Object::InternalFieldCount() {
 }
 
 
-Local<Value> v8::Object::GetInternalField(int index) {
+Local<Value> v8::Object::CheckedGetInternalField(int index) {
   if (IsDeadCheck("v8::Object::GetInternalField()")) return Local<Value>();
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   if (!ApiCheck(index < obj->GetInternalFieldCount(),
@@ -2528,7 +2523,12 @@ Local<Value> v8::Object::GetInternalField(int index) {
     return Local<Value>();
   }
   i::Handle<i::Object> value(obj->GetInternalField(index));
-  return Utils::ToLocal(value);
+  Local<Value> result = Utils::ToLocal(value);
+#ifdef DEBUG
+  Local<Value> unchecked = UncheckedGetInternalField(index);
+  ASSERT(unchecked.IsEmpty() || (unchecked == result));
+#endif
+  return result;
 }
@@ -2546,41 +2546,8 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
 }
 
 
-void* v8::Object::GetPointerFromInternalField(int index) {
-  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  i::Object* pointer = obj->GetInternalField(index);
-  if (pointer->IsSmi()) {
-    // Fast case, aligned native pointer.
-    return pointer;
-  }
-
-  // Read from uninitialized field.
-  if (!pointer->IsProxy()) {
-    // Play safe even if it's something unexpected.
-    ASSERT(pointer->IsUndefined());
-    return NULL;
-  }
-
-  // Unaligned native pointer.
-  return reinterpret_cast<void*>(i::Proxy::cast(pointer)->proxy());
-}
-
-
 void v8::Object::SetPointerInInternalField(int index, void* value) {
-  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  i::Object* as_object = reinterpret_cast<i::Object*>(value);
-  if (as_object->IsSmi()) {
-    // Aligned pointer, store as is.
-    obj->SetInternalField(index, as_object);
-  } else {
-    // Currently internal fields are used by DOM wrappers which only
-    // get garbage collected by the mark-sweep collector, so we
-    // pretenure the proxy.
-    HandleScope scope;
-    i::Handle<i::Proxy> proxy =
-        i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
-    if (!proxy.is_null()) obj->SetInternalField(index, *proxy);
-  }
+  SetInternalField(index, External::Wrap(value));
 }
@@ -2605,12 +2572,14 @@ bool v8::V8::Dispose() {
 
 
 bool v8::V8::IdleNotification(bool is_high_priority) {
+  if (!i::V8::IsRunning()) return false;
   return i::V8::IdleNotification(is_high_priority);
 }
 
 
 void v8::V8::LowMemoryNotification() {
 #if defined(ANDROID)
+  if (!i::V8::IsRunning()) return;
   i::Heap::CollectAllGarbage(true);
 #endif
 }
@@ -2836,8 +2805,6 @@ static void* ExternalValueImpl(i::Handle<i::Object> obj) {
 
 static const intptr_t kAlignedPointerMask = 3;
-static const int kAlignedPointerShift = 2;
-
 
 Local<Value> v8::External::Wrap(void* data) {
   STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
@@ -2847,7 +2814,7 @@ Local<Value> v8::External::Wrap(void* data) {
   if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
     uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
     intptr_t data_value =
-        static_cast<intptr_t>(data_ptr >> kAlignedPointerShift);
+        static_cast<intptr_t>(data_ptr >> i::Internals::kAlignedPointerShift);
     STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
     if (i::Smi::IsIntptrValid(data_value)) {
       i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
@@ -2858,16 +2825,22 @@ Local<Value> v8::External::Wrap(void* data) {
 }
 
 
-void* v8::External::Unwrap(v8::Handle<v8::Value> value) {
+void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
   if (IsDeadCheck("v8::External::Unwrap()")) return 0;
-  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+  i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
+  void* result;
   if (obj->IsSmi()) {
     // The external value was an aligned pointer.
-    uintptr_t result = static_cast<uintptr_t>(
-        i::Smi::cast(*obj)->value()) << kAlignedPointerShift;
-    return reinterpret_cast<void*>(result);
+    uintptr_t value = static_cast<uintptr_t>(
+        i::Smi::cast(*obj)->value()) << i::Internals::kAlignedPointerShift;
+    result = reinterpret_cast<void*>(value);
+  } else if (obj->IsProxy()) {
+    result = ExternalValueImpl(obj);
+  } else {
+    result = NULL;
   }
-  return ExternalValueImpl(obj);
+  ASSERT_EQ(result, QuickUnwrap(wrapper));
+  return result;
 }
@@ -3729,19 +3702,17 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
 
 void HandleScopeImplementer::Iterate(
     ObjectVisitor* v,
-    List<void**>* blocks,
+    List<i::Object**>* blocks,
     v8::ImplementationUtilities::HandleScopeData* handle_data) {
   // Iterate over all handles in the blocks except for the last.
   for (int i = blocks->length() - 2; i >= 0; --i) {
-    Object** block =
-        reinterpret_cast<Object**>(blocks->at(i));
+    Object** block = blocks->at(i);
     v->VisitPointers(block, &block[kHandleBlockSize]);
   }
 
   // Iterate over live handles in the last block (if any).
   if (!blocks->is_empty()) {
-    v->VisitPointers(reinterpret_cast<Object**>(blocks->last()),
-                     reinterpret_cast<Object**>(handle_data->next));
+    v->VisitPointers(blocks->last(), handle_data->next);
   }
 }
@@ -3756,7 +3727,7 @@ void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
 char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
   HandleScopeImplementer* thread_local =
       reinterpret_cast<HandleScopeImplementer*>(storage);
-  List<void**>* blocks_of_archived_thread = thread_local->Blocks();
+  List<internal::Object**>* blocks_of_archived_thread = thread_local->Blocks();
   v8::ImplementationUtilities::HandleScopeData* handle_data_of_archived_thread =
       &thread_local->handle_scope_data_;
   Iterate(v, blocks_of_archived_thread, handle_data_of_archived_thread);
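
A note on the External::Wrap/FullUnwrap pair above: a pointer whose two low bits are zero can ride inside a Smi instead of costing a heap-allocated Proxy, which is why the aligned case is the fast path. A self-contained sketch of that round trip (simplified; the real code additionally applies the Smi tag and range check via i::Smi::FromIntptr):

#include <cassert>
#include <cstdint>

// A 4-byte-aligned pointer has two zero low bits, so shifting right by two
// loses nothing and yields a small integer; shifting back recovers it.
const int kAlignedPointerShift = 2;  // matches Internals::kAlignedPointerShift

static intptr_t WrapAligned(void* data) {
  uintptr_t bits = reinterpret_cast<uintptr_t>(data);
  assert((bits & 3) == 0);  // unaligned pointers take the Proxy slow path
  return static_cast<intptr_t>(bits >> kAlignedPointerShift);
}

static void* UnwrapAligned(intptr_t smi_payload) {
  return reinterpret_cast<void*>(
      static_cast<uintptr_t>(smi_payload) << kAlignedPointerShift);
}

int main() {
  alignas(4) int x = 0;
  void* p = &x;
  assert(UnwrapAligned(WrapAligned(p)) == p);  // lossless round trip
  return 0;
}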

deps/v8/src/api.h (24)

@@ -338,7 +338,7 @@ class HandleScopeImplementer {
   static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
 
-  inline void** GetSpareOrNewBlock();
+  inline internal::Object** GetSpareOrNewBlock();
   inline void DeleteExtensions(int extensions);
 
   inline void IncrementCallDepth() {call_depth++;}
@@ -356,13 +356,13 @@ class HandleScopeImplementer {
   inline Handle<Object> RestoreContext();
   inline bool HasSavedContexts();
 
-  inline List<void**>* Blocks() { return &blocks; }
+  inline List<internal::Object**>* Blocks() { return &blocks; }
 
   inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
   inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
 
  private:
-  List<void**> blocks;
+  List<internal::Object**> blocks;
   Object** spare;
   int call_depth;
   // Used as a stack to keep track of entered contexts.
@@ -374,7 +374,7 @@ class HandleScopeImplementer {
   v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
 
   static void Iterate(ObjectVisitor* v,
-                      List<void**>* blocks,
+                      List<internal::Object**>* blocks,
                       v8::ImplementationUtilities::HandleScopeData* handle_data);
   char* RestoreThreadHelper(char* from);
   char* ArchiveThreadHelper(char* to);
@@ -420,10 +420,10 @@ Handle<Object> HandleScopeImplementer::LastEnteredContext() {
 
 // If there's a spare block, use it for growing the current scope.
-void** HandleScopeImplementer::GetSpareOrNewBlock() {
-  void** block = (spare != NULL) ?
-      reinterpret_cast<void**>(spare) :
-      NewArray<void*>(kHandleBlockSize);
+internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
+  internal::Object** block = (spare != NULL) ?
+      spare :
+      NewArray<internal::Object*>(kHandleBlockSize);
   spare = NULL;
   return block;
 }
@@ -435,18 +435,18 @@ void HandleScopeImplementer::DeleteExtensions(int extensions) {
     spare = NULL;
   }
   for (int i = extensions; i > 1; --i) {
-    void** block = blocks.RemoveLast();
+    internal::Object** block = blocks.RemoveLast();
 #ifdef DEBUG
     v8::ImplementationUtilities::ZapHandleRange(block,
                                                 &block[kHandleBlockSize]);
 #endif
     DeleteArray(block);
   }
-  spare = reinterpret_cast<Object**>(blocks.RemoveLast());
+  spare = blocks.RemoveLast();
 #ifdef DEBUG
   v8::ImplementationUtilities::ZapHandleRange(
-      reinterpret_cast<void**>(spare),
-      reinterpret_cast<void**>(&spare[kHandleBlockSize]));
+      spare,
+      &spare[kHandleBlockSize]);
 #endif
 }

deps/v8/src/apiutils.h (2)

@@ -60,7 +60,7 @@ class ImplementationUtilities {
   static HandleScopeData* CurrentHandleScope();
 
 #ifdef DEBUG
-  static void ZapHandleRange(void** begin, void** end);
+  static void ZapHandleRange(internal::Object** begin, internal::Object** end);
 #endif
 };

deps/v8/src/arm/assembler-arm-inl.h (2)

@@ -204,7 +204,7 @@ void Assembler::CheckBuffer() {
   if (buffer_space() <= kGap) {
     GrowBuffer();
   }
-  if (pc_offset() > next_buffer_check_) {
+  if (pc_offset() >= next_buffer_check_) {
     CheckConstPool(false, true);
   }
 }

100
deps/v8/src/arm/assembler-arm.cc

@ -329,19 +329,30 @@ const int kEndOfChain = -4;
int Assembler::target_at(int pos) { int Assembler::target_at(int pos) {
Instr instr = instr_at(pos); Instr instr = instr_at(pos);
if ((instr & ~Imm24Mask) == 0) {
// Emitted label constant, not part of a branch.
return instr - (Code::kHeaderSize - kHeapObjectTag);
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & Imm24Mask) << 8) >> 6; int imm26 = ((instr & Imm24Mask) << 8) >> 6;
if ((instr & CondMask) == nv && (instr & B24) != 0) if ((instr & CondMask) == nv && (instr & B24) != 0)
// blx uses bit 24 to encode bit 2 of imm26 // blx uses bit 24 to encode bit 2 of imm26
imm26 += 2; imm26 += 2;
return pos + 8 + imm26; return pos + kPcLoadDelta + imm26;
} }
void Assembler::target_at_put(int pos, int target_pos) { void Assembler::target_at_put(int pos, int target_pos) {
int imm26 = target_pos - pos - 8;
Instr instr = instr_at(pos); Instr instr = instr_at(pos);
if ((instr & ~Imm24Mask) == 0) {
ASSERT(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
// Make label relative to Code* of generated Code object.
instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
return;
}
int imm26 = target_pos - (pos + kPcLoadDelta);
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
if ((instr & CondMask) == nv) { if ((instr & CondMask) == nv) {
// blx uses bit 24 to encode bit 2 of imm26 // blx uses bit 24 to encode bit 2 of imm26
@@ -368,41 +379,45 @@ void Assembler::print(Label* L) {
     while (l.is_linked()) {
       PrintF("@ %d ", l.pos());
       Instr instr = instr_at(l.pos());
-      ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
-      int cond = instr & CondMask;
-      const char* b;
-      const char* c;
-      if (cond == nv) {
-        b = "blx";
-        c = "";
+      if ((instr & ~Imm24Mask) == 0) {
+        PrintF("value\n");
       } else {
-        if ((instr & B24) != 0)
-          b = "bl";
-        else
-          b = "b";
+        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
+        int cond = instr & CondMask;
+        const char* b;
+        const char* c;
+        if (cond == nv) {
+          b = "blx";
+          c = "";
+        } else {
+          if ((instr & B24) != 0)
+            b = "bl";
+          else
+            b = "b";
 
-        switch (cond) {
-          case eq: c = "eq"; break;
-          case ne: c = "ne"; break;
-          case hs: c = "hs"; break;
-          case lo: c = "lo"; break;
-          case mi: c = "mi"; break;
-          case pl: c = "pl"; break;
-          case vs: c = "vs"; break;
-          case vc: c = "vc"; break;
-          case hi: c = "hi"; break;
-          case ls: c = "ls"; break;
-          case ge: c = "ge"; break;
-          case lt: c = "lt"; break;
-          case gt: c = "gt"; break;
-          case le: c = "le"; break;
-          case al: c = ""; break;
-          default:
-            c = "";
-            UNREACHABLE();
+          switch (cond) {
+            case eq: c = "eq"; break;
+            case ne: c = "ne"; break;
+            case hs: c = "hs"; break;
+            case lo: c = "lo"; break;
+            case mi: c = "mi"; break;
+            case pl: c = "pl"; break;
+            case vs: c = "vs"; break;
+            case vc: c = "vc"; break;
+            case hi: c = "hi"; break;
+            case ls: c = "ls"; break;
+            case ge: c = "ge"; break;
+            case lt: c = "lt"; break;
+            case gt: c = "gt"; break;
+            case le: c = "le"; break;
+            case al: c = ""; break;
+            default:
+              c = "";
+              UNREACHABLE();
+          }
         }
+        PrintF("%s%s\n", b, c);
       }
-      PrintF("%s%s\n", b, c);
       next(&l);
     }
   } else {
@@ -670,8 +685,23 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   // Block the emission of the constant pool, since the branch instruction must
   // be emitted at the pc offset recorded by the label
   BlockConstPoolBefore(pc_offset() + kInstrSize);
+  return target_pos - (pc_offset() + kPcLoadDelta);
+}
 
-  return target_pos - pc_offset() - 8;
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(at_offset);
+    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+  }
 }
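
Two themes run through the assembler changes above: the magic 8 for the ARM
pc read-ahead becomes the named constant kPcLoadDelta, and a label's position
can now be emitted as a plain constant (used for regexp backtrack addresses)
rather than only as a branch target, stored relative to the start of the
generated Code object via the Code::kHeaderSize - kHeapObjectTag adjustment.
A sketch of the offset arithmetic, with assumed positions:

    // Reading pc on ARM yields the address of the current instruction plus 8.
    const int kPcLoadDelta = 8;
    int branch_pos = 100;  // where the b/bl/blx instruction sits
    int target_pos = 160;  // where the label is bound
    int imm26 = target_pos - (branch_pos + kPcLoadDelta);  // encoded: 52
    // Decoding inverts this: target_pos == branch_pos + kPcLoadDelta + imm26.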

17
deps/v8/src/arm/assembler-arm.h

@@ -39,7 +39,7 @@
 #ifndef V8_ARM_ASSEMBLER_ARM_H_
 #define V8_ARM_ASSEMBLER_ARM_H_
 
+#include <stdio.h>
 #include "assembler.h"
 
 namespace v8 {
@@ -165,9 +165,10 @@ enum Coprocessor {
 enum Condition {
   eq = 0 << 28,  // Z set      equal.
   ne = 1 << 28,  // Z clear    not equal.
-  cs = 2 << 28,  // C set      unsigned higher or same.
+  nz = 1 << 28,  // Z clear    not zero.
+  cs = 2 << 28,  // C set      carry set.
   hs = 2 << 28,  // C set      unsigned higher or same.
-  cc = 3 << 28,  // C clear    unsigned lower.
+  cc = 3 << 28,  // C clear    carry clear.
   lo = 3 << 28,  // C clear    unsigned lower.
   mi = 4 << 28,  // N set      negative.
   pl = 5 << 28,  // N clear    positive or zero.
@@ -420,6 +421,10 @@ class Assembler : public Malloced {
   // Manages the jump elimination optimization if the second parameter is true.
   int branch_offset(Label* L, bool jump_elimination_allowed);
 
+  // Puts a labels target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+
   // Return the address in the constant pool of the code target address used by
   // the branch/call instruction at pc.
   INLINE(static Address target_address_address_at(Address pc));
@@ -435,6 +440,10 @@ class Assembler : public Malloced {
   // to jump to.
   static const int kPatchReturnSequenceAddressOffset = 1;
 
+  // Difference between address of current opcode and value read from pc
+  // register.
+  static const int kPcLoadDelta = 8;
+
   // ---------------------------------------------------------------------------
   // Code generation
 
@@ -784,6 +793,8 @@ class Assembler : public Malloced {
   // Record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  friend class RegExpMacroAssemblerARM;
 };
 
 } }  // namespace v8::internal

2
deps/v8/src/arm/builtins-arm.cc

@@ -573,7 +573,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  __ mov(r4, Operand(ArgumentsAdaptorFrame::SENTINEL));
+  __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
   __ add(fp, sp, Operand(3 * kPointerSize));
 }

82
deps/v8/src/arm/codegen-arm.cc

@@ -176,7 +176,8 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
   }
 #endif
 
-  // Allocate space for locals and initialize them.
+  // Allocate space for locals and initialize them.  This also checks
+  // for stack overflow.
   frame_->AllocateStackSlots();
   // Initialize the function return target after the locals are set
   // up, because it needs the expected frame height from the frame.
@@ -278,7 +279,6 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
       frame_->CallRuntime(Runtime::kTraceEnter, 0);
       // Ignore the return value.
     }
-    CheckStack();
 
     // Compile the body of the function in a vanilla state. Don't
     // bother compiling all the code if the scope has an illegal
@@ -1110,8 +1110,19 @@ void CodeGenerator::CheckStack() {
   VirtualFrame::SpilledScope spilled_scope;
   if (FLAG_check_stack) {
     Comment cmnt(masm_, "[ check stack");
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    // Put the lr setup instruction in the delay slot.  The 'sizeof(Instr)' is
+    // added to the implicit 8 byte offset that always applies to operations
+    // with pc and gives a return address 12 bytes down.
+    masm_->add(lr, pc, Operand(sizeof(Instr)));
+    masm_->cmp(sp, Operand(ip));
     StackCheckStub stub;
-    frame_->CallStub(&stub, 0);
+    // Call the stub if lower.
+    masm_->mov(pc,
+               Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                       RelocInfo::CODE_TARGET),
+               LeaveCC,
+               lo);
   }
 }
@@ -3322,7 +3333,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
   __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r1, Operand(ArgumentsAdaptorFrame::SENTINEL));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &check_frame_marker);
   __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
@@ -4934,36 +4945,21 @@ void CompareStub::Generate(MacroAssembler* masm) {
 static void AllocateHeapNumber(
     MacroAssembler* masm,
     Label* need_gc,       // Jump here if young space is full.
-    Register result_reg,  // The tagged address of the new heap number.
-    Register allocation_top_addr_reg,  // A scratch register.
+    Register result,      // The tagged address of the new heap number.
+    Register scratch1,    // A scratch register.
     Register scratch2) {  // Another scratch register.
-  ExternalReference allocation_top =
-      ExternalReference::new_space_allocation_top_address();
-  ExternalReference allocation_limit =
-      ExternalReference::new_space_allocation_limit_address();
-
-  // allocat := the address of the allocation top variable.
-  __ mov(allocation_top_addr_reg, Operand(allocation_top));
-  // result_reg := the old allocation top.
-  __ ldr(result_reg, MemOperand(allocation_top_addr_reg));
-  // scratch2 := the address of the allocation limit.
-  __ mov(scratch2, Operand(allocation_limit));
-  // scratch2 := the allocation limit.
-  __ ldr(scratch2, MemOperand(scratch2));
-  // result_reg := the new allocation top.
-  __ add(result_reg, result_reg, Operand(HeapNumber::kSize));
-  // Compare new new allocation top and limit.
-  __ cmp(result_reg, Operand(scratch2));
-  // Branch if out of space in young generation.
-  __ b(hi, need_gc);
-  // Store new allocation top.
-  __ str(result_reg, MemOperand(allocation_top_addr_reg));  // store new top
-  // Tag and adjust back to start of new object.
-  __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag));
-  // Get heap number map into scratch2.
-  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
-  // Store heap number map in new object.
-  __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset));
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  __ AllocateObjectInNewSpace(HeapNumber::kSize,
+                              result,
+                              scratch1,
+                              scratch2,
+                              need_gc,
+                              true);
+
+  // Get heap number map and store it in the allocated object.
+  __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+  __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
 }
@@ -5623,17 +5619,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  Label within_limit;
-  __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit()));
-  __ ldr(ip, MemOperand(ip));
-  __ cmp(sp, Operand(ip));
-  __ b(hs, &within_limit);
   // Do tail-call to runtime routine.  Runtime routines expect at least one
   // argument, so give it a Smi.
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ push(r0);
   __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
-  __ bind(&within_limit);
 
   __ StubReturn(1);
 }
@@ -5677,9 +5667,9 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
     __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
   } else {
     AllocateHeapNumber(masm, &slow, r1, r2, r3);
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-    __ str(r2, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
     __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
     __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
     __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
     __ mov(r0, Operand(r1));
@@ -5984,9 +5974,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // r2: receiver
   // r3: argc
   // r4: argv
-  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
   __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
-  __ mov(r7, Operand(~ArgumentsAdaptorFrame::SENTINEL));
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ mov(r7, Operand(Smi::FromInt(marker)));
   __ mov(r6, Operand(Smi::FromInt(marker)));
   __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
   __ ldr(r5, MemOperand(r5));
@@ -6143,7 +6133,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
   Label adaptor;
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(eq, &adaptor);
 
   // Nothing to do: The formal number of parameters has already been
@@ -6172,7 +6162,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   Label adaptor;
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(eq, &adaptor);
 
   // Check index against formal parameters count limit passed in
@@ -6214,7 +6204,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   Label runtime;
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &runtime);
 
   // Patch the arguments.length and the parameters pointer.
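
The inlined stack check above avoids a full CallStub by materializing the
return address by hand and jumping to the stub only on the 'lo' condition.
The arithmetic leans on the same pc read-ahead named kPcLoadDelta; a sketch
with assumed offsets:

    const int kInstrSize = 4;    // sizeof(Instr) on ARM
    const int kPcLoadDelta = 8;  // reading pc sees the current address + 8
    int add_at = 0;              // pretend 'add lr, pc, #4' sits at offset 0
    int lr_value = add_at + kPcLoadDelta + kInstrSize;  // == 12
    // Offsets 0, 4 and 8 hold the add/cmp/mov triple, so 12 is the first
    // instruction after the conditional jump - where the stub returns to.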

64
deps/v8/src/arm/disasm-arm.cc

@@ -119,6 +119,7 @@ class Decoder {
   void DecodeType5(Instr* instr);
   void DecodeType6(Instr* instr);
   void DecodeType7(Instr* instr);
+  void DecodeUnconditional(Instr* instr);
 
   const disasm::NameConverter& converter_;
   v8::internal::Vector<char> out_buffer_;
@@ -774,6 +775,67 @@ void Decoder::DecodeType7(Instr* instr) {
 }
 
 
+void Decoder::DecodeUnconditional(Instr* instr) {
+  if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) {
+    Format(instr, "'memop'h'pu 'rd, ");
+    bool immediate = instr->HasB();
+    switch (instr->PUField()) {
+      case 0: {
+        // Post index, negative.
+        if (instr->HasW()) {
+          Unknown(instr);
+          break;
+        }
+        if (immediate) {
+          Format(instr, "['rn], #-'imm12");
+        } else {
+          Format(instr, "['rn], -'rm");
+        }
+        break;
+      }
+      case 1: {
+        // Post index, positive.
+        if (instr->HasW()) {
+          Unknown(instr);
+          break;
+        }
+        if (immediate) {
+          Format(instr, "['rn], #+'imm12");
+        } else {
+          Format(instr, "['rn], +'rm");
+        }
+        break;
+      }
+      case 2: {
+        // Pre index or offset, negative.
+        if (immediate) {
+          Format(instr, "['rn, #-'imm12]'w");
+        } else {
+          Format(instr, "['rn, -'rm]'w");
+        }
+        break;
+      }
+      case 3: {
+        // Pre index or offset, positive.
+        if (immediate) {
+          Format(instr, "['rn, #+'imm12]'w");
+        } else {
+          Format(instr, "['rn, +'rm]'w");
+        }
+        break;
+      }
+      default: {
+        // The PU field is a 2-bit field.
+        UNREACHABLE();
+        break;
+      }
+    }
+    return;
+  }
+  Format(instr, "break 'msg");
+}
+
+
 // Disassemble the instruction at *instr_ptr into the output buffer.
 int Decoder::InstructionDecode(byte* instr_ptr) {
   Instr* instr = Instr::At(instr_ptr);
@@ -782,7 +844,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
            "%08x       ",
            instr->InstructionBits());
   if (instr->ConditionField() == special_condition) {
-    Format(instr, "break 'msg");
+    DecodeUnconditional(instr);
     return Instr::kInstrSize;
   }
   switch (instr->TypeField()) {

43
deps/v8/src/arm/macro-assembler-arm.cc

@@ -768,6 +768,44 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
 }
 
 
+void MacroAssembler::AllocateObjectInNewSpace(int object_size,
+                                              Register result,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Label* gc_required,
+                                              bool tag_allocated_object) {
+  ASSERT(!result.is(scratch1));
+  ASSERT(!scratch1.is(scratch2));
+
+  // Load address of new object into result and allocation top address into
+  // scratch1.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+  mov(scratch1, Operand(new_space_allocation_top));
+  ldr(result, MemOperand(scratch1));
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  mov(scratch2, Operand(new_space_allocation_limit));
+  ldr(scratch2, MemOperand(scratch2));
+  add(result, result, Operand(object_size));
+  cmp(result, Operand(scratch2));
+  b(hi, gc_required);
+
+  // Update allocation top. result temporarily holds the new top.
+  str(result, MemOperand(scratch1));
+
+  // Tag and adjust back to start of new object.
+  if (tag_allocated_object) {
+    sub(result, result, Operand(object_size - kHeapObjectTag));
+  } else {
+    sub(result, result, Operand(object_size));
+  }
+}
+
+
 void MacroAssembler::CompareObjectType(Register function,
                                        Register map,
                                        Register type_reg,
@@ -825,9 +863,9 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
 }
 
 
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
   ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
@@ -1022,4 +1060,5 @@ void MacroAssembler::Abort(const char* msg) {
   // will not return here
 }
 
+
 } }  // namespace v8::internal
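
AllocateObjectInNewSpace centralizes the bump-pointer protocol that
AllocateHeapNumber used to spell out instruction by instruction. The same
protocol in portable C++ (a sketch, not V8 code):

    // Returns the (optionally tagged) object start, or NULL when new space
    // is exhausted and the caller must jump to its gc_required label.
    char* AllocateInNewSpace(int object_size, char** top_address,
                             char* limit, bool tag_allocated_object) {
      char* top = *top_address;           // current allocation top
      char* new_top = top + object_size;  // candidate new top
      if (new_top > limit) return NULL;   // young generation exhausted
      *top_address = new_top;             // publish the new top
      // kHeapObjectTag == 1: a tagged pointer is one byte past the start.
      return tag_allocated_object ? top + 1 : top;
    }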

16
deps/v8/src/arm/macro-assembler-arm.h

@@ -187,6 +187,20 @@ class MacroAssembler: public Assembler {
                               Label* miss);
 
 
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space. If the new space is exhausted control
+  // continues at the gc_required label. The allocated object is returned in
+  // result. If the flag tag_allocated_object is true the result is tagged as
+  // a heap object.
+  void AllocateObjectInNewSpace(int object_size,
+                                Register result,
+                                Register scratch1,
+                                Register scratch2,
+                                Label* gc_required,
+                                bool tag_allocated_object);
+
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -231,7 +245,7 @@ class MacroAssembler: public Assembler {
   // Runtime calls
 
   // Call a code stub.
-  void CallStub(CodeStub* stub);
+  void CallStub(CodeStub* stub, Condition cond = al);
   void CallJSExitStub(CodeStub* stub);
 
   // Return from a code stub after popping its arguments.

1192
deps/v8/src/arm/regexp-macro-assembler-arm.cc

File diff suppressed because it is too large

226
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -31,12 +31,238 @@
 namespace v8 {
 namespace internal {
 
+#ifndef V8_NATIVE_REGEXP
 class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
  public:
   RegExpMacroAssemblerARM();
   virtual ~RegExpMacroAssemblerARM();
 };
+
+#else
+class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
+  virtual ~RegExpMacroAssemblerARM();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckCharacters(Vector<const uc16> str,
+                               int cp_offset,
+                               Label* on_failure,
+                               bool check_end_of_string);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          int cp_offset,
+                                          bool check_offset,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<Object> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetRegister(int register_index, int to);
+  virtual void Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+
+ private:
+  // Offsets from frame_pointer() of function parameters and stored registers.
+  static const int kFramePointer = 0;
+
+  // Above the frame pointer - Stored registers and stack passed parameters.
+  // Register 4..11.
+  static const int kStoredRegisters = kFramePointer;
+  // Return address (stored from link register, read into pc on return).
+  static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
+  // Stack parameters placed by caller.
+  static const int kRegisterOutput = kReturnAddress + kPointerSize;
+  static const int kAtStart = kRegisterOutput + kPointerSize;
+  static const int kStackHighEnd = kAtStart + kPointerSize;
+
+  // Below the frame pointer.
+  // Register parameters stored by setup code.
+  static const int kInputEnd = kFramePointer - kPointerSize;
+  static const int kInputStart = kInputEnd - kPointerSize;
+  static const int kStartIndex = kInputStart - kPointerSize;
+  static const int kInputString = kStartIndex - kPointerSize;
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kInputStartMinusOne = kInputString - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  static const int kBacktrackConstantPoolSize = 4;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+  void EmitBacktrackConstantPool();
+  int GetBacktrackConstantPoolEntry();
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The ebp-relative location of a regexp register.
+  MemOperand register_location(int register_index);
+
+  // Register holding the current input position as negative offset from
+  // the end of the string.
+  inline Register current_input_offset() { return r6; }
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return r7; }
+
+  // Register holding address of the end of the input string.
+  inline Register end_of_input_address() { return r10; }
+
+  // Register holding the frame address. Local variables, parameters and
+  // regexp registers are addressed relative to this.
+  inline Register frame_pointer() { return fp; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return r8; }
+
+  // Register holding pointer to the current code object.
+  inline Register code_pointer() { return r5; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument)
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to, Condition cond = al);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // and increments it by a word size.
+  inline void Pop(Register target);
+
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, non-register arguments must be stored in
+  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+  // are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  inline void FrameAlign(int num_arguments, Register scratch);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by FrameAlign. The called function is not allowed to trigger a garbage
+  // collection.
+  inline void CallCFunction(ExternalReference function,
+                            int num_arguments);
+
+  // Calls a C function and cleans up the frame alignment done by
+  // by FrameAlign. The called function *is* allowed to trigger a garbage
+  // collection, but may not take more than four arguments (no arguments
+  // passed on the stack), and the first argument will be a pointer to the
+  // return address.
+  inline void CallCFunctionUsingStub(ExternalReference function,
+                                     int num_arguments);
+
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (ASCII or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Manage a small pre-allocated pool for writing label targets
+  // to for pushing backtrack addresses.
+  int backtrack_constant_pool_offset_;
+  int backtrack_constant_pool_capacity_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+};
+
+#endif  // V8_NATIVE_REGEXP
 
 }}  // namespace v8::internal
 
 #endif  // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
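
The constants above define an fp-relative frame: saved registers and
stack-passed parameters sit above kFramePointer, spilled locals and the regexp
registers below it, with register 0 at kRegisterZero and each further register
one word lower. The suppressed .cc diff presumably resolves register indices
along these lines (a sketch, not the verbatim implementation):

    MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
      if (num_registers_ <= register_index) {
        // Track the highest index touched so GetCode can reserve
        // frame space for every register actually used.
        num_registers_ = register_index + 1;
      }
      return MemOperand(frame_pointer(),
                        kRegisterZero - register_index * kPointerSize);
    }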

173
deps/v8/src/arm/simulator-arm.cc

@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdlib.h>
+#include <cstdarg>
 
 #include "v8.h"
 #include "disasm.h"
@@ -598,7 +598,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at %x, pc=%p\n", addr, instr);
+  PrintF("Unaligned unsigned halfword read at %x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
   return 0;
 }
@@ -609,7 +609,7 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at %x\n", addr);
+  PrintF("Unaligned signed halfword read at %x\n", addr);
   UNIMPLEMENTED();
   return 0;
 }
@@ -621,7 +621,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at %x, pc=%p\n", addr, instr);
+  PrintF("Unaligned unsigned halfword write at %x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
 }
@@ -632,7 +632,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at %x, pc=%p\n", addr, instr);
+  PrintF("Unaligned halfword write at %x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
 }
@@ -1051,7 +1051,6 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
       }
       set_register(r0, lo_res);
       set_register(r1, hi_res);
-      set_register(r0, result);
     }
     set_register(lr, saved_lr);
    set_pc(get_register(lr));
@@ -1417,8 +1416,12 @@ void Simulator::DecodeType01(Instr* instr) {
       case CMN: {
         if (instr->HasS()) {
-          Format(instr, "cmn'cond 'rn, 'shift_rm");
-          Format(instr, "cmn'cond 'rn, 'imm");
+          // Format(instr, "cmn'cond 'rn, 'shift_rm");
+          // Format(instr, "cmn'cond 'rn, 'imm");
+          alu_out = rn_val + shifter_operand;
+          SetNZFlags(alu_out);
+          SetCFlag(!CarryFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
         } else {
           ASSERT(type == 0);
           int rm = instr->RmField();
@@ -1567,6 +1570,7 @@ void Simulator::DecodeType2(Instr* instr) {
 void Simulator::DecodeType3(Instr* instr) {
+  ASSERT(instr->Bit(4) == 0);
   int rd = instr->RdField();
   int rn = instr->RnField();
   int32_t rn_val = get_register(rn);
@@ -1606,7 +1610,12 @@ void Simulator::DecodeType3(Instr* instr) {
     }
   }
   if (instr->HasB()) {
-    UNIMPLEMENTED();
+    if (instr->HasL()) {
+      uint8_t byte = ReadB(addr);
+      set_register(rd, byte);
+    } else {
+      UNIMPLEMENTED();
+    }
   } else {
     if (instr->HasL()) {
       set_register(rd, ReadW(addr, instr));
@@ -1631,12 +1640,13 @@ void Simulator::DecodeType4(Instr* instr) {
 void Simulator::DecodeType5(Instr* instr) {
   // Format(instr, "b'l'cond 'target");
-  int off = (instr->SImmed24Field() << 2) + 8;
-  intptr_t pc = get_pc();
+  int off = (instr->SImmed24Field() << 2);
+  intptr_t pc_address = get_pc();
   if (instr->HasLink()) {
-    set_register(lr, pc + Instr::kInstrSize);
+    set_register(lr, pc_address + Instr::kInstrSize);
   }
-  set_pc(pc+off);
+  int pc_reg = get_register(pc);
+  set_pc(pc_reg + off);
 }
@@ -1655,14 +1665,75 @@ void Simulator::DecodeType7(Instr* instr) {
 }
 
 
+void Simulator::DecodeUnconditional(Instr* instr) {
+  if (instr->Bits(7, 4) == 0x0B && instr->Bits(27, 25) == 0 && instr->HasL()) {
+    // Load halfword instruction, either register or immediate offset.
+    int rd = instr->RdField();
+    int rn = instr->RnField();
+    int32_t rn_val = get_register(rn);
+    int32_t addr = 0;
+    int32_t offset;
+    if (instr->Bit(22) == 0) {
+      // Register offset.
+      int rm = instr->RmField();
+      offset = get_register(rm);
+    } else {
+      // Immediate offset
+      offset = instr->Bits(3, 0) + (instr->Bits(11, 8) << 4);
+    }
+    switch (instr->PUField()) {
+      case 0: {
+        // Post index, negative.
+        ASSERT(!instr->HasW());
+        addr = rn_val;
+        rn_val -= offset;
+        set_register(rn, rn_val);
+        break;
+      }
+      case 1: {
+        // Post index, positive.
+        ASSERT(!instr->HasW());
+        addr = rn_val;
+        rn_val += offset;
+        set_register(rn, rn_val);
+        break;
+      }
+      case 2: {
+        // Pre index or offset, negative.
+        rn_val -= offset;
+        addr = rn_val;
+        if (instr->HasW()) {
+          set_register(rn, rn_val);
+        }
+        break;
+      }
+      case 3: {
+        // Pre index or offset, positive.
+        rn_val += offset;
+        addr = rn_val;
+        if (instr->HasW()) {
+          set_register(rn, rn_val);
+        }
+        break;
+      }
+      default: {
+        // The PU field is a 2-bit field.
+        UNREACHABLE();
+        break;
+      }
+    }
+    // Not sign extending, so load as unsigned.
+    uint16_t halfword = ReadH(addr, instr);
+    set_register(rd, halfword);
+  } else {
+    UNIMPLEMENTED();
+  }
+}
+
+
 // Executes the current instruction.
 void Simulator::InstructionDecode(Instr* instr) {
   pc_modified_ = false;
-  if (instr->ConditionField() == special_condition) {
-    Debugger dbg(this);
-    dbg.Stop(instr);
-    return;
-  }
   if (::v8::internal::FLAG_trace_sim) {
     disasm::NameConverter converter;
     disasm::Disassembler dasm(converter);
@@ -1672,7 +1743,9 @@ void Simulator::InstructionDecode(Instr* instr) {
                      reinterpret_cast<byte*>(instr));
     PrintF("  0x%x  %s\n", instr, buffer.start());
   }
-  if (ConditionallyExecute(instr)) {
+  if (instr->ConditionField() == special_condition) {
+    DecodeUnconditional(instr);
+  } else if (ConditionallyExecute(instr)) {
     switch (instr->TypeField()) {
       case 0:
      case 1: {
@@ -1748,19 +1821,35 @@ void Simulator::Execute() {
 }
 
 
-Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
-                        int32_t p3, int32_t p4) {
-  // Setup parameters
-  set_register(r0, p0);
-  set_register(r1, p1);
-  set_register(r2, p2);
-  set_register(r3, p3);
-  intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
-  *(--stack_pointer) = p4;
-  set_register(sp, reinterpret_cast<int32_t>(stack_pointer));
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Setup arguments
+
+  // First four arguments passed in registers.
+  ASSERT(argument_count >= 4);
+  set_register(r0, va_arg(parameters, int32_t));
+  set_register(r1, va_arg(parameters, int32_t));
+  set_register(r2, va_arg(parameters, int32_t));
+  set_register(r3, va_arg(parameters, int32_t));
+
+  // Remaining arguments passed on stack.
+  int original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+  if (OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -OS::ActivationFrameAlignment();
+  }
+  // Store remaining arguments on stack, from low to high memory.
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+  for (int i = 4; i < argument_count; i++) {
+    stack_argument[i - 4] = va_arg(parameters, int32_t);
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
 
   // Prepare to execute the code at entry
-  set_register(pc, entry);
+  set_register(pc, reinterpret_cast<int32_t>(entry));
   // Put down marker for end of simulation. The simulator will stop simulation
   // when the PC reaches this value. By saving the "end simulation" value into
   // the LR the simulation stops when returning to this call point.
@@ -1794,14 +1883,14 @@ Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
   Execute();
 
   // Check that the callee-saved registers have been preserved.
-  CHECK_EQ(get_register(r4), callee_saved_value);
-  CHECK_EQ(get_register(r5), callee_saved_value);
-  CHECK_EQ(get_register(r6), callee_saved_value);
-  CHECK_EQ(get_register(r7), callee_saved_value);
-  CHECK_EQ(get_register(r8), callee_saved_value);
-  CHECK_EQ(get_register(r9), callee_saved_value);
-  CHECK_EQ(get_register(r10), callee_saved_value);
-  CHECK_EQ(get_register(r11), callee_saved_value);
+  CHECK_EQ(callee_saved_value, get_register(r4));
+  CHECK_EQ(callee_saved_value, get_register(r5));
+  CHECK_EQ(callee_saved_value, get_register(r6));
+  CHECK_EQ(callee_saved_value, get_register(r7));
+  CHECK_EQ(callee_saved_value, get_register(r8));
+  CHECK_EQ(callee_saved_value, get_register(r9));
+  CHECK_EQ(callee_saved_value, get_register(r10));
+  CHECK_EQ(callee_saved_value, get_register(r11));
 
   // Restore callee-saved registers with the original value.
   set_register(r4, r4_val);
@@ -1813,8 +1902,12 @@ Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
   set_register(r10, r10_val);
   set_register(r11, r11_val);
 
-  int result = get_register(r0);
-  return reinterpret_cast<Object*>(result);
+  // Pop stack passed arguments.
+  CHECK_EQ(entry_stack, get_register(sp));
+  set_register(sp, original_stack);
+
+  int32_t result = get_register(r0);
+  return result;
 }
 
 } }  // namespace assembler::arm
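
With the variadic signature, one entry point serves both the five-argument JS
calling convention and the seven-argument regexp one. A usage sketch (entry
and p0..p6 are placeholders):

    // The first four arguments land in r0-r3; the rest are copied to a
    // stack block aligned to OS::ActivationFrameAlignment().
    int32_t result = assembler::arm::Simulator::current()->Call(
        FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6);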

27
deps/v8/src/arm/simulator-arm.h

@@ -40,7 +40,7 @@
 // When running without a simulator we call the entry directly.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  reinterpret_cast<Object*>(entry(p0, p1, p2, p3, p4))
+  (entry(p0, p1, p2, p3, p4))
 
 // Calculated the stack limit beyond which we will throw stack overflow errors.
 // This macro must be called from a C++ method. It relies on being able to take
@@ -49,13 +49,20 @@
 #define GENERATED_CODE_STACK_LIMIT(limit) \
   (reinterpret_cast<uintptr_t>(this) - limit)
 
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
 #else  // defined(__arm__)
 
 // When running with the simulator transition into simulated execution at this
 // point.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  assembler::arm::Simulator::current()->Call((int32_t)entry, (int32_t)p0, \
-      (int32_t)p1, (int32_t)p2, (int32_t)p3, (int32_t)p4)
+  reinterpret_cast<Object*>( \
+      assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
+                                                 p0, p1, p2, p3, p4))
 
 // The simulator has its own stack. Thus it has a different stack limit from
 // the C-based native code.
@@ -63,6 +70,10 @@
   (assembler::arm::Simulator::current()->StackLimit())
 
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  assembler::arm::Simulator::current()->Call( \
+      FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+
 #include "constants-arm.h"
@@ -109,11 +120,10 @@ class Simulator {
   // Call on program start.
   static void Initialize();
 
-  // V8 generally calls into generated code with 5 parameters. This is a
-  // convenience function, which sets up the simulator state and grabs the
-  // result on return.
-  v8::internal::Object* Call(int32_t entry, int32_t p0, int32_t p1,
-                             int32_t p2, int32_t p3, int32_t p4);
+  // V8 generally calls into generated JS code with 5 parameters and into
+  // generated RegExp code with 7 parameters. This is a convenience function,
+  // which sets up the simulator state and grabs the result on return.
+  int32_t Call(byte* entry, int argument_count, ...);
 
  private:
  enum special_values {
@@ -174,6 +184,7 @@ class Simulator {
   void DecodeType5(Instr* instr);
   void DecodeType6(Instr* instr);
   void DecodeType7(Instr* instr);
+  void DecodeUnconditional(Instr* instr);
 
   // Executes one instruction.
   void InstructionDecode(Instr* instr);

14
deps/v8/src/arm/stub-cache-arm.cc

@@ -791,7 +791,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
-  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::call_global_inline, 1, r2, r3);
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1342,6 +1342,18 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
 }
 
 
+Object* ConstructStubCompiler::CompileConstructStub(
+    SharedFunctionInfo* shared) {
+  // Not implemented yet - just jump to generic stub.
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+
 #undef __
 
 } }  // namespace v8::internal

26
deps/v8/src/arm/virtual-frame-arm.cc

@@ -102,7 +102,8 @@ void VirtualFrame::Enter() {
 #ifdef DEBUG
   // Verify that r1 contains a JS function.  The following code relies
   // on r2 being available for use.
-  { Label map_check, done;
+  if (FLAG_debug_code) {
+    Label map_check, done;
     __ tst(r1, Operand(kSmiTagMask));
     __ b(ne, &map_check);
     __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
@@ -140,9 +141,26 @@ void VirtualFrame::AllocateStackSlots() {
     Adjust(count);
     // Initialize stack slots with 'undefined' value.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    for (int i = 0; i < count; i++) {
-      __ push(ip);
-    }
+  }
+  if (FLAG_check_stack) {
+    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+  }
+  for (int i = 0; i < count; i++) {
+    __ push(ip);
+  }
+  if (FLAG_check_stack) {
+    // Put the lr setup instruction in the delay slot.  The 'sizeof(Instr)' is
+    // added to the implicit 8 byte offset that always applies to operations
+    // with pc and gives a return address 12 bytes down.
+    masm()->add(lr, pc, Operand(sizeof(Instr)));
+    masm()->cmp(sp, Operand(r2));
+    StackCheckStub stub;
+    // Call the stub if lower.
+    masm()->mov(pc,
+                Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                        RelocInfo::CODE_TARGET),
+                LeaveCC,
+                lo);
   }
 }

42
deps/v8/src/assembler.cc

@@ -42,6 +42,20 @@
 #include "serialize.h"
 #include "stub-cache.h"
 #include "regexp-stack.h"
+#include "ast.h"
+#include "regexp-macro-assembler.h"
+// Include native regexp-macro-assembler.
+#ifdef V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_IA32
+#include "ia32/regexp-macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/regexp-macro-assembler-arm.h"
+#else  // Unknown architecture.
+#error "Unknown architecture."
+#endif  // Target architecture.
+#endif  // V8_NATIVE_REGEXP
 
 namespace v8 {
 namespace internal {
@@ -597,6 +611,34 @@ ExternalReference ExternalReference::new_space_allocation_limit_address() {
   return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
 }
 
+#ifdef V8_NATIVE_REGEXP
+
+ExternalReference ExternalReference::re_check_stack_guard_state() {
+  Address function;
+#ifdef V8_TARGET_ARCH_X64
+  function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
+#elif V8_TARGET_ARCH_IA32
+  function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM
+  function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#else
+  UNREACHABLE("Unexpected architecture");
+#endif
+  return ExternalReference(Redirect(function));
+}
+
+ExternalReference ExternalReference::re_grow_stack() {
+  return ExternalReference(
+      Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
+}
+
+ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
+  return ExternalReference(Redirect(
+      FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
+}
+
+#endif
+
 
 static double add_two_doubles(double x, double y) {
   return x + y;
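
Each new regexp reference is wrapped in Redirect(...): on hardware this is the
identity, while under the ARM simulator it substitutes a trampoline so that a
"call" from generated code traps into the simulator, which then invokes the
real C function. A conceptual sketch only - the guard and helper names below
are hypothetical, not V8 API:

    void* Redirect(void* c_function) {
    #ifdef USE_SIMULATOR  // hypothetical guard for simulator builds
      // Hypothetical helper: returns an address the simulator recognizes
      // and maps back to c_function when generated code calls it.
      return MakeSimulatorTrampoline(c_function);
    #else
      return c_function;  // native build: call the C function directly
    #endif
    }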

13
deps/v8/src/assembler.h

@@ -431,6 +431,19 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference debug_step_in_fp_address();
 #endif
 
+#ifdef V8_NATIVE_REGEXP
+  // C functions called from RegExp generated code.
+
+  // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
+  static ExternalReference re_case_insensitive_compare_uc16();
+
+  // Function RegExpMacroAssembler*::CheckStackGuardState()
+  static ExternalReference re_check_stack_guard_state();
+
+  // Function NativeRegExpMacroAssembler::GrowStack()
+  static ExternalReference re_grow_stack();
+#endif
+
   // This lets you register a function that rewrites all external references.
   // Used by the ARM simulator to catch calls to external references.
   static void set_redirector(ExternalReferenceRedirector* redirector) {

81
deps/v8/src/builtins.cc

@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "api.h"
+#include "arguments.h"
 #include "bootstrapper.h"
 #include "builtins.h"
 #include "ic-inl.h"
@@ -47,17 +48,13 @@ namespace internal {
 //       BUILTIN_END
 //
 // In the body of the builtin function, the variable 'receiver' is visible.
-// The arguments can be accessed through:
+// The arguments can be accessed through the Arguments object args.
 //
-//   BUILTIN_ARG(0): Receiver (also available as 'receiver')
-//   BUILTIN_ARG(1): First argument
+//   args[0]: Receiver (also available as 'receiver')
+//   args[1]: First argument
 //     ...
-//   BUILTIN_ARG(n): Last argument
-//
-// and they evaluate to undefined values if too few arguments were
-// passed to the builtin function invocation.
-//
-// __argc__ is the number of arguments including the receiver.
+//   args[n]: Last argument
+//   args.length(): Number of arguments including the receiver.
 //
 // ----------------------------------------------------------------------------
@@ -65,21 +62,8 @@ namespace internal {
 // builtin was invoked as a constructor as part of the
 // arguments. Maybe we also want to pass the called function?
 #define BUILTIN(name)                                    \
-  static Object* Builtin_##name(int __argc__, Object** __argv__) { \
-    Handle<Object> receiver(&__argv__[0]);
-
-
-// Use an inline function to avoid evaluating the index (n) more than
-// once in the BUILTIN_ARG macro.
-static inline Object* __builtin_arg__(int n, int argc, Object** argv) {
-  ASSERT(n >= 0);
-  return (argc > n) ? argv[-n] : Heap::undefined_value();
-}
-
-
-// NOTE: Argument 0 is the receiver.  The first 'real' argument is
-// argument 1 - BUILTIN_ARG(1).
-#define BUILTIN_ARG(n) (__builtin_arg__(n, __argc__, __argv__))
+  static Object* Builtin_##name(Arguments args) {        \
+    Handle<Object> receiver = args.at<Object>(0);
 
 
 #define BUILTIN_END                                       \
@@ -168,8 +152,8 @@ BUILTIN(ArrayCode) {
   // Optimize the case where there is one argument and the argument is a
   // small smi.
-  if (__argc__ == 2) {
-    Object* obj = BUILTIN_ARG(1);
+  if (args.length() == 2) {
+    Object* obj = args[1];
     if (obj->IsSmi()) {
       int len = Smi::cast(obj)->value();
       if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
@@ -182,14 +166,14 @@ BUILTIN(ArrayCode) {
     // Take the argument as the length.
     obj = array->Initialize(0);
     if (obj->IsFailure()) return obj;
-    if (__argc__ == 2) return array->SetElementsLength(BUILTIN_ARG(1));
+    if (args.length() == 2) return array->SetElementsLength(args[1]);
   }
 
   // Optimize the case where there are no parameters passed.
-  if (__argc__ == 1) return array->Initialize(4);
+  if (args.length() == 1) return array->Initialize(4);
 
   // Take the arguments as elements.
-  int number_of_elements = __argc__ - 1;
+  int number_of_elements = args.length() - 1;
   Smi* len = Smi::FromInt(number_of_elements);
   Object* obj = Heap::AllocateFixedArrayWithHoles(len->value());
   if (obj->IsFailure()) return obj;
WriteBarrierMode mode = elms->GetWriteBarrierMode(); WriteBarrierMode mode = elms->GetWriteBarrierMode();
// Fill in the content // Fill in the content
for (int index = 0; index < number_of_elements; index++) { for (int index = 0; index < number_of_elements; index++) {
elms->set(index, BUILTIN_ARG(index+1), mode); elms->set(index, args[index+1], mode);
} }
// Set length and elements on the array. // Set length and elements on the array.
@@ -217,13 +201,13 @@ BUILTIN(ArrayPush) {
   int len = Smi::cast(array->length())->value();
 
   // Set new length.
-  int new_length = len + __argc__ - 1;
+  int new_length = len + args.length() - 1;
   FixedArray* elms = FixedArray::cast(array->elements());
 
   if (new_length <= elms->length()) {
     // Backing storage has extra space for the provided values.
-    for (int index = 0; index < __argc__ - 1; index++) {
-      elms->set(index + len, BUILTIN_ARG(index+1));
+    for (int index = 0; index < args.length() - 1; index++) {
+      elms->set(index + len, args[index+1]);
     }
   } else {
     // New backing storage is needed.
@@ -235,8 +219,8 @@ BUILTIN(ArrayPush) {
     // Fill out the new array with old elements.
     for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
     // Add the provided values.
-    for (int index = 0; index < __argc__ - 1; index++) {
-      new_elms->set(index + len, BUILTIN_ARG(index+1), mode);
+    for (int index = 0; index < args.length() - 1; index++) {
+      new_elms->set(index + len, args[index+1], mode);
     }
     // Set the new backing storage.
     array->set_elements(new_elms);
@@ -353,7 +337,7 @@ BUILTIN(HandleApiCall) {
   FunctionTemplateInfo* fun_data =
       FunctionTemplateInfo::cast(function->shared()->function_data());
-  Object* raw_holder = TypeCheck(__argc__, __argv__, fun_data);
+  Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
 
   if (raw_holder->IsNull()) {
     // This function cannot be called with the given receiver.  Abort!
@@ -380,19 +364,19 @@ BUILTIN(HandleApiCall) {
     Handle<JSObject> holder_handle(JSObject::cast(raw_holder));
     v8::Local<v8::Object> holder = v8::Utils::ToLocal(holder_handle);
     LOG(ApiObjectAccess("call", JSObject::cast(*receiver)));
-    v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+    v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
         data,
         holder,
        callee,
         is_construct,
-        reinterpret_cast<void**>(__argv__ - 1),
-        __argc__ - 1);
+        reinterpret_cast<void**>(&args[0] - 1),
+        args.length() - 1);
     v8::Handle<v8::Value> value;
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
-      value = callback(args);
+      value = callback(new_args);
     }
     if (value.IsEmpty()) {
       result = Heap::undefined_value();
@@ -413,13 +397,12 @@ BUILTIN_END
 // API. The object can be called as either a constructor (using new) or just as
 // a function (without new).
 static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
-                                                    int __argc__,
-                                                    Object** __argv__) {
+                                                    Arguments args) {
   // Non-functions are never called as constructors. Even if this is an object
   // called as a constructor the delegate call is not a construct call.
   ASSERT(!CalledAsConstructor());
 
-  Handle<Object> receiver(&__argv__[0]);
+  Handle<Object> receiver = args.at<Object>(0);
 
   // Get the object called.
   JSObject* obj = JSObject::cast(*receiver);
@@ -448,18 +431,18 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
   Handle<JSFunction> callee_handle(constructor);
   v8::Local<v8::Function> callee = v8::Utils::ToLocal(callee_handle);
   LOG(ApiObjectAccess("call non-function", JSObject::cast(*receiver)));
-  v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+  v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
      data,
       self,
       callee,
       is_construct_call,
-      reinterpret_cast<void**>(__argv__ - 1),
-      __argc__ - 1);
+      reinterpret_cast<void**>(&args[0] - 1),
+      args.length() - 1);
   v8::Handle<v8::Value> value;
   {
     // Leaving JavaScript.
    VMState state(EXTERNAL);
-    value = callback(args);
+    value = callback(new_args);
   }
   if (value.IsEmpty()) {
     result = Heap::undefined_value();
@ -476,7 +459,7 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
// Handle calls to non-function objects created through the API. This delegate // Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a normal function call. // function is used when the call is a normal function call.
BUILTIN(HandleApiCallAsFunction) { BUILTIN(HandleApiCallAsFunction) {
return HandleApiCallAsFunctionOrConstructor(false, __argc__, __argv__); return HandleApiCallAsFunctionOrConstructor(false, args);
} }
BUILTIN_END BUILTIN_END
@ -484,7 +467,7 @@ BUILTIN_END
// Handle calls to non-function objects created through the API. This delegate // Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a construct call. // function is used when the call is a construct call.
BUILTIN(HandleApiCallAsConstructor) { BUILTIN(HandleApiCallAsConstructor) {
return HandleApiCallAsFunctionOrConstructor(true, __argc__, __argv__); return HandleApiCallAsFunctionOrConstructor(true, args);
} }
BUILTIN_END BUILTIN_END
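The refactor above replaces the raw __argc__/__argv__ pair with a single Arguments wrapper. A minimal sketch of the idea (simplified and hypothetical; the real class lives in deps/v8/src/arguments.h and is not part of this diff):

    // One object carries the slot pointer and the count, so builtins index
    // arguments instead of juggling two raw parameters.
    class Arguments {
     public:
      Arguments(int length, Object** arguments)
          : length_(length), arguments_(arguments) {}
      Object*& operator[](int index) {
        ASSERT(0 <= index && index < length_);
        return arguments_[-index];  // assumes slots are laid out in reverse
      }
      int length() const { return length_; }
     private:
      int length_;
      Object** arguments_;
    };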

32
deps/v8/src/checks.h

@@ -95,6 +95,38 @@ static inline void CheckNonEqualsHelper(const char* file,
  }
}
#ifdef V8_TARGET_ARCH_X64
// Helper function used by the CHECK_EQ function when given intptr_t
// arguments. Should not be called directly.
static inline void CheckEqualsHelper(const char* file,
int line,
const char* expected_source,
intptr_t expected,
const char* value_source,
intptr_t value) {
  if (expected != value) {
    V8_Fatal(file, line,
             "CHECK_EQ(%s, %s) failed\n#   Expected: %li\n#   Found: %li",
             expected_source, value_source,
             static_cast<long>(expected), static_cast<long>(value));
  }
}
// Helper function used by the CHECK_NE function when given intptr_t
// arguments. Should not be called directly.
static inline void CheckNonEqualsHelper(const char* file,
int line,
const char* unexpected_source,
intptr_t unexpected,
const char* value_source,
intptr_t value) {
  if (unexpected == value) {
    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %li",
             unexpected_source, value_source, static_cast<long>(value));
  }
}
#endif // V8_TARGET_ARCH_X64
// Helper function used by the CHECK function when given string
// arguments. Should not be called directly.
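For illustration, a hypothetical assertion that would resolve to the new intptr_t overloads on x64 (not code from the diff):

    char buffer[16];
    intptr_t begin = reinterpret_cast<intptr_t>(buffer);
    intptr_t end = reinterpret_cast<intptr_t>(buffer + 16);
    CHECK_EQ(begin + 16, end);  // dispatches to the intptr_t helper on x64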

1
deps/v8/src/code-stubs.h

@@ -57,6 +57,7 @@ class CodeStub BASE_EMBEDDED {
   SetProperty,    // ARM only
   InvokeBuiltin,  // ARM only
   JSExit,         // ARM only
+  RegExpCEntry,   // ARM only
   NUMBER_OF_IDS
 };

7
deps/v8/src/compiler.cc

@@ -425,6 +425,13 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
   // Set the expected number of properties for instances.
   SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
+  // Set the optimization hints after performing lazy compilation, as these are
+  // not set when the function is set up as a lazily compiled function.
+  shared->SetThisPropertyAssignmentsInfo(
+      lit->has_only_this_property_assignments(),
+      lit->has_only_simple_this_property_assignments(),
+      *lit->this_property_assignments());
   // Check the function has compiled code.
   ASSERT(shared->is_compiled());
   return true;
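These hints record whether a constructor body consists solely of this.x = ... assignments, which is what the specialized construct stub added in stub-cache-ia32.cc below keys on. An illustrative JavaScript shape (example only, not from the diff):

    // Qualifies: only simple this-property assignments in the body.
    //   function Point(x, y) { this.x = x; this.y = y; }
    // Does not qualify: any other statement in the body.
    //   function Widget(n) { this.n = n; Init(this); }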

2
deps/v8/src/d8.js

@@ -1143,7 +1143,7 @@ function DebugResponseDetails(response) {
  * @constructor
  */
 function ProtocolPackage(json) {
-  this.packet_ = eval('(' + json + ')');
+  this.packet_ = JSON.parse(json);
   this.refs_ = [];
   if (this.packet_.refs) {
     for (var i = 0; i < this.packet_.refs.length; i++) {

9
deps/v8/src/debug-delay.js

@@ -466,9 +466,14 @@ Debug.source = function(f) {
   return %FunctionGetSourceCode(f);
 };
-Debug.assembler = function(f) {
+Debug.disassemble = function(f) {
   if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
-  return %FunctionGetAssemblerCode(f);
+  return %DebugDisassembleFunction(f);
+};
+
+Debug.disassembleConstructor = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %DebugDisassembleConstructor(f);
 };
 Debug.sourcePosition = function(f) {

6
deps/v8/src/debug.cc

@@ -661,7 +661,7 @@ bool Debug::CompileDebuggerScript(int index) {
   // Check for caught exceptions.
   if (caught_exception) {
     Handle<Object> message = MessageHandler::MakeMessageObject(
-        "error_loading_debugger", NULL, HandleVector<Object>(&result, 1),
+        "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
         Handle<String>());
     MessageHandler::ReportMessage(NULL, message);
     return false;
@@ -2001,9 +2001,7 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
                             event_listener_data_.location() };
       Handle<Object> result = Execution::TryCall(fun, Top::global(),
                                                  argc, argv, &caught_exception);
-      if (caught_exception) {
-        // Silently ignore exceptions from debug event listeners.
-      }
+      // Silently ignore exceptions from debug event listeners.
     }
   }
 }

16
deps/v8/src/execution.cc

@@ -156,9 +156,12 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
     ASSERT(catcher.HasCaught());
     ASSERT(Top::has_pending_exception());
     ASSERT(Top::external_caught_exception());
-    bool is_bottom_call = HandleScopeImplementer::instance()->CallDepthIsZero();
-    Top::OptionalRescheduleException(is_bottom_call, true);
-    result = v8::Utils::OpenHandle(*catcher.Exception());
+    if (Top::pending_exception() == Heap::termination_exception()) {
+      result = Factory::termination_exception();
+    } else {
+      result = v8::Utils::OpenHandle(*catcher.Exception());
+    }
+    Top::OptionalRescheduleException(true);
   }
   ASSERT(!Top::has_pending_exception());
@@ -234,8 +237,9 @@ StackGuard::StackGuard() {
          (thread_local_.climit_ == kInterruptLimit &&
           thread_local_.interrupt_flags_ != 0));
-  thread_local_.initial_jslimit_ = thread_local_.jslimit_ =
-      GENERATED_CODE_STACK_LIMIT(kLimitSize);
+  uintptr_t limit = GENERATED_CODE_STACK_LIMIT(kLimitSize);
+  thread_local_.initial_jslimit_ = thread_local_.jslimit_ = limit;
+  Heap::SetStackLimit(limit);
   // NOTE: The check for overflow is not safe as there is no guarantee that
   // the running thread has its stack in all memory up to address 0x00000000.
   thread_local_.initial_climit_ = thread_local_.climit_ =
@@ -283,6 +287,7 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
   // leave them alone.
   if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
     thread_local_.jslimit_ = limit;
+    Heap::SetStackLimit(limit);
   }
   if (thread_local_.climit_ == thread_local_.initial_climit_) {
     thread_local_.climit_ = limit;
@@ -397,6 +402,7 @@ char* StackGuard::ArchiveStackGuard(char* to) {
 char* StackGuard::RestoreStackGuard(char* from) {
   ExecutionAccess access;
   memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+  Heap::SetStackLimit(thread_local_.jslimit_);
   return from + sizeof(ThreadLocal);
 }

22
deps/v8/src/execution.h

@@ -175,6 +175,10 @@ class StackGuard BASE_EMBEDDED {
 #endif
   static void Continue(InterruptFlag after_what);
+  static uintptr_t jslimit() {
+    return thread_local_.jslimit_;
+  }
+
  private:
   // You should hold the ExecutionAccess lock when calling this method.
   static bool IsSet(const ExecutionAccess& lock);
@@ -188,6 +192,7 @@ class StackGuard BASE_EMBEDDED {
   // You should hold the ExecutionAccess lock when calling this method.
   static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
+    Heap::SetStackLimit(value);
     thread_local_.jslimit_ = value;
     thread_local_.climit_ = value;
   }
@@ -200,6 +205,7 @@ class StackGuard BASE_EMBEDDED {
       set_limits(kIllegalLimit, lock);
     } else {
       thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+      Heap::SetStackLimit(thread_local_.jslimit_);
       thread_local_.climit_ = thread_local_.initial_climit_;
     }
   }
@@ -220,13 +226,15 @@ class StackGuard BASE_EMBEDDED {
   class ThreadLocal {
    public:
     ThreadLocal()
       : initial_jslimit_(kIllegalLimit),
         jslimit_(kIllegalLimit),
         initial_climit_(kIllegalLimit),
         climit_(kIllegalLimit),
         nesting_(0),
         postpone_interrupts_nesting_(0),
-        interrupt_flags_(0) {}
+        interrupt_flags_(0) {
+      Heap::SetStackLimit(kIllegalLimit);
+    }
     uintptr_t initial_jslimit_;
     uintptr_t jslimit_;
     uintptr_t initial_climit_;
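Every write to jslimit_ above is now mirrored into the heap's root array. The point, per the comment on Heap::SetStackLimit further down, is that generated code polls the stack limit constantly, and a compare against a root-array slot is cheaper than loading the static variable. A hypothetical shape of the emitted poll (illustrative only, not code from the diff):

    // Illustrative ia32 stack check against the cached root entry:
    //   cmp esp, [roots_address + kStackLimitRootIndex * kPointerSize]
    //   j(below, &overflow_or_interrupt)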

5
deps/v8/src/frames-inl.h

@@ -128,8 +128,9 @@ inline Address StandardFrame::ComputePCAddress(Address fp) {
 inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
-  int context = Memory::int_at(fp + StandardFrameConstants::kContextOffset);
-  return context == ArgumentsAdaptorFrame::SENTINEL;
+  Object* marker =
+      Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
+  return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
 }

9
deps/v8/src/frames.h

@@ -434,15 +434,6 @@ class JavaScriptFrame: public StandardFrame {
 // match the formal number of parameters.
 class ArgumentsAdaptorFrame: public JavaScriptFrame {
  public:
-  // This sentinel value is temporarily used to distinguish arguments
-  // adaptor frames from ordinary JavaScript frames. If a frame has
-  // the sentinel as its context, it is an arguments adaptor frame. It
-  // must be tagged as a small integer to avoid GC issues. Crud.
-  enum {
-    SENTINEL = (1 << kSmiTagSize) | kSmiTag,
-    NON_SENTINEL = ~SENTINEL
-  };
   virtual Type type() const { return ARGUMENTS_ADAPTOR; }
   // Determine the code for the frame.
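For illustration, the writer and reader sides of the new marker convention, juxtaposed from hunks elsewhere in this commit (a Smi never looks like a heap pointer, so the GC can scan the slot safely):

    // Writer: frame setup pushes the marker into the context slot.
    __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    // Reader: frame inspection compares the slot against the same Smi.
    Object* marker =
        Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
    bool is_adaptor = marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);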

23
deps/v8/src/globals.h

@@ -47,7 +47,14 @@ namespace internal {
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
 #else
-#error Your architecture was not detected as supported by v8
+#error Your host architecture was not detected as supported by v8
+#endif
+
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
+#define V8_TARGET_CAN_READ_UNALIGNED 1
+#elif V8_TARGET_ARCH_ARM
+#else
+#error Your target architecture is not supported by v8
 #endif
 // Support for alternative bool type. This is only enabled if the code is
@@ -134,17 +141,6 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
-// Tag information for HeapObject.
-const int kHeapObjectTag = 1;
-const int kHeapObjectTagSize = 2;
-const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
-
-// Tag information for Smi.
-const int kSmiTag = 0;
-const int kSmiTagSize = 1;
-const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
-
 // Tag information for Failure.
 const int kFailureTag = 3;
@@ -429,9 +425,6 @@ enum StateTag {
 #define HAS_FAILURE_TAG(value) \
   ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-#define HAS_HEAP_OBJECT_TAG(value) \
-  ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) == kHeapObjectTag)
 // OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
 #define OBJECT_SIZE_ALIGN(value) \
   (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
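A quick worked example of the alignment macro that stays behind (assuming 4-byte object alignment on a 32-bit target):

    int raw_size = 11;
    int aligned = OBJECT_SIZE_ALIGN(raw_size);  // (11 + 3) & ~3 == 12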

12
deps/v8/src/handles.cc

@@ -53,8 +53,8 @@ int HandleScope::NumberOfHandles() {
 }
-void** HandleScope::Extend() {
-  void** result = current_.next;
+Object** HandleScope::Extend() {
+  Object** result = current_.next;
   ASSERT(result == current_.limit);
   // Make sure there's at least one scope on the stack and that the
@@ -68,7 +68,7 @@ void** HandleScope::Extend() {
   // If there's more room in the last block, we use that. This is used
   // for fast creation of scopes after scope barriers.
   if (!impl->Blocks()->is_empty()) {
-    void** limit = &impl->Blocks()->last()[kHandleBlockSize];
+    Object** limit = &impl->Blocks()->last()[kHandleBlockSize];
     if (current_.limit != limit) {
       current_.limit = limit;
     }
@@ -96,10 +96,10 @@ void HandleScope::DeleteExtensions() {
 }
-void HandleScope::ZapRange(void** start, void** end) {
+void HandleScope::ZapRange(Object** start, Object** end) {
   if (start == NULL) return;
-  for (void** p = start; p < end; p++) {
-    *p = reinterpret_cast<void*>(v8::internal::kHandleZapValue);
+  for (Object** p = start; p < end; p++) {
+    *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
   }
 }
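Retyping the slots from void** to Object** means a handle block is now, by type, an array of tagged pointers, so anything that walks GC roots can visit it directly. A minimal sketch under that assumption (hypothetical visitor interface, not from this diff):

    // Visit every handle slot in one block as a strong GC root.
    for (Object** p = block_start; p < block_limit; p++) {
      visitor->VisitPointer(p);
    }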

6
deps/v8/src/handles.h

@@ -121,7 +121,7 @@ class HandleScope {
   // Creates a new handle with the given value.
   template <typename T>
   static inline T** CreateHandle(T* value) {
-    void** cur = current_.next;
+    internal::Object** cur = current_.next;
     if (cur == current_.limit) cur = Extend();
     // Update the current next field, set the value in the created
     // handle, and return the result.
@@ -164,13 +164,13 @@ class HandleScope {
   }
   // Extend the handle scope making room for more handles.
-  static void** Extend();
+  static internal::Object** Extend();
   // Deallocates any extensions used by the current scope.
   static void DeleteExtensions();
   // Zaps the handles in the half-open interval [start, end).
-  static void ZapRange(void** start, void** end);
+  static void ZapRange(internal::Object** start, internal::Object** end);
   friend class v8::HandleScope;
   friend class v8::ImplementationUtilities;

99
deps/v8/src/heap.cc

@@ -39,6 +39,9 @@
 #include "scanner.h"
 #include "scopeinfo.h"
 #include "v8threads.h"
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#include "regexp-macro-assembler.h"
+#endif
 namespace v8 {
 namespace internal {
@@ -254,6 +257,7 @@ void Heap::ReportStatisticsAfterGC() {
 void Heap::GarbageCollectionPrologue() {
+  TranscendentalCache::Clear();
   gc_count_++;
 #ifdef DEBUG
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
@@ -465,9 +469,9 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
     old_gen_allocation_limit_ =
         old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
     old_gen_exhausted_ = false;
+  } else {
+    Scavenge();
   }
-  Scavenge();
   Counters::objs_since_last_young.Set(0);
   PostGarbageCollectionProcessing();
@@ -521,12 +525,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
   Counters::objs_since_last_full.Set(0);
   context_disposed_pending_ = false;
-
-  Scavenge();
-
-  // Shrink new space as much as possible after compacting full
-  // garbage collections.
-  if (is_compacting) new_space_.Shrink();
 }
@@ -1326,6 +1324,14 @@ void Heap::CreateCEntryStub() {
 }
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+void Heap::CreateRegExpCEntryStub() {
+  RegExpCEntryStub stub;
+  set_re_c_entry_code(*stub.GetCode());
+}
+#endif
 void Heap::CreateCEntryDebugBreakStub() {
   CEntryDebugBreakStub stub;
   set_c_entry_debug_break_code(*stub.GetCode());
@@ -1362,6 +1368,9 @@ void Heap::CreateFixedStubs() {
   Heap::CreateCEntryDebugBreakStub();
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+  Heap::CreateRegExpCEntryStub();
+#endif
 }
@@ -2785,6 +2794,41 @@ STRUCT_LIST(MAKE_CASE)
}
bool Heap::IdleNotification() {
static const int kIdlesBeforeCollection = 7;
static int number_idle_notifications = 0;
static int last_gc_count = gc_count_;
bool finished = false;
if (last_gc_count == gc_count_) {
number_idle_notifications++;
} else {
number_idle_notifications = 0;
last_gc_count = gc_count_;
}
if (number_idle_notifications >= kIdlesBeforeCollection) {
// The first time through we collect without forcing compaction.
// The second time through we force compaction and quit.
bool force_compaction =
number_idle_notifications > kIdlesBeforeCollection;
CollectAllGarbage(force_compaction);
last_gc_count = gc_count_;
if (force_compaction) {
// Shrink new space.
new_space_.Shrink();
number_idle_notifications = 0;
finished = true;
}
}
// Uncommit unused memory in new space.
Heap::UncommitFromSpace();
return finished;
}
#ifdef DEBUG
void Heap::Print() {
@@ -2950,7 +2994,7 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
 #ifdef DEBUG
 void Heap::ZapFromSpace() {
-  ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
+  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
   for (Address a = new_space_.FromSpaceLow();
        a < new_space_.FromSpaceHigh();
        a += kPointerSize) {
@@ -3223,6 +3267,17 @@ bool Heap::Setup(bool create_heap_objects) {
}
void Heap::SetStackLimit(intptr_t limit) {
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
// Set up the special root array entry containing the stack guard.
// This is actually an address, but the tag makes the GC ignore it.
roots_[kStackLimitRootIndex] =
reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag);
}
void Heap::TearDown() {
  GlobalHandles::TearDown();
@@ -3932,4 +3987,30 @@ bool Heap::GarbageCollectionGreedyCheck() {
}
#endif
TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
: type_(t) {
uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
uint32_t in1 = 0xffffffffu; // generated by the FPU.
for (int i = 0; i < kCacheSize; i++) {
elements_[i].in[0] = in0;
elements_[i].in[1] = in1;
elements_[i].output = NULL;
}
}
TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
void TranscendentalCache::Clear() {
for (int i = 0; i < kNumberOfCaches; i++) {
if (caches_[i] != NULL) {
delete caches_[i];
caches_[i] = NULL;
}
}
}
} }  // namespace v8::internal
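The cache is keyed on the raw 64-bit pattern of the input double, and usage is a single static call. A hypothetical caller, sketched from the interface above (the real callers would be the Math runtime functions):

    // Returns a heap number for sin(0.5), reusing the cached result when the
    // same bit pattern was computed since the last GC cleared the cache.
    Object* result = TranscendentalCache::Get(TranscendentalCache::SIN, 0.5);
    if (result->IsFailure()) return result;  // allocation failed; propagate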

164
deps/v8/src/heap.h

@@ -28,15 +28,31 @@
 #ifndef V8_HEAP_H_
 #define V8_HEAP_H_
+#include <math.h>
+
 #include "zone-inl.h"
 namespace v8 {
 namespace internal {
 // Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
-  V(Map, meta_map, MetaMap) \
+#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+  /* Cluster the most popular ones in a few cache lines here at the top. */ \
+  V(Smi, stack_limit, StackLimit) \
+  V(Object, undefined_value, UndefinedValue) \
+  V(Object, the_hole_value, TheHoleValue) \
+  V(Object, null_value, NullValue) \
+  V(Object, true_value, TrueValue) \
+  V(Object, false_value, FalseValue) \
   V(Map, heap_number_map, HeapNumberMap) \
+  V(Map, global_context_map, GlobalContextMap) \
+  V(Map, fixed_array_map, FixedArrayMap) \
+  V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
+  V(Map, meta_map, MetaMap) \
+  V(Object, termination_exception, TerminationException) \
+  V(Map, hash_table_map, HashTableMap) \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
   V(Map, short_string_map, ShortStringMap) \
   V(Map, medium_string_map, MediumStringMap) \
   V(Map, long_string_map, LongStringMap) \
@@ -95,11 +111,8 @@ namespace internal {
   V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
   V(Map, byte_array_map, ByteArrayMap) \
   V(Map, pixel_array_map, PixelArrayMap) \
-  V(Map, fixed_array_map, FixedArrayMap) \
-  V(Map, hash_table_map, HashTableMap) \
   V(Map, context_map, ContextMap) \
   V(Map, catch_context_map, CatchContextMap) \
-  V(Map, global_context_map, GlobalContextMap) \
   V(Map, code_map, CodeMap) \
   V(Map, oddball_map, OddballMap) \
   V(Map, global_property_cell_map, GlobalPropertyCellMap) \
@@ -109,17 +122,9 @@ namespace internal {
   V(Map, one_pointer_filler_map, OnePointerFillerMap) \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
   V(Object, nan_value, NanValue) \
-  V(Object, undefined_value, UndefinedValue) \
-  V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
-  V(Object, termination_exception, TerminationException) \
   V(Object, minus_zero_value, MinusZeroValue) \
-  V(Object, null_value, NullValue) \
-  V(Object, true_value, TrueValue) \
-  V(Object, false_value, FalseValue) \
   V(String, empty_string, EmptyString) \
-  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
-  V(Object, the_hole_value, TheHoleValue) \
   V(Map, neander_map, NeanderMap) \
   V(JSObject, message_listeners, MessageListeners) \
   V(Proxy, prototype_accessors, PrototypeAccessors) \
@@ -132,8 +137,15 @@ namespace internal {
   V(FixedArray, number_string_cache, NumberStringCache) \
   V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
   V(FixedArray, natives_source_cache, NativesSourceCache) \
   V(Object, last_script_id, LastScriptId)
+
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#define STRONG_ROOT_LIST(V) \
+  UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+  V(Code, re_c_entry_code, RegExpCEntryCode)
+#else
+#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
+#endif
 #define ROOT_LIST(V) \
   STRONG_ROOT_LIST(V) \
@@ -227,6 +239,11 @@ class Heap : public AllStatic {
   // Destroys all memory allocated by the heap.
   static void TearDown();
+  // Sets the stack limit in the roots_ array. Some architectures generate code
+  // that looks here, because it is faster than loading from the static jslimit_
+  // variable.
+  static void SetStackLimit(intptr_t limit);
   // Returns whether Setup has been called.
   static bool HasBeenSetup();
@@ -843,37 +860,7 @@ class Heap : public AllStatic {
   }
   // Can be called when the embedding application is idle.
-  static bool IdleNotification() {
-    static const int kIdlesBeforeCollection = 7;
-    static int number_idle_notifications = 0;
-    static int last_gc_count = gc_count_;
-
-    bool finished = false;
-
-    if (last_gc_count == gc_count_) {
-      number_idle_notifications++;
-    } else {
-      number_idle_notifications = 0;
-      last_gc_count = gc_count_;
-    }
-
-    if (number_idle_notifications >= kIdlesBeforeCollection) {
-      // The first time through we collect without forcing compaction.
-      // The second time through we force compaction and quit.
-      bool force_compaction =
-          number_idle_notifications > kIdlesBeforeCollection;
-      CollectAllGarbage(force_compaction);
-      last_gc_count = gc_count_;
-      if (force_compaction) {
-        number_idle_notifications = 0;
-        finished = true;
-      }
-    }
-
-    // Uncommit unused memory in new space.
-    Heap::UncommitFromSpace();
-    return finished;
-  }
+  static bool IdleNotification();
   // Declare all the root indices.
   enum RootListIndex {
@@ -1048,6 +1035,8 @@ class Heap : public AllStatic {
   static void CreateCEntryDebugBreakStub();
   static void CreateJSEntryStub();
   static void CreateJSConstructEntryStub();
+  static void CreateRegExpCEntryStub();
+
   static void CreateFixedStubs();
   static Object* CreateOddball(Map* map,
@@ -1533,6 +1522,91 @@ class GCTracer BASE_EMBEDDED {
  int previous_marked_count_;
};
class TranscendentalCache {
public:
enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
explicit TranscendentalCache(Type t);
// Returns a heap number with f(input), where f is a math function specified
// by the 'type' argument.
static inline Object* Get(Type type, double input) {
TranscendentalCache* cache = caches_[type];
if (cache == NULL) {
caches_[type] = cache = new TranscendentalCache(type);
}
return cache->Get(input);
}
// The cache contains raw Object pointers. This method disposes of
// them before a garbage collection.
static void Clear();
private:
inline Object* Get(double input) {
Converter c;
c.dbl = input;
int hash = Hash(c);
Element e = elements_[hash];
if (e.in[0] == c.integers[0] &&
e.in[1] == c.integers[1]) {
ASSERT(e.output != NULL);
return e.output;
}
double answer = Calculate(input);
Object* heap_number = Heap::AllocateHeapNumber(answer);
if (!heap_number->IsFailure()) {
elements_[hash].in[0] = c.integers[0];
elements_[hash].in[1] = c.integers[1];
elements_[hash].output = heap_number;
}
return heap_number;
}
inline double Calculate(double input) {
switch (type_) {
case ACOS:
return acos(input);
case ASIN:
return asin(input);
case ATAN:
return atan(input);
case COS:
return cos(input);
case EXP:
return exp(input);
case LOG:
return log(input);
case SIN:
return sin(input);
case TAN:
return tan(input);
default:
return 0.0; // Never happens.
}
}
static const int kCacheSize = 512;
struct Element {
uint32_t in[2];
Object* output;
};
union Converter {
double dbl;
uint32_t integers[2];
};
inline static int Hash(const Converter& c) {
uint32_t hash = (c.integers[0] ^ c.integers[1]);
hash ^= hash >> 16;
hash ^= hash >> 8;
return (hash & (kCacheSize - 1));
}
static TranscendentalCache* caches_[kNumberOfCaches];
Element elements_[kCacheSize];
Type type_;
};
} }  // namespace v8::internal
#endif  // V8_HEAP_H_
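IdleNotification is meant to be polled: it only reports true once it has forced a compacting collection and shrunk new space. A hypothetical embedder-side loop over the internal entry point (embedders would normally reach this through the public API rather than calling internals directly):

    // Keep notifying while the application is idle; each call may trigger a
    // collection, and the return value signals that idle cleanup has finished.
    while (!v8::internal::Heap::IdleNotification()) {
      // still idle; give the heap another round
    }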

37
deps/v8/src/ia32/builtins-ia32.cc

@@ -132,15 +132,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
     // Make sure that the maximum heap object size will never cause us
     // problem here, because it is always greater than the maximum
     // instance size that can be represented in a byte.
-    ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
-    ExternalReference new_space_allocation_top =
-        ExternalReference::new_space_allocation_top_address();
-    __ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
-    __ add(edi, Operand(ebx));  // Calculate new top
-    ExternalReference new_space_allocation_limit =
-        ExternalReference::new_space_allocation_limit_address();
-    __ cmp(edi, Operand::StaticVariable(new_space_allocation_limit));
-    __ j(above_equal, &rt_call);
+    ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
+    __ AllocateObjectInNewSpace(edi, ebx, edi, no_reg, &rt_call, false);
     // Allocated the JSObject, now initialize the fields.
     // eax: initial map
     // ebx: JSObject
@@ -165,15 +158,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
       __ j(less, &loop);
     }
-    // Mostly done with the JSObject. Add the heap tag and store the new top, so
-    // that we can continue and jump into the continuation code at any time from
-    // now on. Any failures need to undo the setting of the new top, so that the
-    // heap is in a consistent state and verifiable.
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
     // eax: initial map
     // ebx: JSObject
     // edi: start of next object
     __ or_(Operand(ebx), Immediate(kHeapObjectTag));
-    __ mov(Operand::StaticVariable(new_space_allocation_top), edi);
     // Check if a non-empty properties array is needed.
     // Allocate and initialize a FixedArray if it is.
@@ -198,10 +190,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
     // edx: number of elements in properties array
     ASSERT(Heap::MaxObjectSizeInPagedSpace() >
            (FixedArray::kHeaderSize + 255*kPointerSize));
-    __ lea(ecx, Operand(edi, edx, times_pointer_size, FixedArray::kHeaderSize));
-    __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
-    __ j(above_equal, &undo_allocation);
-    __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+    __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
+                                times_pointer_size,
+                                edx,
+                                edi,
+                                ecx,
+                                no_reg,
+                                &undo_allocation,
+                                true);
     // Initialize the FixedArray.
     // ebx: JSObject
@@ -245,8 +241,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
     // allocated objects unused properties.
     // ebx: JSObject (previous new top)
     __ bind(&undo_allocation);
-    __ xor_(Operand(ebx), Immediate(kHeapObjectTag));  // clear the heap tag
-    __ mov(Operand::StaticVariable(new_space_allocation_top), ebx);
+    __ UndoAllocationInNewSpace(ebx);
   }
   // Allocate the new receiver object using the runtime call.
@@ -669,7 +664,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ mov(ebp, Operand(esp));
   // Store the arguments adaptor context sentinel.
-  __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   // Push the function on the stack.
   __ push(edi);

42
deps/v8/src/ia32/codegen-ia32.cc

@@ -2139,7 +2139,8 @@ void CodeGenerator::CallApplyLazy(Property* apply,
   Label invoke, adapted;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+  __ cmp(Operand(ecx),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adapted);
   // No arguments adaptor frame. Copy fixed number of arguments.
@@ -4912,7 +4913,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
   __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Immediate(ArgumentsAdaptorFrame::SENTINEL));
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &check_frame_marker);
   __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
@@ -6947,21 +6948,18 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
                                              Register scratch1,
                                              Register scratch2,
                                              Register result) {
-  ExternalReference allocation_top =
-      ExternalReference::new_space_allocation_top_address();
-  ExternalReference allocation_limit =
-      ExternalReference::new_space_allocation_limit_address();
-  __ mov(Operand(scratch1), Immediate(allocation_top));
-  __ mov(result, Operand(scratch1, 0));
-  __ lea(scratch2, Operand(result, HeapNumber::kSize));  // scratch2: new top
-  __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
-  __ j(above, need_gc, not_taken);
-  __ mov(Operand(scratch1, 0), scratch2);  // store new top
+  // Allocate heap number in new space.
+  __ AllocateObjectInNewSpace(HeapNumber::kSize,
+                              result,
+                              scratch1,
+                              scratch2,
+                              need_gc,
+                              false);
+
+  // Set the map and tag the result.
   __ mov(Operand(result, HeapObject::kMapOffset),
          Immediate(Factory::heap_number_map()));
-  // Tag old top and use as result.
-  __ add(Operand(result), Immediate(kHeapObjectTag));
+  __ or_(Operand(result), Immediate(kHeapObjectTag));
 }
@@ -7109,7 +7107,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
   Label adaptor;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor);
   // Nothing to do: The formal number of parameters has already been
@@ -7141,7 +7139,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor);
   // Check index against formal parameters count limit passed in
@@ -7192,7 +7190,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   Label runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &runtime);
   // Patch the arguments.length and the parameters pointer.
@@ -7724,11 +7722,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   __ push(ebp);
   __ mov(ebp, Operand(esp));
-  // Save callee-saved registers (C calling conventions).
+  // Push marker in two places.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  // Push something that is not an arguments adaptor.
-  __ push(Immediate(~ArgumentsAdaptorFrame::SENTINEL));
-  __ push(Immediate(Smi::FromInt(marker)));  // @ function offset
+  __ push(Immediate(Smi::FromInt(marker)));  // context slot
+  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  // Save callee-saved registers (C calling conventions).
   __ push(edi);
   __ push(esi);
   __ push(ebx);

2
deps/v8/src/ia32/ic-ia32.cc

@@ -604,7 +604,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
   __ cmp(eax, FIRST_JS_OBJECT_TYPE);
-  __ j(less, &miss, not_taken);
+  __ j(below, &miss, not_taken);
   // If this assert fails, we have to check upper bound too.
   ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);

140
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -620,6 +620,146 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::LoadAllocationTopHelper(
Register result,
Register result_end,
Register scratch,
bool result_contains_top_on_entry) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Just return if allocation top is already known.
if (result_contains_top_on_entry) {
// No use of scratch if allocation top is provided.
ASSERT(scratch.is(no_reg));
return;
}
// Move address of new object to result. Use scratch register if available.
if (scratch.is(no_reg)) {
mov(result, Operand::StaticVariable(new_space_allocation_top));
} else {
ASSERT(!scratch.is(result_end));
mov(Operand(scratch), Immediate(new_space_allocation_top));
mov(result, Operand(scratch, 0));
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Update new top. Use scratch if available.
if (scratch.is(no_reg)) {
mov(Operand::StaticVariable(new_space_allocation_top), result_end);
} else {
mov(Operand(scratch, 0), result_end);
}
}
void MacroAssembler::AllocateObjectInNewSpace(
int object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
lea(result_end, Operand(result, object_size));
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::AllocateObjectInNewSpace(
int header_size,
ScaleFactor element_size,
Register element_count,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
lea(result_end, Operand(result, element_count, element_size, header_size));
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::AllocateObjectInNewSpace(
Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
add(result_end, Operand(result));
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Make sure the object has no tag before resetting top.
and_(Operand(object), Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
Check(below, "Undo allocation of non allocated memory");
#endif
mov(Operand::StaticVariable(new_space_allocation_top), object);
}
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
                                      Register result,
                                      Register op,

49
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -183,6 +183,48 @@ class MacroAssembler: public Assembler {
                             Label* miss);
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space. If the new space is exhausted control
// continues at the gc_required label. The allocated object is returned in
// result and end of the new object is returned in result_end. The register
// scratch can be passed as no_reg in which case an additional object
// reference will be added to the reloc info. The returned pointers in result
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
// AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
void AllocateObjectInNewSpace(int object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
void AllocateObjectInNewSpace(int header_size,
ScaleFactor element_size,
Register element_count,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
void AllocateObjectInNewSpace(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
// object(s) no longer allocated as they would be invalid when allocation is
// un-done.
void UndoAllocationInNewSpace(Register object);
  // ---------------------------------------------------------------------------
  // Support functions.
@@ -303,6 +345,13 @@ class MacroAssembler: public Assembler {
  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register result_end,
Register scratch,
bool result_contains_top_on_entry);
void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
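For reference, a representative fixed-size call mirroring how FloatingPointHelper::AllocateHeapNumber now uses the helper (register choices and the label are illustrative, not from the diff):

    Label gc_required;
    // Bump-allocate HeapNumber::kSize bytes in new space; jump to gc_required
    // without moving the allocation top if the space is exhausted.
    masm->AllocateObjectInNewSpace(HeapNumber::kSize, eax, ebx, no_reg,
                                   &gc_required, false);
    masm->or_(Operand(eax), Immediate(kHeapObjectTag));  // tag the raw pointer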

41
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -102,6 +102,7 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
     success_label_(),
     backtrack_label_(),
     exit_label_() {
+  ASSERT_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
   __ bind(&start_label_);  // And then continue from here.
 }
@@ -337,8 +338,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
     __ add(edx, Operand(esi));
     __ mov(Operand(esp, 0 * kPointerSize), edx);
-    Address function_address = FUNCTION_ADDR(&CaseInsensitiveCompareUC16);
-    CallCFunction(function_address, argument_count);
+    ExternalReference compare =
+        ExternalReference::re_case_insensitive_compare_uc16();
+    CallCFunction(compare, argument_count);
     // Pop original values before reacting on result value.
     __ pop(ebx);
     __ pop(backtrack_stackpointer());
@@ -745,7 +747,8 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
     __ lea(eax, Operand(ebp, kStackHighEnd));
     __ mov(Operand(esp, 1 * kPointerSize), eax);
     __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
-    CallCFunction(FUNCTION_ADDR(&GrowStack), num_arguments);
+    ExternalReference grow_stack = ExternalReference::re_grow_stack();
+    CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
     __ or_(eax, Operand(eax));
@@ -817,7 +820,9 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
                                                     int characters) {
   ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
   ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
-  CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
@@ -913,7 +918,9 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
   // Next address on the stack (will be address of return address).
   __ lea(eax, Operand(esp, -kPointerSize));
   __ mov(Operand(esp, 0 * kPointerSize), eax);
-  CallCFunction(FUNCTION_ADDR(&CheckStackGuardState), num_arguments);
+  ExternalReference check_stack_guard =
+      ExternalReference::re_check_stack_guard_state();
+  CallCFunction(check_stack_guard, num_arguments);
 }
@@ -996,22 +1003,6 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
 }
-Address RegExpMacroAssemblerIA32::GrowStack(Address stack_pointer,
-                                            Address* stack_base) {
-  size_t size = RegExpStack::stack_capacity();
-  Address old_stack_base = RegExpStack::stack_base();
-  ASSERT(old_stack_base == *stack_base);
-  ASSERT(stack_pointer <= old_stack_base);
-  ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
-  Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
-  if (new_stack_base == NULL) {
-    return NULL;
-  }
-  *stack_base = new_stack_base;
-  return new_stack_base - (old_stack_base - stack_pointer);
-}
 Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
   ASSERT(register_index < (1<<30));
   if (num_registers_ <= register_index) {
@@ -1135,9 +1126,9 @@ void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) {
 }
-void RegExpMacroAssemblerIA32::CallCFunction(Address function_address,
+void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function,
                                              int num_arguments) {
-  __ mov(Operand(eax), Immediate(reinterpret_cast<int32_t>(function_address)));
+  __ mov(Operand(eax), Immediate(function));
   __ call(Operand(eax));
   if (OS::ActivationFrameAlignment() != 0) {
     __ mov(esp, Operand(esp, num_arguments * kPointerSize));
@@ -1172,6 +1163,10 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
 }
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+  __ int3();  // Unused on ia32.
+}
 #undef __
 #endif  // V8_NATIVE_REGEXP
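Routing these C calls through ExternalReference rather than raw FUNCTION_ADDR pointers presumably keeps them registered centrally (so serialization and simulated targets such as ARM can redirect them); the emitted call shape itself is unchanged. The pattern, repeated at all three call sites above (helper name taken from the diff):

    ExternalReference grow_stack = ExternalReference::re_grow_stack();
    __ mov(Operand(eax), Immediate(grow_stack));
    __ call(Operand(eax));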

23
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@@ -107,6 +107,13 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+
  private:
   // Offsets from ebp of function parameters and stored registers.
   static const int kFramePointer = 0;
@@ -144,23 +151,9 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
   // Check whether we are exceeding the stack limit on the backtrack stack.
   void CheckStackLimit();
-  // Called from RegExp if the stack-guard is triggered.
-  // If the code object is relocated, the return address is fixed before
-  // returning.
-  static int CheckStackGuardState(Address* return_address,
-                                  Code* re_code,
-                                  Address re_frame);
-
   // Generate a call to CheckStackGuardState.
   void CallCheckStackGuardState(Register scratch);
-  // Called from RegExp if the backtrack stack limit is hit.
-  // Tries to expand the stack. Returns the new stack-pointer if
-  // successful, and updates the stack_top address, or returns 0 if unable
-  // to grow the stack.
-  // This function must not trigger a garbage collection.
-  static Address GrowStack(Address stack_pointer, Address* stack_top);
-
   // The ebp-relative location of a regexp register.
   Operand register_location(int register_index);
@@ -209,7 +202,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
   // by FrameAlign. The called function is not allowed to trigger a garbage
   // collection, since that might move the code and invalidate the return
   // address (unless this is somehow accounted for).
-  inline void CallCFunction(Address function_address, int num_arguments);
+  inline void CallCFunction(ExternalReference function, int num_arguments);
   MacroAssembler* masm_;

5
deps/v8/src/ia32/simulator-ia32.h

@@ -44,4 +44,9 @@
   (reinterpret_cast<uintptr_t>(this) >= limit ? \
    reinterpret_cast<uintptr_t>(this) - limit : 0)
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
 #endif  // V8_IA32_SIMULATOR_IA32_H_
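On ia32 the macro expands to a direct call; the parallel ARM header presumably routes the same macro through the simulator, which is why regexp entry goes through a macro at all. A hypothetical use (entry_function and p0..p6 are placeholders for the seven int/pointer-sized arguments the generated code expects):

    // Expands to entry_function(p0, ..., p6) on ia32.
    int result = CALL_GENERATED_REGEXP_CODE(entry_function,
                                            p0, p1, p2, p3, p4, p5, p6);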

129
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -1740,6 +1740,135 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
// Specialized stub for constructing objects from functions which only have
// simple assignments of the form this.x = ...; in their body.
Object* ConstructStubCompiler::CompileConstructStub(
SharedFunctionInfo* shared) {
// ----------- S t a t e -------------
// -- eax : argc
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
Label generic_stub_call;
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check to see whether there are any break points in the function code. If
// there are jump to the generic constructor stub which calls the actual
// code for the function thereby hitting the break points.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
__ cmp(ebx, Factory::undefined_value());
__ j(not_equal, &generic_stub_call, not_taken);
#endif
// Load the initial map and verify that it is in fact a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &generic_stub_call);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
__ j(not_equal, &generic_stub_call);
#ifdef DEBUG
// Cannot construct functions this way.
// edi: constructor
// ebx: initial map
__ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
__ Assert(not_equal, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject on the heap by moving the new space allocation
// top forward.
// edi: constructor
// ebx: initial map
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
// Make sure that the maximum heap object size will never cause us
// problems here.
ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
__ AllocateObjectInNewSpace(ecx, edx, ecx, no_reg, &generic_stub_call, false);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
// edx: JSObject (untagged)
__ mov(Operand(edx, JSObject::kMapOffset), ebx);
__ mov(ebx, Factory::empty_fixed_array());
__ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
__ mov(Operand(edx, JSObject::kElementsOffset), ebx);
// Push the allocated object to the stack. This is the object that will be
// returned (after it is tagged).
__ push(edx);
// eax: argc
// edx: JSObject (untagged)
// Load the address of the first in-object property into edx.
__ lea(edx, Operand(edx, JSObject::kHeaderSize));
// Calculate the location of the first argument. The stack contains the
// allocated object and the return address on top of the argc arguments.
__ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
// Use edi for holding undefined which is used in several places below.
__ mov(edi, Factory::undefined_value());
// eax: argc
// ecx: first argument
// edx: first in-object property of the JSObject
// edi: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed;
// Set the property to undefined.
__ mov(Operand(edx, i * kPointerSize), edi);
// Check if the argument assigned to the property is actually passed.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
__ cmp(eax, arg_number);
__ j(below_equal, &not_passed);
// Argument passed - find it on the stack.
__ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
__ mov(Operand(edx, i * kPointerSize), ebx);
__ bind(&not_passed);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
__ mov(Operand(edx, i * kPointerSize), Immediate(constant));
}
}
// Fill the unused in-object property fields with undefined.
for (int i = shared->this_property_assignments_count();
i < shared->CalculateInObjectProperties();
i++) {
__ mov(Operand(edx, i * kPointerSize), edi);
}
// Move argc to ebx and retrieve and tag the JSObject to return.
__ mov(ebx, eax);
__ pop(eax);
__ or_(Operand(eax), Immediate(kHeapObjectTag));
// Remove caller arguments and receiver from the stack and return.
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
__ push(ecx);
__ IncrementCounter(&Counters::constructed_objects, 1);
__ IncrementCounter(&Counters::constructed_objects_stub, 1);
__ ret(0);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
}
#undef __ #undef __
} } // namespace v8::internal } } // namespace v8::internal
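The stub's argument addressing is terse; modeled in plain C++ under the stack layout stated above (the untagged object pushed on top of the return address and the argc arguments, with the last argument highest on the stack), the offsets work out like this. Names here are illustrative, not V8 API:

    const int kPtr = 4;  // ia32 pointer size

    // Mirrors lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize)):
    // skip the pushed object and return address plus argc argument slots.
    unsigned FirstArgAddress(unsigned esp, int argc) {
      return esp + argc * kPtr + 1 * kPtr;
    }

    // Mirrors mov(ebx, Operand(ecx, arg_number * -kPointerSize)):
    // arguments were pushed left to right, so argument n sits n slots
    // below the first argument.
    unsigned ArgAddress(unsigned first_arg, int arg_number) {
      return first_arg - arg_number * kPtr;
    }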

50
deps/v8/src/jsregexp.cc

@@ -51,6 +51,7 @@
 #include "x64/macro-assembler-x64.h"
 #include "x64/regexp-macro-assembler-x64.h"
 #elif V8_TARGET_ARCH_ARM
+#include "arm/macro-assembler-arm.h"
 #include "arm/regexp-macro-assembler-arm.h"
 #else
 #error Unsupported target architecture.
@@ -419,9 +420,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
   Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
 
 #ifdef V8_NATIVE_REGEXP
-#ifdef V8_TARGET_ARCH_ARM
-  UNIMPLEMENTED();
-#else  // Native regexp supported.
+
   OffsetsVector captures(number_of_capture_registers);
   int* captures_vector = captures.vector();
   NativeRegExpMacroAssembler::Result res;
@@ -455,9 +454,9 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
     SetCapture(*array, i, captures_vector[i]);
     SetCapture(*array, i + 1, captures_vector[i + 1]);
   }
-#endif  // Native regexp supported.
 
 #else  // ! V8_NATIVE_REGEXP
+
   bool is_ascii = subject->IsAsciiRepresentation();
   if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
     return Handle<Object>::null();
@@ -487,6 +486,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
     SetCapture(*array, i, register_vector[i]);
     SetCapture(*array, i + 1, register_vector[i + 1]);
   }
+
 #endif  // V8_NATIVE_REGEXP
 
   SetLastCaptureCount(*array, number_of_capture_registers);
@@ -1723,6 +1723,8 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
   GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE);
   if (details->cannot_match()) return false;
   if (!details->Rationalize(compiler->ascii())) return false;
+  ASSERT(details->characters() == 1 ||
+         compiler->macro_assembler()->CanReadUnaligned());
   uint32_t mask = details->mask();
   uint32_t value = details->value();
@@ -2522,20 +2524,20 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
 int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler) {
   int preload_characters = EatsAtLeast(4, 0);
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-  bool ascii = compiler->ascii();
-  if (ascii) {
-    if (preload_characters > 4) preload_characters = 4;
-    // We can't preload 3 characters because there is no machine instruction
-    // to do that. We can't just load 4 because we could be reading
-    // beyond the end of the string, which could cause a memory fault.
-    if (preload_characters == 3) preload_characters = 2;
+  if (compiler->macro_assembler()->CanReadUnaligned()) {
+    bool ascii = compiler->ascii();
+    if (ascii) {
+      if (preload_characters > 4) preload_characters = 4;
+      // We can't preload 3 characters because there is no machine instruction
+      // to do that. We can't just load 4 because we could be reading
+      // beyond the end of the string, which could cause a memory fault.
+      if (preload_characters == 3) preload_characters = 2;
+    } else {
+      if (preload_characters > 2) preload_characters = 2;
+    }
   } else {
-    if (preload_characters > 2) preload_characters = 2;
+    if (preload_characters > 1) preload_characters = 1;
   }
-#else
-  if (preload_characters > 1) preload_characters = 1;
-#endif
   return preload_characters;
 }
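The 3-to-2 rounding exists because preloaded characters are fetched with a single 1-, 2-, or 4-byte load; there is no 3-byte machine load, and widening to 4 bytes could touch memory past the end of the subject string. A sketch of the load-size constraint (illustrative only, not V8's emitted code):

    #include <cstdint>
    #include <cstring>

    uint32_t PreloadAscii(const uint8_t* cursor, int preload_characters) {
      uint32_t chunk = 0;
      switch (preload_characters) {
        case 1: chunk = cursor[0]; break;               // 8-bit load
        case 2: std::memcpy(&chunk, cursor, 2); break;  // 16-bit load
        case 4: std::memcpy(&chunk, cursor, 4); break;  // 32-bit load
        // 3 never occurs: CalculatePreloadCharacters rounds it down to 2.
      }
      return chunk;  // masked and compared against the quick-check value
    }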
@@ -4470,16 +4472,12 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
       is_ascii ? NativeRegExpMacroAssembler::ASCII
                : NativeRegExpMacroAssembler::UC16;
 
-#ifdef V8_TARGET_ARCH_IA32
-  RegExpMacroAssemblerIA32 macro_assembler(mode,
-                                           (data->capture_count + 1) * 2);
-#endif
-#ifdef V8_TARGET_ARCH_X64
-  RegExpMacroAssemblerX64 macro_assembler(mode,
-                                          (data->capture_count + 1) * 2);
-#endif
-#ifdef V8_TARGET_ARCH_ARM
-  UNIMPLEMENTED();
+#if V8_TARGET_ARCH_IA32
+  RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_X64
+  RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_ARM
+  RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
 #endif
 
 #else  // ! V8_NATIVE_REGEXP

2
deps/v8/src/mark-compact.cc

@@ -76,7 +76,7 @@ void MarkCompactCollector::CollectGarbage() {
 
   SweepLargeObjectSpace();
 
-  if (compacting_collection_) {
+  if (IsCompacting()) {
     EncodeForwardingAddresses();
 
     UpdatePointers();

2
deps/v8/src/messages.js

@@ -163,7 +163,7 @@ function FormatMessage(message) {
       illegal_break:            "Illegal break statement",
       illegal_continue:         "Illegal continue statement",
       illegal_return:           "Illegal return statement",
-      error_loading_debugger:   "Error loading debugger %0",
+      error_loading_debugger:   "Error loading debugger",
       no_input_to_regexp:       "No input to %0",
       result_not_primitive:     "Result of %0 must be a primitive, was %1",
       invalid_json:             "String '%0' is not valid JSON",

2
deps/v8/src/objects-debug.cc

@@ -463,6 +463,8 @@ void Map::MapPrint() {
   PrintF(" - type: %s\n", TypeToString(instance_type()));
   PrintF(" - instance size: %d\n", instance_size());
   PrintF(" - inobject properties: %d\n", inobject_properties());
+  PrintF(" - pre-allocated property fields: %d\n",
+         pre_allocated_property_fields());
   PrintF(" - unused property fields: %d\n", unused_property_fields());
   if (is_hidden_prototype()) {
     PrintF(" - hidden_prototype\n");

12
deps/v8/src/objects-inl.h

@@ -131,7 +131,7 @@ bool Object::IsSmi() {
 
 bool Object::IsHeapObject() {
-  return HAS_HEAP_OBJECT_TAG(this);
+  return Internals::HasHeapObjectTag(this);
 }
 
@@ -300,6 +300,10 @@ uint32_t StringShape::full_representation_tag() {
 }
 
+STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
+             Internals::kFullStringRepresentationMask);
+
 uint32_t StringShape::size_tag() {
   return (type_ & kStringSizeMask);
 }
 
@@ -325,6 +329,10 @@ bool StringShape::IsExternalTwoByte() {
 }
 
+STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
+             Internals::kExternalTwoByteRepresentationTag);
+
 uc32 FlatStringReader::Get(int index) {
   ASSERT(0 <= index && index <= length_);
   if (is_ascii_) {
@@ -730,7 +738,7 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
 
 int Smi::value() {
-  return static_cast<int>(reinterpret_cast<intptr_t>(this)) >> kSmiTagSize;
+  return Internals::SmiValue(this);
 }
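Internals::SmiValue in the public v8.h performs the same decode the removed inline did, so the embedder API can read Smis without pulling in internal headers. A sketch of the 32-bit Smi encoding this relies on (constants shown for the ia32 configuration):

    #include <cstdint>

    const int kSmiTagSize = 1;  // low bit 0 marks a Smi

    intptr_t EncodeSmi(int value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }

    int DecodeSmi(intptr_t raw) {
      // Arithmetic shift restores the sign; this mirrors SmiValue.
      return static_cast<int>(raw >> kSmiTagSize);
    }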

53
deps/v8/src/objects.cc

@@ -2923,6 +2923,20 @@ Object* Map::CopyDropDescriptors() {
   // Please note instance_type and instance_size are set when allocated.
   Map::cast(result)->set_inobject_properties(inobject_properties());
   Map::cast(result)->set_unused_property_fields(unused_property_fields());
+
+  // If the map has pre-allocated properties always start out with a descriptor
+  // array describing these properties.
+  if (pre_allocated_property_fields() > 0) {
+    ASSERT(constructor()->IsJSFunction());
+    JSFunction* ctor = JSFunction::cast(constructor());
+    Object* descriptors =
+        ctor->initial_map()->instance_descriptors()->RemoveTransitions();
+    if (descriptors->IsFailure()) return descriptors;
+    Map::cast(result)->set_instance_descriptors(
+        DescriptorArray::cast(descriptors));
+    Map::cast(result)->set_pre_allocated_property_fields(
+        pre_allocated_property_fields());
+  }
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
   Map::cast(result)->ClearCodeCache();
@@ -4800,7 +4814,6 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
     bool only_this_property_assignments,
     bool only_simple_this_property_assignments,
     FixedArray* assignments) {
-  ASSERT(this_property_assignments()->IsUndefined());
   set_compiler_hints(BooleanBit::set(compiler_hints(),
                                      kHasOnlyThisPropertyAssignments,
                                      only_this_property_assignments));
@@ -4812,6 +4825,18 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
 }
 
+void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlyThisPropertyAssignments,
+                                     false));
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlySimpleThisPropertyAssignments,
+                                     false));
+  set_this_property_assignments(Heap::undefined_value());
+  set_this_property_assignments_count(0);
+}
+
 String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
   Object* obj = this_property_assignments();
   ASSERT(obj->IsFixedArray());
@@ -4822,6 +4847,32 @@ String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
 }
 
+bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
+  Object* obj = this_property_assignments();
+  ASSERT(obj->IsFixedArray());
+  ASSERT(index < this_property_assignments_count());
+  obj = FixedArray::cast(obj)->get(index * 3 + 1);
+  return Smi::cast(obj)->value() != -1;
+}
+
+int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
+  ASSERT(IsThisPropertyAssignmentArgument(index));
+  Object* obj =
+      FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
+  return Smi::cast(obj)->value();
+}
+
+Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
+  ASSERT(!IsThisPropertyAssignmentArgument(index));
+  Object* obj =
+      FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
+  return obj;
+}
+
 // Support function for printing the source code to a StringStream
 // without any allocation in the heap.
 void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
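These accessors imply the layout of the this_property_assignments FixedArray: one triplet per assignment, holding the property name, an argument index (stored as a Smi, -1 when the assignment is not argument-backed), and otherwise the constant. A hedged model of the convention (struct and names illustrative):

    // Slot layout per assignment i in the FixedArray:
    //   i * 3 + 0 : property name
    //   i * 3 + 1 : argument index as a Smi, or -1
    //   i * 3 + 2 : constant value, used when the index is -1
    struct Assignment {   // illustrative mirror of one triplet
      const char* name;
      int arg_index;      // -1 => constant-backed
      int constant;
    };

    bool IsArgumentAssignment(const Assignment& a) {
      return a.arg_index != -1;
    }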

18
deps/v8/src/objects.h

@@ -1234,6 +1234,8 @@ class HeapObject: public Object {
   static const int kMapOffset = Object::kHeaderSize;
   static const int kHeaderSize = kMapOffset + kPointerSize;
 
+  STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset);
+
  protected:
   // helpers for calling an ObjectVisitor to iterate over pointers in the
   // half-open range [start, end) specified as integer offsets
@@ -1664,6 +1666,8 @@ class JSObject: public HeapObject {
   static const int kElementsOffset = kPropertiesOffset + kPointerSize;
   static const int kHeaderSize = kElementsOffset + kPointerSize;
 
+  STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
+
   Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
 
  private:
@@ -2631,7 +2635,7 @@ class Code: public HeapObject {
   int ExecutableSize() {
     // Check that the assumptions about the layout of the code object holds.
     ASSERT_EQ(instruction_start() - address(),
-              Code::kHeaderSize);
+              static_cast<intptr_t>(Code::kHeaderSize));
     return instruction_size() + Code::kHeaderSize;
   }
@@ -2897,6 +2901,8 @@ class Map: public HeapObject {
   static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
   static const int kBitField2Offset = kInstanceAttributesOffset + 3;
 
+  STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
+
   // Bit positions for bit field.
   static const int kUnused = 0;  // To be used for marking recently used maps.
   static const int kHasNonInstancePrototype = 1;
@@ -3108,6 +3114,9 @@ class SharedFunctionInfo: public HeapObject {
       bool has_only_simple_this_property_assignments,
       FixedArray* this_property_assignments);
 
+  // Clear information on assignments of the form this.x = ...;
+  void ClearThisPropertyAssignmentsInfo();
+
   // Indicate that this function only consists of assignments of the form
   // this.x = ...;.
   inline bool has_only_this_property_assignments();
@@ -3122,6 +3131,9 @@ class SharedFunctionInfo: public HeapObject {
   inline int this_property_assignments_count();
   inline void set_this_property_assignments_count(int value);
   String* GetThisPropertyAssignmentName(int index);
+  bool IsThisPropertyAssignmentArgument(int index);
+  int GetThisPropertyAssignmentArgument(int index);
+  Object* GetThisPropertyAssignmentConstant(int index);
 
   // [source code]: Source code for the function.
   bool HasSourceCode();
@@ -4128,6 +4140,8 @@ class ExternalString: public String {
   static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
   static const int kSize = kResourceOffset + kPointerSize;
 
+  STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
 };
@@ -4341,6 +4355,8 @@ class Proxy: public HeapObject {
   static const int kProxyOffset = HeapObject::kHeaderSize;
   static const int kSize = kProxyOffset + kPointerSize;
 
+  STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
 };

9
deps/v8/src/parser.cc

@@ -2397,12 +2397,6 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
 
-  // We do not allow the use of 'with' statements in the internal JS
-  // code. If 'with' statements were allowed, the simplified setup of
-  // the runtime context chain would allow access to properties in the
-  // global object from within a 'with' statement.
-  ASSERT(extension_ != NULL || !Bootstrapper::IsActive());
-
   Expect(Token::WITH, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   Expression* expr = ParseExpression(true, CHECK_OK);
@@ -3088,9 +3082,6 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
         Handle<String> name = callee->name();
         Variable* var = top_scope_->Lookup(name);
         if (var == NULL) {
-          // We do not allow direct calls to 'eval' in our internal
-          // JS files. Use builtin functions instead.
-          ASSERT(extension_ != NULL || !Bootstrapper::IsActive());
           top_scope_->RecordEvalCall();
           is_potentially_direct_eval = true;
         }

7
deps/v8/src/platform-win32.cc

@@ -1317,8 +1317,11 @@ double OS::nan_value() {
 
 int OS::ActivationFrameAlignment() {
-  // Floating point code runs faster if the stack is 8-byte aligned.
-  return 8;
+#ifdef _WIN64
+  return 16;  // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#else
+  return 8;   // Floating-point math runs faster with 8-byte alignment.
+#endif
 }
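Callers round the stack pointer down to this value before entering C code; since both 8 and 16 are powers of two, that is a single mask. A quick model of the rounding (names illustrative):

    #include <cstdint>

    // Round sp down to a power-of-two activation frame alignment
    // (8 on 32-bit Windows, 16 under the _WIN64 ABI).
    uintptr_t AlignStackPointer(uintptr_t sp, uintptr_t alignment) {
      return sp & ~(alignment - 1);
    }
    // Example: AlignStackPointer(0x1003f, 16) == 0x10030.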

2
deps/v8/src/regexp-macro-assembler-irregexp-inl.h

@@ -38,6 +38,7 @@
 namespace v8 {
 namespace internal {
 
+#ifndef V8_NATIVE_REGEXP
 
 void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
                                         uint32_t twenty_four_bits) {
@@ -70,6 +71,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
   pc_ += 4;
 }
 
+#endif  // ! V8_NATIVE_REGEXP
 
 } }  // namespace v8::internal

2
deps/v8/src/regexp-macro-assembler-irregexp.cc

@@ -36,6 +36,7 @@
 namespace v8 {
 namespace internal {
 
+#ifndef V8_NATIVE_REGEXP
 
 RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
     : buffer_(buffer),
@@ -458,5 +459,6 @@ void RegExpMacroAssemblerIrregexp::Expand() {
   }
 }
 
+#endif  // !V8_NATIVE_REGEXP
 
 } }  // namespace v8::internal

3
deps/v8/src/regexp-macro-assembler-irregexp.h

@@ -31,6 +31,7 @@
 namespace v8 {
 namespace internal {
 
+#ifndef V8_NATIVE_REGEXP
 
 class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
  public:
@@ -133,6 +134,8 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
   DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
 };
 
+#endif  // !V8_NATIVE_REGEXP
+
 } }  // namespace v8::internal
 
 #endif  // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_

2
deps/v8/src/regexp-macro-assembler-tracer.h

@@ -37,7 +37,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
   explicit RegExpMacroAssemblerTracer(RegExpMacroAssembler* assembler);
   virtual ~RegExpMacroAssemblerTracer();
   virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
+  virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
   virtual void AdvanceCurrentPosition(int by);  // Signed cp change.
   virtual void AdvanceRegister(int reg, int by);  // r[reg] += by.
   virtual void Backtrack();

57
deps/v8/src/regexp-macro-assembler.cc

@@ -30,6 +30,13 @@
 #include "assembler.h"
 #include "regexp-stack.h"
 #include "regexp-macro-assembler.h"
+#if V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_IA32
+#include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#endif
 
 namespace v8 {
 namespace internal {
@@ -42,6 +49,15 @@ RegExpMacroAssembler::~RegExpMacroAssembler() {
 }
 
+bool RegExpMacroAssembler::CanReadUnaligned() {
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+  return true;
+#else
+  return false;
+#endif
+}
+
 #ifdef V8_NATIVE_REGEXP  // Avoid unused code, e.g., on ARM.
 
 NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
@@ -51,6 +67,15 @@ NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
 NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
 }
 
+bool NativeRegExpMacroAssembler::CanReadUnaligned() {
+#ifdef V8_TARGET_CAN_READ_UNALIGNED
+  return true;
+#else
+  return false;
+#endif
+}
+
 const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
     String* subject,
     int start_index) {
@@ -162,13 +187,14 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
   RegExpStack stack;
   Address stack_base = RegExpStack::stack_base();
 
-  int result = matcher_func(input,
-                            start_offset,
-                            input_start,
-                            input_end,
-                            output,
-                            at_start_val,
-                            stack_base);
+  int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
+                                          input,
+                                          start_offset,
+                                          input_start,
+                                          input_end,
+                                          output,
+                                          at_start_val,
+                                          stack_base);
 
   ASSERT(result <= SUCCESS);
   ASSERT(result >= RETRY);
@@ -213,5 +239,22 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
   return 1;
 }
 
+Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
+                                              Address* stack_base) {
+  size_t size = RegExpStack::stack_capacity();
+  Address old_stack_base = RegExpStack::stack_base();
+  ASSERT(old_stack_base == *stack_base);
+  ASSERT(stack_pointer <= old_stack_base);
+  ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
+  Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
+  if (new_stack_base == NULL) {
+    return NULL;
+  }
+  *stack_base = new_stack_base;
+  intptr_t stack_content_size = old_stack_base - stack_pointer;
+  return new_stack_base - stack_content_size;
+}
+
 #endif  // V8_NATIVE_REGEXP
 
 } }  // namespace v8::internal
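GrowStack returns a rebased stack pointer because doubling the backtrack stack may move it; generated code must continue through the translated pointer and the updated stack_base out-parameter. A plain-C++ model of that contract (allocation bookkeeping for the old block elided; names illustrative):

    #include <cstdlib>
    #include <cstring>

    // The backtrack stack grows downward from *stack_base.
    unsigned char* GrowBacktrackStack(unsigned char* stack_pointer,
                                      unsigned char** stack_base,
                                      size_t* capacity) {
      size_t live = *stack_base - stack_pointer;  // bytes currently in use
      size_t new_capacity = *capacity * 2;
      unsigned char* block =
          static_cast<unsigned char*>(std::malloc(new_capacity));
      if (block == NULL) return NULL;  // caller reports stack overflow
      unsigned char* new_base = block + new_capacity;
      std::memcpy(new_base - live, stack_pointer, live);  // keep contents
      *stack_base = new_base;          // updated through the out-param
      *capacity = new_capacity;
      return new_base - live;          // rebased stack pointer
    }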

27
deps/v8/src/regexp-macro-assembler.h

@@ -61,6 +61,7 @@ class RegExpMacroAssembler {
   // kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
   // at least once for every stack_limit() pushes that are executed.
   virtual int stack_limit_slack() = 0;
+  virtual bool CanReadUnaligned();
   virtual void AdvanceCurrentPosition(int by) = 0;  // Signed cp change.
   virtual void AdvanceRegister(int reg, int by) = 0;  // r[reg] += by.
   // Continues execution from the position pushed on the top of the backtrack
@@ -182,6 +183,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
   NativeRegExpMacroAssembler();
   virtual ~NativeRegExpMacroAssembler();
+  virtual bool CanReadUnaligned();
 
   static Result Match(Handle<Code> regexp,
                       Handle<String> subject,
@@ -195,6 +197,13 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
       Address byte_offset2,
       size_t byte_length);
 
+  // Called from RegExp if the backtrack stack limit is hit.
+  // Tries to expand the stack. Returns the new stack-pointer if
+  // successful, and updates the stack_top address, or returns 0 if unable
+  // to grow the stack.
+  // This function must not trigger a garbage collection.
+  static Address GrowStack(Address stack_pointer, Address* stack_top);
+
   static const byte* StringCharacterPosition(String* subject, int start_index);
 
   static Result Execute(Code* code,
@@ -205,7 +214,25 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
                         int* output,
                         bool at_start);
 };
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+  RegExpCEntryStub() {}
+  virtual ~RegExpCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return RegExpCEntry; }
+  int MinorKey() { return 0; }
+  const char* GetName() { return "RegExpCEntryStub"; }
+};
+
 #endif  // V8_NATIVE_REGEXP
 
 } }  // namespace v8::internal
 
 #endif  // V8_REGEXP_MACRO_ASSEMBLER_H_

63
deps/v8/src/runtime.cc

@@ -45,6 +45,7 @@
 #include "v8threads.h"
 #include "smart-pointer.h"
 #include "parser.h"
+#include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -1235,6 +1236,9 @@ static Object* Runtime_SetCode(Arguments args) {
     // Array, and Object, and some web code
     // doesn't like seeing source code for constructors.
     target->shared()->set_script(Heap::undefined_value());
+    // Clear the optimization hints related to the compiled code as these are no
+    // longer valid when the code is overwritten.
+    target->shared()->ClearThisPropertyAssignmentsInfo();
     context = Handle<Context>(fun->context());
 
     // Make sure we get a fresh copy of the literal vector to avoid
@@ -4054,7 +4058,7 @@ static Object* Runtime_Math_acos(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(acos(x));
+  return TranscendentalCache::Get(TranscendentalCache::ACOS, x);
 }
 
@@ -4063,7 +4067,7 @@ static Object* Runtime_Math_asin(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(asin(x));
+  return TranscendentalCache::Get(TranscendentalCache::ASIN, x);
 }
 
@@ -4072,7 +4076,7 @@ static Object* Runtime_Math_atan(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(atan(x));
+  return TranscendentalCache::Get(TranscendentalCache::ATAN, x);
 }
 
@@ -4113,7 +4117,7 @@ static Object* Runtime_Math_cos(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(cos(x));
+  return TranscendentalCache::Get(TranscendentalCache::COS, x);
 }
 
@@ -4122,7 +4126,7 @@ static Object* Runtime_Math_exp(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(exp(x));
+  return TranscendentalCache::Get(TranscendentalCache::EXP, x);
 }
 
@@ -4140,7 +4144,7 @@ static Object* Runtime_Math_log(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(log(x));
+  return TranscendentalCache::Get(TranscendentalCache::LOG, x);
 }
 
@@ -4228,7 +4232,7 @@ static Object* Runtime_Math_sin(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(sin(x));
+  return TranscendentalCache::Get(TranscendentalCache::SIN, x);
 }
 
@@ -4246,7 +4250,7 @@ static Object* Runtime_Math_tan(Arguments args) {
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(tan(x));
+  return TranscendentalCache::Get(TranscendentalCache::TAN, x);
 }
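TranscendentalCache::Get (defined in heap.h/heap.cc, outside this diff) memoizes recent results instead of recomputing and allocating a fresh HeapNumber on every call. As a rough sketch of the idea only, with table size and hashing invented here, a direct-mapped cache keyed on the input's bit pattern:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    struct Entry { uint64_t key; double value; bool valid; };
    const int kCacheSize = 512;          // invented size
    static Entry sin_cache[kCacheSize];  // one table per operation

    double CachedSin(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);  // hash on the raw double bits
      Entry& e = sin_cache[bits % kCacheSize];
      if (e.valid && e.key == bits) return e.value;  // hit: skip recompute
      e.key = bits;
      e.value = std::sin(x);
      e.valid = true;
      return e.value;
    }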
@@ -4326,11 +4330,21 @@ static Object* Runtime_NewClosure(Arguments args) {
 }
 
-static Handle<Code> ComputeConstructStub(Handle<Map> map) {
+static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
   // TODO(385): Change this to create a construct stub specialized for
   // the given map to make allocation of simple objects - and maybe
   // arrays - much faster.
-  return Handle<Code>(Builtins::builtin(Builtins::JSConstructStubGeneric));
+  if (FLAG_inline_new
+      && shared->has_only_simple_this_property_assignments()) {
+    ConstructStubCompiler compiler;
+    Object* code = compiler.CompileConstructStub(*shared);
+    if (code->IsFailure()) {
+      return Builtins::builtin(Builtins::JSConstructStubGeneric);
+    }
+    return Code::cast(code);
+  }
+
+  return Builtins::builtin(Builtins::JSConstructStubGeneric);
 }
 
@@ -4373,15 +4387,25 @@ static Object* Runtime_NewObject(Arguments args) {
     }
   }
 
+  // The function should be compiled for the optimization hints to be available.
+  if (!function->shared()->is_compiled()) {
+    CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
+                      CLEAR_EXCEPTION,
+                      0);
+  }
+
   bool first_allocation = !function->has_initial_map();
   Handle<JSObject> result = Factory::NewJSObject(function);
   if (first_allocation) {
     Handle<Map> map = Handle<Map>(function->initial_map());
-    Handle<Code> stub = ComputeConstructStub(map);
+    Handle<Code> stub = Handle<Code>(
+        ComputeConstructStub(Handle<SharedFunctionInfo>(function->shared())));
     function->shared()->set_construct_stub(*stub);
   }
+
   Counters::constructed_objects.Increment();
   Counters::constructed_objects_runtime.Increment();
+
   return *result;
 }
 
@@ -7386,7 +7410,7 @@ static Object* Runtime_SystemBreak(Arguments args) {
 }
 
-static Object* Runtime_FunctionGetAssemblerCode(Arguments args) {
+static Object* Runtime_DebugDisassembleFunction(Arguments args) {
 #ifdef DEBUG
   HandleScope scope;
   ASSERT(args.length() == 1);
@@ -7401,6 +7425,21 @@ static Object* Runtime_FunctionGetAssemblerCode(Arguments args) {
 }
 
+static Object* Runtime_DebugDisassembleConstructor(Arguments args) {
+#ifdef DEBUG
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  // Get the function and make sure it is compiled.
+  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+    return Failure::Exception();
+  }
+  func->shared()->construct_stub()->PrintLn();
+#endif  // DEBUG
+  return Heap::undefined_value();
+}
+
 static Object* Runtime_FunctionGetInferredName(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);

3
deps/v8/src/runtime.h

@@ -303,7 +303,8 @@ namespace internal {
   F(DebugConstructedBy, 2) \
   F(DebugGetPrototype, 1) \
   F(SystemBreak, 0) \
-  F(FunctionGetAssemblerCode, 1) \
+  F(DebugDisassembleFunction, 1) \
+  F(DebugDisassembleConstructor, 1) \
   F(FunctionGetInferredName, 1)
 #else
 #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)

26
deps/v8/src/serialize.cc

@@ -734,6 +734,20 @@ void ExternalReferenceTable::PopulateTable() {
       UNCLASSIFIED,
       17,
       "compare_doubles");
+#ifdef V8_NATIVE_REGEXP
+  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
+      UNCLASSIFIED,
+      18,
+      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
+  Add(ExternalReference::re_check_stack_guard_state().address(),
+      UNCLASSIFIED,
+      19,
+      "RegExpMacroAssembler*::CheckStackGuardState()");
+  Add(ExternalReference::re_grow_stack().address(),
+      UNCLASSIFIED,
+      20,
+      "NativeRegExpMacroAssembler::GrowStack()");
+#endif
 }
 
@@ -1118,6 +1132,11 @@ void Serializer::PutHeader() {
   writer_->PutC(FLAG_debug_serialization ? '1' : '0');
 #else
   writer_->PutC('0');
+#endif
+#ifdef V8_NATIVE_REGEXP
+  writer_->PutC('N');
+#else  // Interpreted regexp
+  writer_->PutC('I');
 #endif
   // Write sizes of paged memory spaces. Allocate extra space for the old
   // and code spaces, because objects in new space will be promoted to them.
@@ -1238,7 +1257,7 @@ Address Serializer::PutObject(HeapObject* obj) {
   // Write out the object prologue: type, size, and simulated address of obj.
   writer_->PutC('[');
-  CHECK_EQ(0, size & kObjectAlignmentMask);
+  CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask));
   writer_->PutInt(type);
   writer_->PutInt(size >> kObjectAlignmentBits);
   PutEncodedAddress(addr);  // encodes AllocationSpace
@@ -1474,6 +1493,11 @@ void Deserializer::GetHeader() {
   // In release mode, don't attempt to read a snapshot containing
   // synchronization tags.
   if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
+#endif
+#ifdef V8_NATIVE_REGEXP
+  reader_.ExpectC('N');
+#else  // Interpreted regexp.
+  reader_.ExpectC('I');
 #endif
   // Ensure sufficient capacity in paged memory spaces to avoid growth
   // during deserialization.
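The extra 'N'/'I' byte makes snapshots self-describing about their regexp engine, so a native-regexp build rejects an interpreter-built snapshot at load time instead of failing later. The handshake, reduced to its essentials (writer/reader shapes invented for illustration):

    #include <cstdio>
    #include <cstdlib>

    #ifdef V8_NATIVE_REGEXP
    const char kRegExpTag = 'N';
    #else
    const char kRegExpTag = 'I';
    #endif

    void PutHeader(FILE* out) { std::fputc(kRegExpTag, out); }

    void GetHeader(FILE* in) {
      if (std::fgetc(in) != kRegExpTag) {
        std::fprintf(stderr, "snapshot regexp configuration mismatch\n");
        std::abort();  // refuse an incompatible snapshot up front
      }
    }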

12
deps/v8/src/stub-cache.cc

@@ -1097,4 +1097,16 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
 }
 
+Object* ConstructStubCompiler::GetCode() {
+  Code::Flags flags = Code::ComputeFlags(Code::STUB);
+  Object* result = GetCodeWithFlags(flags, "ConstructStub");
+  if (!result->IsFailure()) {
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+  }
+  return result;
+}
+
 } }  // namespace v8::internal

11
deps/v8/src/stub-cache.h

@@ -561,6 +561,17 @@ class CallStubCompiler: public StubCompiler {
 };
 
+class ConstructStubCompiler: public StubCompiler {
+ public:
+  explicit ConstructStubCompiler() {}
+
+  Object* CompileConstructStub(SharedFunctionInfo* shared);
+
+ private:
+  Object* GetCode();
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_STUB_CACHE_H_

13
deps/v8/src/top.cc

@@ -855,23 +855,18 @@ void Top::TraceException(bool flag) {
 }
 
-bool Top::OptionalRescheduleException(bool is_bottom_call,
-                                      bool force_clear_catchable) {
+bool Top::OptionalRescheduleException(bool is_bottom_call) {
   // Allways reschedule out of memory exceptions.
   if (!is_out_of_memory()) {
     bool is_termination_exception =
         pending_exception() == Heap::termination_exception();
 
-    // Do not reschedule the exception if this is the bottom call or
-    // if we are asked to clear catchable exceptions.  Termination
-    // exceptions are not catchable and are only cleared if this is
-    // the bottom call.
-    bool clear_exception = is_bottom_call ||
-        (force_clear_catchable && !is_termination_exception);
+    // Do not reschedule the exception if this is the bottom call.
+    bool clear_exception = is_bottom_call;
 
     if (is_termination_exception) {
+      thread_local_.external_caught_exception_ = false;
       if (is_bottom_call) {
-        thread_local_.external_caught_exception_ = false;
         clear_pending_exception();
         return false;
       }

4
deps/v8/src/top.h

@@ -157,8 +157,8 @@ class Top {
   // exceptions. If an exception was thrown and not handled by an external
   // handler the exception is scheduled to be rethrown when we return to running
   // JavaScript code. If an exception is scheduled true is returned.
-  static bool OptionalRescheduleException(bool is_bottom_call,
-                                          bool force_clear_catchable);
+  static bool OptionalRescheduleException(bool is_bottom_call);
 
   static bool* external_caught_exception_address() {
     return &thread_local_.external_caught_exception_;

1
deps/v8/src/v8-counters.h

@@ -141,6 +141,7 @@ namespace internal {
   SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
   SC(constructed_objects, V8.ConstructedObjects) \
   SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
+  SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
   SC(for_in, V8.ForIn) \
   SC(enum_cache_hits, V8.EnumCacheHits) \
   SC(enum_cache_misses, V8.EnumCacheMisses) \

4
deps/v8/src/v8.cc

@@ -98,6 +98,10 @@ bool V8::Initialize(Deserializer *des) {
     StubCache::Clear();
   }
 
+  // Deserializing may put strange things in the root array's copy of the
+  // stack guard.
+  Heap::SetStackLimit(StackGuard::jslimit());
+
   // Setup the CPU support. Must be done after heap setup and after
   // any deserialization because we have to have the initial heap
   // objects in place for creating the code object used for probing.

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      7
+#define BUILD_NUMBER      9
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false

18
deps/v8/src/x64/assembler-x64.cc

@@ -120,13 +120,23 @@ void CpuFeatures::Probe() {
   supported_ = kDefaultCpuFeatures | (1 << CPUID);
   { Scope fscope(CPUID);
     __ cpuid();
+    // Move the result from ecx:edx to rdi.
+    __ movl(rdi, rdx);  // Zero-extended to 64 bits.
+    __ shl(rcx, Immediate(32));
+    __ or_(rdi, rcx);
+
+    // Get the sahf supported flag, from CPUID(0x80000001)
+    __ movq(rax, 0x80000001, RelocInfo::NONE);
+    __ cpuid();
   }
   supported_ = kDefaultCpuFeatures;
 
-  // Move the result from ecx:edx to rax and make sure to mark the
-  // CPUID feature as supported.
-  __ movl(rax, rdx);  // Zero-extended to 64 bits.
-  __ shl(rcx, Immediate(32));
+  // Put the CPU flags in rax.
+  // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
+  __ movl(rax, Immediate(1));
+  __ and_(rcx, rax);  // Bit 0 is set if SAHF instruction supported.
+  __ not_(rax);
+  __ and_(rax, rdi);
   __ or_(rax, rcx);
   __ or_(rax, Immediate(1 << CPUID));
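The comment rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID) summarizes the register dance: bit 0 is the SAHF flag from the extended cpuid, every other bit comes from the basic feature word saved in rdi. The same computation in plain C++ (variables named after the registers):

    #include <cstdint>

    uint64_t CombineFeatureBits(uint64_t rcx, uint64_t rdi, int cpuid_bit) {
      uint64_t rax = 1;
      rcx &= rax;            // keep only the SAHF bit
      rax = ~rax;            // 0xFFFF...FE
      rax &= rdi;            // feature word without bit 0
      rax |= rcx;            // splice the SAHF bit back in
      rax |= static_cast<uint64_t>(1) << cpuid_bit;  // mark CPUID itself
      return rax;
    }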

7
deps/v8/src/x64/assembler-x64.h

@@ -361,7 +361,12 @@ class CpuFeatures : public AllStatic {
   // Feature flags bit positions. They are mostly based on the CPUID spec.
   // (We assign CPUID itself to one of the currently reserved bits --
   // feel free to change this if needed.)
-  enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
+  enum Feature { SSE3 = 32,
+                 SSE2 = 26,
+                 CMOV = 15,
+                 RDTSC = 4,
+                 CPUID = 10,
+                 SAHF = 0 };
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
   static void Probe();

72
deps/v8/src/x64/builtins-x64.cc

@ -53,7 +53,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ movq(rbp, rsp); __ movq(rbp, rsp);
// Store the arguments adaptor context sentinel. // Store the arguments adaptor context sentinel.
__ push(Immediate(ArgumentsAdaptorFrame::SENTINEL)); __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Push the function on the stack. // Push the function on the stack.
__ push(rdi); __ push(rdi);
@ -139,9 +139,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill remaining expected arguments with undefined values. // Fill remaining expected arguments with undefined values.
Label fill; Label fill;
__ movq(kScratchRegister, __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
Factory::undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
__ bind(&fill); __ bind(&fill);
__ incq(rcx); __ incq(rcx);
__ push(kScratchRegister); __ push(kScratchRegister);
@ -218,9 +216,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ testl(rbx, Immediate(kSmiTagMask)); __ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object); __ j(zero, &call_to_object);
__ Cmp(rbx, Factory::null_value()); __ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver); __ j(equal, &use_global_receiver);
__ Cmp(rbx, Factory::undefined_value()); __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &use_global_receiver); __ j(equal, &use_global_receiver);
__ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx); __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
@ -386,9 +384,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movq(rbx, Operand(rbp, kReceiverOffset)); __ movq(rbx, Operand(rbp, kReceiverOffset));
__ testl(rbx, Immediate(kSmiTagMask)); __ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object); __ j(zero, &call_to_object);
__ Cmp(rbx, Factory::null_value()); __ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver); __ j(equal, &use_global_receiver);
__ Cmp(rbx, Factory::undefined_value()); __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &use_global_receiver); __ j(equal, &use_global_receiver);
// If given receiver is already a JavaScript object then there's no // If given receiver is already a JavaScript object then there's no
@ -542,22 +540,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// problem here, because it is always greater than the maximum // problem here, because it is always greater than the maximum
// instance size that can be represented in a byte. // instance size that can be represented in a byte.
ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte)); ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
ExternalReference new_space_allocation_top = __ AllocateObjectInNewSpace(rdi, rbx, rdi, no_reg, &rt_call, false);
ExternalReference::new_space_allocation_top_address();
__ movq(kScratchRegister, new_space_allocation_top);
__ movq(rbx, Operand(kScratchRegister, 0));
__ addq(rdi, rbx); // Calculate new top
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
__ movq(kScratchRegister, new_space_allocation_limit);
__ cmpq(rdi, Operand(kScratchRegister, 0));
__ j(above_equal, &rt_call);
// Allocated the JSObject, now initialize the fields. // Allocated the JSObject, now initialize the fields.
// rax: initial map // rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address). // rbx: JSObject (not HeapObject tagged - the actual address).
// rdi: start of next object // rdi: start of next object
__ movq(Operand(rbx, JSObject::kMapOffset), rax); __ movq(Operand(rbx, JSObject::kMapOffset), rax);
__ Move(rcx, Factory::empty_fixed_array()); __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx); __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
__ movq(Operand(rbx, JSObject::kElementsOffset), rcx); __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object. // Set extra fields in the newly allocated object.
@ -565,7 +554,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rbx: JSObject // rbx: JSObject
// rdi: start of next object // rdi: start of next object
{ Label loop, entry; { Label loop, entry;
__ Move(rdx, Factory::undefined_value()); __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
__ jmp(&entry); __ jmp(&entry);
__ bind(&loop); __ bind(&loop);
@ -576,16 +565,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(less, &loop); __ j(less, &loop);
} }
// Mostly done with the JSObject. Add the heap tag and store the new top, so // Add the object tag to make the JSObject real, so that we can continue and
// that we can continue and jump into the continuation code at any time from // jump into the continuation code at any time from now on. Any failures
// now on. Any failures need to undo the setting of the new top, so that the // need to undo the allocation, so that the heap is in a consistent state
// heap is in a consistent state and verifiable. // and verifiable.
// rax: initial map // rax: initial map
// rbx: JSObject // rbx: JSObject
// rdi: start of next object // rdi: start of next object
__ or_(rbx, Immediate(kHeapObjectTag)); __ or_(rbx, Immediate(kHeapObjectTag));
__ movq(kScratchRegister, new_space_allocation_top);
__ movq(Operand(kScratchRegister, 0), rdi);
// Check if a non-empty properties array is needed. // Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is. // Allocate and initialize a FixedArray if it is.
@ -610,18 +597,21 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rdx: number of elements in properties array // rdx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() > ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize)); (FixedArray::kHeaderSize + 255*kPointerSize));
__ lea(rax, Operand(rdi, rdx, times_pointer_size, FixedArray::kHeaderSize)); __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
__ movq(kScratchRegister, new_space_allocation_limit); times_pointer_size,
__ cmpq(rax, Operand(kScratchRegister, 0)); rdx,
__ j(above_equal, &undo_allocation); rdi,
__ store_rax(new_space_allocation_top); rax,
no_reg,
&undo_allocation,
true);
// Initialize the FixedArray. // Initialize the FixedArray.
// rbx: JSObject // rbx: JSObject
// rdi: FixedArray // rdi: FixedArray
// rdx: number of elements // rdx: number of elements
// rax: start of next object // rax: start of next object
__ Move(rcx, Factory::fixed_array_map()); __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
__ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map __ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
__ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length __ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
@ -631,7 +621,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rax: start of next object // rax: start of next object
// rdx: number of elements // rdx: number of elements
{ Label loop, entry; { Label loop, entry;
__ Move(rdx, Factory::undefined_value()); __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry); __ jmp(&entry);
__ bind(&loop); __ bind(&loop);
@ -659,9 +649,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// allocated objects unused properties. // allocated objects unused properties.
// rbx: JSObject (previous new top) // rbx: JSObject (previous new top)
__ bind(&undo_allocation); __ bind(&undo_allocation);
__ xor_(rbx, Immediate(kHeapObjectTag)); // clear the heap tag __ UndoAllocationInNewSpace(rbx);
__ movq(kScratchRegister, new_space_allocation_top);
__ movq(Operand(kScratchRegister, 0), rbx);
} }
  // Allocate the new receiver object using the runtime call.
@@ -756,7 +744,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  // an internal frame and the pushed function and receiver, and
  // register rax and rbx holds the argument count and argument array,
  // while rdi holds the function pointer and rsi the context.
-#ifdef __MSVC__
+#ifdef _WIN64
  // MSVC parameters in:
  // rcx : entry (ignored)
  // rdx : function
@@ -766,7 +754,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  // Clear the context before we push it when entering the JS frame.
  __ xor_(rsi, rsi);
-  // Enter an internal frame.
  __ EnterInternalFrame();

  // Load the function context into rsi.
@@ -783,7 +770,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
  // Load the function pointer into rdi.
  __ movq(rdi, rdx);
-#else  // !defined(__MSVC__)
+#else  // !defined(_WIN64)
  // GCC parameters in:
  // rdi : entry (ignored)
  // rsi : function
@@ -807,7 +794,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  // Load the number of arguments and setup pointer to the arguments.
  __ movq(rax, rcx);
  __ movq(rbx, r8);
-#endif  // __MSVC__
+#endif  // _WIN64
+
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ movq(r13, roots_address);

  // Current stack contents:
  // [rsp + 2 * kPointerSize ... ]: Internal frame
  // [rsp + kPointerSize] : function
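The trampoline now pins r13 to the heap's root list for the rest of the JS invocation, so loads and compares of frequently used values become a single indexed memory operand. A minimal C++ sketch of the idea (names hypothetical, assuming pointer-sized root entries):

    #include <cstdint>

    // Hypothetical stand-in for Heap's root list: a fixed table of
    // commonly used values (undefined, null, the maps, ...).
    static intptr_t roots[64];

    // With a dedicated, callee-saved base register (r13 above), reading a
    // root is one indexed load instead of materializing an external
    // reference and then dereferencing it.
    inline intptr_t LoadRootSketch(const intptr_t* roots_base, int index) {
      return roots_base[index];  // movq dst, [r13 + index * kPointerSize]
    }

This is also why r13 disappears from HasValidEntryRegisters in the codegen hunk below: the register allocator may no longer hand it out.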

3
deps/v8/src/x64/cfg-x64.cc

@@ -71,8 +71,7 @@ void EntryNode::Compile(MacroAssembler* masm) {
  __ push(rdi);
  int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
  if (count > 0) {
-    __ movq(kScratchRegister, Factory::undefined_value(),
-            RelocInfo::EMBEDDED_OBJECT);
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
    for (int i = 0; i < count; i++) {
      __ push(kScratchRegister);
    }

481
deps/v8/src/x64/codegen-x64.cc

@@ -537,7 +537,6 @@ bool CodeGenerator::HasValidEntryRegisters() {
      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
-      && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0))
      && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif
@@ -649,6 +648,196 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop)
: argc_(argc), in_loop_(in_loop) { }
void Generate(MacroAssembler* masm);
private:
int argc_;
InLoopFlag in_loop_;
#ifdef DEBUG
void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
#endif
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
InLoopFlag InLoop() { return in_loop_; }
};
void CodeGenerator::CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position) {
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
JumpTarget slow, done;
// Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
ref.GetValue(NOT_INSIDE_TYPEOF);
ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
// from the stack. This also deals with cases where a local variable
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
bool try_lazy = true;
if (probe.is_constant()) {
try_lazy = probe.handle()->IsTheHole();
} else {
__ Cmp(probe.reg(), Factory::the_hole_value());
probe.Unuse();
slow.Branch(not_equal);
}
if (try_lazy) {
JumpTarget build_args;
// Get rid of the arguments object probe.
frame_->Drop();
// Before messing with the execution stack, we sync all
// elements. This is bound to happen anyway because we're
// about to call a function.
frame_->SyncRange(0, frame_->element_count() - 1);
// Check that the receiver really is a JavaScript object.
{ frame_->PushElementAt(0);
Result receiver = frame_->Pop();
receiver.ToRegister();
__ testl(receiver.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
build_args.Branch(below);
}
// Verify that we're invoking Function.prototype.apply.
{ frame_->PushElementAt(1);
Result apply = frame_->Pop();
apply.ToRegister();
__ testl(apply.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
Result tmp = allocator_->Allocate();
__ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
build_args.Branch(not_equal);
__ movq(tmp.reg(),
FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
__ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
apply_code);
build_args.Branch(not_equal);
}
// Get the function receiver from the stack. Check that it
// really is a function.
__ movq(rdi, Operand(rsp, 2 * kPointerSize));
__ testl(rdi, Immediate(kSmiTagMask));
build_args.Branch(zero);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
build_args.Branch(not_equal);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
Label invoke, adapted;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
__ movq(rax, Immediate(scope_->num_parameters()));
for (int i = 0; i < scope_->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
__ jmp(&invoke);
// Arguments adaptor frame present. Copy arguments from there, but
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ shrl(rax, Immediate(kSmiTagSize));
__ movq(rcx, rax);
__ cmpq(rax, Immediate(kArgumentsLimit));
build_args.Branch(above);
// Loop through the arguments pushing them onto the execution
// stack. We don't inform the virtual frame of the push, so we don't
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
__ bind(&loop);
__ testl(rcx, rcx);
__ j(zero, &invoke);
__ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
__ decl(rcx);
__ jmp(&loop);
// Invoke the function. The virtual frame knows about the receiver
// so make sure to forget that explicitly.
__ bind(&invoke);
ParameterCount actual(rax);
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
frame_->Forget(1);
Result result = allocator()->Allocate(rax);
frame_->SetElementAt(0, &result);
done.Jump();
// Slow-case: Allocate the arguments object since we know it isn't
// there, and fall-through to the slow-case where we call
// Function.prototype.apply.
build_args.Bind();
Result arguments_object = StoreArgumentsObject(false);
frame_->Push(&arguments_object);
slow.Bind();
}
// Flip the apply function and the function to call on the stack, so
// the function looks like the receiver of the apply call. This way,
// the generic Function.prototype.apply implementation can deal with
// the call like it usually does.
Result a2 = frame_->Pop();
Result a1 = frame_->Pop();
Result ap = frame_->Pop();
Result fn = frame_->Pop();
frame_->Push(&ap);
frame_->Push(&fn);
frame_->Push(&a1);
frame_->Push(&a2);
CallFunctionStub call_function(2, NOT_IN_LOOP);
Result res = frame_->CallStub(&call_function, 3);
frame_->Push(&res);
// All done. Restore context register after call.
if (try_lazy) done.Bind();
frame_->RestoreContextRegister();
}
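The function ends by flipping the top four expression-stack slots so that the apply function lands in the receiver position expected by CallFunctionStub. A small self-contained sketch of the reordering, with std::vector standing in for the virtual frame (top of stack at the back):

    #include <string>
    #include <vector>

    // Frame on entry, top-down: arg2, arg1, apply, function.
    // Frame on exit,  top-down: arg2, arg1, function, apply.
    void FlipForApplyCall(std::vector<std::string>& frame) {
      std::string a2 = frame.back(); frame.pop_back();
      std::string a1 = frame.back(); frame.pop_back();
      std::string ap = frame.back(); frame.pop_back();
      std::string fn = frame.back(); frame.pop_back();
      frame.push_back(ap);  // apply now sits where the receiver goes
      frame.push_back(fn);
      frame.push_back(a1);
      frame.push_back(a2);
    }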
class DeferredStackCheck: public DeferredCode {
 public:
  DeferredStackCheck() {
@@ -668,37 +857,13 @@ void DeferredStackCheck::Generate() {
void CodeGenerator::CheckStack() {
  if (FLAG_check_stack) {
    DeferredStackCheck* deferred = new DeferredStackCheck;
-    ExternalReference stack_guard_limit =
-        ExternalReference::address_of_stack_guard_limit();
-    __ movq(kScratchRegister, stack_guard_limit);
-    __ cmpq(rsp, Operand(kScratchRegister, 0));
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    deferred->Branch(below);
    deferred->BindExit();
  }
}
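With the stack limit stored in the root list, the inline stack check reduces to one comparison against a root slot. Roughly, in C++ terms (a hedged sketch, not V8 API):

    #include <cstdint>

    // The generated fast path: take the deferred slow path only when the
    // stack pointer has dropped below the limit published by the stack
    // guard ("deferred->Branch(below)" above).
    inline bool StackGuardTriggered(uintptr_t sp, uintptr_t stack_limit) {
      return sp < stack_limit;
    }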
-class CallFunctionStub: public CodeStub {
- public:
-  CallFunctionStub(int argc, InLoopFlag in_loop)
-      : argc_(argc), in_loop_(in_loop) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  int argc_;
-  InLoopFlag in_loop_;
-
-#ifdef DEBUG
-  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
-  Major MajorKey() { return CallFunction; }
-  int MinorKey() { return argc_; }
-  InLoopFlag InLoop() { return in_loop_; }
-};
void CodeGenerator::VisitAndSpill(Statement* statement) {
  // TODO(X64): No architecture specific code. Move to shared location.
  ASSERT(in_spilled_code());
@@ -772,9 +937,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
-      __ movq(kScratchRegister, Factory::the_hole_value(),
-              RelocInfo::EMBEDDED_OBJECT);
-      frame_->EmitPush(kScratchRegister);
+      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
@@ -1480,9 +1643,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
  frame_->EmitPop(rax);

  // rax: value to be iterated over
-  __ Cmp(rax, Factory::undefined_value());
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  exit.Branch(equal);
-  __ Cmp(rax, Factory::null_value());
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
  exit.Branch(equal);

  // Stack layout in body:
@@ -1518,7 +1681,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
  // Runtime::kGetPropertyNamesFast)
  __ movq(rdx, rax);
  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ Cmp(rcx, Factory::meta_map());
+  __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
  fixed_array.Branch(not_equal);

  // Get enum cache
@@ -1587,7 +1750,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
  __ movq(rbx, rax);

  // If the property has been removed while iterating, we just skip it.
-  __ Cmp(rbx, Factory::null_value());
+  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
  node->continue_target()->Branch(equal);

  end_del_check.Bind();
@@ -1862,10 +2025,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
    // Fake a top of stack value (unneeded when FALLING) and set the
    // state in ecx, then jump around the unlink blocks if any.
-    __ movq(kScratchRegister,
-            Factory::undefined_value(),
-            RelocInfo::EMBEDDED_OBJECT);
-    frame_->EmitPush(kScratchRegister);
+    frame_->EmitPush(Heap::kUndefinedValueRootIndex);
    __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
    if (nof_unlinks > 0) {
      finally_block.Jump();
@@ -1910,10 +2070,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
        frame_->EmitPush(rax);
      } else {
        // Fake TOS for targets that shadowed breaks and continues.
-        __ movq(kScratchRegister,
-                Factory::undefined_value(),
-                RelocInfo::EMBEDDED_OBJECT);
-        frame_->EmitPush(kScratchRegister);
+        frame_->EmitPush(Heap::kUndefinedValueRootIndex);
      }
      __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
      if (--nof_unlinks > 0) {
@@ -2155,7 +2312,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
  // jump to the deferred code passing the literals array.
  DeferredRegExpLiteral* deferred =
      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
-  __ Cmp(boilerplate.reg(), Factory::undefined_value());
+  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
  deferred->Branch(equal);
  deferred->BindExit();
  literals.Unuse();
@@ -2226,7 +2383,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
  // If so, jump to the deferred code passing the literals array.
  DeferredObjectLiteral* deferred =
      new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
-  __ Cmp(boilerplate.reg(), Factory::undefined_value());
+  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
  deferred->Branch(equal);
  deferred->BindExit();
  literals.Unuse();
@@ -2359,7 +2516,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
  // If so, jump to the deferred code passing the literals array.
  DeferredArrayLiteral* deferred =
      new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
-  __ Cmp(boilerplate.reg(), Factory::undefined_value());
+  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
  deferred->Branch(equal);
  deferred->BindExit();
  literals.Unuse();
@@ -2612,27 +2769,40 @@ void CodeGenerator::VisitCall(Call* node) {
      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
      // ------------------------------------------------------------------

-      // TODO(X64): Consider optimizing Function.prototype.apply calls
-      // with arguments object. Requires lazy arguments allocation;
-      // see http://codereview.chromium.org/147075.
-
-      // Push the name of the function and the receiver onto the stack.
-      frame_->Push(literal->handle());
-      Load(property->obj());
-
-      // Load the arguments.
-      int arg_count = args->length();
-      for (int i = 0; i < arg_count; i++) {
-        Load(args->at(i));
-      }
-
-      // Call the IC initialization code.
-      CodeForSourcePosition(node->position());
-      Result result =
-          frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
-      frame_->RestoreContextRegister();
-      // Replace the function on the stack with the result.
-      frame_->SetElementAt(0, &result);
+      Handle<String> name = Handle<String>::cast(literal->handle());
+
+      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+          name->IsEqualTo(CStrVector("apply")) &&
+          args->length() == 2 &&
+          args->at(1)->AsVariableProxy() != NULL &&
+          args->at(1)->AsVariableProxy()->IsArguments()) {
+        // Use the optimized Function.prototype.apply that avoids
+        // allocating lazily allocated arguments objects.
+        CallApplyLazy(property,
+                      args->at(0),
+                      args->at(1)->AsVariableProxy(),
+                      node->position());
+      } else {
+        // Push the name of the function and the receiver onto the stack.
+        frame_->Push(name);
+        Load(property->obj());
+
+        // Load the arguments.
+        int arg_count = args->length();
+        for (int i = 0; i < arg_count; i++) {
+          Load(args->at(i));
+        }
+
+        // Call the IC initialization code.
+        CodeForSourcePosition(node->position());
+        Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+                                           arg_count,
+                                           loop_nesting());
+        frame_->RestoreContextRegister();
+        // Replace the function on the stack with the result.
+        frame_->SetElementAt(0, &result);
+      }
    } else {
      // -------------------------------------------
@@ -3304,7 +3474,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
      destination()->true_target()->Branch(zero);
      frame_->Spill(answer.reg());
      __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ Cmp(answer.reg(), Factory::heap_number_map());
+      __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
      answer.Unuse();
      destination()->Split(equal);
@@ -3323,14 +3493,14 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
      destination()->Split(below);  // Unsigned byte comparison needed.
    } else if (check->Equals(Heap::boolean_symbol())) {
-      __ Cmp(answer.reg(), Factory::true_value());
+      __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
      destination()->true_target()->Branch(equal);
-      __ Cmp(answer.reg(), Factory::false_value());
+      __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
      answer.Unuse();
      destination()->Split(equal);
    } else if (check->Equals(Heap::undefined_symbol())) {
-      __ Cmp(answer.reg(), Factory::undefined_value());
+      __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
      destination()->true_target()->Branch(equal);

      __ testl(answer.reg(), Immediate(kSmiTagMask));
@@ -3355,7 +3525,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
    } else if (check->Equals(Heap::object_symbol())) {
      __ testl(answer.reg(), Immediate(kSmiTagMask));
      destination()->false_target()->Branch(zero);
-      __ Cmp(answer.reg(), Factory::null_value());
+      __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
      destination()->true_target()->Branch(equal);

      // It can be an undetectable object.
@@ -3473,7 +3643,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-          Immediate(ArgumentsAdaptorFrame::SENTINEL));
+          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker);
  __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
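The sentinel stored in an adaptor frame's context slot is now a tagged smi frame marker rather than a dedicated constant. A hedged sketch of the encoding this code assumes (kSmiTag == 0, kSmiTagSize == 1, so tagging is a left shift by one):

    #include <cstdint>

    // Hypothetical helpers mirroring Smi::FromInt / Smi::value for a
    // 1-bit smi tag; the marker compared against the context slot above
    // is SmiFromInt(StackFrame::ARGUMENTS_ADAPTOR).
    constexpr intptr_t SmiFromInt(intptr_t value) { return value << 1; }
    constexpr intptr_t SmiValue(intptr_t smi) { return smi >> 1; }

A real context pointer is a tagged heap object and can never compare equal to a smi, so the single cmpq cleanly distinguishes the two frame kinds.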
@@ -3564,7 +3734,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
  // If the index is negative or non-smi trigger the slow case.
  ASSERT(kSmiTag == 0);
  __ testl(index.reg(),
-           Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000U)));
+           Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
  __ j(not_zero, &slow_case);
  // Untag the index.
  __ sarl(index.reg(), Immediate(kSmiTagSize));
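The single testl folds two checks, "is a smi" and "is non-negative", into one mask. In C++ terms (assuming the 1-bit smi tag used throughout this port):

    #include <cstdint>

    // Zero iff the low tag bit (smi check) and bit 31 (sign bit of the
    // 32-bit smi word) are both clear.
    inline bool IsNonNegativeSmi(uint32_t value) {
      const uint32_t kSmiTagMask = 1;  // assumed 1-bit tag, kSmiTag == 0
      return (value & (kSmiTagMask | 0x80000000u)) == 0;
    }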
@@ -3649,7 +3819,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
  __ bind(&slow_case);
  // Move the undefined value into the result register, which will
  // trigger the slow case.
-  __ Move(temp.reg(), Factory::undefined_value());
+  __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);

  __ bind(&end);
  frame_->Push(&temp);
@@ -4092,15 +4262,15 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
  // Fast case checks.

  // 'false' => false.
-  __ Cmp(value.reg(), Factory::false_value());
+  __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
  dest->false_target()->Branch(equal);

  // 'true' => true.
-  __ Cmp(value.reg(), Factory::true_value());
+  __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
  dest->true_target()->Branch(equal);

  // 'undefined' => false.
-  __ Cmp(value.reg(), Factory::undefined_value());
+  __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
  dest->false_target()->Branch(equal);

  // Smi => false iff zero.
@@ -4319,10 +4489,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
                                                    value,
                                                    &slow));
      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ Cmp(value.reg(), Factory::the_hole_value());
+        __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
        done.Branch(not_equal, &value);
-        __ movq(value.reg(), Factory::undefined_value(),
-                RelocInfo::EMBEDDED_OBJECT);
+        __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
      }
      // There is always control flow to slow from
      // ContextSlotOperandCheckExtensions so we have to jump around
@@ -4360,9 +4529,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
    Comment cmnt(masm_, "[ Load const");
    JumpTarget exit;
    __ movq(rcx, SlotOperand(slot, rcx));
-    __ Cmp(rcx, Factory::the_hole_value());
+    __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
    exit.Branch(not_equal);
-    __ movq(rcx, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+    __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
    exit.Bind();
    frame_->EmitPush(rcx);
@@ -4416,7 +4585,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
  // indicates that we haven't loaded the arguments object yet, we
  // need to do it now.
  JumpTarget exit;
-  __ Cmp(value.reg(), Factory::the_hole_value());
+  __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
  frame_->Push(&value);
  exit.Branch(not_equal);
  Result arguments = StoreArgumentsObject(false);
@@ -4477,7 +4646,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
      VirtualFrame::SpilledScope spilled_scope;
      Comment cmnt(masm_, "[ Init const");
      __ movq(rcx, SlotOperand(slot, rcx));
-      __ Cmp(rcx, Factory::the_hole_value());
+      __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
      exit.Branch(not_equal);
    }
@@ -4561,7 +4730,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
    __ movq(tmp.reg(), context);
  }
  // Load map for comparison into register, outside loop.
-  __ Move(kScratchRegister, Factory::global_context_map());
+  __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
  __ bind(&next);
  // Terminate at global context.
  __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
@@ -4665,7 +4834,7 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
    // been assigned a proper value.
    skip_arguments = !arguments.handle()->IsTheHole();
  } else {
-    __ Cmp(arguments.reg(), Factory::the_hole_value());
+    __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
    arguments.Unuse();
    done.Branch(not_equal);
  }
@@ -4803,7 +4972,7 @@ void CodeGenerator::Comparison(Condition cc,
      right_side.Unuse();
      left_side.Unuse();
      operand.ToRegister();
-      __ Cmp(operand.reg(), Factory::null_value());
+      __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
      if (strict) {
        operand.Unuse();
        dest->Split(equal);
@@ -4811,7 +4980,7 @@ void CodeGenerator::Comparison(Condition cc,
        // The 'null' value is only equal to 'undefined' if using non-strict
        // comparisons.
        dest->true_target()->Branch(equal);
-        __ Cmp(operand.reg(), Factory::undefined_value());
+        __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
        dest->true_target()->Branch(equal);
        __ testl(operand.reg(), Immediate(kSmiTagMask));
        dest->false_target()->Branch(equal);
@@ -5354,7 +5523,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
                                                overwrite_mode);
        // Check for negative or non-Smi left hand side.
        __ testl(operand->reg(),
-                 Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000)));
+                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000)));
        deferred->Branch(not_zero);
        if (int_value < 0) int_value = -int_value;
        if (int_value == 1) {
@@ -5894,7 +6063,7 @@ void Reference::GetValue(TypeofState typeof_state) {
        // Check that the key is a non-negative smi.
        __ testl(key.reg(),
-                 Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
+                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000u)));
        deferred->Branch(not_zero);

        // Get the elements array from the receiver and check that it
@@ -5931,7 +6100,7 @@ void Reference::GetValue(TypeofState typeof_state) {
                            FixedArray::kHeaderSize - kHeapObjectTag));
        elements.Unuse();
        index.Unuse();
-        __ Cmp(value.reg(), Factory::the_hole_value());
+        __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
        deferred->Branch(equal);
        __ IncrementCounter(&Counters::keyed_load_inline, 1);
@@ -6140,7 +6309,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
  __ movq(rax, Operand(rsp, 1 * kPointerSize));

  // 'null' => false.
-  __ Cmp(rax, Factory::null_value());
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
  __ j(equal, &false_result);

  // Get the map and type of the heap object.
@@ -6171,7 +6340,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
  __ bind(&not_string);
  // HeapNumber => false iff +0, -0, or NaN.
  // These three cases set C3 when compared to zero in the FPU.
-  __ Cmp(rdx, Factory::heap_number_map());
+  __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &true_result);
  // TODO(x64): Don't use fp stack, use MMX registers?
  __ fldz();  // Load zero onto fp stack
@@ -6217,7 +6386,7 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
      if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
        // If the product is zero and the non-zero factor is negative,
        // the spec requires us to return floating point negative zero.
-        if (answer != 0 || (left >= 0 && right >= 0)) {
+        if (answer != 0 || (left + right) >= 0) {
          answer_object = Smi::FromInt(static_cast<int>(answer));
        }
      }
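The folded multiply may only produce the smi 0 when the mathematical result is +0; JavaScript requires 0 * -5 to evaluate to -0, which only a heap number can represent. Since a zero product means at least one factor is zero, left + right is just the other factor (or zero), so its sign decides. A sketch of the predicate:

    #include <cstdint>

    // Mirrors the condition above: fold unless the result would be -0.
    inline bool CanFoldMulToSmi(int64_t answer, int left, int right) {
      return answer != 0 || (left + right) >= 0;
    }
    // CanFoldMulToSmi(0, 0, -5) == false  -> result is -0, needs a double
    // CanFoldMulToSmi(0, 0,  5) == true   -> result is +0, fold to smi 0
    // CanFoldMulToSmi(12, 3, 4) == true   -> nonzero, always foldable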
@@ -6285,24 +6454,54 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
void UnarySubStub::Generate(MacroAssembler* masm) {
  Label slow;
  Label done;
+  Label try_float;
+  Label special;
  // Check whether the value is a smi.
  __ testl(rax, Immediate(kSmiTagMask));
-  // TODO(X64): Add inline code that handles floats, as on ia32 platform.
-  __ j(not_zero, &slow);
+  __ j(not_zero, &try_float);

  // Enter runtime system if the value of the smi is zero
  // to make sure that we switch between 0 and -0.
  // Also enter it if the value of the smi is Smi::kMinValue
  __ testl(rax, Immediate(0x7FFFFFFE));
-  __ j(zero, &slow);
+  __ j(zero, &special);
  __ neg(rax);
  __ jmp(&done);
+
+  __ bind(&special);
+  // Either zero or -0x4000000, neither of which become a smi when negated.
+  __ testl(rax, rax);
+  __ j(not_zero, &slow);
+  __ Move(rax, Factory::minus_zero_value());
+  __ jmp(&done);

  // Enter runtime system.
  __ bind(&slow);
  __ pop(rcx);  // pop return address
  __ push(rax);
  __ push(rcx);  // push return address
  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+  __ jmp(&done);
+
+  // Try floating point case.
+  __ bind(&try_float);
+  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ Cmp(rdx, Factory::heap_number_map());
+  __ j(not_equal, &slow);
+  // Operand is a float, negate its value by flipping sign bit.
+  __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movq(kScratchRegister, Immediate(0x01));
+  __ shl(kScratchRegister, Immediate(63));
+  __ xor_(rdx, kScratchRegister);  // Flip sign.
+  // rdx is value to store.
+  if (overwrite_) {
+    __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+  } else {
+    FloatingPointHelper::AllocateHeapNumber(masm, &slow, rbx, rcx);
+    // rcx: allocated 'empty' number
+    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+    __ movq(rax, rcx);
+  }

  __ bind(&done);
  __ StubReturn(1);
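Negating a heap number never needs the FPU: IEEE-754 negation is a flip of the top bit of the 64-bit pattern, which is exactly what the shl/xor_ pair builds. The same trick in portable C++:

    #include <cstdint>
    #include <cstring>

    // Flip bit 63 of the double's bit pattern; handles +/-0 and NaN
    // payloads exactly like the stub's xor_ against 1 << 63.
    inline double NegateDouble(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      bits ^= uint64_t{1} << 63;
      std::memcpy(&value, &bits, sizeof value);
      return value;
    }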
@@ -6377,7 +6576,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
    // One operand is a smi.

    // Check whether the non-smi is a heap number.
-    ASSERT_EQ(1, kSmiTagMask);
+    ASSERT_EQ(static_cast<intptr_t>(1), kSmiTagMask);
    // rcx still holds rax & kSmiTag, which is either zero or one.
    __ decq(rcx);  // If rax is a smi, all 1s, else all 0s.
    __ movq(rbx, rdx);
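The decq line is a branch-free select: rcx holds rax & kSmiTagMask, and subtracting one turns that 0-or-1 into an all-ones or all-zeros mask. As a standalone sketch:

    #include <cstdint>

    // With a zero smi tag, (x & 1) is 0 for smis and 1 for heap objects;
    // subtracting 1 gives 0xFF..FF for smis and 0 for heap objects, so a
    // following and/or can pick between two values without branching.
    inline uint64_t SmiSelectMask(uint64_t tagged_value) {
      return (tagged_value & 1) - 1;
    }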
@@ -6584,7 +6783,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
  // Loop through the prototype chain looking for the function prototype.
  Label loop, is_instance, is_not_instance;
-  __ Move(kScratchRegister, Factory::null_value());
+  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
  __ bind(&loop);
  __ cmpq(rcx, rbx);
  __ j(equal, &is_instance);
@@ -6618,7 +6817,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
  Label runtime;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &runtime);

  // Value in rcx is Smi encoded.
@@ -6651,7 +6850,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  Label adaptor;
  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor);

  // Check index against formal parameters count limit passed in
@@ -6701,7 +6900,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
  Label adaptor;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor);

  // Nothing to do: The formal number of parameters has already been
@@ -6763,10 +6962,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
  if (do_gc) {
    // Pass failure code returned from last attempt as first argument to GC.
-#ifdef __MSVC__
-    __ movq(rcx, rax);  // argc.
-#else  // ! defined(__MSVC__)
-    __ movq(rdi, rax);  // argv.
+#ifdef _WIN64
+    __ movq(rcx, rax);
+#else  // ! defined(_WIN64)
+    __ movq(rdi, rax);
#endif
    __ movq(kScratchRegister,
            FUNCTION_ADDR(Runtime::PerformGC),
@@ -6782,11 +6981,14 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
  }

  // Call C function.
-#ifdef __MSVC__
-  // MSVC passes arguments in rcx, rdx, r8, r9
-  __ movq(rcx, r14);  // argc.
-  __ movq(rdx, r15);  // argv.
-#else  // ! defined(__MSVC__)
+#ifdef _WIN64
+  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
+  // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
+  __ movq(Operand(rsp, 4 * kPointerSize), r14);  // argc.
+  __ movq(Operand(rsp, 5 * kPointerSize), r15);  // argv.
+  // Pass a pointer to the Arguments object as the first argument.
+  __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+#else  // ! defined(_WIN64)
  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
  __ movq(rdi, r14);  // argc.
  __ movq(rsi, r15);  // argv.
@@ -6835,7 +7037,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
  // Special handling of termination exceptions which are uncatchable
  // by javascript code.
-  __ Cmp(rax, Factory::termination_exception());
+  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
  __ j(equal, throw_termination_exception);

  // Handle normal exception.
@@ -7012,11 +7214,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  __ push(rbp);
  __ movq(rbp, rsp);

-  // Save callee-saved registers (X64 calling conventions).
+  // Push the stack frame type marker twice.
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  // Push something that is not an arguments adaptor.
-  __ push(Immediate(ArgumentsAdaptorFrame::NON_SENTINEL));
-  __ push(Immediate(Smi::FromInt(marker)));  // @ function offset
+  __ push(Immediate(Smi::FromInt(marker)));  // context slot
+  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  // Save callee-saved registers (X64 calling conventions).
  __ push(r12);
  __ push(r13);
  __ push(r14);
@@ -7139,24 +7341,18 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
                                             Label* need_gc,
                                             Register scratch,
                                             Register result) {
-  ExternalReference allocation_top =
-      ExternalReference::new_space_allocation_top_address();
-  ExternalReference allocation_limit =
-      ExternalReference::new_space_allocation_limit_address();
-  __ movq(scratch, allocation_top);  // scratch: address of allocation top.
-  __ movq(result, Operand(scratch, 0));
-  __ addq(result, Immediate(HeapNumber::kSize));  // New top.
-  __ movq(kScratchRegister, allocation_limit);
-  __ cmpq(result, Operand(kScratchRegister, 0));
-  __ j(above, need_gc);
-
-  __ movq(Operand(scratch, 0), result);  // store new top
-  __ addq(result, Immediate(kHeapObjectTag - HeapNumber::kSize));
-  __ movq(kScratchRegister,
-          Factory::heap_number_map(),
-          RelocInfo::EMBEDDED_OBJECT);
+  // Allocate heap number in new space.
+  __ AllocateObjectInNewSpace(HeapNumber::kSize,
+                              result,
+                              scratch,
+                              no_reg,
+                              need_gc,
+                              false);
+
+  // Set the map and tag the result.
+  __ addq(result, Immediate(kHeapObjectTag));
+  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
  __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-  // Tag old top and use as result.
}
@@ -7556,18 +7752,29 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
    __ fild_s(Operand(rsp, 0 * kPointerSize));
    __ fucompp();
    __ fnstsw_ax();
-    __ sahf();  // TODO(X64): Not available.
-    __ j(not_zero, &operand_conversion_failure);
-    __ j(parity_even, &operand_conversion_failure);
+    if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+      __ sahf();
+      __ j(not_zero, &operand_conversion_failure);
+      __ j(parity_even, &operand_conversion_failure);
+    } else {
+      __ and_(rax, Immediate(0x4400));
+      __ cmpl(rax, Immediate(0x4000));
+      __ j(not_zero, &operand_conversion_failure);
+    }

    // Check if left operand is int32.
    __ fist_s(Operand(rsp, 1 * kPointerSize));
    __ fild_s(Operand(rsp, 1 * kPointerSize));
    __ fucompp();
    __ fnstsw_ax();
-    __ sahf();  // TODO(X64): Not available. Test bits in ax directly
-    __ j(not_zero, &operand_conversion_failure);
-    __ j(parity_even, &operand_conversion_failure);
+    if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+      __ sahf();
+      __ j(not_zero, &operand_conversion_failure);
+      __ j(parity_even, &operand_conversion_failure);
+    } else {
+      __ and_(rax, Immediate(0x4400));
+      __ cmpl(rax, Immediate(0x4000));
+      __ j(not_zero, &operand_conversion_failure);
+    }
  }

  // Get int32 operands and perform bitop.
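fnstsw_ax leaves the FPU status word in ax, with the condition bits C0, C2 and C3 at bits 8, 10 and 14. sahf copies AH into the CPU flags, but some early x86-64 processors do not support it in 64-bit mode, hence the feature probe. The fallback masks the bits directly; what it computes, as a sketch:

    #include <cstdint>

    // After fucompp: C3 (bit 14, 0x4000) set means "equal", C2 (bit 10,
    // 0x0400) set means "unordered" (NaN).  The and/cmp pair above accepts
    // exactly C3 == 1 && C2 == 0, i.e. the operand round-tripped through
    // int32 unchanged.
    inline bool FpuEqualAndOrdered(uint16_t status_word) {
      return (status_word & 0x4400) == 0x4000;
    }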
// Get int32 operands and perform bitop. // Get int32 operands and perform bitop.

8
deps/v8/src/x64/codegen-x64.h

@@ -481,6 +481,14 @@ class CodeGenerator: public AstVisitor {
  void CallWithArguments(ZoneList<Expression*>* arguments, int position);

+  // Use an optimized version of Function.prototype.apply that avoids
+  // allocating the arguments object and just copies the arguments
+  // from the stack.
+  void CallApplyLazy(Property* apply,
+                     Expression* receiver,
+                     VariableProxy* arguments,
+                     int position);
+
  void CheckStack();

  struct InlineRuntimeLUT {

3
deps/v8/src/x64/frames-x64.h

@@ -60,6 +60,7 @@ class StackHandlerConstants : public AllStatic {
class EntryFrameConstants : public AllStatic {
 public:
  static const int kCallerFPOffset = -10 * kPointerSize;
+  static const int kArgvOffset = 6 * kPointerSize;
};
@@ -90,10 +91,12 @@ class StandardFrameConstants : public AllStatic {
class JavaScriptFrameConstants : public AllStatic {
 public:
+  // FP-relative.
  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
  static const int kSavedRegistersOffset = +2 * kPointerSize;
  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;

+  // Caller SP-relative.
  static const int kParam0Offset = -2 * kPointerSize;
  static const int kReceiverOffset = -1 * kPointerSize;
};

82
deps/v8/src/x64/ic-x64.cc

@@ -339,7 +339,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  __ bind(&fast);
  __ movq(rax, Operand(rcx, rax, times_pointer_size,
                       FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Cmp(rax, Factory::the_hole_value());
+  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ j(equal, &slow);
@@ -613,9 +613,9 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  // Check for boolean.
  __ bind(&non_string);
-  __ Cmp(rdx, Factory::true_value());
+  __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
  __ j(equal, &boolean);
-  __ Cmp(rdx, Factory::false_value());
+  __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
  __ j(not_equal, &miss);
  __ bind(&boolean);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -785,7 +785,19 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -805,7 +817,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);

  // Cache miss: Jump to runtime.
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -819,7 +831,52 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss, probe, global;
__ movq(rax, Operand(rsp, kPointerSize));
// Check that the receiver isn't a smi.
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &miss);
// Check that the receiver is a valid JS object.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
__ j(below, &miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
// Check for access to global object (unlikely).
__ CmpInstanceType(rbx, JS_GLOBAL_PROXY_TYPE);
__ j(equal, &global);
// Check for non-global object that requires access check.
__ testl(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss);
// Search the dictionary placing the result in eax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
__ ret(0);
// Global object access: Check access rights.
__ bind(&global);
__ CheckAccessGlobalProxy(rax, rdx, &miss);
__ jmp(&probe);
// Cache miss: Restore receiver from stack and jump to runtime.
__ bind(&miss);
__ movq(rax, Operand(rsp, 1 * kPointerSize));
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
@@ -906,6 +963,21 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}

void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
__ movq(rdx, Operand(rsp, kPointerSize));
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
}

200
deps/v8/src/x64/macro-assembler-x64.cc

@@ -46,6 +46,22 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
movq(destination, Operand(r13, index << kPointerSizeLog2));
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
push(Operand(r13, index << kPointerSizeLog2));
}
void MacroAssembler::CompareRoot(Register with,
Heap::RootListIndex index) {
cmpq(with, Operand(r13, index << kPointerSizeLog2));
}
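The three accessors all lean on r13 holding the root-list base, so each is a single instruction with an indexed operand. A C++ analogue (hedged; RootListIndex treated as a plain int):

    #include <cstdint>

    struct RootTableView {
      const intptr_t* base;  // what r13 points at

      intptr_t Load(int index) const {                 // LoadRoot
        return base[index];
      }
      bool Matches(intptr_t value, int index) const {  // CompareRoot
        return value == base[index];
      }
    };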
static void RecordWriteHelper(MacroAssembler* masm,
                              Register object,
@@ -276,7 +292,7 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addq(rsp, Immediate(num_arguments * kPointerSize));
  }
-  movq(rax, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
@@ -584,8 +600,14 @@ void MacroAssembler::FCmp() {
  fcompp();
  push(rax);
  fnstsw_ax();
-  // TODO(X64): Check that sahf is safe to use, using CPUProbe.
-  sahf();
+  if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+    sahf();
+  } else {
+    shrl(rax, Immediate(8));
+    and_(rax, Immediate(0xFF));
+    push(rax);
+    popfq();
+  }
  pop(rax);
}
@@ -628,7 +650,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
-  Cmp(result, Factory::the_hole_value());
+  CompareRoot(result, Heap::kTheHoleValueRootIndex);
  j(equal, miss);

  // If the function does not have an initial map, we're done.
@@ -994,9 +1016,6 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
  }
#endif

-  // Reserve space for two arguments: argc and argv
-  subq(rsp, Immediate(2 * kPointerSize));
-
  // Get the required frame alignment for the OS.
  static const int kFrameAlignment = OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
@@ -1005,6 +1024,17 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
    and_(rsp, kScratchRegister);
  }

+#ifdef _WIN64
+  // Reserve space for the Arguments object. The Windows 64-bit ABI
+  // requires us to pass this structure as a pointer to its location on
+  // the stack. The structure contains 2 pointers.
+  // The structure on the stack must be 16-byte aligned.
+  // We also need backing space for 4 parameters, even though
+  // we only pass one parameter, and it is in a register.
+  subq(rsp, Immediate(6 * kPointerSize));
+  ASSERT(kFrameAlignment == 2 * kPointerSize);  // Change the padding if needed.
+#endif
+
  // Patch the saved entry sp.
  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
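The six reserved slots give the Windows ABI its mandatory 32 bytes of register-parameter home space plus the two words of the Arguments structure that is later passed by address in rcx. The resulting layout, as a hedged sketch:

    #include <cstdint>

    // What the subq above carves out, lowest address first.  6 slots of
    // 8 bytes keep rsp 16-byte aligned, matching the ASSERT on
    // kFrameAlignment.
    struct ExitFrameReservation {
      void*     home[4];  // shadow space for the rcx/rdx/r8/r9 parameters
      intptr_t  argc;     // Arguments, word 0 (rsp + 4 * kPointerSize)
      intptr_t* argv;     // Arguments, word 1 (rsp + 5 * kPointerSize)
    };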
@@ -1179,12 +1209,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
    // Preserve original value of holder_reg.
    push(holder_reg);
    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
-    Cmp(holder_reg, Factory::null_value());
+    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
    Check(not_equal, "JSGlobalProxy::context() should not be null.");

    // Read the first word and compare to global_context_map(),
    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
-    Cmp(holder_reg, Factory::global_context_map());
+    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
    Check(equal, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);
  }
@@ -1201,4 +1231,156 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::LoadAllocationTopHelper(
Register result,
Register result_end,
Register scratch,
bool result_contains_top_on_entry) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Just return if allocation top is already known.
if (result_contains_top_on_entry) {
// No use of scratch if allocation top is provided.
ASSERT(scratch.is(no_reg));
return;
}
// Move address of new object to result. Use scratch register if available.
if (scratch.is(no_reg)) {
movq(kScratchRegister, new_space_allocation_top);
movq(result, Operand(kScratchRegister, 0));
} else {
ASSERT(!scratch.is(result_end));
movq(scratch, new_space_allocation_top);
movq(result, Operand(scratch, 0));
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Update new top.
if (result_end.is(rax)) {
// rax can be stored directly to a memory location.
store_rax(new_space_allocation_top);
} else {
// Register required - use scratch provided if available.
if (scratch.is(no_reg)) {
movq(kScratchRegister, new_space_allocation_top);
movq(Operand(kScratchRegister, 0), result_end);
} else {
movq(Operand(scratch, 0), result_end);
}
}
}
void MacroAssembler::AllocateObjectInNewSpace(
int object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
lea(result_end, Operand(result, object_size));
movq(kScratchRegister, new_space_allocation_limit);
cmpq(result_end, Operand(kScratchRegister, 0));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::AllocateObjectInNewSpace(
int header_size,
ScaleFactor element_size,
Register element_count,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
lea(result_end, Operand(result, element_count, element_size, header_size));
movq(kScratchRegister, new_space_allocation_limit);
cmpq(result_end, Operand(kScratchRegister, 0));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::AllocateObjectInNewSpace(
Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
if (!object_size.is(result_end)) {
movq(result_end, object_size);
}
addq(result_end, result);
movq(kScratchRegister, new_space_allocation_limit);
cmpq(result_end, Operand(kScratchRegister, 0));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Make sure the object has no tag before resetting top.
and_(object, Immediate(~kHeapObjectTagMask));
movq(kScratchRegister, new_space_allocation_top);
#ifdef DEBUG
cmpq(object, Operand(kScratchRegister, 0));
Check(below, "Undo allocation of non allocated memory");
#endif
movq(Operand(kScratchRegister, 0), object);
}
} } // namespace v8::internal

53
deps/v8/src/x64/macro-assembler-x64.h

@@ -66,6 +66,10 @@ class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
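These three helpers read values out of the heap's root list instead of embedding handles in generated code. A hedged sketch of the idea behind CompareRoot, assuming r13 is pinned to the roots array (the register-allocator change later in this diff reserves r13 for exactly that); the body and operand arithmetic are illustrative, not copied from the implementation:

// Hypothetical body: compare a register against root number `index`.
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  // r13 assumed to hold the address of Heap::roots_.
  cmpq(with, Operand(r13, static_cast<int>(index) * kPointerSize));
}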
// ---------------------------------------------------------------------------
// GC Support
@@ -222,6 +226,48 @@ class MacroAssembler: public Assembler {
Label* miss);
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space. If the new space is exhausted control
// continues at the gc_required label. The allocated object is returned in
// result and end of the new object is returned in result_end. The register
// scratch can be passed as no_reg in which case an additional object
// reference will be added to the reloc info. The returned pointers in result
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
// AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
void AllocateObjectInNewSpace(int object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
void AllocateObjectInNewSpace(int header_size,
ScaleFactor element_size,
Register element_count,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
void AllocateObjectInNewSpace(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
// object(s) no longer allocated, as they would be invalid when allocation is
// undone.
void UndoAllocationInNewSpace(Register object);
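A minimal sketch of how a stub is expected to drive this API; the register choices and the 16-byte size are illustrative, and the tagging step mirrors what the construct stub later in this diff does:

Label gc_required;
// Reserve two pointer-sized words in new space: rax gets the untagged start,
// rbx the end of the new object, rcx serves as scratch.
__ AllocateObjectInNewSpace(2 * kPointerSize, rax, rbx, rcx,
                            &gc_required, false);
// ... initialize fields through Operand(rax, offset) ...
__ or_(rax, Immediate(kHeapObjectTag));  // finally tag the result as a heap object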
// ---------------------------------------------------------------------------
// Support functions.
@@ -341,6 +387,13 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register result_end,
Register scratch,
bool result_contains_top_on_entry);
void UpdateAllocationTopHelper(Register result_end, Register scratch);
};

78
deps/v8/src/x64/regexp-macro-assembler-x64.cc

@@ -39,6 +39,8 @@
namespace v8 {
namespace internal {
#ifdef V8_NATIVE_REGEXP
/*
* This assembler uses the following register assignment convention
* - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
@@ -110,6 +112,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
success_label_(),
backtrack_label_(),
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code when we know more.
__ bind(&start_label_); // And then continue from here.
}
@@ -319,7 +322,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
} else {
ASSERT(mode_ == UC16);
// Save important/volatile registers before calling C function.
#ifndef _WIN64
// Callee save on Win64
__ push(rsi);
__ push(rdi);
@@ -333,7 +336,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
__ lea(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
@@ -350,13 +353,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Set byte_length.
__ movq(rdx, rbx);
#endif
ExternalReference compare =
    ExternalReference::re_case_insensitive_compare_uc16();
CallCFunction(compare, num_arguments);
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_->CodeObject());
__ pop(backtrack_stackpointer());
#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
#endif
@@ -604,7 +608,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
#ifdef _WIN64
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
// Store register parameters in pre-allocated stack slots,
__ movq(Operand(rbp, kInputString), rcx);
@@ -740,7 +744,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Exit and return rax
__ bind(&exit_label_);
#ifdef _WIN64
// Restore callee save registers.
__ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ pop(rbx);
@@ -794,7 +798,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label grow_failed;
// Save registers before calling C function
#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
__ push(rsi);
__ push(rdi);
@@ -803,16 +807,17 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
int num_arguments = 2;
FrameAlign(num_arguments);
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx.
// First argument, backtrack stackpointer, is already in rcx.
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
#else
// AMD64 ABI passes parameters in rdi, rsi.
__ movq(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
#endif
ExternalReference grow_stack = ExternalReference::re_grow_stack();
CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testq(rax, rax);
@@ -821,7 +826,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_->CodeObject());
#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
#endif
@@ -889,7 +894,9 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
int characters) {
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
  CheckPosition(cp_offset + characters - 1, on_end_of_input);
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
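With the new check_bounds guard, a caller that has already verified the input position can skip CheckPosition entirely. A hypothetical call site; the full parameter order is assumed from the base class declaration, which is not shown in this hunk:

// Bounds known to hold from an earlier check, so pass check_bounds == false:
LoadCurrentCharacter(0, &on_end_of_input, false, 1);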
@@ -980,7 +987,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// store anything volatile in a C call or overwritten by this function.
int num_arguments = 3;
FrameAlign(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
__ movq(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
@@ -997,7 +1004,9 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// return address).
__ lea(rdi, Operand(rsp, -kPointerSize));
#endif
ExternalReference stack_check =
    ExternalReference::re_check_stack_guard_state();
CallCFunction(stack_check, num_arguments);
}
@@ -1080,23 +1089,6 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
}
Address RegExpMacroAssemblerX64::GrowStack(Address stack_pointer,
Address* stack_base) {
size_t size = RegExpStack::stack_capacity();
Address old_stack_base = RegExpStack::stack_base();
ASSERT(old_stack_base == *stack_base);
ASSERT(stack_pointer <= old_stack_base);
ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
if (new_stack_base == NULL) {
return NULL;
}
*stack_base = new_stack_base;
intptr_t stack_content_size = old_stack_base - stack_pointer;
return new_stack_base - stack_content_size;
}
Operand RegExpMacroAssemblerX64::register_location(int register_index) {
ASSERT(register_index < (1<<30));
if (num_registers_ <= register_index) {
@@ -1242,10 +1234,10 @@ void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
// (on Win64 only) and the original value of rsp.
__ movq(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frameAlignment));
#ifdef _WIN64
// Allocate space for parameters and old rsp.
__ subq(rsp, Immediate((num_arguments + 1) * kPointerSize));
__ and_(rsp, Immediate(-frameAlignment));
__ movq(Operand(rsp, num_arguments * kPointerSize), kScratchRegister);
#else
// Allocate space for old rsp.
@@ -1256,17 +1248,16 @@ void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
}
void RegExpMacroAssemblerX64::CallCFunction(ExternalReference function,
                                            int num_arguments) {
__ movq(rax, function);
__ call(rax);
ASSERT(OS::ActivationFrameAlignment() != 0);
#ifdef _WIN64
__ movq(rsp, Operand(rsp, num_arguments * kPointerSize));
#else
// All arguments passed in registers.
ASSERT(num_arguments <= 6);
__ pop(rsp);
#endif
}
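FrameAlign and CallCFunction are designed as a pair: FrameAlign aligns rsp and stashes the original value in the slot that CallCFunction pops (or reloads, on Win64) afterwards. A hedged sketch of a call site; the argument registers follow the two ABIs discussed above, and arg_one/arg_two are placeholders rather than names from this diff:

int num_arguments = 2;
FrameAlign(num_arguments);   // align rsp, save the original rsp
#ifdef _WIN64
__ movq(rcx, arg_one);       // Win64 passes arguments in rcx, rdx, r8, r9
__ movq(rdx, arg_two);
#else
__ movq(rdi, arg_one);       // AMD64 ABI passes them in rdi, rsi, rdx, rcx, r8, r9
__ movq(rsi, arg_two);
#endif
CallCFunction(ExternalReference::re_grow_stack(), num_arguments);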
@@ -1297,5 +1288,12 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
}
void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
__ int3(); // Unused on x64.
}
#undef __
#endif // V8_NATIVE_REGEXP
}} // namespace v8::internal

27
deps/v8/src/x64/regexp-macro-assembler-x64.h

@@ -31,6 +31,8 @@
namespace v8 {
namespace internal {
#ifdef V8_NATIVE_REGEXP
class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerX64(Mode mode, int registers_to_save);
@@ -113,6 +115,13 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
int* output,
bool at_start);
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame);
private:
// Offsets from rbp of function parameters and stored registers.
static const int kFramePointer = 0;
@@ -181,23 +190,9 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame);
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState();
// Called from RegExp if the backtrack stack limit is hit.
// Tries to expand the stack. Returns the new stack-pointer if
// successful, and updates the stack_top address, or returns 0 if unable
// to grow the stack.
// This function must not trigger a garbage collection.
static Address GrowStack(Address stack_pointer, Address* stack_top);
// The rbp-relative location of a regexp register.
Operand register_location(int register_index);
@@ -264,7 +259,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// by FrameAlign. The called function is not allowed to trigger a garbage
// collection, since that might move the code and invalidate the return
// address (unless this is somehow accounted for by the called function).
inline void CallCFunction(ExternalReference function, int num_arguments);
MacroAssembler* masm_;
@@ -290,6 +285,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
#endif // V8_NATIVE_REGEXP
}} // namespace v8::internal
#endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_

20
deps/v8/src/x64/register-allocator-x64-inl.h

@@ -51,18 +51,18 @@ int RegisterAllocator::ToNumber(Register reg) {
2, // rcx
3, // rdx
1, // rbx
-1, // rsp Stack pointer.
-1, // rbp Frame pointer.
-1, // rsi Context.
4, // rdi
5, // r8
6, // r9
-1, // r10 Scratch register.
9, // r11
10, // r12
-1, // r13 Roots array. This is callee saved.
7, // r14
8 // r15
};
return kNumbers[reg.code()];
}
@@ -71,7 +71,7 @@ int RegisterAllocator::ToNumber(Register reg) {
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] =
{ rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
return kRegisters[num];
}
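Since ToNumber and ToRegister must stay exact inverses of each other, a quick cctest-style round-trip check (hypothetical, not part of this diff) documents the invariant:

TEST(RegisterAllocatorMapping) {
  // Every allocatable register number must map to a register and back.
  for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
    Register reg = RegisterAllocator::ToRegister(i);
    CHECK_EQ(i, RegisterAllocator::ToNumber(reg));
  }
}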

4
deps/v8/src/x64/register-allocator-x64.h

@@ -33,9 +33,7 @@ namespace internal {
class RegisterAllocatorConstants : public AllStatic {
public:
static const int kNumRegisters = 11;
static const int kInvalidRegister = -1;
};

5
deps/v8/src/x64/simulator-x64.h

@@ -45,4 +45,9 @@
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
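On x64 there is no simulator, so the macro reduces to a direct call; any entry point taking seven int/pointer-sized arguments and returning int fits. A hypothetical sketch of the shape (the real matcher's parameter types live in regexp-macro-assembler-x64.cc, not here):

// Hypothetical signature matching the seven-argument contract above.
typedef int (*RegExpEntry)(void* p0, int p1, void* p2, void* p3,
                           int* p4, int p5, void* p6);
// Given a RegExpEntry `entry` and arguments p0..p6:
int result = CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6);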
#endif // V8_X64_SIMULATOR_X64_H_

142
deps/v8/src/x64/stub-cache-x64.cc

@@ -434,7 +434,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
holder_obj);
Label interceptor_failed;
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
@@ -612,7 +612,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ pop(receiver); // restore holder
__ LeaveInternalFrame();
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
Label invoke;
__ j(not_equal, &invoke);
@@ -755,9 +755,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case BOOLEAN_CHECK: {
Label fast;
// Check that the object is a boolean.
__ CompareRoot(rdx, Heap::kTrueValueRootIndex);
__ j(equal, &fast);
__ CompareRoot(rdx, Heap::kFalseValueRootIndex);
__ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
@@ -1125,10 +1125,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
} else if (FLAG_debug_code) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
@@ -1738,6 +1738,136 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
Object* ConstructStubCompiler::CompileConstructStub(
SharedFunctionInfo* shared) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
Label generic_stub_call;
// Use r8 for holding undefined which is used in several places below.
__ Move(r8, Factory::undefined_value());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check to see whether there are any break points in the function code. If
// there are jump to the generic constructor stub which calls the actual
// code for the function thereby hitting the break points.
__ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
__ cmpq(rbx, r8);
__ j(not_equal, &generic_stub_call);
#endif
// Load the initial map and verify that it is in fact a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// The zero test below catches both a NULL pointer and a Smi.
__ testq(rbx, Immediate(kSmiTagMask));
__ j(zero, &generic_stub_call);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
__ j(not_equal, &generic_stub_call);
#ifdef DEBUG
// Cannot construct functions this way.
// rdi: constructor
// rbx: initial map
__ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
__ Assert(not_equal, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject in new space.
// rdi: constructor
// rbx: initial map
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
// Make sure that the maximum heap object size will never cause us
// problems here.
ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
__ AllocateObjectInNewSpace(rcx, rdx, rcx, no_reg, &generic_stub_call, false);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// rbx: initial map
// rdx: JSObject (untagged)
__ movq(Operand(rdx, JSObject::kMapOffset), rbx);
__ Move(rbx, Factory::empty_fixed_array());
__ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
__ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
// rax: argc
// rdx: JSObject (untagged)
// Load the address of the first in-object property into r9.
__ lea(r9, Operand(rdx, JSObject::kHeaderSize));
// Calculate the location of the first argument. The stack contains only the
// return address on top of the argc arguments.
__ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
// rax: argc
// rcx: first argument
// rdx: JSObject (untagged)
// r8: undefined
// r9: first in-object property of the JSObject
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed;
// Set the property to undefined.
__ movq(Operand(r9, i * kPointerSize), r8);
// Check if the argument assigned to the property is actually passed.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
__ cmpq(rax, Immediate(arg_number));
__ j(below_equal, &not_passed);
// Argument passed - find it on the stack.
__ movq(rbx, Operand(rcx, arg_number * -kPointerSize));
__ movq(Operand(r9, i * kPointerSize), rbx);
__ bind(&not_passed);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
__ Move(Operand(r9, i * kPointerSize), constant);
}
}
// Fill the unused in-object property fields with undefined.
for (int i = shared->this_property_assignments_count();
i < shared->CalculateInObjectProperties();
i++) {
__ movq(Operand(r9, i * kPointerSize), r8);
}
// rax: argc
// rdx: JSObject (untagged)
// Move argc to rbx and the JSObject to return to rax and tag it.
__ movq(rbx, rax);
__ movq(rax, rdx);
__ or_(rax, Immediate(kHeapObjectTag));
// rax: JSObject
// rbx: argc
// Remove caller arguments and receiver from the stack and return.
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
__ IncrementCounter(&Counters::constructed_objects, 1);
__ IncrementCounter(&Counters::constructed_objects_stub, 1);
__ ret(0);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
}
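For orientation, the kind of constructor this stub can specialize is one whose body is nothing but this.<name> = <constant or argument> assignments. A hedged cctest-style illustration (LocalContext and CompileRun are existing test helpers; the script itself is the point):

v8::HandleScope scope;
LocalContext env;
// Every statement is a simple this.x = ... assignment, so the specialized
// construct stub applies; anything else falls back to the generic stub.
CompileRun("function Point(x, y) { this.x = x; this.y = y; this.z = 0; }"
           "var p = new Point(1, 2);");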
#undef __
} } // namespace v8::internal

8
deps/v8/src/x64/virtual-frame-x64.cc

@@ -205,6 +205,14 @@ void VirtualFrame::EmitPush(Handle<Object> value) {
}
void VirtualFrame::EmitPush(Heap::RootListIndex index) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ PushRoot(index);
}
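A hypothetical use inside the x64 code generator, pushing undefined without materializing a handle (the root index name comes from heap.h, not this diff):

frame_->EmitPush(Heap::kUndefinedValueRootIndex);  // emits a single PushRoot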
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);

1
deps/v8/src/x64/virtual-frame-x64.h

@@ -375,6 +375,7 @@ class VirtualFrame : public ZoneObject {
// corresponding push instruction.
void EmitPush(Register reg);
void EmitPush(const Operand& operand);
void EmitPush(Heap::RootListIndex index);
void EmitPush(Immediate immediate);
// Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value);

4
deps/v8/test/cctest/cctest.status

@@ -50,6 +50,10 @@ test-api/RegExpInterruption: SKIP
test-api/OutOfMemory: SKIP
test-api/OutOfMemoryNested: SKIP
# BUG(432): Fail on ARM hardware.
test-regexp/MacroAssemblerNativeSimple: PASS || FAIL
test-regexp/MacroAssemblerNativeSimpleUC16: PASS || FAIL
# BUG(355): Test crashes on ARM.
test-log/ProfLazyMode: SKIP

61
deps/v8/test/cctest/test-api.cc

@@ -2673,40 +2673,67 @@ THREADED_TEST(SimpleExtensions) {
}
static const char* kEvalExtensionSource1 =
"function UseEval1() {"
" var x = 42;"
" return eval('x');"
"}";
static const char* kEvalExtensionSource2 =
"(function() {"
" var x = 42;"
" function e() {"
" return eval('x');"
" }"
" this.UseEval2 = e;"
"})()";
THREADED_TEST(UseEvalFromExtension) {
v8::HandleScope handle_scope;
v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1));
v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
const char* extension_names[] = { "evaltest1", "evaltest2" };
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Handle<Context> context = Context::New(&extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
CHECK_EQ(result, v8::Integer::New(42));
result = Script::Compile(v8_str("UseEval2()"))->Run();
CHECK_EQ(result, v8::Integer::New(42));
}
static const char* kWithExtensionSource1 =
"function UseWith1() {"
" var x = 42;"
" with({x:87}) { return x; }"
"}";
static const char* kWithExtensionSource2 =
"(function() {"
" var x = 42;"
" function e() {"
" with ({x:87}) { return x; }"
" }"
" this.UseWith2 = e;"
"})()";
THREADED_TEST(UseWithFromExtension) {
v8::HandleScope handle_scope;
v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1));
v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
const char* extension_names[] = { "withtest1", "withtest2" };
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Handle<Context> context = Context::New(&extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
CHECK_EQ(result, v8::Integer::New(87));
result = Script::Compile(v8_str("UseWith2()"))->Run();
CHECK_EQ(result, v8::Integer::New(87));
}
@@ -7815,6 +7842,7 @@ THREADED_TEST(PixelArray) {
free(pixel_data);
}
THREADED_TEST(ScriptContextDependence) {
v8::HandleScope scope;
LocalContext c1;
@@ -7830,6 +7858,7 @@ THREADED_TEST(ScriptContextDependence) {
CHECK_EQ(indep->Run()->Int32Value(), 101);
}
THREADED_TEST(StackTrace) {
v8::HandleScope scope;
LocalContext context;
@@ -7842,3 +7871,11 @@ THREADED_TEST(StackTrace) {
v8::String::Utf8Value stack(try_catch.StackTrace());
CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
}
// Test that idle notification can be handled when V8 has not yet been
// set up.
THREADED_TEST(IdleNotification) {
for (int i = 0; i < 100; i++) v8::V8::IdleNotification(true);
for (int i = 0; i < 100; i++) v8::V8::IdleNotification(false);
}

4
deps/v8/test/cctest/test-assembler-arm.cc

@@ -185,7 +185,7 @@ TEST(3) {
Label L, C;
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ mov(r4, Operand(r0));
__ ldr(r0, MemOperand(r4, OFFSET_OF(T, i)));
@@ -199,7 +199,7 @@ TEST(3) {
__ add(r0, r2, Operand(r0));
__ mov(r2, Operand(r2, ASR, 3));
__ strh(r2, MemOperand(r4, OFFSET_OF(T, s)));
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);

2
deps/v8/test/cctest/test-heap.cc

@@ -179,7 +179,7 @@ TEST(HeapObjects) {
TEST(Tagging) {
InitializeVM();
int request = 24;
CHECK_EQ(request, static_cast<int>(OBJECT_SIZE_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi());
CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());

6
deps/v8/test/cctest/test-log-stack-tracer.cc

@@ -336,8 +336,10 @@ static void CFuncDoTrace() {
#elif defined _MSC_VER && defined V8_TARGET_ARCH_IA32
__asm mov [fp], ebp // NOLINT
#elif defined _MSC_VER && defined V8_TARGET_ARCH_X64
// TODO(X64): __asm extension is not supported by the Microsoft Visual C++
// 64-bit compiler.
fp = 0;
UNIMPLEMENTED();
#endif
DoTrace(fp);
}

39
deps/v8/test/cctest/test-regexp.cc

@@ -40,6 +40,7 @@
#include "regexp-macro-assembler-irregexp.h"
#ifdef V8_NATIVE_REGEXP
#ifdef V8_TARGET_ARCH_ARM
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h" #include "arm/regexp-macro-assembler-arm.h"
#endif #endif
#ifdef V8_TARGET_ARCH_X64 #ifdef V8_TARGET_ARCH_X64
@ -605,11 +606,12 @@ TEST(DispatchTableConstruction) {
#ifdef V8_NATIVE_REGEXP #ifdef V8_NATIVE_REGEXP
#ifdef V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler; typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
#endif #elif V8_TARGET_ARCH_X64
#ifdef V8_TARGET_ARCH_X64
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler; typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#endif
class ContextInitializer {
@@ -845,7 +847,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
v8::V8::Initialize();
ContextInitializer initializer;
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -870,7 +872,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[4];
NativeRegExpMacroAssembler::Result result =
Execute(*code,
*input,
@@ -884,6 +886,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
CHECK_EQ(0, output[0]);
CHECK_EQ(2, output[1]);
CHECK_EQ(6, output[2]);
CHECK_EQ(-1, output[3]);
}
@@ -891,7 +894,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -918,7 +921,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[4];
NativeRegExpMacroAssembler::Result result =
Execute(*code,
*input,
@@ -932,6 +935,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
CHECK_EQ(0, output[0]);
CHECK_EQ(2, output[1]);
CHECK_EQ(6, output[2]);
CHECK_EQ(-1, output[3]);
}
@@ -1055,12 +1059,12 @@ TEST(MacroAssemblerNativeRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 6);
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
enum registers { out1, out2, out3, out4, out5, out6, sp, loop_cnt };
Label fail;
Label backtrack;
m.WriteCurrentPositionToRegister(out1, 0); // Output: [0]
@@ -1114,7 +1118,7 @@ TEST(MacroAssemblerNativeRegisters) {
m.GoTo(&loop3);
m.Bind(&exit_loop3);
m.PopCurrentPosition();
m.WriteCurrentPositionToRegister(out5, 0); // [0,3,6,9,9,-1]
m.Succeed();
@@ -1132,15 +1136,15 @@ TEST(MacroAssemblerNativeRegisters) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[6];
NativeRegExpMacroAssembler::Result result =
Execute(*code,
*input,
0,
start_adr,
start_adr + input->length(),
output,
true);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1148,6 +1152,7 @@ TEST(MacroAssemblerNativeRegisters) {
CHECK_EQ(6, output[2]);
CHECK_EQ(9, output[3]);
CHECK_EQ(9, output[4]);
CHECK_EQ(-1, output[5]);
}

60
deps/v8/test/cctest/test-thread-termination.cc

@@ -193,3 +193,63 @@ TEST(TerminateMultipleV8Threads) {
delete semaphore;
semaphore = NULL;
}
int call_count = 0;
v8::Handle<v8::Value> TerminateOrReturnObject(const v8::Arguments& args) {
if (++call_count == 10) {
v8::V8::TerminateExecution();
return v8::Undefined();
}
v8::Local<v8::Object> result = v8::Object::New();
result->Set(v8::String::New("x"), v8::Integer::New(42));
return result;
}
v8::Handle<v8::Value> LoopGetProperty(const v8::Arguments& args) {
v8::TryCatch try_catch;
v8::Script::Compile(v8::String::New("function f() {"
" try {"
" while(true) {"
" terminate_or_return_object().x;"
" }"
" fail();"
" } catch(e) {"
" fail();"
" }"
"}"
"f()"))->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
return v8::Undefined();
}
// Test that we correctly handle termination exceptions if they are
// triggered by the creation of error objects in connection with ICs.
TEST(TerminateLoadICException) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
global->Set(v8::String::New("terminate_or_return_object"),
v8::FunctionTemplate::New(TerminateOrReturnObject));
global->Set(v8::String::New("fail"), v8::FunctionTemplate::New(Fail));
global->Set(v8::String::New("loop"),
v8::FunctionTemplate::New(LoopGetProperty));
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
v8::Context::Scope context_scope(context);
// Run a loop that will be infinite if thread termination does not work.
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
call_count = 0;
v8::Script::Compile(source)->Run();
// Test that we can run the code again after thread termination.
call_count = 0;
v8::Script::Compile(source)->Run();
context.Dispose();
}

2
deps/v8/test/cctest/test-utils.cc

@@ -158,7 +158,7 @@ TEST(Utils1) {
// int8_t and intptr_t signed integers.
CHECK_EQ(-2, -8 >> 2);
CHECK_EQ(-2, static_cast<int8_t>(-8) >> 2);
CHECK_EQ(-2, static_cast<int>(static_cast<intptr_t>(-8) >> 2));
}

4
deps/v8/test/mjsunit/debug-stepin-constructor.js

@@ -59,6 +59,10 @@ function f() {
break_break_point_hit_count = 0;
f();
assertEquals(5, break_break_point_hit_count);
f();
assertEquals(10, break_break_point_hit_count);
f();
assertEquals(15, break_break_point_hit_count);
// Test step into constructor with builtin constructor.
function g() {

Some files were not shown because too many files changed in this diff
