
Upgrade V8 to 2.2.23

v0.7.4-release
Ryan Dahl, 15 years ago
commit facb904c5d
85 changed files (lines changed per file):
1. 36  deps/v8/ChangeLog
2. 388  deps/v8/include/v8.h
3. 10  deps/v8/src/api.h
4. 10  deps/v8/src/arm/assembler-arm.cc
5. 4  deps/v8/src/arm/assembler-arm.h
6. 679  deps/v8/src/arm/codegen-arm.cc
7. 5  deps/v8/src/arm/codegen-arm.h
8. 9  deps/v8/src/arm/disasm-arm.cc
9. 8  deps/v8/src/arm/full-codegen-arm.cc
10. 179  deps/v8/src/arm/ic-arm.cc
11. 61  deps/v8/src/arm/macro-assembler-arm.cc
12. 28  deps/v8/src/arm/macro-assembler-arm.h
13. 9  deps/v8/src/arm/simulator-arm.cc
14. 3  deps/v8/src/arm/stub-cache-arm.cc
15. 5  deps/v8/src/builtins.cc
16. 1  deps/v8/src/builtins.h
17. 14  deps/v8/src/codegen.cc
18. 25  deps/v8/src/codegen.h
19. 194  deps/v8/src/date.js
20. 16  deps/v8/src/debug-debugger.js
21. 32  deps/v8/src/debug.cc
22. 22  deps/v8/src/debug.h
23. 6  deps/v8/src/factory.cc
24. 12  deps/v8/src/factory.h
25. 2  deps/v8/src/frames.cc
26. 6  deps/v8/src/globals.h
27. 31  deps/v8/src/heap.cc
28. 248  deps/v8/src/ia32/codegen-ia32.cc
29. 8  deps/v8/src/ia32/full-codegen-ia32.cc
30. 201  deps/v8/src/ia32/ic-ia32.cc
31. 129  deps/v8/src/ia32/macro-assembler-ia32.cc
32. 39  deps/v8/src/ia32/macro-assembler-ia32.h
33. 294  deps/v8/src/ia32/stub-cache-ia32.cc
34. 33  deps/v8/src/ic-inl.h
35. 157  deps/v8/src/ic.cc
36. 12  deps/v8/src/ic.h
37. 169  deps/v8/src/liveedit-debugger.js
38. 20  deps/v8/src/liveedit.cc
39. 5  deps/v8/src/macros.py
40. 4  deps/v8/src/messages.js
41. 68  deps/v8/src/objects-inl.h
42. 59  deps/v8/src/objects.cc
43. 57  deps/v8/src/objects.h
44. 17  deps/v8/src/parser.cc
45. 93  deps/v8/src/rewriter.cc
46. 157  deps/v8/src/runtime.cc
47. 1  deps/v8/src/runtime.h
48. 4  deps/v8/src/spaces.cc
49. 124  deps/v8/src/stub-cache.cc
50. 36  deps/v8/src/stub-cache.h
51. 4  deps/v8/src/v8-counters.h
52. 39  deps/v8/src/v8dll-main.cc
53. 25  deps/v8/src/v8natives.js
54. 2  deps/v8/src/version.cc
55. 4  deps/v8/src/x64/builtins-x64.cc
56. 322  deps/v8/src/x64/codegen-x64.cc
57. 3  deps/v8/src/x64/codegen-x64.h
58. 4  deps/v8/src/x64/frames-x64.h
59. 8  deps/v8/src/x64/full-codegen-x64.cc
60. 194  deps/v8/src/x64/ic-x64.cc
61. 232  deps/v8/src/x64/macro-assembler-x64.cc
62. 47  deps/v8/src/x64/macro-assembler-x64.h
63. 11  deps/v8/src/x64/register-allocator-x64-inl.h
64. 2  deps/v8/src/x64/register-allocator-x64.h
65. 3  deps/v8/src/x64/stub-cache-x64.cc
66. 7  deps/v8/src/x64/virtual-frame-x64.h
67. 84  deps/v8/test/cctest/test-api.cc
68. 35  deps/v8/test/cctest/test-debug.cc
69. 5  deps/v8/test/cctest/test-disasm-arm.cc
70. 153  deps/v8/test/cctest/test-macro-assembler-x64.cc
71. 11  deps/v8/test/es5conform/es5conform.status
72. 51  deps/v8/test/mjsunit/call-stub.js
73. 9  deps/v8/test/mjsunit/date.js
74. 3  deps/v8/test/mjsunit/debug-liveedit-3.js
75. 3  deps/v8/test/mjsunit/debug-liveedit-breakpoints.js
76. 3  deps/v8/test/mjsunit/debug-liveedit-newsource.js
77. 2  deps/v8/test/mjsunit/fuzz-natives.js
78. 11  deps/v8/test/mjsunit/math-min-max.js
79. 129  deps/v8/test/mjsunit/math-pow.js
80. 157  deps/v8/test/mjsunit/object-prevent-extensions.js
81. 65  deps/v8/test/mjsunit/store-dictionary.js
82. 57  deps/v8/test/mjsunit/string-replace-with-empty.js
83. 33  deps/v8/test/mjsunit/value-of.js
84. 57  deps/v8/tools/gyp/v8.gyp
85. 30  deps/v8/tools/js2c.py

deps/v8/ChangeLog (36 lines changed)

@@ -1,3 +1,28 @@
2010-07-07: Version 2.2.23
API change: Convert Unicode code points outside the basic multilingual
plane to the replacement character. Previous behavior was to silently
truncate the value to 16 bits.
Fixed crash: handle all flat string types in regexp replace.
Prevent invalid pre-parsing data passed in through the API from
crashing V8.
Performance improvements on all platforms.
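A minimal embedder-side sketch of the API change above (assumes an initialized VM with an entered context; uses only v8.h calls that appear later in this diff). The UTF-8 bytes encode U+1D11E, which lies outside the basic multilingual plane, so 2.2.23 decodes it to U+FFFD where older releases truncated it to 16 bits:

    // Sketch only; error handling omitted.
    const char utf8_data[] = "\xF0\x9D\x84\x9E";  // U+1D11E, musical G clef
    v8::HandleScope scope;
    v8::Handle<v8::String> str = v8::String::New(utf8_data);
    uint16_t buffer[2];
    str->Write(buffer);  // expected: buffer[0] == 0xFFFD (replacement char)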
2010-07-05: Version 2.2.22
Added ES5 Object.isExtensible and Object.preventExtensions.
Enabled building V8 as a DLL.
Fixed a bug in date code where -0 was not interpreted as 0
(issue 736).
Performance improvements on all platforms.
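A short hedged sketch of the new ES5 builtins from the entry above, driven through the embedding API of this era (assumes an initialized VM; the script source is illustrative):

    v8::HandleScope handle_scope;
    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope context_scope(context);
    v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(
        "var o = {};"
        "Object.preventExtensions(o);"
        "o.x = 1;"  // ignored: o is no longer extensible
        "!Object.isExtensible(o) && o.x === undefined"));
    v8::Handle<v8::Value> result = script->Run();  // expect: true
    context.Dispose();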
2010-06-30: Version 2.2.21
Fix bug in externalizing some ASCII strings (Chromium issue 47824).
@@ -13,18 +38,21 @@
Provide actual breakpoints locations in response to setBreakpoint
and listBreakpoints requests.
2010-06-28: Version 2.2.20
Fix bug with for-in on x64 platform (issue 748).
Fix crash bug on x64 platform (issue 756).
Fix bug in Object.getOwnPropertyNames. (chromium issue 41243).
Fix a bug on ARM that caused the result of 1 << x to be
miscalculated for some inputs.
Performance improvements on all platforms.
2010-06-23: Version 2.2.19
Fix bug that causes the build to break when profillingsupport=off
@@ -64,11 +92,11 @@
2010-06-09: Version 2.2.16
Removed the SetExternalStringDiposeCallback API. Changed the
disposal of external string resources to call a virtual Dispose
method on the resource.
Added support for more precise break points when debugging and
stepping.
Memory usage improvements on all platforms.

deps/v8/include/v8.h (388 lines changed)

@@ -61,10 +61,6 @@ typedef unsigned __int64 uint64_t;
// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
// static library or building a program which uses the V8 static library neither
// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
// The reason for having both V8EXPORT and V8EXPORT_INLINE is that classes which
// have their code inside this header file need to have __declspec(dllexport)
// when building the DLL but cannot have __declspec(dllimport) when building
// a program which uses the DLL.
#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
build configuration to ensure that at most one of these is set
@@ -72,13 +68,10 @@ typedef unsigned __int64 uint64_t;
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __declspec(dllexport)
#define V8EXPORT_INLINE __declspec(dllexport)
#elif USING_V8_SHARED
#define V8EXPORT __declspec(dllimport)
#define V8EXPORT_INLINE
#else
#define V8EXPORT
#define V8EXPORT_INLINE
#endif // BUILDING_V8_SHARED
#else // _WIN32
@@ -90,10 +83,8 @@ typedef unsigned __int64 uint64_t;
// export symbols when we are building a static library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#define V8EXPORT_INLINE __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#define V8EXPORT_INLINE
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
#endif // _WIN32
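The three hunks above remove V8EXPORT_INLINE entirely: header-only classes such as Handle<T> now carry no DLL attribute, and only out-of-line entry points keep V8EXPORT. A hedged sketch of the embedder side on Windows (the define is the one named in the comments above):

    // Translation unit in a program linking against v8.dll.
    #define USING_V8_SHARED   // V8EXPORT expands to __declspec(dllimport)
    #include <v8.h>
    // Out-of-line calls such as v8::Value::IsUndefined() are imported from
    // the DLL; fully inline classes like v8::Handle<T> now compile directly
    // into the embedder with no import/export decoration at all.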
@@ -185,7 +176,7 @@ typedef void (*WeakReferenceCallback)(Persistent<Value> object,
* behind the scenes and the same rules apply to these values as to
* their handles.
*/
template <class T> class V8EXPORT_INLINE Handle {
template <class T> class Handle {
public:
/**
@@ -196,7 +187,7 @@ template <class T> class V8EXPORT_INLINE Handle {
/**
* Creates a new handle for the specified value.
*/
explicit Handle(T* val) : val_(val) { }
inline explicit Handle(T* val) : val_(val) { }
/**
* Creates a handle for the contents of the specified handle. This
@@ -221,16 +212,16 @@ template <class T> class V8EXPORT_INLINE Handle {
/**
* Returns true if the handle is empty.
*/
bool IsEmpty() const { return val_ == 0; }
inline bool IsEmpty() const { return val_ == 0; }
T* operator->() const { return val_; }
inline T* operator->() const { return val_; }
T* operator*() const { return val_; }
inline T* operator*() const { return val_; }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
void Clear() { this->val_ = 0; }
inline void Clear() { this->val_ = 0; }
/**
* Checks whether two handles are the same.
@@ -238,7 +229,7 @@ template <class T> class V8EXPORT_INLINE Handle {
* to which they refer are identical.
* The handles' references are not checked.
*/
template <class S> bool operator==(Handle<S> that) const {
template <class S> inline bool operator==(Handle<S> that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
@@ -252,7 +243,7 @@ template <class T> class V8EXPORT_INLINE Handle {
* the objects to which they refer are different.
* The handles' references are not checked.
*/
template <class S> bool operator!=(Handle<S> that) const {
template <class S> inline bool operator!=(Handle<S> that) const {
return !operator==(that);
}
@@ -281,7 +272,7 @@ template <class T> class V8EXPORT_INLINE Handle {
* handle scope are destroyed when the handle scope is destroyed. Hence it
* is not necessary to explicitly deallocate local handles.
*/
template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
template <class T> class Local : public Handle<T> {
public:
inline Local();
template <class S> inline Local(Local<S> that)
@@ -332,7 +323,7 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
* different storage cells but rather two references to the same
* storage cell.
*/
template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
template <class T> class Persistent : public Handle<T> {
public:
/**
@@ -563,11 +554,12 @@ class V8EXPORT ScriptData { // NOLINT
/**
* The origin, within a file, of a script.
*/
class V8EXPORT ScriptOrigin {
class ScriptOrigin {
public:
ScriptOrigin(Handle<Value> resource_name,
Handle<Integer> resource_line_offset = Handle<Integer>(),
Handle<Integer> resource_column_offset = Handle<Integer>())
inline ScriptOrigin(
Handle<Value> resource_name,
Handle<Integer> resource_line_offset = Handle<Integer>(),
Handle<Integer> resource_column_offset = Handle<Integer>())
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset) { }
@@ -841,30 +833,30 @@ class V8EXPORT StackFrame {
/**
* The superclass of all JavaScript values and objects.
*/
class V8EXPORT Value : public Data {
class Value : public Data {
public:
/**
* Returns true if this value is the undefined value. See ECMA-262
* 4.3.10.
*/
bool IsUndefined() const;
V8EXPORT bool IsUndefined() const;
/**
* Returns true if this value is the null value. See ECMA-262
* 4.3.11.
*/
bool IsNull() const;
V8EXPORT bool IsNull() const;
/**
* Returns true if this value is true.
*/
bool IsTrue() const;
V8EXPORT bool IsTrue() const;
/**
* Returns true if this value is false.
*/
bool IsFalse() const;
V8EXPORT bool IsFalse() const;
/**
* Returns true if this value is an instance of the String type.
@@ -875,92 +867,92 @@ class V8EXPORT Value : public Data {
/**
* Returns true if this value is a function.
*/
bool IsFunction() const;
V8EXPORT bool IsFunction() const;
/**
* Returns true if this value is an array.
*/
bool IsArray() const;
V8EXPORT bool IsArray() const;
/**
* Returns true if this value is an object.
*/
bool IsObject() const;
V8EXPORT bool IsObject() const;
/**
* Returns true if this value is boolean.
*/
bool IsBoolean() const;
V8EXPORT bool IsBoolean() const;
/**
* Returns true if this value is a number.
*/
bool IsNumber() const;
V8EXPORT bool IsNumber() const;
/**
* Returns true if this value is external.
*/
bool IsExternal() const;
V8EXPORT bool IsExternal() const;
/**
* Returns true if this value is a 32-bit signed integer.
*/
bool IsInt32() const;
V8EXPORT bool IsInt32() const;
/**
* Returns true if this value is a 32-bit unsigned integer.
*/
bool IsUint32() const;
V8EXPORT bool IsUint32() const;
/**
* Returns true if this value is a Date.
*/
bool IsDate() const;
V8EXPORT bool IsDate() const;
Local<Boolean> ToBoolean() const;
Local<Number> ToNumber() const;
Local<String> ToString() const;
Local<String> ToDetailString() const;
Local<Object> ToObject() const;
Local<Integer> ToInteger() const;
Local<Uint32> ToUint32() const;
Local<Int32> ToInt32() const;
V8EXPORT Local<Boolean> ToBoolean() const;
V8EXPORT Local<Number> ToNumber() const;
V8EXPORT Local<String> ToString() const;
V8EXPORT Local<String> ToDetailString() const;
V8EXPORT Local<Object> ToObject() const;
V8EXPORT Local<Integer> ToInteger() const;
V8EXPORT Local<Uint32> ToUint32() const;
V8EXPORT Local<Int32> ToInt32() const;
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
*/
Local<Uint32> ToArrayIndex() const;
V8EXPORT Local<Uint32> ToArrayIndex() const;
bool BooleanValue() const;
double NumberValue() const;
int64_t IntegerValue() const;
uint32_t Uint32Value() const;
int32_t Int32Value() const;
V8EXPORT bool BooleanValue() const;
V8EXPORT double NumberValue() const;
V8EXPORT int64_t IntegerValue() const;
V8EXPORT uint32_t Uint32Value() const;
V8EXPORT int32_t Int32Value() const;
/** JS == */
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
V8EXPORT bool Equals(Handle<Value> that) const;
V8EXPORT bool StrictEquals(Handle<Value> that) const;
private:
inline bool QuickIsString() const;
bool FullIsString() const;
V8EXPORT bool FullIsString() const;
};
/**
* The superclass of primitive values. See ECMA-262 4.3.2.
*/
class V8EXPORT Primitive : public Value { };
class Primitive : public Value { };
/**
* A primitive boolean value (ECMA-262, 4.3.14). Either the true
* or false value.
*/
class V8EXPORT Boolean : public Primitive {
class Boolean : public Primitive {
public:
bool Value() const;
V8EXPORT bool Value() const;
static inline Handle<Boolean> New(bool value);
};
@@ -968,19 +960,19 @@ class V8EXPORT Boolean : public Primitive {
/**
* A JavaScript string value (ECMA-262, 4.3.17).
*/
class V8EXPORT String : public Primitive {
class String : public Primitive {
public:
/**
* Returns the number of characters in this string.
*/
int Length() const;
V8EXPORT int Length() const;
/**
* Returns the number of bytes in the UTF-8 encoded
* representation of this string.
*/
int Utf8Length() const;
V8EXPORT int Utf8Length() const;
/**
* Write the contents of the string to an external buffer.
@@ -1007,33 +999,33 @@ class V8EXPORT String : public Primitive {
HINT_MANY_WRITES_EXPECTED = 1
};
int Write(uint16_t* buffer,
int start = 0,
int length = -1,
WriteHints hints = NO_HINTS) const; // UTF-16
int WriteAscii(char* buffer,
int start = 0,
int length = -1,
WriteHints hints = NO_HINTS) const; // ASCII
int WriteUtf8(char* buffer,
int length = -1,
int* nchars_ref = NULL,
WriteHints hints = NO_HINTS) const; // UTF-8
V8EXPORT int Write(uint16_t* buffer,
int start = 0,
int length = -1,
WriteHints hints = NO_HINTS) const; // UTF-16
V8EXPORT int WriteAscii(char* buffer,
int start = 0,
int length = -1,
WriteHints hints = NO_HINTS) const; // ASCII
V8EXPORT int WriteUtf8(char* buffer,
int length = -1,
int* nchars_ref = NULL,
WriteHints hints = NO_HINTS) const; // UTF-8
/**
* A zero length string.
*/
static v8::Local<v8::String> Empty();
V8EXPORT static v8::Local<v8::String> Empty();
/**
* Returns true if the string is external
*/
bool IsExternal() const;
V8EXPORT bool IsExternal() const;
/**
* Returns true if the string is both external and ascii
*/
bool IsExternalAscii() const;
V8EXPORT bool IsExternalAscii() const;
class V8EXPORT ExternalStringResourceBase {
public:
@@ -1124,7 +1116,7 @@ class V8EXPORT String : public Primitive {
* Get the ExternalAsciiStringResource for an external ascii string.
* Returns NULL if IsExternalAscii() doesn't return true.
*/
ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
static inline String* Cast(v8::Value* obj);
@@ -1137,19 +1129,20 @@ class V8EXPORT String : public Primitive {
* 'strlen' to determine the buffer length, it might be
* wrong if 'data' contains a null character.
*/
static Local<String> New(const char* data, int length = -1);
V8EXPORT static Local<String> New(const char* data, int length = -1);
/** Allocates a new string from utf16 data.*/
static Local<String> New(const uint16_t* data, int length = -1);
V8EXPORT static Local<String> New(const uint16_t* data, int length = -1);
/** Creates a symbol. Returns one if it exists already.*/
static Local<String> NewSymbol(const char* data, int length = -1);
V8EXPORT static Local<String> NewSymbol(const char* data, int length = -1);
/**
* Creates a new string by concatenating the left and the right strings
* passed in as parameters.
*/
static Local<String> Concat(Handle<String> left, Handle<String>right);
V8EXPORT static Local<String> Concat(Handle<String> left,
Handle<String>right);
/**
* Creates a new external string using the data defined in the given
@@ -1159,7 +1152,7 @@ class V8EXPORT String : public Primitive {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
static Local<String> NewExternal(ExternalStringResource* resource);
V8EXPORT static Local<String> NewExternal(ExternalStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -1170,7 +1163,7 @@ class V8EXPORT String : public Primitive {
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
*/
bool MakeExternal(ExternalStringResource* resource);
V8EXPORT bool MakeExternal(ExternalStringResource* resource);
/**
* Creates a new external string using the ascii data defined in the given
@@ -1180,7 +1173,8 @@ class V8EXPORT String : public Primitive {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
static Local<String> NewExternal(ExternalAsciiStringResource* resource);
V8EXPORT static Local<String> NewExternal(
ExternalAsciiStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -1191,18 +1185,20 @@ class V8EXPORT String : public Primitive {
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
*/
bool MakeExternal(ExternalAsciiStringResource* resource);
V8EXPORT bool MakeExternal(ExternalAsciiStringResource* resource);
/**
* Returns true if this string can be made external.
*/
bool CanMakeExternal();
V8EXPORT bool CanMakeExternal();
/** Creates an undetectable string from the supplied ascii or utf-8 data.*/
static Local<String> NewUndetectable(const char* data, int length = -1);
V8EXPORT static Local<String> NewUndetectable(const char* data,
int length = -1);
/** Creates an undetectable string from the supplied utf-16 data.*/
static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
V8EXPORT static Local<String> NewUndetectable(const uint16_t* data,
int length = -1);
/**
* Converts an object to a utf8-encoded character array. Useful if
@@ -1273,21 +1269,21 @@ class V8EXPORT String : public Primitive {
};
private:
void VerifyExternalStringResource(ExternalStringResource* val) const;
static void CheckCast(v8::Value* obj);
V8EXPORT void VerifyExternalStringResource(ExternalStringResource* val) const;
V8EXPORT static void CheckCast(v8::Value* obj);
};
/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
class V8EXPORT Number : public Primitive {
class Number : public Primitive {
public:
double Value() const;
static Local<Number> New(double value);
V8EXPORT double Value() const;
V8EXPORT static Local<Number> New(double value);
static inline Number* Cast(v8::Value* obj);
private:
Number();
V8EXPORT Number();
static void CheckCast(v8::Value* obj);
};
@@ -1295,56 +1291,56 @@ class V8EXPORT Number : public Primitive {
/**
* A JavaScript value representing a signed integer.
*/
class V8EXPORT Integer : public Number {
class Integer : public Number {
public:
static Local<Integer> New(int32_t value);
static Local<Integer> NewFromUnsigned(uint32_t value);
int64_t Value() const;
V8EXPORT static Local<Integer> New(int32_t value);
V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value);
V8EXPORT int64_t Value() const;
static inline Integer* Cast(v8::Value* obj);
private:
Integer();
static void CheckCast(v8::Value* obj);
V8EXPORT Integer();
V8EXPORT static void CheckCast(v8::Value* obj);
};
/**
* A JavaScript value representing a 32-bit signed integer.
*/
class V8EXPORT Int32 : public Integer {
class Int32 : public Integer {
public:
int32_t Value() const;
V8EXPORT int32_t Value() const;
private:
Int32();
V8EXPORT Int32();
};
/**
* A JavaScript value representing a 32-bit unsigned integer.
*/
class V8EXPORT Uint32 : public Integer {
class Uint32 : public Integer {
public:
uint32_t Value() const;
V8EXPORT uint32_t Value() const;
private:
Uint32();
V8EXPORT Uint32();
};
/**
* An instance of the built-in Date constructor (ECMA-262, 15.9).
*/
class V8EXPORT Date : public Value {
class Date : public Value {
public:
static Local<Value> New(double time);
V8EXPORT static Local<Value> New(double time);
/**
* A specialization of Value::NumberValue that is more efficient
* because we know the structure of this object.
*/
double NumberValue() const;
V8EXPORT double NumberValue() const;
static inline Date* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
V8EXPORT static void CheckCast(v8::Value* obj);
};
@@ -1403,14 +1399,14 @@ enum AccessControl {
/**
* A JavaScript object (ECMA-262, 4.3.3)
*/
class V8EXPORT Object : public Value {
class Object : public Value {
public:
bool Set(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
V8EXPORT bool Set(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
bool Set(uint32_t index,
Handle<Value> value);
V8EXPORT bool Set(uint32_t index,
Handle<Value> value);
// Sets a local property on this object bypassing interceptors and
// overriding accessors or read-only properties.
@@ -1420,34 +1416,34 @@ class V8EXPORT Object : public Value {
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
bool ForceSet(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
V8EXPORT bool ForceSet(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
Local<Value> Get(Handle<Value> key);
V8EXPORT Local<Value> Get(Handle<Value> key);
Local<Value> Get(uint32_t index);
V8EXPORT Local<Value> Get(uint32_t index);
// TODO(1245389): Replace the type-specific versions of these
// functions with generic ones that accept a Handle<Value> key.
bool Has(Handle<String> key);
V8EXPORT bool Has(Handle<String> key);
bool Delete(Handle<String> key);
V8EXPORT bool Delete(Handle<String> key);
// Delete a property on this object bypassing interceptors and
// ignoring dont-delete attributes.
bool ForceDelete(Handle<Value> key);
V8EXPORT bool ForceDelete(Handle<Value> key);
bool Has(uint32_t index);
V8EXPORT bool Has(uint32_t index);
bool Delete(uint32_t index);
V8EXPORT bool Delete(uint32_t index);
bool SetAccessor(Handle<String> name,
AccessorGetter getter,
AccessorSetter setter = 0,
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
V8EXPORT bool SetAccessor(Handle<String> name,
AccessorGetter getter,
AccessorSetter setter = 0,
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
/**
* Returns an array containing the names of the enumerable properties
@@ -1455,78 +1451,80 @@ class V8EXPORT Object : public Value {
* array returned by this method contains the same values as would
* be enumerated by a for-in statement over this object.
*/
Local<Array> GetPropertyNames();
V8EXPORT Local<Array> GetPropertyNames();
/**
* Get the prototype object. This does not skip objects marked to
* be skipped by __proto__ and it does not consult the security
* handler.
*/
Local<Value> GetPrototype();
V8EXPORT Local<Value> GetPrototype();
/**
* Set the prototype object. This does not skip objects marked to
* be skipped by __proto__ and it does not consult the security
* handler.
*/
bool SetPrototype(Handle<Value> prototype);
V8EXPORT bool SetPrototype(Handle<Value> prototype);
/**
* Finds an instance of the given function template in the prototype
* chain.
*/
Local<Object> FindInstanceInPrototypeChain(Handle<FunctionTemplate> tmpl);
V8EXPORT Local<Object> FindInstanceInPrototypeChain(
Handle<FunctionTemplate> tmpl);
/**
* Call builtin Object.prototype.toString on this object.
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
*/
Local<String> ObjectProtoToString();
V8EXPORT Local<String> ObjectProtoToString();
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
V8EXPORT int InternalFieldCount();
/** Gets the value in an internal field. */
inline Local<Value> GetInternalField(int index);
/** Sets the value in an internal field. */
void SetInternalField(int index, Handle<Value> value);
V8EXPORT void SetInternalField(int index, Handle<Value> value);
/** Gets a native pointer from an internal field. */
inline void* GetPointerFromInternalField(int index);
/** Sets a native pointer in an internal field. */
void SetPointerInInternalField(int index, void* value);
V8EXPORT void SetPointerInInternalField(int index, void* value);
// Testers for local properties.
bool HasRealNamedProperty(Handle<String> key);
bool HasRealIndexedProperty(uint32_t index);
bool HasRealNamedCallbackProperty(Handle<String> key);
V8EXPORT bool HasRealNamedProperty(Handle<String> key);
V8EXPORT bool HasRealIndexedProperty(uint32_t index);
V8EXPORT bool HasRealNamedCallbackProperty(Handle<String> key);
/**
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
V8EXPORT Local<Value> GetRealNamedPropertyInPrototypeChain(
Handle<String> key);
/**
* If result.IsEmpty() no real property was located on the object or
* in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
Local<Value> GetRealNamedProperty(Handle<String> key);
V8EXPORT Local<Value> GetRealNamedProperty(Handle<String> key);
/** Tests for a named lookup interceptor.*/
bool HasNamedLookupInterceptor();
V8EXPORT bool HasNamedLookupInterceptor();
/** Tests for an index lookup interceptor.*/
bool HasIndexedLookupInterceptor();
V8EXPORT bool HasIndexedLookupInterceptor();
/**
* Turns on access check on the object if the object is an instance of
* a template that has access check callbacks. If an object has no
* access check info, the object cannot be accessed by anyone.
*/
void TurnOnAccessCheck();
V8EXPORT void TurnOnAccessCheck();
/**
* Returns the identity hash for this object. The current implemenation uses
@@ -1535,7 +1533,7 @@ class V8EXPORT Object : public Value {
* The return value will never be 0. Also, it is not guaranteed to be
* unique.
*/
int GetIdentityHash();
V8EXPORT int GetIdentityHash();
/**
* Access hidden properties on JavaScript objects. These properties are
@@ -1543,9 +1541,9 @@ class V8EXPORT Object : public Value {
* C++ API. Hidden properties introduced by V8 internally (for example the
* identity hash) are prefixed with "v8::".
*/
bool SetHiddenValue(Handle<String> key, Handle<Value> value);
Local<Value> GetHiddenValue(Handle<String> key);
bool DeleteHiddenValue(Handle<String> key);
V8EXPORT bool SetHiddenValue(Handle<String> key, Handle<Value> value);
V8EXPORT Local<Value> GetHiddenValue(Handle<String> key);
V8EXPORT bool DeleteHiddenValue(Handle<String> key);
/**
* Returns true if this is an instance of an api function (one
@@ -1554,13 +1552,13 @@ class V8EXPORT Object : public Value {
* conservative and may return true for objects that haven't actually
* been modified.
*/
bool IsDirty();
V8EXPORT bool IsDirty();
/**
* Clone this object with a fast but shallow copy. Values will point
* to the same values as the original object.
*/
Local<Object> Clone();
V8EXPORT Local<Object> Clone();
/**
* Set the backing store of the indexed properties to be managed by the
@@ -1569,7 +1567,7 @@ class V8EXPORT Object : public Value {
* Note: The embedding program still owns the data and needs to ensure that
* the backing store is preserved while V8 has a reference.
*/
void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
V8EXPORT void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
bool HasIndexedPropertiesInPixelData();
uint8_t* GetIndexedPropertiesPixelData();
int GetIndexedPropertiesPixelDataLength();
@@ -1581,21 +1579,22 @@ class V8EXPORT Object : public Value {
* Note: The embedding program still owns the data and needs to ensure that
* the backing store is preserved while V8 has a reference.
*/
void SetIndexedPropertiesToExternalArrayData(void* data,
ExternalArrayType array_type,
int number_of_elements);
V8EXPORT void SetIndexedPropertiesToExternalArrayData(
void* data,
ExternalArrayType array_type,
int number_of_elements);
bool HasIndexedPropertiesInExternalArrayData();
void* GetIndexedPropertiesExternalArrayData();
ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
int GetIndexedPropertiesExternalArrayDataLength();
static Local<Object> New();
V8EXPORT static Local<Object> New();
static inline Object* Cast(Value* obj);
private:
Object();
static void CheckCast(Value* obj);
Local<Value> CheckedGetInternalField(int index);
void* SlowGetPointerFromInternalField(int index);
V8EXPORT Object();
V8EXPORT static void CheckCast(Value* obj);
V8EXPORT Local<Value> CheckedGetInternalField(int index);
V8EXPORT void* SlowGetPointerFromInternalField(int index);
/**
* If quick access to the internal field is possible this method
@@ -1608,20 +1607,20 @@ class V8EXPORT Object : public Value {
/**
* An instance of the built-in array constructor (ECMA-262, 15.4.2).
*/
class V8EXPORT Array : public Object {
class Array : public Object {
public:
uint32_t Length() const;
V8EXPORT uint32_t Length() const;
/**
* Clones an element at index |index|. Returns an empty
* handle if cloning fails (for any reason).
*/
Local<Object> CloneElementAt(uint32_t index);
V8EXPORT Local<Object> CloneElementAt(uint32_t index);
static Local<Array> New(int length = 0);
V8EXPORT static Local<Array> New(int length = 0);
static inline Array* Cast(Value* obj);
private:
Array();
V8EXPORT Array();
static void CheckCast(Value* obj);
};
@@ -1629,25 +1628,27 @@ class V8EXPORT Array : public Object {
/**
* A JavaScript function object (ECMA-262, 15.3).
*/
class V8EXPORT Function : public Object {
class Function : public Object {
public:
Local<Object> NewInstance() const;
Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
V8EXPORT Local<Object> NewInstance() const;
V8EXPORT Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
V8EXPORT Local<Value> Call(Handle<Object> recv,
int argc,
Handle<Value> argv[]);
V8EXPORT void SetName(Handle<String> name);
V8EXPORT Handle<Value> GetName() const;
/**
* Returns zero based line number of function body and
* kLineOffsetNotFound if no information available.
*/
int GetScriptLineNumber() const;
ScriptOrigin GetScriptOrigin() const;
V8EXPORT int GetScriptLineNumber() const;
V8EXPORT ScriptOrigin GetScriptOrigin() const;
static inline Function* Cast(Value* obj);
static const int kLineOffsetNotFound;
V8EXPORT static const int kLineOffsetNotFound;
private:
Function();
static void CheckCast(Value* obj);
V8EXPORT Function();
V8EXPORT static void CheckCast(Value* obj);
};
@@ -1662,19 +1663,19 @@ class V8EXPORT Function : public Object {
* value Unwrap should be used, all other operations on that object will lead
* to unpredictable results.
*/
class V8EXPORT External : public Value {
class External : public Value {
public:
static Local<Value> Wrap(void* data);
V8EXPORT static Local<Value> Wrap(void* data);
static inline void* Unwrap(Handle<Value> obj);
static Local<External> New(void* value);
V8EXPORT static Local<External> New(void* value);
static inline External* Cast(Value* obj);
void* Value() const;
V8EXPORT void* Value() const;
private:
External();
static void CheckCast(v8::Value* obj);
V8EXPORT External();
V8EXPORT static void CheckCast(v8::Value* obj);
static inline void* QuickUnwrap(Handle<v8::Value> obj);
static void* FullUnwrap(Handle<v8::Value> obj);
V8EXPORT static void* FullUnwrap(Handle<v8::Value> obj);
};
@@ -1704,7 +1705,7 @@ class V8EXPORT Template : public Data {
* including the receiver, the number and values of arguments, and
* the holder of the function.
*/
class V8EXPORT Arguments {
class Arguments {
public:
inline int Length() const;
inline Local<Value> operator[](int i) const;
@@ -1714,7 +1715,6 @@ class V8EXPORT Arguments {
inline bool IsConstructCall() const;
inline Local<Value> Data() const;
private:
Arguments();
friend class ImplementationUtilities;
inline Arguments(Local<Value> data,
Local<Object> holder,
@@ -3001,7 +3001,7 @@ class V8EXPORT Context {
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
*/
class V8EXPORT Scope {
class Scope {
public:
inline Scope(Handle<Context> context) : context_(context) {
context_->Enter();
@@ -3320,6 +3320,17 @@ void Persistent<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
}
Arguments::Arguments(v8::Local<v8::Value> data,
v8::Local<v8::Object> holder,
v8::Local<v8::Function> callee,
bool is_construct_call,
void** values, int length)
: data_(data), holder_(holder), callee_(callee),
is_construct_call_(is_construct_call),
values_(values), length_(length) { }
Local<Value> Arguments::operator[](int i) const {
if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
return Local<Value>(reinterpret_cast<Value*>(values_ - i));
@@ -3580,7 +3591,6 @@ Local<Object> AccessorInfo::Holder() const {
#undef V8EXPORT
#undef V8EXPORT_INLINE
#undef TYPE_CHECK

deps/v8/src/api.h (10 lines changed)

@@ -134,16 +134,6 @@ class ApiFunction {
};
v8::Arguments::Arguments(v8::Local<v8::Value> data,
v8::Local<v8::Object> holder,
v8::Local<v8::Function> callee,
bool is_construct_call,
void** values, int length)
: data_(data), holder_(holder), callee_(callee),
is_construct_call_(is_construct_call),
values_(values), length_(length) { }
enum ExtensionTraversalState {
UNVISITED, VISITED, INSTALLED
};

deps/v8/src/arm/assembler-arm.cc (10 lines changed)

@@ -1801,6 +1801,16 @@ void Assembler::vstr(const DwVfpRegister src,
}
void Assembler::vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,

deps/v8/src/arm/assembler-arm.h (4 lines changed)

@@ -930,6 +930,10 @@ class Assembler : public Malloced {
const Register base,
int offset, // Offset must be a multiple of 4.
const Condition cond = al);
void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,

deps/v8/src/arm/codegen-arm.cc (679 lines changed)

@@ -748,37 +748,43 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
JumpTarget* false_target) {
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
bool known_smi = frame_->KnownSmiAt(0);
Register tos = frame_->PopToRegister();
// Fast case checks
// Check if the value is 'false'.
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(tos, ip);
false_target->Branch(eq);
if (!known_smi) {
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(tos, ip);
false_target->Branch(eq);
// Check if the value is 'true'.
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(tos, ip);
true_target->Branch(eq);
// Check if the value is 'true'.
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(tos, ip);
true_target->Branch(eq);
// Check if the value is 'undefined'.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(tos, ip);
false_target->Branch(eq);
// Check if the value is 'undefined'.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(tos, ip);
false_target->Branch(eq);
}
// Check if the value is a smi.
__ cmp(tos, Operand(Smi::FromInt(0)));
false_target->Branch(eq);
__ tst(tos, Operand(kSmiTagMask));
true_target->Branch(eq);
// Slow case: call the runtime.
frame_->EmitPush(tos);
frame_->CallRuntime(Runtime::kToBool, 1);
// Convert the result (r0) to a condition code.
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(r0, ip);
if (!known_smi) {
false_target->Branch(eq);
__ tst(tos, Operand(kSmiTagMask));
true_target->Branch(eq);
// Slow case: call the runtime.
frame_->EmitPush(tos);
frame_->CallRuntime(Runtime::kToBool, 1);
// Convert the result (r0) to a condition code.
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(r0, ip);
}
cc_reg_ = ne;
}
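Paraphrased as plain C++, the known_smi specialization above collapses ToBoolean to a single comparison (a sketch of the logic, not the emitted code; on ARM the zero smi is the all-zero word):

    #include <cstdint>

    // With the top of the virtual frame statically known to be a smi, the
    // false/true/undefined root comparisons and the Runtime::kToBool
    // fallback all drop away; only the Smi::FromInt(0) test survives.
    bool KnownSmiToBoolean(intptr_t tagged_smi) {
      return tagged_smi != 0;
    }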
@@ -1745,11 +1751,15 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
val = node->fun(); // NULL if we don't have a function
}
if (val != NULL) {
WriteBarrierCharacter wb_info =
val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
// Set initial value.
Reference target(this, node->proxy());
Load(val);
target.SetValue(NOT_CONST_INIT);
target.SetValue(NOT_CONST_INIT, wb_info);
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -2485,13 +2495,13 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
each.SetValue(NOT_CONST_INIT);
each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
frame_->Drop(2);
} else {
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, r3 pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT);
each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
frame_->Drop();
}
}
@@ -3646,6 +3656,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
// Evaluate the receiver subexpression.
Load(prop->obj());
WriteBarrierCharacter wb_info;
// Change to slow case in the beginning of an initialization block to
// avoid the quadratic behavior of repeatedly adding fast properties.
if (node->starts_initialization_block()) {
@@ -3667,7 +3679,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
// [tos] : key
// [tos+1] : receiver
// [tos+2] : receiver if at the end of an initialization block
//
// Evaluate the right-hand side.
if (node->is_compound()) {
// For a compound assignment the right-hand side is a binary operation
@@ -3699,9 +3711,13 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
inline_smi);
}
wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
} else {
// For non-compound assignment just load the right-hand side.
Load(node->value());
wb_info = node->value()->AsLiteral() != NULL ?
NEVER_NEWSPACE :
(node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
}
// Stack layout:
@@ -3713,7 +3729,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
// Perform the assignment. It is safe to ignore constants here.
ASSERT(node->op() != Token::INIT_CONST);
CodeForSourcePosition(node->position());
EmitKeyedStore(prop->key()->type());
EmitKeyedStore(prop->key()->type(), wb_info);
frame_->EmitPush(r0);
// Stack layout:
@@ -4291,7 +4307,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
} else {
CpuFeatures::Scope scope(VFP3);
JumpTarget runtime, done;
Label not_minus_half, allocate_return;
Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
@@ -4299,18 +4315,74 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
// Get base and exponent to registers.
Register exponent = frame_->PopToRegister();
Register base = frame_->PopToRegister(exponent);
Register heap_number_map = no_reg;
// Set the frame for the runtime jump target. The code below jumps to the
// jump target label so the frame needs to be established before that.
ASSERT(runtime.entry_frame() == NULL);
runtime.set_entry_frame(frame_);
__ BranchOnSmi(exponent, runtime.entry_label());
__ BranchOnNotSmi(exponent, &exponent_nonsmi);
__ BranchOnNotSmi(base, &base_nonsmi);
heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Exponent is a smi and base is a smi. Get the smi value into vfp register
// d1.
__ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
__ b(&powi);
__ bind(&base_nonsmi);
// Exponent is smi and base is non smi. Get the double value from the base
// into vfp register d1.
__ ObjectToDoubleVFPRegister(base, d1,
scratch1, scratch2, heap_number_map, s0,
runtime.entry_label());
__ bind(&powi);
// Load 1.0 into d0.
__ mov(scratch2, Operand(0x3ff00000));
__ mov(scratch1, Operand(0));
__ vmov(d0, scratch1, scratch2);
// Get the absolute untagged value of the exponent and use that for the
// calculation.
__ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
__ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi); // Negate if negative.
__ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
// Run through all the bits in the exponent. The result is calculated in d0
// and d1 holds base^(bit^2).
Label more_bits;
__ bind(&more_bits);
__ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
__ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set.
__ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done.
__ b(ne, &more_bits);
// If exponent is positive we are done.
__ cmp(exponent, Operand(0));
__ b(ge, &allocate_return);
// If exponent is negative result is 1/result (d2 already holds 1.0 in that
// case). However if d0 has reached infinity this will not provide the
// correct result, so call runtime if that is the case.
__ mov(scratch2, Operand(0x7FF00000));
__ mov(scratch1, Operand(0));
__ vmov(d1, scratch1, scratch2); // Load infinity into d1.
__ vcmp(d0, d1);
__ vmrs(pc);
runtime.Branch(eq); // d0 reached infinity.
__ vdiv(d0, d2, d0);
__ b(&allocate_return);
__ bind(&exponent_nonsmi);
// Special handling of raising to the power of -0.5 and 0.5. First check
// that the value is a heap number and that the lower bits (which for both
// values are zero).
Register heap_number_map = r6;
heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
__ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
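The bit-scanning loop added above is exponentiation by squaring, with d0 holding the running result and d1 the repeatedly squared base. A hedged C++ sketch of the same scheme (the overflow-to-infinity bail-out becomes a comment):

    #include <cstdlib>

    // Sketch of the powi fast path: double base, small integer exponent n.
    double PowiSketch(double base, int n) {
      double result = 1.0;              // d0, preloaded with 1.0
      double power = base;              // d1, holds base^(2^i)
      unsigned bits = static_cast<unsigned>(std::abs(n));
      while (bits != 0) {
        if (bits & 1) result *= power;  // vmul(d0, d0, d1, cs)
        bits >>= 1;
        if (bits != 0) power *= power;  // vmul(d1, d1, d1, ne)
      }
      // For a negative exponent the stub computes 1/result, but falls back
      // to the runtime if result reached infinity, where 1/result would
      // wrongly collapse to 0.
      if (n < 0) result = 1.0 / result;
      return result;
    }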
@@ -4319,7 +4391,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
__ tst(scratch2, scratch2);
runtime.Branch(ne);
// Load the e
// Load the higher bits (which contains the floating point exponent).
__ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
// Compare exponent with -0.5.
@@ -4356,8 +4428,10 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
__ vsqrt(d0, d0);
__ bind(&allocate_return);
__ AllocateHeapNumberWithValue(
base, d0, scratch1, scratch2, heap_number_map, runtime.entry_label());
Register scratch3 = r5;
__ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
heap_number_map, runtime.entry_label());
__ mov(base, scratch3);
done.Jump();
runtime.Bind();
@@ -5349,9 +5423,13 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); // r0 has result
} else {
bool overwrite =
bool can_overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
Load(node->expression());
switch (op) {
case Token::NOT:
@@ -5362,7 +5440,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::SUB: {
frame_->PopToR0();
GenericUnaryOpStub stub(Token::SUB, overwrite);
GenericUnaryOpStub stub(
Token::SUB,
overwrite,
no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
frame_->CallStub(&stub, 0);
frame_->EmitPush(r0); // r0 has result
break;
@@ -5451,7 +5532,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ sub(value, value, Operand(Smi::FromInt(1)));
}
frame_->EmitPush(value);
target.SetValue(NOT_CONST_INIT);
target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
if (is_postfix) frame_->Pop();
ASSERT_EQ(original_height + 1, frame_->height());
return;
@@ -5550,7 +5631,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Set the target with the result, leaving the result on
// top of the stack. Removes the target from the stack if
// it has a non-zero size.
if (!is_const) target.SetValue(NOT_CONST_INIT);
if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
}
// Postfix: Discard the new value and use the old.
@@ -6283,7 +6364,8 @@ void CodeGenerator::EmitKeyedLoad() {
}
void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
void CodeGenerator::EmitKeyedStore(StaticType* key_type,
WriteBarrierCharacter wb_info) {
// Generate inlined version of the keyed store if the code is in a loop
// and the key is likely to be a smi.
if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
@@ -6299,25 +6381,45 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
__ IncrementCounter(&Counters::keyed_store_inline, 1,
scratch1, scratch2);
// Load the value, key and receiver from the stack.
bool value_is_harmless = frame_->KnownSmiAt(0);
if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
bool key_is_smi = frame_->KnownSmiAt(1);
Register value = frame_->PopToRegister();
Register key = frame_->PopToRegister(value);
VirtualFrame::SpilledScope spilled(frame_);
Register receiver = r2;
frame_->EmitPop(receiver);
#ifdef DEBUG
bool we_remembered_the_write_barrier = value_is_harmless;
#endif
// The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue(value, key, receiver);
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
__ tst(value, Operand(kSmiTagMask));
deferred->Branch(ne);
if (!value_is_harmless) {
// If the value is not likely to be a Smi then let's test the fixed array
// for new space instead. See below.
if (wb_info == LIKELY_SMI) {
__ tst(value, Operand(kSmiTagMask));
deferred->Branch(ne);
#ifdef DEBUG
we_remembered_the_write_barrier = true;
#endif
}
}
// Check that the key is a smi.
__ tst(key, Operand(kSmiTagMask));
deferred->Branch(ne);
if (!key_is_smi) {
// Check that the key is a smi.
__ tst(key, Operand(kSmiTagMask));
deferred->Branch(ne);
}
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
@@ -6333,24 +6435,35 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
__ cmp(scratch1, key);
deferred->Branch(ls); // Unsigned less equal.
// Get the elements array from the receiver.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
if (!value_is_harmless && wb_info != LIKELY_SMI) {
Label ok;
__ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
__ cmp(scratch2, Operand(ExternalReference::new_space_start()));
__ tst(value, Operand(kSmiTagMask), ne);
deferred->Branch(ne);
#ifdef DEBUG
we_remembered_the_write_barrier = true;
#endif
}
// Check that the elements array is not a dictionary.
__ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
// The following instructions are the part of the inlined store keyed
// property code which can be patched. Therefore the exact number of
// instructions generated need to be fixed, so the constant pool is blocked
// while generating this code.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
#ifdef DEBUG
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
// Read the fixed array map from the constant pool (not from the root
// array) so that the value can be patched. When debugging, we patch this
// comparison to always fail so that we will hit the IC call in the
// deferred code which will allow the debugger to break for fast case
// stores.
#ifdef DEBUG
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
__ mov(scratch3, Operand(Factory::fixed_array_map()));
__ cmp(scratch2, scratch3);
deferred->Branch(ne);
@@ -6367,6 +6480,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
ASSERT(we_remembered_the_write_barrier);
deferred->BindExit();
} else {
frame()->CallKeyedStoreIC();
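The new-space test threaded through the code above decides when the inlined store may skip the write barrier; a hedged restatement of that rule as plain C++:

    // Smis are immediates, never pointers, and a store into an elements
    // array that itself lives in new space cannot create an old-to-new
    // pointer; only the remaining case needs the barrier (and is sent to
    // the deferred IC path above).
    bool NeedsWriteBarrier(bool value_is_smi, bool elements_in_new_space) {
      if (value_is_smi) return false;
      if (elements_in_new_space) return false;
      return true;
    }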
@@ -6464,7 +6579,7 @@ void Reference::GetValue() {
}
void Reference::SetValue(InitState init_state) {
void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
MacroAssembler* masm = cgen_->masm();
@@ -6496,7 +6611,7 @@ void Reference::SetValue(InitState init_state) {
Property* property = expression_->AsProperty();
ASSERT(property != NULL);
cgen_->CodeForSourcePosition(property->position());
cgen_->EmitKeyedStore(property->key()->type());
cgen_->EmitKeyedStore(property->key()->type(), wb_info);
frame->EmitPush(r0);
set_unloaded();
break;
@@ -7170,22 +7285,42 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
Label* possible_strings,
Label* not_both_strings) {
// r2 is object type of r0.
// Ensure that no non-strings have the symbol bit set.
ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
Label object_test;
ASSERT(kSymbolTag != 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
__ tst(r2, Operand(kIsSymbolMask));
__ b(eq, slow);
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ b(eq, possible_strings);
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, slow);
__ b(eq, possible_strings);
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
__ mov(r0, Operand(1)); // Non-zero indicates not equal.
__ mov(pc, Operand(lr)); // Return.
__ bind(&object_test);
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, not_both_strings);
__ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
// equal to undefined.
__ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ and_(r0, r2, Operand(r3));
__ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
__ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
__ mov(pc, Operand(lr)); // Return.
}
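Restated, the object_test path above answers JavaScript == for two non-string heap objects without leaving generated code: distinct objects are unequal unless both are undetectable (the document.all-style objects that also compare equal to undefined). A sketch mirroring the stub's r0 convention:

    // Returns 0 for "equal" and non-zero for "not equal", as the stub does.
    int ObjectEqualitySketch(bool a_undetectable, bool b_undetectable) {
      return (a_undetectable && b_undetectable) ? 0 : 1;
    }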
@@ -7301,7 +7436,8 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::Generate(MacroAssembler* masm) {
__ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
__ add(offset_, object_, Operand(offset_));
__ RecordWriteHelper(object_, offset_, scratch_);
__ Ret();
}
@@ -7398,9 +7534,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
EmitCheckForSymbols(masm, &flat_string_check);
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of r0 on entry.
EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
@@ -7511,189 +7648,197 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ Swap(r0, r1, ip);
}
if (ShouldGenerateFPCode()) {
Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
// The type transition also calculates the answer.
bool generate_code_to_calculate_answer = true;
if (ShouldGenerateFPCode()) {
if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
GenerateTypeTransition(masm);
GenerateTypeTransition(masm); // Tail call.
generate_code_to_calculate_answer = false;
break;
default:
break;
}
// Restore heap number map register.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
__ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
}
// Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
}
if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
// Load the double from tagged HeapNumber r0 to d7.
__ sub(r7, r0, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
// Calling convention says that second double is in r2 and r3.
__ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
}
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
if (mode_ == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (generate_code_to_calculate_answer) {
Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may
// as well do the allocation immediately while r0 and r1 are untouched.
__ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
}
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r0 to double in d7.
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7);
__ vcvt_f64_s32(d7, s15);
if (!use_fp_registers) {
__ vmov(r2, r3, d7);
// Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
}
if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
// Load the double from tagged HeapNumber r0 to d7.
__ sub(r7, r0, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
// Calling convention says that second double is in r2 and r3.
__ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
}
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
if (mode_ == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r0 to double in d7.
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7);
__ vcvt_f64_s32(d7, s15);
if (!use_fp_registers) {
__ vmov(r2, r3, d7);
}
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub3(r3, r2, r7, r4);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
// HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
// r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
Label r1_is_not_smi;
if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm); // Tail call.
}
__ bind(&finished_loading_r0);
// Move r1 to a double in r0-r1.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ bind(&r1_is_not_smi);
__ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
}
if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
// Load the double from tagged HeapNumber r1 to d6.
__ sub(r7, r1, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
// Calling convention says that first double is in r0 and r1.
__ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
}
__ jmp(&finished_loading_r1);
__ bind(&r1_is_smi);
if (mode_ == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r1 to double in d6.
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
if (!use_fp_registers) {
__ vmov(r0, r1, d6);
}
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub4(r1, r0, r7, r9);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
__ bind(&finished_loading_r1);
}
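Every smi-to-double conversion in the block above leans on V8's pointer tagging: on this 32-bit port a smi keeps the integer in the upper 31 bits with a zero tag bit, so Operand(r0, ASR, kSmiTagSize) untags with a single arithmetic shift before vcvt_f64_s32 widens the value. A minimal sketch of that encoding:

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;      // the tag occupies bit 0
const int32_t kSmiTagMask = 1;  // what "tst rX, #kSmiTagMask" tests

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }  // ASR
bool IsSmi(int32_t tagged) { return (tagged & kSmiTagMask) == 0; }

int main() {
  int32_t t = SmiTag(-42);
  assert(IsSmi(t));
  assert(SmiUntag(t) == -42);
  // vcvt_f64_s32 then converts the untagged integer to a double in d6/d7.
  double d = static_cast<double>(SmiUntag(t));
  assert(d == -42.0);
  return 0;
}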
if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
__ bind(&do_the_call);
// If we are inlining the operation using VFP3 instructions for
// add, subtract, multiply, or divide, the arguments are in d6 and d7.
if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
if (Token::MUL == op_) {
__ vmul(d5, d6, d7);
} else if (Token::DIV == op_) {
__ vdiv(d5, d6, d7);
} else if (Token::ADD == op_) {
__ vadd(d5, d6, d7);
} else if (Token::SUB == op_) {
__ vsub(d5, d6, d7);
} else {
UNREACHABLE();
}
__ sub(r0, r5, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ add(r0, r0, Operand(kHeapObjectTag));
__ mov(pc, lr);
} else {
// If we did not inline the operation, then the arguments are in:
// r0: Left value (least significant part of mantissa).
// r1: Left value (sign, exponent, top of mantissa).
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
// r5: Address of heap number for result.
__ push(lr); // For later.
__ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
// Call C routine that may not cause GC or other trouble. r5 is callee
// save.
__ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
// Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
// Double returned in fp coprocessor register 0 and 1, encoded as
// register cr8. Offsets must be divisible by 4 for coprocessor so we
// need to subtract the tag from r5.
__ sub(r4, r5, Operand(kHeapObjectTag));
__ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
#else
// Double returned in registers 0 and 1.
__ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
#endif
__ mov(r0, Operand(r5));
// And we are done.
__ pop(pc);
}
}
if (!generate_code_to_calculate_answer &&
!slow_reverse.is_linked() &&
!slow.is_linked()) {
return;
}
if (lhs.is(r0)) {
__ b(&slow);
__ bind(&slow_reverse);
@ -7913,7 +8058,11 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of writing
// the register as an unsigned int so we go to slow case if we hit this
// case.
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, &slow);
}
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
@ -7957,10 +8106,24 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
// result.
__ mov(r0, Operand(r5));
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r2);
if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
}
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
// Tail call that writes the int32 in r2 to the heap number in r0, using
// r3 as scratch. r0 is preserved and returned.
WriteInt32ToHeapNumberStub stub(r2, r0, r3);
__ TailCallStub(&stub);
}
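The signed/unsigned split above exists because SHR is JavaScript's >>> operator: it yields a uint32, and a result above 0x7fffffff fits neither an int32 nor a smi, so converting it with a signed vcvt would flip it negative. A small illustration:

#include <cstdint>
#include <iostream>

int main() {
  int32_t x = -1;
  uint32_t shr = static_cast<uint32_t>(x) >> 0;  // JS: x >>> 0
  std::cout << shr << "\n";                      // 4294967295
  // vcvt_f64_s32 would reinterpret the bits as -1; vcvt_f64_u32 is correct.
  std::cout << static_cast<double>(static_cast<int32_t>(shr)) << "\n";  // -1
  std::cout << static_cast<double>(shr) << "\n";  // 4294967295
  return 0;
}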
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
@ -8597,29 +8760,15 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ Push(r1, r0);
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
__ mov(r1, Operand(Smi::FromInt(op_)));
__ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
__ Push(r2, r1, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
5,
1);
}
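The transition above hands IC::kBinaryOp_Patch everything it needs to rebuild the stub: the two operands plus the stub's MinorKey, the operation, and the current type state, all pushed as smis. The patch routine decodes the key and installs a specialized stub. A hedged sketch of the BitField-style key round trip (field positions and widths here are illustrative, not V8's exact minor-key layout):

#include <cassert>

template <class T, int shift, int size>
struct BitField {
  static const int kMask = ((1 << size) - 1) << shift;
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int field) { return static_cast<T>((field & kMask) >> shift); }
};

enum TypeInfo { DEFAULT_TYPES = 0, HEAP_NUMBERS_TYPES = 1, GENERIC_TYPES = 2 };

typedef BitField<TypeInfo, 0, 2> TypeInfoField;
typedef BitField<int, 2, 6> OpField;  // hypothetical widths

int main() {
  // Encode a key the way a stub's MinorKey packs its configuration...
  int key = OpField::encode(13) | TypeInfoField::encode(HEAP_NUMBERS_TYPES);
  // ...and decode it the way the patch routine would.
  assert(OpField::decode(key) == 13);
  assert(TypeInfoField::decode(key) == HEAP_NUMBERS_TYPES);
  return 0;
}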
@ -8751,16 +8900,23 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
if (negative_zero_ == kStrictNegativeZero) {
// If we have to check for zero, then we can check for the max negative
// smi while we are at it.
__ bic(ip, r0, Operand(0x80000000), SetCC);
__ b(eq, &slow);
__ rsb(r0, r0, Operand(0));
__ StubReturn(1);
} else {
// The value of the expression is a smi and 0 is OK for -0. Try
// optimistic subtraction '0 - value'.
__ rsb(r0, r0, Operand(0), SetCC);
__ StubReturn(1, vc);
// We don't have to reverse the optimistic neg since the only case
// where we fall through is the minimum negative Smi, which is the case
// where the neg leaves the register unchanged.
__ jmp(&slow); // Go slow on max negative Smi.
}
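The kStrictNegativeZero path folds the two smis that optimistic negation cannot handle into one test: 0 (whose negation is -0, which no smi can represent) and the most negative value 0x80000000 (whose negation overflows). Clearing the sign bit and testing the remainder for zero, as the bic above does, catches exactly those two. A sketch:

#include <cstdint>
#include <iostream>

// Mirrors "bic ip, r0, #0x80000000, SetCC; b(eq, &slow)": clear the sign
// bit of the tagged value; only 0 and 0x80000000 leave nothing behind.
bool NeedsSlowPath(int32_t tagged_smi) {
  return (tagged_smi & 0x7fffffff) == 0;
}

int main() {
  std::cout << NeedsSlowPath(0) << "\n";          // 1: 0 - 0 must yield -0
  std::cout << NeedsSlowPath(INT32_MIN) << "\n";  // 1: negation overflows
  std::cout << NeedsSlowPath(42 << 1) << "\n";    // 0: ordinary tagged smi
  // Why 0 cannot take the fast path: -0.0 is observable in IEEE arithmetic.
  std::cout << (1.0 / -0.0 < 0) << "\n";          // 1
  return 0;
}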
__ bind(&try_float);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
@ -8768,7 +8924,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ cmp(r1, heap_number_map);
__ b(ne, &slow);
// r0 is a heap number. Get a new heap number in r1.
if (overwrite_ == UNARY_OVERWRITE) {
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
@ -8801,7 +8957,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(&done);
__ bind(&try_float);
if (!overwrite_ == UNARY_OVERWRITE) {
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
@ -8809,12 +8965,21 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(r2));
}
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ sub(r2, r0, Operand(kHeapObjectTag));
__ vstr(d0, r2, HeapNumber::kValueOffset);
} else {
// WriteInt32ToHeapNumberStub does not trigger GC, so we do not
// have to set up a frame.
WriteInt32ToHeapNumberStub stub(r1, r0, r2);
__ push(lr);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
} else {
UNIMPLEMENTED();
}

5
deps/v8/src/arm/codegen-arm.h

@ -44,6 +44,7 @@ class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
// -------------------------------------------------------------------------
@ -100,7 +101,7 @@ class Reference BASE_EMBEDDED {
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state, WriteBarrierCharacter wb);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference afterwards get then dup it now. Otherwise mark
@ -384,7 +385,7 @@ class CodeGenerator: public AstVisitor {
// Store a keyed property. Key and receiver are on the stack and the value is
// in r0. Result is returned in r0.
void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,

9
deps/v8/src/arm/disasm-arm.cc

@ -1047,7 +1047,14 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
// vmov register to register.
if (instr->SzField() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);

8
deps/v8/src/arm/full-codegen-arm.cc

@ -2736,9 +2736,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
bool can_overwrite =
(expr->expression()->AsBinaryOperation() != NULL &&
expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register r0.
@ -2750,9 +2752,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
bool can_overwrite =
(expr->expression()->AsBinaryOperation() != NULL &&
expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register r0.

179
deps/v8/src/arm/ic-arm.cc

@ -64,12 +64,12 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
Register receiver,
Register elements,
Register t0,
Register t1,
Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// elements: holds the property dictionary on fall through.
@ -105,33 +105,16 @@ static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
}
// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
static void GenerateStringDictionaryProbes(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2) {
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
@ -170,16 +153,56 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
__ cmp(name, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, done);
} else {
__ b(ne, miss);
}
}
}
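The probe loop above is plain open addressing over a power-of-two table: each of the kProbes attempts derives an entry index from the name's hash plus a growing offset, masks it with the capacity, and compares keys; only the last failed probe branches to the |miss| label. A simplified model (hash and probe offsets illustrative of the scheme, not V8's exact generated sequence):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct Entry { std::string key; int value; };

const int kProbes = 4;  // the generated code unrolls this many attempts

int Probe(const std::vector<Entry>& table, const std::string& name,
          uint32_t hash) {
  uint32_t mask = table.size() - 1;  // capacity is a power of two
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + i * (i + 1) / 2) & mask;  // triangular offsets
    if (table[index].key == name) return table[index].value;
  }
  return -1;  // the last failed comparison jumps to |miss|
}

int main() {
  std::vector<Entry> table(8);
  table[3] = {"x", 7};
  std::cout << Probe(table, "x", 3) << "\n";  // 7
  std::cout << Probe(table, "y", 3) << "\n";  // -1
  return 0;
}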
// Helper function used from LoadIC/CallIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
// name: Property name. It is not clobbered if a jump to the miss label is
// done
// result: Register for the result. It is only updated if a jump to the miss
// label is not done. Can be the same as elements or name clobbering
// one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register name,
Register result,
Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
// dictionary.
// scratch2: Used as temporary.
Label done;
// Probe the dictionary.
GenerateStringDictionaryProbes(masm,
miss,
&done,
elements,
name,
scratch1,
scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
@ -189,6 +212,63 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
// name: Property name. It is not clobbered if a jump to the miss label is
// done
// value: The value to store.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
Label* miss,
Register elements,
Register name,
Register value,
Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
// dictionary.
// scratch2: Used as temporary.
Label done;
// Probe the dictionary.
GenerateStringDictionaryProbes(masm,
miss,
&done,
elements,
name,
scratch1,
scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask
= (PropertyDetails::TypeField::mask() |
PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
__ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ tst(scratch1, Operand(kTypeAndReadOnlyMask));
__ b(ne, miss);
// Store the value at the masked, scaled index and return.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
__ str(value, MemOperand(scratch2));
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
__ RecordWrite(elements, scratch2, scratch1);
}
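The kTypeAndReadOnlyMask test above works because every dictionary entry is a (key, value, details) triple whose details word is a smi: the property type and attribute bits sit in its low bits, so shifting the combined mask left by the smi tag size lets a single tst reject both non-normal and read-only properties. A sketch with an illustrative bit layout (not V8's exact field assignment):

#include <cassert>

const int kSmiTagSize = 1;
const int kTypeMask = 0x7;        // stands in for PropertyDetails::TypeField
const int kReadOnlyBit = 1 << 3;  // stands in for the READ_ONLY attribute
const int kNormal = 0;            // NORMAL properties have type bits of zero

bool CanStore(int details_smi) {
  int mask = (kTypeMask | kReadOnlyBit) << kSmiTagSize;  // shift past the tag
  return (details_smi & mask) == 0;  // normal and not read only
}

int main() {
  int normal = kNormal << kSmiTagSize;
  int read_only = (kNormal | kReadOnlyBit) << kSmiTagSize;
  assert(CanStore(normal));
  assert(!CanStore(read_only));
  return 0;
}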
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
@ -560,7 +640,7 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
// r0: elements
// Search the dictionary - put result in register r1.
@ -815,7 +895,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
// r1: elements
GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
@ -2138,6 +2218,27 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
__ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5);
__ Ret();
__ bind(&miss);
__ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5);
GenerateMiss(masm);
}
#undef __

61
deps/v8/src/arm/macro-assembler-arm.cc

@ -310,32 +310,28 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
// Calculate page address.
Bfc(object, 0, kPageSizeBits);
// Calculate region number.
Ubfx(address, address, Page::kRegionSizeLog2,
kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, address));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
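RecordWriteHelper is the region-marking half of V8's write barrier: it masks the object pointer down to its page with Bfc, extracts a region number from the written address with Ubfx, and ors that region's bit into the page's dirty-flag word, so the collector later rescans only dirty regions for old-to-new pointers. A sketch with illustrative constants (8K pages, 256-byte regions, one 32-bit dirty word per page):

#include <cstdint>
#include <iostream>

const uintptr_t kPageSizeBits = 13;  // 8K pages
const uintptr_t kPageMask = (static_cast<uintptr_t>(1) << kPageSizeBits) - 1;
const uintptr_t kRegionSizeLog2 = 8;  // 256-byte regions

uint32_t MarkRegionDirty(uint32_t dirty_flags, uintptr_t slot_address) {
  // The Ubfx step: the region number is a bit slice of the slot address.
  uintptr_t region = (slot_address & kPageMask) >> kRegionSizeLog2;
  // The orr step: set that region's bit in the page's dirty word.
  return dirty_flags | (static_cast<uint32_t>(1) << region);
}

int main() {
  uintptr_t slot = 0x1234560;  // some slot inside a page
  std::cout << std::hex << MarkRegionDirty(0, slot) << "\n";  // 20 (bit 5)
  return 0;
}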
@ -368,8 +364,11 @@ void MacroAssembler::RecordWrite(Register object,
// region marks for new space pages.
InNewSpace(object, scratch0, eq, &done);
// Add offset into the object.
add(scratch0, object, offset);
// Record the actual write.
RecordWriteHelper(object, scratch0, scratch1);
bind(&done);
@ -383,6 +382,38 @@ void MacroAssembler::RecordWrite(Register object,
}
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
Register scratch) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
Label done;
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
InNewSpace(object, scratch, eq, &done);
// Record the actual write.
RecordWriteHelper(object, address, scratch);
bind(&done);
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(address, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
}
}
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
@ -1341,12 +1372,12 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
}
void MacroAssembler::StubReturn(int argc, Condition cond) {
ASSERT(argc >= 1 && generating_stub());
if (argc > 1) {
add(sp, sp, Operand((argc - 1) * kPointerSize), LeaveCC, cond);
}
Ret(cond);
}

28
deps/v8/src/arm/macro-assembler-arm.h

@ -137,22 +137,32 @@ class MacroAssembler: public Assembler {
Label* branch);
// For the page containing |object| mark the region covering [address]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object,
Register address,
Register scratch);
// For the page containing |object| mark the region covering
// [object+offset] dirty. The object address must be in the first 8K
// of an allocated page. The 'scratch' registers are used in the
// implementation and all 3 registers are clobbered by the
// operation, as well as the ip register. RecordWrite updates the
// write barrier even when storing smis.
void RecordWrite(Register object,
Operand offset,
Register scratch0,
Register scratch1);
// For the page containing |object| mark the region covering
// [address] dirty. The object address must be in the first 8K of an
// allocated page. All 3 registers are clobbered by the operation,
// as well as the ip register. RecordWrite updates the write barrier
// even when storing smis.
void RecordWrite(Register object,
Register address,
Register scratch);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
@ -527,7 +537,7 @@ class MacroAssembler: public Assembler {
void TailCallStub(CodeStub* stub, Condition cond = al);
// Return from a code stub after popping its arguments.
void StubReturn(int argc, Condition cond = al);
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);

9
deps/v8/src/arm/simulator-arm.cc

@ -2276,7 +2276,14 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
// vmov register to register.
if (instr->SzField() == 0x1) {
set_d_register_from_double(vd, get_double_from_d_register(vm));
} else {
UNREACHABLE(); // Not used by V8.
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);

3
deps/v8/src/arm/stub-cache-arm.cc

@ -741,7 +741,8 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register scratch,
String* name,
int save_at_depth,
Label* miss,
Register extra) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,

5
deps/v8/src/builtins.cc

@ -1263,6 +1263,11 @@ static void Generate_StoreIC_Miss(MacroAssembler* masm) {
}
static void Generate_StoreIC_Normal(MacroAssembler* masm) {
StoreIC::GenerateNormal(masm);
}
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm);
}

1
deps/v8/src/builtins.h

@ -98,6 +98,7 @@ enum BuiltinExtraArguments {
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \

14
deps/v8/src/codegen.cc

@ -460,11 +460,17 @@ void CodeGenerator::CodeForSourcePosition(int pos) {
const char* GenericUnaryOpStub::GetName() {
switch (op_) {
case Token::SUB:
if (negative_zero_ == kStrictNegativeZero) {
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
: "GenericUnaryOpStub_SUB_Alloc_Strict0";
} else {
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
: "GenericUnaryOpStub_SUB_Alloc_Ignore0";
}
case Token::BIT_NOT:
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_BIT_NOT_Overwrite"
: "GenericUnaryOpStub_BIT_NOT_Alloc";
default:

25
deps/v8/src/codegen.h

@ -75,6 +75,7 @@
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
// Types of uncatchable exceptions.
enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
@ -414,21 +415,33 @@ class InstanceofStub: public CodeStub {
};
enum NegativeZeroHandling {
kStrictNegativeZero,
kIgnoreNegativeZero
};
class GenericUnaryOpStub : public CodeStub {
public:
GenericUnaryOpStub(Token::Value op,
UnaryOverwriteMode overwrite,
NegativeZeroHandling negative_zero = kStrictNegativeZero)
: op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
private:
Token::Value op_;
UnaryOverwriteMode overwrite_;
NegativeZeroHandling negative_zero_;
class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) |
OverwriteField::encode(overwrite_) |
NegativeZeroField::encode(negative_zero_);
}
void Generate(MacroAssembler* masm);

194
deps/v8/src/date.js

@ -347,9 +347,10 @@ function DateFromTime(t) {
function MakeDay(year, month, date) {
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
// Convert to integer and map -0 to 0.
year = TO_INTEGER_MAP_MINUS_ZERO(year);
month = TO_INTEGER_MAP_MINUS_ZERO(month);
date = TO_INTEGER_MAP_MINUS_ZERO(date);
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth ||
@ -452,111 +453,6 @@ var Date_cache = {
});
%FunctionSetPrototype($Date, new $Date($NaN));
@ -736,37 +632,50 @@ function DateGetTime() {
// ECMA 262 - 15.9.5.10
function DateGetFullYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
var cache = Date_cache;
if (cache.time === t) return cache.year;
return YEAR_FROM_TIME(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.11
function DateGetUTCFullYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return YEAR_FROM_TIME(t);
}
// ECMA 262 - 15.9.5.12
function DateGetMonth() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return MONTH_FROM_TIME(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.13
function DateGetUTCMonth() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return MONTH_FROM_TIME(t);
}
// ECMA 262 - 15.9.5.14
function DateGetDate() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return DATE_FROM_TIME(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.15
function DateGetUTCDate() {
var t = DATE_VALUE(this);
return NAN_OR_DATE_FROM_TIME(t);
}
@ -788,49 +697,62 @@ function DateGetUTCDay() {
// ECMA 262 - 15.9.5.18
function DateGetHours() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return HOUR_FROM_TIME(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.19
function DateGetUTCHours() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return HOUR_FROM_TIME(t);
}
// ECMA 262 - 15.9.5.20
function DateGetMinutes() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return MIN_FROM_TIME(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.21
function DateGetUTCMinutes() {
var t = DATE_VALUE(this);
return NAN_OR_MIN_FROM_TIME(t);
}
// ECMA 262 - 15.9.5.22
function DateGetSeconds() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return SEC_FROM_TIME(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.23
function DateGetUTCSeconds() {
var t = DATE_VALUE(this);
return NAN_OR_SEC_FROM_TIME(t);
}
// ECMA 262 - 15.9.5.24
function DateGetMilliseconds() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return MS_FROM_TIME(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.25
function DateGetUTCMilliseconds() {
var t = DATE_VALUE(this);
return NAN_OR_MS_FROM_TIME(t);
}
@ -871,7 +793,7 @@ function DateSetUTCMilliseconds(ms) {
function DateSetSeconds(sec, ms) {
var t = LocalTime(DATE_VALUE(this));
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@ -881,7 +803,7 @@ function DateSetSeconds(sec, ms) {
function DateSetUTCSeconds(sec, ms) {
var t = DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@ -892,8 +814,8 @@ function DateSetMinutes(min, sec, ms) {
var t = LocalTime(DATE_VALUE(this));
min = ToNumber(min);
var argc = %_ArgumentsLength();
sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@ -904,8 +826,8 @@ function DateSetUTCMinutes(min, sec, ms) {
var t = DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@ -916,9 +838,9 @@ function DateSetHours(hour, min, sec, ms) {
var t = LocalTime(DATE_VALUE(this));
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@ -929,9 +851,9 @@ function DateSetUTCHours(hour, min, sec, ms) {
var t = DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@ -959,7 +881,7 @@ function DateSetUTCDate(date) {
function DateSetMonth(month, date) {
var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@ -969,7 +891,7 @@ function DateSetMonth(month, date) {
function DateSetUTCMonth(month, date) {
var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
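The date.js changes in this section inline the former Get*From helpers and route several getters and setter defaults through NAN_OR_*_FROM_TIME-style macros, so an invalid date (a NaN time value) yields NaN before any field arithmetic runs. The pattern, rendered as C++ purely for illustration (constant and modulo semantics follow ECMA-262):

#include <cmath>
#include <iostream>

const double kMsPerSecond = 1000;

// ECMA-262 style modulo: the result takes the sign of the divisor.
double Modulo(double value, double divisor) {
  double result = std::fmod(value, divisor);
  return result >= 0 ? result : result + divisor;
}

// The NAN_OR_MS_FROM_TIME idea: short-circuit NaN, then extract the field.
double NanOrMsFromTime(double t) {
  if (std::isnan(t)) return t;
  return Modulo(t, kMsPerSecond);
}

int main() {
  std::cout << NanOrMsFromTime(1234.0) << "\n";  // 234
  std::cout << NanOrMsFromTime(-1.0) << "\n";    // 999
  std::cout << NanOrMsFromTime(NAN) << "\n";     // nan
  return 0;
}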

16
deps/v8/src/debug-debugger.js

@ -2070,6 +2070,7 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response)
return response.failed('Missing arguments');
}
var script_id = request.arguments.script_id;
var preview_only = !!request.arguments.preview_only;
var scripts = %DebugGetLoadedScripts();
@ -2092,18 +2093,9 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response)
var new_source = request.arguments.new_source;
var result_description = Debug.LiveEdit.SetScriptSource(the_script,
new_source, preview_only, change_log);
response.body = {change_log: change_log, result: result_description};
};

32
deps/v8/src/debug.cc

@ -472,8 +472,9 @@ void BreakLocationIterator::ClearDebugBreakAtIC() {
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
AssertNoAllocation nogc;
Address target = original_rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
// Restore the inlined version of keyed stores to get back to the
// fast case. We need to patch back the keyed store because no
@ -684,6 +685,12 @@ void Debug::Setup(bool create_heap_objects) {
void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
// the function will live in the heap until next gc, and can be found by
// Runtime::FindSharedFunctionInfoInScript.
BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
it.ClearAllDebugBreak();
RemoveDebugInfo(node->debug_info());
#ifdef DEBUG
node = Debug::debug_info_list_;
@ -854,7 +861,7 @@ Object* Debug::Break(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 0);
thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it;
@ -932,12 +939,22 @@ Object* Debug::Break(Arguments args) {
PrepareStep(step_action, step_count);
}
if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
SetAfterBreakTarget(frame);
} else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_IC_CALL) {
// We must have been calling IC stub. Do not go there anymore.
Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
thread_local_.after_break_target_ = plain_return->entry();
} else if (thread_local_.frame_drop_mode_ ==
FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
// Debug break slot stub does not return normally, instead it manually
// cleans the stack and jumps. We should patch the jump address.
Code* plain_return = Builtins::builtin(Builtins::FrameDropper_LiveEdit);
thread_local_.after_break_target_ = plain_return->entry();
} else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_DIRECT_CALL) {
// Nothing to do, after_break_target is not used here.
} else {
UNREACHABLE();
}
return Heap::undefined_value();
@ -1749,8 +1766,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
}
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
FrameDropMode mode) {
thread_local_.frame_drop_mode_ = mode;
thread_local_.break_frame_id_ = new_break_frame_id;
}

22
deps/v8/src/debug.h

@ -400,7 +400,22 @@ class Debug {
// Called from stub-cache.cc.
static void GenerateCallICDebugBreak(MacroAssembler* masm);
// Describes how exactly a frame has been dropped from stack.
enum FrameDropMode {
// No frame has been dropped.
FRAMES_UNTOUCHED,
// The top JS frame had been calling IC stub. IC stub mustn't be called now.
FRAME_DROPPED_IN_IC_CALL,
// The top JS frame had been calling debug break slot stub. Patch the
// address this stub jumps to in the end.
FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
// The top JS frame had been calling some C++ function. The return address
// gets patched automatically.
FRAME_DROPPED_IN_DIRECT_CALL
};
static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
FrameDropMode mode);
static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code);
@ -471,8 +486,9 @@ class Debug {
// Storage location for jump when exiting debug break calls.
Address after_break_target_;
// Stores the way how LiveEdit has patched the stack. It is used when
// debugger returns control back to user script.
FrameDropMode frame_drop_mode_;
// Top debugger entry.
EnterDebugger* debugger_entry_;

6
deps/v8/src/factory.cc

@ -96,6 +96,12 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
}
Handle<String> Factory::NewRawAsciiString(int length,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(length, pretenure), String);
}
Handle<String> Factory::NewRawTwoByteString(int length,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(length, pretenure), String);

12
deps/v8/src/factory.h

@ -95,12 +95,16 @@ class Factory : public AllStatic {
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
static Handle<String> NewStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
static Handle<String> NewRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
static Handle<String> NewRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);

2
deps/v8/src/frames.cc

@ -542,7 +542,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
Address pc = this->pc();
if (code != NULL && code->kind() == Code::FUNCTION &&
pc >= code->instruction_start() && pc < code->instruction_end()) {
int source_pos = code->SourcePosition(pc);
int line = GetScriptLineNumberSafe(script, source_pos) + 1;
accumulator->Add(":%d", line);

6
deps/v8/src/globals.h

@ -463,6 +463,12 @@ enum CallFunctionFlags {
};
enum InlineCacheHolderFlag {
OWN_MAP, // For fast properties objects.
PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
};
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.

31
deps/v8/src/heap.cc

@ -2351,8 +2351,13 @@ Object* Heap::CreateCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving uninitialized Code object (and breaking the heap).
Object* reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
if (reloc_info->IsFailure()) return reloc_info;
// Compute size
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
int sinfo_size = 0;
if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
int obj_size = Code::SizeFor(body_size, sinfo_size);
@ -2371,7 +2376,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
Code* code = Code::cast(result);
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
code->set_sinfo_size(sinfo_size);
code->set_flags(flags);
// Allow self references to created code object by patching the handle to
@ -2419,8 +2424,12 @@ Object* Heap::CopyCode(Code* code) {
Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving uninitialized Code object (and breaking the heap).
Object* reloc_info_array = AllocateByteArray(reloc_info.length(), TENURED);
if (reloc_info_array->IsFailure()) return reloc_info_array;
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
int sinfo_size = code->sinfo_size();
@ -2429,7 +2438,7 @@ Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Address old_addr = code->address();
size_t relocation_offset =
static_cast<size_t>(code->instruction_end() - old_addr);
Object* result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {
@ -2446,14 +2455,11 @@ Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
// Copy header and instructions.
memcpy(new_addr, old_addr, relocation_offset);
Code* new_code = Code::cast(result);
new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
// Copy patched rinfo.
memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
// Copy sinfo.
memcpy(new_code->sinfo_start(), code->sinfo_start(), code->sinfo_size());
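Both CreateCode and CopyCode now allocate the relocation ByteArray before the Code object itself, so a failed allocation can never leave a Code object on the heap with an uninitialized relocation field. The ordering pattern, sketched with stand-in types (not V8's actual allocator API):

#include <cstdio>

struct ByteArray { int length; };
struct Code { ByteArray* relocation_info; };

// Stand-ins for the heap's allocators; either may fail and return nullptr.
ByteArray* AllocateByteArray(int length) { return new ByteArray{length}; }
Code* AllocateCode() { return new Code{nullptr}; }

// Allocate the dependency first: if the Code allocation fails afterwards,
// no half-initialized Code object was ever made visible to the heap.
Code* CreateCode(int reloc_size) {
  ByteArray* reloc = AllocateByteArray(reloc_size);
  if (reloc == nullptr) return nullptr;  // nothing to unwind
  Code* code = AllocateCode();
  if (code == nullptr) return nullptr;   // reloc is just unreferenced garbage
  code->relocation_info = reloc;         // install before the object is used
  return code;
}

int main() {
  Code* code = CreateCode(16);
  std::printf("%d\n", code->relocation_info->length);  // 16
  return 0;
}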
@ -2866,6 +2872,8 @@ Object* Heap::AllocateStringFromAscii(Vector<const char> string,
Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
// V8 only supports characters in the Basic Multilingual Plane.
const uc32 kMaxSupportedChar = 0xFFFF;
// Count the number of characters in the UTF-8 string and check if
// it is an ASCII string.
Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
@ -2890,6 +2898,7 @@ Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
decoder->Reset(string.start(), string.length());
for (int i = 0; i < chars; i++) {
uc32 r = decoder->GetNext();
if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
string_result->Set(i, r);
}
return result;
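This version of V8 only stores Basic Multilingual Plane characters, so the UTF-8 path above clamps any decoded code point over 0xFFFF to the decoder's bad-character value before writing it into the two-byte string. A sketch (kBadChar value illustrative; V8 uses unibrow::Utf8::kBadChar):

#include <cstdint>
#include <iostream>

const uint32_t kMaxSupportedChar = 0xFFFF;
const uint16_t kBadChar = 0xFFFD;  // Unicode replacement character

uint16_t ClampToBMP(uint32_t code_point) {
  // Anything outside the BMP cannot fit a 16-bit string element.
  if (code_point > kMaxSupportedChar) return kBadChar;
  return static_cast<uint16_t>(code_point);
}

int main() {
  std::cout << std::hex << ClampToBMP(0x41) << "\n";     // 41 ('A')
  std::cout << std::hex << ClampToBMP(0x1F600) << "\n";  // fffd (clamped)
  return 0;
}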

248
deps/v8/src/ia32/codegen-ia32.cc

@ -7583,9 +7583,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->Push(&value);
} else {
Load(node->expression());
bool can_overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
switch (op) {
case Token::NOT:
case Token::DELETE:
@ -7594,7 +7597,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
GenericUnaryOpStub stub(
Token::SUB,
overwrite,
no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
answer.set_type_info(TypeInfo::Number());
@ -9860,6 +9866,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// the four basic operations. The stub stays in the DEFAULT state
// forever for all other operations (also if smi code is skipped).
GenerateTypeTransition(masm);
break;
}
Label not_floats;
@ -10207,51 +10214,28 @@ void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
// Ensure the operands are on the stack.
if (HasArgsInRegisters()) {
GenerateRegisterArgsPush(masm);
} else {
GenerateLoadArguments(masm);
}
__ pop(ecx); // Return address.
// Left and right arguments are now on top.
// Push this stub's key. Although the operation and the type info are
// encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
__ push(Immediate(Smi::FromInt(op_)));
__ push(Immediate(Smi::FromInt(runtime_operands_type_)));
__ push(ecx); // Push return address.
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
5,
1);
}
@ -10934,10 +10918,12 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &try_float, not_taken);
if (negative_zero_ == kStrictNegativeZero) {
// Go slow case if the value of the expression is zero
// to make sure that we switch between 0 and -0.
__ test(eax, Operand(eax));
__ j(zero, &slow, not_taken);
}
// The value of the expression is a smi that is not zero. Try
// optimistic subtraction '0 - value'.
@ -10945,11 +10931,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(eax));
__ Set(eax, Immediate(0));
__ sub(eax, Operand(edx));
__ j(no_overflow, &done, taken);
// Restore eax and go slow case.
__ bind(&undo);
@ -10961,7 +10943,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
__ j(not_equal, &slow);
if (overwrite_ == UNARY_OVERWRITE) {
__ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
__ xor_(edx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
@ -11002,7 +10984,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
// Try to store the result in a heap number.
__ bind(&try_float);
if (overwrite_ == UNARY_NO_OVERWRITE) {
// Allocate a fresh heap number, but don't overwrite eax until
// we're sure we can do it without going through the slow case
// that needs the value in eax.
@ -11656,7 +11638,7 @@ static int NegativeComparisonResult(Condition cc) {
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
Label check_unequal_objects, done;
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@ -11689,15 +11671,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
Label heap_number;
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
if (cc_ == equal) {
__ j(equal, &heap_number);
// Identical objects are equal for operators ==, !=, and ===.
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
} else {
// Identical objects must call ToPrimitive for <, <=, >, and >=.
__ j(not_equal, &not_identical);
__ j(equal, &heap_number);
if (cc_ != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(above_equal, &not_identical);
}
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if
// it's not NaN.
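
The NaN check matters because even a pointer-identical heap number must compare unequal to itself when its value is NaN. A one-file illustration:

#include <cmath>
#include <cstdio>

int main() {
  double nan = std::nan("");
  printf("%d\n", nan == nan);  // prints 0: NaN is not equal to itself
  printf("%d\n", nan != nan);  // prints 1
  return 0;
}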
@ -11734,79 +11716,75 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_identical);
}
if (cc_ == equal) { // Both strict and non-strict.
// Strict equality can quickly decide whether objects are equal.
// Non-strict object equality is slower, so it is handled later in the stub.
if (cc_ == equal && strict_) {
Label slow; // Fallthrough label.
Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
if (strict_) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
{
Label not_smis;
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, Operand(eax));
__ test(ecx, Operand(edx));
__ j(not_zero, &not_smis);
// One operand is a smi.
// Check whether the non-smi is a heap number.
ASSERT_EQ(1, kSmiTagMask);
// ecx still holds eax & kSmiTag, which is either zero or one.
__ sub(Operand(ecx), Immediate(0x01));
__ mov(ebx, edx);
__ xor_(ebx, Operand(eax));
__ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
__ xor_(ebx, Operand(eax));
// if eax was smi, ebx is now edx, else eax.
// Check if the non-smi operand is a heap number.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal (ebx is not zero)
__ mov(eax, ebx);
__ ret(0);
__ bind(&not_smis);
}
// If either operand is a JSObject or an oddball value, then they are not
// equal since their pointers are different
// There is no test for undetectability in strict equality.
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object);
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, Operand(eax));
__ test(ecx, Operand(edx));
__ j(not_zero, &not_smis);
// One operand is a smi.
// Check whether the non-smi is a heap number.
ASSERT_EQ(1, kSmiTagMask);
// ecx still holds eax & kSmiTag, which is either zero or one.
__ sub(Operand(ecx), Immediate(0x01));
__ mov(ebx, edx);
__ xor_(ebx, Operand(eax));
__ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
__ xor_(ebx, Operand(eax));
// if eax was smi, ebx is now edx, else eax.
// Check if the non-smi operand is a heap number.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal (ebx is not zero)
__ mov(eax, ebx);
__ ret(0);
// Return non-zero (eax is not zero)
Label return_not_equal;
ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
__ ret(0);
__ bind(&not_smis);
// If either operand is a JSObject or an oddball value, then they are not
// equal since their pointers are different
// There is no test for undetectability in strict equality.
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object);
// Return non-zero (eax is not zero)
Label return_not_equal;
ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
__ ret(0);
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
__ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
__ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
// Fall through to the general case.
}
// Fall through to the general case.
__ bind(&slow);
}
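
The sub/xor/and/xor sequence above is a branchless select: given that at most one operand is a smi, it leaves the non-smi operand in ebx without a single jump. The same trick in portable C++, assuming the tagging scheme the asserts state (kSmiTag == 0, kSmiTagMask == 1):

#include <cstdint>
#include <cstdio>

// Returns the non-smi operand, assuming exactly one of |a|, |b| is a smi
// (low bit clear) and the other is a heap pointer (low bit set).
static uint32_t SelectNonSmi(uint32_t a, uint32_t b) {
  uint32_t mask = (a & 1) - 1;  // all ones if a is a smi, zero otherwise
  return ((a ^ b) & mask) ^ a;  // (a ^ b) ^ a == b; 0 ^ a == a
}

int main() {
  uint32_t smi = 0x00000010;   // tagged smi: low bit clear
  uint32_t heap = 0x08001001;  // tagged heap pointer: low bit set
  printf("0x%08x 0x%08x\n", SelectNonSmi(smi, heap), SelectNonSmi(heap, smi));
  return 0;                    // both calls yield the heap pointer
}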
@ -11893,7 +11871,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&check_for_strings);
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
&check_unequal_objects);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@ -11906,7 +11885,44 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Abort("Unexpected fall-through from string comparison");
#endif
__ bind(&call_builtin);
__ bind(&check_unequal_objects);
if (cc_ == equal && !strict_) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
Label not_both_objects;
Label return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(1, kSmiTagMask);
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &not_both_objects);
__ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
__ j(below, &not_both_objects);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
// The AND of the undetectable flags is 1 if and only if they are equal.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(zero, &return_unequal);
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(zero, &return_unequal);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
__ Set(eax, Immediate(EQUAL));
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
__ ret(2 * kPointerSize); // eax, edx were pushed
__ bind(&not_both_objects);
}
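
The lea/test pair above exploits tag arithmetic: since at most one operand is a smi, the sum of the two tagged words has a clear low bit exactly when both are heap objects. A tiny sketch under the same assumptions:

#include <cstdint>
#include <cstdio>

// Precondition (established earlier in the stub): not both inputs are smis.
// heap + heap: the two tag bits (1 + 1) carry out, leaving the low bit clear;
// smi + heap: 0 + 1 leaves the low bit set.
static bool BothHeapObjects(uint32_t a, uint32_t b) {
  return ((a + b) & 1) == 0;
}

int main() {
  uint32_t smi = 0x00000010;    // low bit clear
  uint32_t heap1 = 0x08001001;  // low bit set
  uint32_t heap2 = 0x08002003;  // low bit set
  printf("%d %d\n", BothHeapObjects(heap1, heap2), BothHeapObjects(smi, heap1));
  return 0;                     // prints: 1 0
}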
// must swap argument order
__ pop(ecx);
__ pop(edx);

8
deps/v8/src/ia32/full-codegen-ia32.cc

@ -2813,9 +2813,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmnt(masm_, "[ UnaryOperation (SUB)");
bool overwrite =
bool can_overwrite =
(expr->expression()->AsBinaryOperation() != NULL &&
expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register eax.
@ -2827,9 +2829,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmnt(masm_, "[ UnaryOperation (BIT_NOT)");
bool overwrite =
bool can_overwrite =
(expr->expression()->AsBinaryOperation() != NULL &&
expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register eax.

201
deps/v8/src/ia32/ic-ia32.cc

@ -61,11 +61,11 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register r0,
Register r1,
Label* miss) {
static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
Register receiver,
Register r0,
Register r1,
Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
@ -98,36 +98,17 @@ static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
}
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
// This function is safe to call if name is not a symbol, and will jump to
// the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register r0,
Register r1,
Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
//
// name - holds the name of the property on entry and is unchanged.
//
// Scratch registers:
//
// r0 - used for the index into the property dictionary
//
// r1 - used to hold the capacity of the property dictionary.
//
// result - holds the result on exit.
Label done;
// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r0|. Jump to the |miss| label
// otherwise.
static void GenerateStringDictionaryProbes(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register r0,
Register r1) {
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@ -160,14 +141,61 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ cmp(name, Operand(elements, r0, times_4,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
__ j(equal, &done, taken);
__ j(equal, done, taken);
} else {
__ j(not_equal, miss_label, not_taken);
__ j(not_equal, miss, not_taken);
}
}
}
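
The unrolled loop above probes the dictionary at (hash + i + i*i) & mask and scales each index by the three-word entry size before comparing keys. A compact sketch of the same probing scheme over a flat array; the layout constants are illustrative rather than V8's exact values:

#include <cstdint>
#include <cstdio>

static const int kProbes = 4;
static const int kEntrySize = 3;  // words per entry: key, value, details

// Returns the slot offset of |key| in a power-of-two table, or -1 after
// kProbes unsuccessful probes (the generated code then jumps to |miss|).
static int FindEntry(const uint32_t* table, uint32_t capacity,
                     uint32_t hash, uint32_t key) {
  uint32_t mask = capacity - 1;                  // capacity is 2^n
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + i + i * i) & mask;  // masked quadratic probe
    if (table[index * kEntrySize] == key) return (int)(index * kEntrySize);
  }
  return -1;
}

int main() {
  uint32_t table[8 * kEntrySize] = {0};
  uint32_t key = 0x1234, hash = key & 7;
  table[((hash + 2) & 7) * kEntrySize] = key;  // plant the key at probe i == 1
  printf("%d\n", FindEntry(table, 8, hash, key));
  return 0;
}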
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
// property load that is complete. This function is safe to call if
// name is not a symbol, and will jump to the miss_label in that
// case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register r0,
Register r1,
Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
//
// name - holds the name of the property on entry and is unchanged.
//
// Scratch registers:
//
// r0 - used for the index into the property dictionary
//
// r1 - used to hold the capacity of the property dictionary.
//
// result - holds the result on exit.
// Check that the value is a normal property.
Label done;
// Probe the dictionary.
GenerateStringDictionaryProbes(masm,
miss_label,
&done,
elements,
name,
r0,
r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
// property.
__ bind(&done);
const int kElementsStartOffset =
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
@ -179,6 +207,69 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Helper function used to store a property to a dictionary backing
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
// call if name is not a symbol, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register value,
Register r0,
Register r1) {
// Register use:
//
// elements - holds the property dictionary on entry and is clobbered.
//
// name - holds the name of the property on entry and is unchanged.
//
// value - holds the value to store and is unchanged.
//
// r0 - used for index into the property dictionary and is clobbered.
//
// r1 - used to hold the capacity of the property dictionary and is clobbered.
Label done;
// Probe the dictionary.
GenerateStringDictionaryProbes(masm,
miss_label,
&done,
elements,
name,
r0,
r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
// property that is not read only.
__ bind(&done);
const int kElementsStartOffset =
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask
= (PropertyDetails::TypeField::mask() |
PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(kTypeAndReadOnlyMask));
__ j(not_zero, miss_label, not_taken);
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
__ mov(Operand(r0, 0), value);
// Update write barrier. Make sure not to clobber the value.
__ mov(r1, value);
__ RecordWrite(elements, r0, r1);
}
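
The single test against kTypeAndReadOnlyMask works because the details word packs the property type and attribute bits into one smi-tagged field, so non-normal and read-only properties are rejected together. A sketch with assumed field widths (the real PropertyDetails layout may differ):

#include <cstdint>
#include <cstdio>

// Assumed (illustrative) encoding: the property type sits in the low three
// bits, attribute bits above it, and the whole word is smi-tagged by a
// one-bit shift.
static const uint32_t kSmiTagSize = 1;
static const uint32_t kTypeMask = 0x7;     // hypothetical TypeField::mask()
static const uint32_t kReadOnly = 1 << 3;  // hypothetical READ_ONLY bit

static bool StoreAllowed(uint32_t details) {
  uint32_t mask = (kTypeMask | kReadOnly) << kSmiTagSize;
  return (details & mask) == 0;  // NORMAL (type 0) and not read-only
}

int main() {
  uint32_t normal = 0u << kSmiTagSize;  // NORMAL, writable
  uint32_t read_only = kReadOnly << kSmiTagSize;
  printf("%d %d\n", StoreAllowed(normal), StoreAllowed(read_only));  // 1 0
  return 0;
}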
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
@ -1238,7 +1329,7 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
GenerateDictionaryLoadReceiverCheck(masm, edx, eax, ebx, &miss);
GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
// eax: elements
// Search the dictionary placing the result in edi.
@ -1517,7 +1608,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
GenerateDictionaryLoadReceiverCheck(masm, eax, edx, ebx, &miss);
GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);
// edx: elements
// Search the dictionary placing the result in eax.
@ -1775,6 +1866,36 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss, restore_miss;
GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(edx);
GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
__ Drop(1);
__ IncrementCounter(&Counters::store_normal_hit, 1);
__ ret(0);
__ bind(&restore_miss);
__ pop(edx);
__ bind(&miss);
__ IncrementCounter(&Counters::store_normal_miss, 1);
GenerateMiss(masm);
}
// Defined in ic.cc.
Object* KeyedStoreIC_Miss(Arguments args);

129
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -98,11 +98,6 @@ void MacroAssembler::InNewSpace(Register object,
}
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object, int offset,
Register value, Register scratch) {
// The compiled code assumes that record write doesn't change the
@ -153,6 +148,39 @@ void MacroAssembler::RecordWrite(Register object, int offset,
}
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !address.is(esi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
test(value, Immediate(kSmiTagMask));
j(zero, &done);
InNewSpace(object, value, equal, &done);
RecordWriteHelper(object, address, value);
bind(&done);
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(address, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
}
}
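
This overload shows the usual write-barrier shape: filter out smi values and new-space targets before paying for the region marking. A schematic sketch with stand-in predicates (InNewSpace and MarkRegionDirty below are placeholders, not V8's signatures):

#include <cstdint>
#include <cstdio>

static bool InNewSpace(uintptr_t object) {
  // Placeholder: pretend the young generation lives below this address.
  return object < 0x10000000;
}

static void MarkRegionDirty(uintptr_t object, uintptr_t address) {
  printf("dirty: object=0x%lx slot=0x%lx\n",
         (unsigned long)object, (unsigned long)address);
}

// Schematic barrier: skip the work for smi values (tag bit clear) and for
// stores into objects the scavenger will visit anyway.
static void RecordWrite(uintptr_t object, uintptr_t address, uintptr_t value) {
  if ((value & 1) == 0) return;    // smi store: no barrier needed
  if (InNewSpace(object)) return;  // new-space target: no barrier needed
  MarkRegionDirty(object, address);
}

int main() {
  RecordWrite(0x20000000, 0x20000010, 0x8);         // smi value: filtered out
  RecordWrite(0x20000000, 0x20000010, 0x30000001);  // old-space store: marked
  return 0;
}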
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
cmp(esp,
Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
@ -514,97 +542,6 @@ void MacroAssembler::PopTryHandler() {
}
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
int save_at_depth,
Label* miss) {
// Make sure there's no overlap between scratch and the other
// registers.
ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
if (save_at_depth == depth) {
mov(Operand(esp, kPointerSize), object_reg);
}
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
while (object != holder) {
depth++;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
JSObject* prototype = JSObject::cast(object->GetPrototype());
if (Heap::InNewSpace(prototype)) {
// Get the map of the current object.
mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
cmp(Operand(scratch), Immediate(Handle<Map>(object->map())));
// Branch on the result of the map check.
j(not_equal, miss, not_taken);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
if (object->IsJSGlobalProxy()) {
CheckAccessGlobalProxy(reg, scratch, miss);
// Restore scratch register to be the map of the object.
// We load the prototype from the map in the scratch register.
mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
}
// The prototype is in new space; we cannot store a reference
// to it in the code. Load it from the map.
reg = holder_reg; // from now the object is in holder_reg
mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
} else {
// Check the map of the current object.
cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
// Branch on the result of the map check.
j(not_equal, miss, not_taken);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
if (object->IsJSGlobalProxy()) {
CheckAccessGlobalProxy(reg, scratch, miss);
}
// The prototype is in old space; load it directly.
reg = holder_reg; // from now the object is in holder_reg
mov(reg, Handle<JSObject>(prototype));
}
if (save_at_depth == depth) {
mov(Operand(esp, kPointerSize), reg);
}
// Go to the next object in the prototype chain.
object = prototype;
}
// Check the holder map.
cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(holder->map())));
j(not_equal, miss, not_taken);
// Log the check depth.
LOG(IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
ASSERT(object == holder);
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
if (object->IsJSGlobalProxy()) {
CheckAccessGlobalProxy(reg, scratch, miss);
}
return reg;
}
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {

39
deps/v8/src/ia32/macro-assembler-ia32.h

@ -73,16 +73,27 @@ class MacroAssembler: public Assembler {
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
// All registers are clobbered by the operation.
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
// object being stored. If offset is zero, then the scratch register
// contains the array index into the elements array represented as a
// Smi. All registers are clobbered by the operation. RecordWrite
// filters out smis so it does not update the write barrier if the
// value is a smi.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
// object being stored. All registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
void RecordWrite(Register object,
Register address,
Register value);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@ -233,24 +244,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
// Generates code that verifies that the maps of objects in the
// prototype chain of object haven't changed since the code was
// generated, and branches to the miss label if any map has. If
// necessary the function also generates code for a security check
// in case of global object holders. The scratch and holder
// registers are always clobbered, but the object register is only
// clobbered if it is the same as the holder register. The function
// returns a register containing the holder - either object_reg or
// holder_reg.
// The function can optionally (when save_at_depth !=
// kInvalidProtoDepth) save the object at the given depth by moving
// it to [esp + kPointerSize].
Register CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
int save_at_depth,
Label* miss);
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.

294
deps/v8/src/ia32/stub-cache-ia32.cc

@ -101,6 +101,110 @@ static void ProbeTable(MacroAssembler* masm,
}
// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Label* miss_label,
Register receiver,
String* name,
Register r0,
Register extra) {
ASSERT(name->IsSymbol());
__ IncrementCounter(&Counters::negative_lookups, 1);
__ IncrementCounter(&Counters::negative_lookups_miss, 1);
Label done;
__ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
__ test(FieldOperand(r0, Map::kBitFieldOffset),
Immediate(kInterceptorOrAccessCheckNeededMask));
__ j(not_zero, miss_label, not_taken);
__ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
__ j(below, miss_label, not_taken);
// Load properties array.
Register properties = r0;
__ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ cmp(FieldOperand(properties, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(not_equal, miss_label);
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
// Generate an unrolled loop that performs a few probes before
// giving up.
static const int kProbes = 4;
const int kElementsStartOffset =
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
// If the names in the slots probed for the hash value (probes 1 to
// kProbes - 1) are not equal to the name, and the kProbes-th slot is
// unused (its name is the undefined value), the hash table is guaranteed
// not to contain the property. This holds even if some slots hold
// deleted properties (their names are the null value).
for (int i = 0; i < kProbes; i++) {
// r0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
if (extra.is(no_reg)) {
__ push(receiver);
}
Register index = extra.is(no_reg) ? receiver : extra;
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
__ and_(Operand(index),
Immediate(Smi::FromInt(name->Hash() +
StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = extra.is(no_reg) ? properties : extra;
// Finding undefined in this slot means the name is not in the dictionary.
ASSERT_EQ(kSmiTagSize, 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(entity_name, Factory::undefined_value());
if (extra.is(no_reg)) {
// 'receiver' shares a register with 'entity_name'.
__ pop(receiver);
}
if (i != kProbes - 1) {
__ j(equal, &done, taken);
// Stop if we found the property.
__ cmp(entity_name, Handle<String>(name));
__ j(equal, miss_label, not_taken);
if (extra.is(no_reg)) {
// Restore the properties if their register was occupied by the name.
__ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
}
} else {
// Give up probing if we still haven't found the undefined value.
__ j(not_equal, miss_label, not_taken);
}
}
__ bind(&done);
__ DecrementCounter(&Counters::negative_lookups_miss, 1);
}
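
Note the counter idiom: both negative_lookups and negative_lookups_miss are bumped on entry, and only the successful (done) path decrements the miss counter, so every early jump to miss_label is already accounted for. The same 'assume a miss, undo it on success' pattern in miniature:

#include <cstdio>

static int lookups = 0;
static int lookup_misses = 0;

static bool NegativeLookup(bool found_undefined) {
  ++lookups;
  ++lookup_misses;  // assume a miss, so every early exit is already counted
  if (!found_undefined) return false;  // miss path: no bookkeeping needed
  --lookup_misses;  // success path: take back the provisional miss
  return true;
}

int main() {
  NegativeLookup(true);
  NegativeLookup(false);
  printf("lookups=%d misses=%d\n", lookups, lookup_misses);  // 2 and 1
  return 0;
}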
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@ -723,6 +827,33 @@ static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
}
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static Object* GenerateCheckPropertyCells(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
Register scratch,
Label* miss) {
JSObject* current = object;
while (current != holder) {
if (current->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm,
GlobalObject::cast(current),
name,
scratch,
miss);
if (cell->IsFailure()) {
return cell;
}
}
ASSERT(current->IsJSObject());
current = JSObject::cast(current->GetPrototype());
}
return NULL;
}
#undef __
#define __ ACCESS_MASM(masm())
@ -733,33 +864,129 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register holder_reg,
Register scratch,
String* name,
int push_at_depth,
Label* miss) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
push_at_depth, miss);
int save_at_depth,
Label* miss,
Register extra) {
// Make sure the scratch registers don't overlap the object and holder
// registers.
ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
ASSERT(!extra.is(object_reg) && !extra.is(holder_reg) && !extra.is(scratch));
// Keep track of the current object in register reg.
Register reg = object_reg;
JSObject* current = object;
int depth = 0;
if (save_at_depth == depth) {
__ mov(Operand(esp, kPointerSize), reg);
}
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
while (object != holder) {
if (object->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(object),
name,
scratch,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
return result;
// Traverse the prototype chain, checking the maps for fast and global
// objects and doing a negative lookup for normal (dictionary-mode) objects.
while (current != holder) {
depth++;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
ASSERT(current->GetPrototype()->IsJSObject());
JSObject* prototype = JSObject::cast(current->GetPrototype());
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
Object* lookup_result = Heap::LookupSymbol(name);
if (lookup_result->IsFailure()) {
set_failure(Failure::cast(lookup_result));
return reg;
} else {
name = String::cast(lookup_result);
}
}
ASSERT(current->property_dictionary()->FindEntry(name) ==
StringDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(),
miss,
reg,
name,
scratch,
extra);
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
} else if (Heap::InNewSpace(prototype)) {
// Get the map of the current object.
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ cmp(Operand(scratch), Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ j(not_equal, miss, not_taken);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
if (current->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch, miss);
// Restore scratch register to be the map of the object.
// We load the prototype from the map in the scratch register.
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
}
// The prototype is in new space; we cannot store a reference
// to it in the code. Load it from the map.
reg = holder_reg; // from now the object is in holder_reg
__ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
} else {
// Check the map of the current object.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ j(not_equal, miss, not_taken);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
if (current->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch, miss);
}
// The prototype is in old space; load it directly.
reg = holder_reg; // from now the object is in holder_reg
__ mov(reg, Handle<JSObject>(prototype));
}
object = JSObject::cast(object->GetPrototype());
if (save_at_depth == depth) {
__ mov(Operand(esp, kPointerSize), reg);
}
// Go to the next object in the prototype chain.
current = prototype;
}
ASSERT(current == holder);
// Log the check depth.
LOG(IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(holder->map())));
__ j(not_equal, miss, not_taken);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch, miss);
}
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
Object* result = GenerateCheckPropertyCells(masm(),
object,
holder,
name,
scratch,
miss);
if (result->IsFailure()) set_failure(Failure::cast(result));
// Return the register containing the holder.
return result;
return reg;
}
@ -1083,7 +1310,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
__ j(zero, &miss, not_taken);
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
Register reg = CheckPrototypes(object, edx, holder, ebx, eax,
name, &miss, edi);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@ -1145,7 +1373,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, name, &miss);
eax, name, &miss, edi);
if (argc == 0) {
// Noop, return the length.
@ -1291,7 +1519,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, name, &miss);
eax, name, &miss, edi);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@ -1366,7 +1594,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
ebx, edx, name, &miss, edi);
Register receiver = ebx;
Register index = edi;
@ -1431,7 +1659,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
ebx, edx, name, &miss, edi);
Register receiver = eax;
Register index = edi;
@ -1536,7 +1764,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, eax, name, depth, &miss);
ebx, eax, name, depth, &miss, edi);
// Patch the receiver on the stack with the global proxy if
// necessary.
@ -1559,7 +1787,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
ebx, edx, name, &miss, edi);
}
break;
@ -1579,7 +1807,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
ebx, edx, name, &miss, edi);
}
break;
}
@ -1600,7 +1828,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
ebx, edx, name, &miss, edi);
}
break;
}
@ -1722,7 +1950,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
CheckPrototypes(object, edx, holder, ebx, eax, name, &miss, edi);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@ -1993,6 +2221,8 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
ASSERT(last->IsGlobalObject() || last->HasFastProperties());
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
@ -2140,7 +2370,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
// Check that the maps haven't changed.
CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
CheckPrototypes(object, eax, holder, ebx, edx, name, &miss, edi);
// Get the value from the cell.
__ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));

33
deps/v8/src/ic-inl.h

@ -80,11 +80,38 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
}
Map* IC::GetCodeCacheMapForObject(Object* object) {
if (object->IsJSObject()) return JSObject::cast(object)->map();
InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
JSObject* holder) {
if (object->IsJSObject()) {
return GetCodeCacheForObject(JSObject::cast(object), holder);
}
// If the object is a value, we use the prototype map for the cache.
ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
return JSObject::cast(object->GetPrototype())->map();
return PROTOTYPE_MAP;
}
InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
JSObject* holder) {
// Fast-properties and global objects store stubs in their own maps.
// Slow-properties objects use the prototype's map (unless the property
// is the object's own, i.e. holder == object). This works because
// slow-properties objects that share a prototype (or prototypes with the
// same map) and lack the property are interchangeable for such a stub.
if (holder != object &&
!object->HasFastProperties() &&
!object->IsJSGlobalProxy() &&
!object->IsJSGlobalObject()) {
return PROTOTYPE_MAP;
}
return OWN_MAP;
}
Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) {
Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
ASSERT(map_owner->IsJSObject());
return JSObject::cast(map_owner)->map();
}

157
deps/v8/src/ic.cc

@ -134,13 +134,45 @@ Address IC::OriginalCodeAddress() {
}
#endif
static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
Object* receiver) {
Object* end = lookup->IsProperty() ? lookup->holder() : Heap::null_value();
for (Object* current = receiver;
current != end;
current = current->GetPrototype()) {
if (current->IsJSObject() &&
!JSObject::cast(current)->HasFastProperties() &&
!current->IsJSGlobalProxy() &&
!current->IsJSGlobalObject()) {
return true;
}
}
return false;
}
IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
IC::State state = target->ic_state();
if (state != MONOMORPHIC) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
Map* map = GetCodeCacheMapForObject(receiver);
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
// The stub was generated for JSObject but called for non-JSObject.
// IC::GetCodeCacheMap is not applicable.
return MONOMORPHIC;
} else if (cache_holder == PROTOTYPE_MAP &&
receiver->GetPrototype()->IsNull()) {
// IC::GetCodeCacheMap is not applicable.
return MONOMORPHIC;
}
Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
@ -487,12 +519,24 @@ Object* CallICBase::LoadFunction(State state,
void CallICBase::UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
Handle<String> name) {
State state,
Handle<Object> object,
Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
#ifndef V8_TARGET_ARCH_IA32
// Normal objects are only implemented for IA32 so far.
if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
#else
if (lookup->holder() != *object &&
HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
// Suppress optimization for prototype chains with slow properties objects
// in the middle.
return;
}
#endif
// Compute the number of arguments.
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
@ -590,8 +634,13 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(Code::cast(code));
} else if (state == MEGAMORPHIC) {
// The map used as the cache key should be consistent with
// GenerateMonomorphicCacheProbe; it is not the map that holds the stub.
Map* map = JSObject::cast(object->IsJSObject() ? *object :
object->GetPrototype())->map();
// Update the stub cache.
StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
StubCache::Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
@ -795,6 +844,8 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
// Compute the code stub for this load.
Object* code = NULL;
if (state == UNINITIALIZED) {
@ -836,7 +887,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
code = StubCache::ComputeLoadNormal(*name, *receiver);
code = StubCache::ComputeLoadNormal();
}
break;
}
@ -871,8 +922,12 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
} else if (state == MONOMORPHIC) {
set_target(megamorphic_stub());
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
// The map used as the cache key should be consistent with
// GenerateMonomorphicCacheProbe.
Map* map = JSObject::cast(object->IsJSObject() ? *object :
object->GetPrototype())->map();
StubCache::Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
@ -1018,6 +1073,8 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
// Compute the code stub for this load.
Object* code = NULL;
@ -1198,16 +1255,18 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
break;
}
case NORMAL: {
if (!receiver->IsGlobalObject()) {
return;
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
code = StubCache::ComputeStoreGlobal(*name, *global, cell);
} else {
if (lookup->holder() != *receiver) return;
code = StubCache::ComputeStoreNormal();
}
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
code = StubCache::ComputeStoreGlobal(*name, *global, cell);
break;
}
case CALLBACKS: {
@ -1580,16 +1639,15 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
Object* BinaryOp_Patch(Arguments args) {
ASSERT(args.length() == 6);
ASSERT(args.length() == 5);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
Handle<Object> result = args.at<Object>(2);
int key = Smi::cast(args[3])->value();
int key = Smi::cast(args[2])->value();
Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
#ifdef DEBUG
Token::Value op = static_cast<Token::Value>(Smi::cast(args[4])->value());
BinaryOpIC::TypeInfo prev_type_info =
static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[5])->value());
static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
#endif // DEBUG
{ HandleScope scope;
BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
@ -1608,6 +1666,61 @@ Object* BinaryOp_Patch(Arguments args) {
}
}
HandleScope scope;
Handle<JSBuiltinsObject> builtins = Top::builtins();
Object* builtin = NULL; // Initialization calms down the compiler.
switch (op) {
case Token::ADD:
builtin = builtins->javascript_builtin(Builtins::ADD);
break;
case Token::SUB:
builtin = builtins->javascript_builtin(Builtins::SUB);
break;
case Token::MUL:
builtin = builtins->javascript_builtin(Builtins::MUL);
break;
case Token::DIV:
builtin = builtins->javascript_builtin(Builtins::DIV);
break;
case Token::MOD:
builtin = builtins->javascript_builtin(Builtins::MOD);
break;
case Token::BIT_AND:
builtin = builtins->javascript_builtin(Builtins::BIT_AND);
break;
case Token::BIT_OR:
builtin = builtins->javascript_builtin(Builtins::BIT_OR);
break;
case Token::BIT_XOR:
builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
break;
case Token::SHR:
builtin = builtins->javascript_builtin(Builtins::SHR);
break;
case Token::SAR:
builtin = builtins->javascript_builtin(Builtins::SAR);
break;
case Token::SHL:
builtin = builtins->javascript_builtin(Builtins::SHL);
break;
default:
UNREACHABLE();
}
Handle<JSFunction> builtin_function(JSFunction::cast(builtin));
bool caught_exception;
Object** builtin_args[] = { right.location() };
Handle<Object> result = Execution::Call(builtin_function,
left,
ARRAY_SIZE(builtin_args),
builtin_args,
&caught_exception);
if (caught_exception) {
return Failure::Exception();
}
return *result;
}

12
deps/v8/src/ic.h

@ -117,9 +117,14 @@ class IC {
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
// Returns the map to use for caching stubs for a given object.
// This method should not be called with undefined or null.
static inline Map* GetCodeCacheMapForObject(Object* object);
// Determines which map must be used for keeping the code stub.
// These methods should not be called with undefined or null.
static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
JSObject* holder);
static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
JSObject* holder);
static inline Map* GetCodeCacheMap(Object* object,
InlineCacheHolderFlag holder);
protected:
Address fp() const { return fp_; }
@ -384,6 +389,7 @@ class StoreIC: public IC {
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
private:
// Update the inline cache and the global stub cache based on the

169
deps/v8/src/liveedit-debugger.js

@ -51,7 +51,8 @@ Debug.LiveEdit = new function() {
// Applies the change to the script.
// The change is given as a list of chunks encoded in a single array as
// a series of triplets (pos1_start, pos1_end, pos2_end).
function ApplyPatchMultiChunk(script, diff_array, new_source, change_log) {
function ApplyPatchMultiChunk(script, diff_array, new_source, preview_only,
change_log) {
var old_source = script.source;
@ -96,7 +97,7 @@ Debug.LiveEdit = new function() {
}
// Recursively collects all newly compiled functions that are going into
// business and should be have link to the actual script updated.
// business and should have their link to the actual script updated.
function CollectNew(node_list) {
for (var i = 0; i < node_list.length; i++) {
link_to_original_script_list.push(node_list[i]);
@ -121,6 +122,20 @@ Debug.LiveEdit = new function() {
}
}
var preview_description = {
change_tree: DescribeChangeTree(root_old_node),
textual_diff: {
old_len: old_source.length,
new_len: new_source.length,
chunks: diff_array
},
updated: false
};
if (preview_only) {
return preview_description;
}
HarvestTodo(root_old_node);
// Collect shared infos for functions whose code need to be patched.
@ -132,13 +147,15 @@ Debug.LiveEdit = new function() {
}
}
// Check that function being patched is not currently on stack.
CheckStackActivations(replaced_function_infos, change_log);
// We haven't changed anything before this line yet.
// Committing all changes.
// Check that the functions being patched are not currently on the stack, or drop them.
var dropped_functions_number =
CheckStackActivations(replaced_function_infos, change_log);
preview_description.stack_modified = dropped_functions_number != 0;
// Start with breakpoints. Convert their line/column positions and
// temporary remove.
var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
@ -166,6 +183,8 @@ Debug.LiveEdit = new function() {
LinkToOldScript(link_to_old_script_list[i], old_script,
link_to_old_script_report);
}
preview_description.created_script_name = old_script_name;
}
// Link to an actual script all the functions that we are going to use.
@ -189,6 +208,9 @@ Debug.LiveEdit = new function() {
}
break_points_restorer(pos_translator, old_script);
preview_description.updated = true;
return preview_description;
}
// Function is public.
this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
@ -494,6 +516,16 @@ Debug.LiveEdit = new function() {
this.new_end_pos = void 0;
this.corresponding_node = void 0;
this.unmatched_new_nodes = void 0;
// 'Textual' correspondence/matching is weaker than 'pure'
// correspondence/matching. We need the 'textual' level for visual
// presentation in the UI and use the 'pure' level for actual code
// manipulation. Sometimes only a function body has changed (the functions
// in the old and new scripts textually correspond), but we cannot patch
// the code, so we treat them as an old function deleted and a new
// function created.
this.textual_corresponding_node = void 0;
this.textually_unmatched_new_nodes = void 0;
this.live_shared_info_wrapper = void 0;
}
@ -640,6 +672,7 @@ Debug.LiveEdit = new function() {
var new_children = new_node.children;
var unmatched_new_nodes_list = [];
var textually_unmatched_new_nodes_list = [];
var old_index = 0;
var new_index = 0;
@ -650,6 +683,7 @@ Debug.LiveEdit = new function() {
if (new_children[new_index].info.start_position <
old_children[old_index].new_start_pos) {
unmatched_new_nodes_list.push(new_children[new_index]);
textually_unmatched_new_nodes_list.push(new_children[new_index]);
new_index++;
} else if (new_children[new_index].info.start_position ==
old_children[old_index].new_start_pos) {
@ -657,6 +691,8 @@ Debug.LiveEdit = new function() {
old_children[old_index].new_end_pos) {
old_children[old_index].corresponding_node =
new_children[new_index];
old_children[old_index].textual_corresponding_node =
new_children[new_index];
if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
ProcessChildren(old_children[old_index],
new_children[new_index]);
@ -673,6 +709,7 @@ Debug.LiveEdit = new function() {
"No corresponding function in new script found";
old_node.status = FunctionStatus.CHANGED;
unmatched_new_nodes_list.push(new_children[new_index]);
textually_unmatched_new_nodes_list.push(new_children[new_index]);
}
new_index++;
old_index++;
@ -694,21 +731,28 @@ Debug.LiveEdit = new function() {
while (new_index < new_children.length) {
unmatched_new_nodes_list.push(new_children[new_index]);
textually_unmatched_new_nodes_list.push(new_children[new_index]);
new_index++;
}
if (old_node.status == FunctionStatus.CHANGED) {
if (!CompareFunctionExpectations(old_node.info, new_node.info)) {
var why_wrong_expectations =
WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
if (why_wrong_expectations) {
old_node.status = FunctionStatus.DAMAGED;
old_node.status_explanation = "Changed code expectations";
old_node.status_explanation = why_wrong_expectations;
}
}
old_node.unmatched_new_nodes = unmatched_new_nodes_list;
old_node.textually_unmatched_new_nodes =
textually_unmatched_new_nodes_list;
}
ProcessChildren(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
old_code_tree.textual_corresponding_node = new_code_tree;
Assert(old_code_tree.status != FunctionStatus.DAMAGED,
"Script became damaged");
}
@ -792,27 +836,37 @@ Debug.LiveEdit = new function() {
}
// Compares the old and new versions of a function interface, whether it
// changed or not.
function CompareFunctionExpectations(function_info1, function_info2) {
// changed or not. Returns explanation if they differ.
function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
// Check that the function has the same number of parameters (there may
// exist an adapter that won't survive a change in the parameter count).
if (function_info1.param_num != function_info2.param_num) {
return false;
return "Changed parameter number: " + function_info1.param_num +
" and " + function_info2.param_num;
}
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
if (!scope_info1) {
return !scope_info2;
var scope_info1_text;
var scope_info2_text;
if (scope_info1) {
scope_info1_text = scope_info1.toString();
} else {
scope_info1_text = "";
}
if (scope_info1.length != scope_info2.length) {
return false;
if (scope_info2) {
scope_info2_text = scope_info2.toString();
} else {
scope_info2_text = "";
}
// Check that the outer scope structure is unchanged. Otherwise the function
// will not work properly with existing scopes.
return scope_info1.toString() == scope_info2.toString();
if (scope_info1_text != scope_info2_text) {
return "Incompatible variable maps: [" + scope_info1_text +
"] and [" + scope_info2_text + "]";
}
// No differences. Return undefined.
return;
}
// Minifier forward declaration.
@ -856,6 +910,8 @@ Debug.LiveEdit = new function() {
change_log.push( { functions_on_stack: problems } );
throw new Failure("Blocked by functions on stack");
}
return dropped.length;
}
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
@ -897,14 +953,11 @@ Debug.LiveEdit = new function() {
this.GetPcFromSourcePos = GetPcFromSourcePos;
// LiveEdit main entry point: changes a script text to a new string.
function SetScriptSource(script, new_source, change_log) {
function SetScriptSource(script, new_source, preview_only, change_log) {
var old_source = script.source;
var diff = CompareStringsLinewise(old_source, new_source);
if (diff.length == 0) {
change_log.push( { empty_diff: true } );
return;
}
ApplyPatchMultiChunk(script, diff, new_source, change_log);
return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
change_log);
}
// Function is public.
this.SetScriptSource = SetScriptSource;
@ -931,7 +984,67 @@ Debug.LiveEdit = new function() {
return ApplyPatchMultiChunk(script,
[ change_pos, change_pos + change_len, change_pos + new_str.length],
new_source, change_log);
new_source, false, change_log);
}
// Creates JSON description for a change tree.
function DescribeChangeTree(old_code_tree) {
function ProcessOldNode(node) {
var child_infos = [];
for (var i = 0; i < node.children.length; i++) {
var child = node.children[i];
if (child.status != FunctionStatus.UNCHANGED) {
child_infos.push(ProcessOldNode(child));
}
}
var new_child_infos = [];
if (node.textually_unmatched_new_nodes) {
for (var i = 0; i < node.textually_unmatched_new_nodes.length; i++) {
var child = node.textually_unmatched_new_nodes[i];
new_child_infos.push(ProcessNewNode(child));
}
}
var res = {
name: node.info.function_name,
positions: DescribePositions(node),
status: node.status,
children: child_infos,
new_children: new_child_infos
};
if (node.status_explanation) {
res.status_explanation = node.status_explanation;
}
if (node.textual_corresponding_node) {
res.new_positions = DescribePositions(node.textual_corresponding_node);
}
return res;
}
function ProcessNewNode(node) {
var child_infos = [];
// Do not list ancestors.
if (false) {
for (var i = 0; i < node.children.length; i++) {
child_infos.push(ProcessNewNode(node.children[i]));
}
}
var res = {
name: node.info.function_name,
positions: DescribePositions(node),
children: child_infos,
};
return res;
}
function DescribePositions(node) {
return {
start_position: node.info.start_position,
end_position: node.info.end_position
};
}
return ProcessOldNode(old_code_tree);
}
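The description produced here is a plain JSON-style tree. A hypothetical
result for a script in which one function changed and one was added could
look like the following (field names are taken from the code above; the
concrete values are illustrative):

  {
    name: "",                            // top-level script node
    positions: { start_position: 0, end_position: 120 },
    status: "changed",                   // a FunctionStatus value
    status_explanation: "...",           // present only when there is one
    new_positions: { start_position: 0, end_position: 140 },
    children: [ /* non-UNCHANGED old child functions, same shape */ ],
    new_children: [                      // textually unmatched new nodes
      { name: "added_fn",
        positions: { start_position: 40, end_position: 80 },
        children: [] }
    ]
  }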

20
deps/v8/src/liveedit.cc

@ -1187,7 +1187,12 @@ static bool FixTryCatchHandler(StackFrame* top_frame,
// Returns error message or NULL.
static const char* DropFrames(Vector<StackFrame*> frames,
int top_frame_index,
int bottom_js_frame_index) {
int bottom_js_frame_index,
Debug::FrameDropMode* mode) {
if (Debug::kFrameDropperFrameSize < 0) {
return "Stack manipulations are not supported in this architecture.";
}
StackFrame* pre_top_frame = frames[top_frame_index - 1];
StackFrame* top_frame = frames[top_frame_index];
StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
@ -1198,12 +1203,18 @@ static const char* DropFrames(Vector<StackFrame*> frames,
if (pre_top_frame->code()->is_inline_cache_stub() &&
pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
} else if (pre_top_frame->code() == Debug::debug_break_slot()) {
// OK, we can drop debug break slot.
*mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
} else if (pre_top_frame->code() ==
Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
// OK, we can drop our own code.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else if (pre_top_frame->code()->kind() == Code::STUB &&
pre_top_frame->code()->major_key()) {
// Unit Test entry, it's fine, we support this case.
// Entry from our unit tests, it's fine, we support this case.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else {
return "Unknown structure of stack above changing function";
}
@ -1316,8 +1327,9 @@ static const char* DropActivationsInActiveThread(
return NULL;
}
Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
const char* error_message = DropFrames(frames, top_frame_index,
bottom_js_frame_index);
bottom_js_frame_index, &drop_mode);
if (error_message != NULL) {
return error_message;
@ -1331,7 +1343,7 @@ static const char* DropActivationsInActiveThread(
break;
}
}
Debug::FramesHaveBeenDropped(new_id);
Debug::FramesHaveBeenDropped(new_id, drop_mode);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {

5
deps/v8/src/macros.py

@ -120,6 +120,7 @@ macro IS_SPEC_OBJECT_OR_NULL(arg) = (%_IsObject(arg) || %_IsFunction(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
@ -145,11 +146,15 @@ macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateT
macro DAY(time) = ($floor(time / 86400000));
macro MONTH_FROM_TIME(time) = (MonthFromTime(time));
macro DATE_FROM_TIME(time) = (DateFromTime(time));
macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DATE_FROM_TIME(time));
macro YEAR_FROM_TIME(time) = (YearFromTime(time));
macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));
macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
macro NAN_OR_SEC_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : SEC_FROM_TIME(time));
macro MS_FROM_TIME(time) = (Modulo(time, 1000));
macro NAN_OR_MS_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MS_FROM_TIME(time));
# Last input and last subject of regexp matches.
macro LAST_SUBJECT(array) = ((array)[1]);
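The NAN_OR_* wrappers exist because the spec requires the Date getters to
return NaN for an invalid date instead of computing a field from NaN.
JavaScript-level behavior these macros implement for the date builtins
(illustrative):

  new Date(NaN).getMinutes();  // NaN  (NAN_OR_MIN_FROM_TIME)
  new Date(NaN).getSeconds();  // NaN  (NAN_OR_SEC_FROM_TIME)
  new Date(0).getSeconds();    // 0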

4
deps/v8/src/messages.js

@ -196,7 +196,9 @@ function FormatMessage(message) {
circular_structure: "Converting circular structure to JSON",
obj_ctor_property_non_object: "Object.%0 called on non-object",
array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
illegal_access: "illegal access"
object_not_extensible: "Can't add property %0, object is not extensible",
illegal_access: "Illegal access",
invalid_preparser_data: "Invalid preparser data for function %0"
};
}
var format = kMessages[message.type];

68
deps/v8/src/objects-inl.h

@ -1335,6 +1335,21 @@ void JSObject::InitializeBody(int object_size) {
}
bool JSObject::HasFastProperties() {
return !properties()->IsDictionary();
}
int JSObject::MaxFastProperties() {
// Allow extra fast properties if the object has more than
// kMaxFastProperties in-object properties. When this is the case,
// it is very unlikely that the object is being used as a dictionary
// and there is a good chance that allowing more map transitions
// will be worth it.
return Max(map()->inobject_properties(), kMaxFastProperties);
}
void Struct::InitializeBody(int object_size) {
Object* value = Heap::undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
@ -1343,11 +1358,6 @@ void Struct::InitializeBody(int object_size) {
}
bool JSObject::HasFastProperties() {
return !properties()->IsDictionary();
}
bool Object::ToArrayIndex(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
@ -2189,6 +2199,20 @@ bool Map::is_access_check_needed() {
}
void Map::set_is_extensible(bool value) {
if (value) {
set_bit_field2(bit_field2() | (1 << kIsExtensible));
} else {
set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
}
}
bool Map::is_extensible() {
return ((1 << kIsExtensible) & bit_field2()) != 0;
}
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@ -2263,13 +2287,15 @@ Code::Flags Code::ComputeFlags(Kind kind,
InLoopFlag in_loop,
InlineCacheState ic_state,
PropertyType type,
int argc) {
int argc,
InlineCacheHolderFlag holder) {
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
if (in_loop) bits |= kFlagsICInLoopMask;
bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
bits |= argc << kFlagsArgumentsCountShift;
if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
// Cast to flags and validate result before returning it.
Flags result = static_cast<Flags>(bits);
ASSERT(ExtractKindFromFlags(result) == kind);
@ -2283,9 +2309,10 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
PropertyType type,
InlineCacheHolderFlag holder,
InLoopFlag in_loop,
int argc) {
return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc);
return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc, holder);
}
@ -2318,6 +2345,12 @@ int Code::ExtractArgumentsCountFromFlags(Flags flags) {
}
InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
int bits = (flags & kFlagsCacheInPrototypeMapMask);
return bits != 0 ? PROTOTYPE_MAP : OWN_MAP;
}
Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
int bits = flags & ~kFlagsTypeMask;
return static_cast<Flags>(bits);
@ -2774,7 +2807,7 @@ JSValue* JSValue::cast(Object* obj) {
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
INT_ACCESSORS(Code, relocation_size, kRelocationSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
@ -2783,13 +2816,28 @@ byte* Code::instruction_start() {
}
byte* Code::instruction_end() {
return instruction_start() + instruction_size();
}
int Code::body_size() {
return RoundUp(instruction_size() + relocation_size(), kObjectAlignment);
return RoundUp(instruction_size(), kObjectAlignment);
}
ByteArray* Code::unchecked_relocation_info() {
return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
}
byte* Code::relocation_start() {
return FIELD_ADDR(this, kHeaderSize + instruction_size());
return unchecked_relocation_info()->GetDataStartAddress();
}
int Code::relocation_size() {
return unchecked_relocation_info()->length();
}
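// Net effect of the accessors above: the relocation info now lives in a
// separate ByteArray on the heap instead of trailing the instructions, so
// body_size() covers only the (padded) instructions and relocation_start()
// points into that array.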

59
deps/v8/src/objects.cc

@ -1276,7 +1276,7 @@ Object* JSObject::AddFastProperty(String* name,
}
if (map()->unused_property_fields() == 0) {
if (properties()->length() > kMaxFastProperties) {
if (properties()->length() > MaxFastProperties()) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return AddSlowProperty(name, value, attributes);
@ -1386,6 +1386,11 @@ Object* JSObject::AddProperty(String* name,
Object* value,
PropertyAttributes attributes) {
ASSERT(!IsJSGlobalProxy());
if (!map()->is_extensible()) {
Handle<Object> args[1] = {Handle<String>(name)};
return Top::Throw(*Factory::NewTypeError("object_not_extensible",
HandleVector(args, 1)));
}
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
if (map()->instance_descriptors()->number_of_descriptors() <
@ -1474,7 +1479,7 @@ Object* JSObject::ConvertDescriptorToField(String* name,
Object* new_value,
PropertyAttributes attributes) {
if (map()->unused_property_fields() == 0 &&
properties()->length() > kMaxFastProperties) {
properties()->length() > MaxFastProperties()) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return ReplaceSlowProperty(name, new_value, attributes);
@ -1746,8 +1751,6 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
result->DictionaryResult(this, entry);
return;
}
// Slow case object skipped during lookup. Do not use inline caching.
if (!IsGlobalObject()) result->DisallowCaching();
}
result->NotFound();
}
@ -2576,6 +2579,25 @@ bool JSObject::ReferencesObject(Object* obj) {
}
Object* JSObject::PreventExtensions() {
// If there are fast elements we normalize.
if (HasFastElements()) {
NormalizeElements();
}
// Make sure that we never go back to fast case.
element_dictionary()->set_requires_slow_elements();
// Do a map transition, other objects with this map may still
// be extensible.
Object* new_map = map()->CopyDropTransitions();
if (new_map->IsFailure()) return new_map;
Map::cast(new_map)->set_is_extensible(false);
set_map(Map::cast(new_map));
ASSERT(!map()->is_extensible());
return new_map;
}
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that it has
// no interceptors and needs no access checks).
@ -3076,7 +3098,7 @@ Object* Map::CopyDropTransitions() {
Object* descriptors = instance_descriptors()->RemoveTransitions();
if (descriptors->IsFailure()) return descriptors;
cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
return cast(new_map);
return new_map;
}
@ -5292,7 +5314,15 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
// Use the relocation info pointer before it is visited by
// the heap compaction in the next statement.
RelocIterator it(this, mode_mask);
IteratePointers(v,
kRelocationInfoOffset,
kRelocationInfoOffset + kPointerSize);
for (; !it.done(); it.next()) {
it.rinfo()->Visit(v);
}
@ -5312,14 +5342,6 @@ void Code::CopyFrom(const CodeDesc& desc) {
// copy code
memmove(instruction_start(), desc.buffer, desc.instr_size);
// fill gap with zero bytes
{ byte* p = instruction_start() + desc.instr_size;
byte* q = relocation_start();
while (p < q) {
*p++ = 0;
}
}
// copy reloc info
memmove(relocation_start(),
desc.buffer + desc.buffer_size - desc.reloc_size,
@ -6209,6 +6231,15 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
return value;
}
}
// When we set the is_extensible flag to false we always force
// the element into dictionary mode (and force them to stay there).
if (!map()->is_extensible()) {
Handle<Object> number(Heap::NumberFromUint32(index));
Handle<String> index_string(Factory::NumberToString(number));
Handle<Object> args[1] = { index_string };
return Top::Throw(*Factory::NewTypeError("object_not_extensible",
HandleVector(args, 1)));
}
Object* result = dictionary->AtNumberPut(index, value);
if (result->IsFailure()) return result;
if (elms != FixedArray::cast(result)) {

57
deps/v8/src/objects.h

@ -1367,6 +1367,7 @@ class JSObject: public HeapObject {
// Returns the index'th element.
// The undefined object if index is out of bounds.
Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
Object* SetFastElementsCapacityAndLength(int capacity, int length);
Object* SetSlowElements(Object* length);
@ -1516,6 +1517,10 @@ class JSObject: public HeapObject {
// Casting.
static inline JSObject* cast(Object* obj);
// Disallow further properties from being added to the object.
Object* PreventExtensions();
// Dispatched behavior.
void JSObjectIterateBody(int object_size, ObjectVisitor* v);
void JSObjectShortPrint(StringStream* accumulator);
@ -1547,6 +1552,11 @@ class JSObject: public HeapObject {
#endif
Object* SlowReverseLookup(Object* value);
// Maximal number of fast properties for the JSObject. Used to
// restrict the number of map transitions to avoid an explosion in
// the number of maps for objects used as dictionaries.
inline int MaxFastProperties();
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
static const uint32_t kMaxElementCount = 0xffffffffu;
@ -1568,8 +1578,6 @@ class JSObject: public HeapObject {
STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
private:
Object* GetElementWithCallback(Object* receiver,
Object* structure,
@ -2728,9 +2736,13 @@ class Code: public HeapObject {
inline int instruction_size();
inline void set_instruction_size(int value);
// [relocation_size]: Size of relocation information.
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
// Unchecked accessor to be used during GC.
inline ByteArray* unchecked_relocation_info();
inline int relocation_size();
inline void set_relocation_size(int value);
// [sinfo_size]: Size of scope information.
inline int sinfo_size();
@ -2765,11 +2777,13 @@ class Code: public HeapObject {
InLoopFlag in_loop = NOT_IN_LOOP,
InlineCacheState ic_state = UNINITIALIZED,
PropertyType type = NORMAL,
int argc = -1);
int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
PropertyType type,
InlineCacheHolderFlag holder = OWN_MAP,
InLoopFlag in_loop = NOT_IN_LOOP,
int argc = -1);
@ -2778,6 +2792,7 @@ class Code: public HeapObject {
static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
// Convert a target address into a code object.
@ -2786,6 +2801,9 @@ class Code: public HeapObject {
// Returns the address of the first instruction.
inline byte* instruction_start();
// Returns the address right after the last instruction.
inline byte* instruction_end();
// Returns the size of the instructions, padding, and relocation information.
inline int body_size();
@ -2846,8 +2864,8 @@ class Code: public HeapObject {
// Layout description.
static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
static const int kRelocationSizeOffset = kInstructionSizeOffset + kIntSize;
static const int kSInfoSizeOffset = kRelocationSizeOffset + kIntSize;
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
static const int kSInfoSizeOffset = kRelocationInfoOffset + kPointerSize;
static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
// Add padding to align the instruction start following right after
@ -2864,16 +2882,18 @@ class Code: public HeapObject {
static const int kFlagsICInLoopShift = 3;
static const int kFlagsTypeShift = 4;
static const int kFlagsKindShift = 7;
static const int kFlagsArgumentsCountShift = 11;
static const int kFlagsICHolderShift = 11;
static const int kFlagsArgumentsCountShift = 12;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
static const int kFlagsTypeMask = 0x00000070; // 00001110000
static const int kFlagsKindMask = 0x00000780; // 11110000000
static const int kFlagsArgumentsCountMask = 0xFFFFF800;
static const int kFlagsCacheInPrototypeMapMask = 0x00000800;
static const int kFlagsArgumentsCountMask = 0xFFFFF000;
static const int kFlagsNotUsedInLookup =
(kFlagsICInLoopMask | kFlagsTypeMask);
(kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
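// Resulting bit layout, low to high, per the shifts and masks above:
//   [0..2] IC state, [3] in-loop, [4..6] type, [7..10] kind,
//   [11] cache-in-prototype-map, [12..31] argument count.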
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@ -2980,13 +3000,8 @@ class Map: public HeapObject {
return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
}
inline void set_is_extensible() {
set_bit_field2(bit_field2() | (1 << kIsExtensible));
}
inline bool is_extensible() {
return ((1 << kIsExtensible) & bit_field2()) != 0;
}
inline void set_is_extensible(bool value);
inline bool is_extensible();
// Tells whether the instance has fast elements.
void set_has_fast_elements(bool value) {
@ -4398,6 +4413,8 @@ class SeqString: public String {
// Each character in the AsciiString is an ascii character.
class SeqAsciiString: public SeqString {
public:
static const bool kHasAsciiEncoding = true;
// Dispatched behavior.
inline uint16_t SeqAsciiStringGet(int index);
inline void SeqAsciiStringSet(int index, uint16_t value);
@ -4447,6 +4464,8 @@ class SeqAsciiString: public SeqString {
// Each character in the TwoByteString is a two-byte uint16_t.
class SeqTwoByteString: public SeqString {
public:
static const bool kHasAsciiEncoding = false;
// Dispatched behavior.
inline uint16_t SeqTwoByteStringGet(int index);
inline void SeqTwoByteStringSet(int index, uint16_t value);
@ -4579,6 +4598,8 @@ class ExternalString: public String {
// ASCII string.
class ExternalAsciiString: public ExternalString {
public:
static const bool kHasAsciiEncoding = true;
typedef v8::String::ExternalAsciiStringResource Resource;
// The underlying resource.
@ -4611,6 +4632,8 @@ class ExternalAsciiString: public ExternalString {
// encoded string.
class ExternalTwoByteString: public ExternalString {
public:
static const bool kHasAsciiEncoding = false;
typedef v8::String::ExternalStringResource Resource;
// The underlying string resource.

17
deps/v8/src/parser.cc

@ -134,6 +134,7 @@ class Parser {
// Report syntax error
void ReportUnexpectedToken(Token::Value token);
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
Handle<Script> script_;
Scanner scanner_;
@ -3263,6 +3264,15 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
}
void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
SmartPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
const char* element[1] = { *name_string };
ReportMessage("invalid_preparser_data",
Vector<const char*>(element, 1));
*ok = false;
}
Expression* Parser::ParsePrimaryExpression(bool* ok) {
// PrimaryExpression ::
// 'this'
@ -3810,7 +3820,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
Handle<FixedArray> this_property_assignments;
if (is_lazily_compiled && pre_data() != NULL) {
FunctionEntry entry = pre_data()->GetFunctionEnd(start_pos);
if (!entry.is_valid()) {
ReportInvalidPreparseData(name, CHECK_OK);
}
int end_pos = entry.end_pos();
if (end_pos <= start_pos) {
// End position greater than end of stream is safe, and hard to check.
ReportInvalidPreparseData(name, CHECK_OK);
}
Counters::total_preparse_skipped.Increment(end_pos - start_pos);
scanner_.SeekForward(end_pos);
materialized_literal_count = entry.literal_count();

93
deps/v8/src/rewriter.cc

@ -87,11 +87,13 @@ void AstOptimizer::VisitBlock(Block* node) {
void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
node->expression()->set_no_negative_zero(true);
Visit(node->expression());
}
void AstOptimizer::VisitIfStatement(IfStatement* node) {
node->condition()->set_no_negative_zero(true);
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
@ -101,6 +103,7 @@ void AstOptimizer::VisitIfStatement(IfStatement* node) {
void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
node->cond()->set_no_negative_zero(true);
Visit(node->cond());
Visit(node->body());
}
@ -108,6 +111,7 @@ void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
has_function_literal_ = false;
node->cond()->set_no_negative_zero(true);
Visit(node->cond());
node->may_have_function_literal_ = has_function_literal_;
Visit(node->body());
@ -120,6 +124,7 @@ void AstOptimizer::VisitForStatement(ForStatement* node) {
}
if (node->cond() != NULL) {
has_function_literal_ = false;
node->cond()->set_no_negative_zero(true);
Visit(node->cond());
node->may_have_function_literal_ = has_function_literal_;
}
@ -151,6 +156,7 @@ void AstOptimizer::VisitTryFinallyStatement(TryFinallyStatement* node) {
void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
node->tag()->set_no_negative_zero(true);
Visit(node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
CaseClause* clause = node->cases()->at(i);
@ -444,6 +450,7 @@ void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
if (FLAG_safe_int32_compiler) {
switch (node->op()) {
case Token::BIT_NOT:
node->expression()->set_no_negative_zero(true);
node->expression()->set_to_int32(true);
// Fall through.
case Token::ADD:
@ -476,10 +483,49 @@ void AstOptimizer::VisitCountOperation(CountOperation* node) {
}
static bool CouldBeNegativeZero(AstNode* node) {
Literal* literal = node->AsLiteral();
if (literal != NULL) {
Handle<Object> handle = literal->handle();
if (handle->IsString() || handle->IsSmi()) {
return false;
} else if (handle->IsHeapNumber()) {
double double_value = HeapNumber::cast(*handle)->value();
if (double_value != 0) {
return false;
}
}
}
BinaryOperation* binary = node->AsBinaryOperation();
if (binary != NULL && Token::IsBitOp(binary->op())) {
return false;
}
return true;
}
static bool CouldBePositiveZero(AstNode* node) {
Literal* literal = node->AsLiteral();
if (literal != NULL) {
Handle<Object> handle = literal->handle();
if (handle->IsSmi()) {
if (Smi::cast(*handle) != Smi::FromInt(0)) {
return false;
}
} else if (handle->IsHeapNumber()) {
// Heap number literal can't be +0, because that's a Smi.
return false;
}
}
return true;
}
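The rules these two predicates feed into are easiest to verify from the
JavaScript side (plain IEEE-754 behavior, shown as an illustration rather
than as part of the patch):

  -0 + -0  // -0: an addition yields -0 only when both operands are -0
  -0 +  0  // +0: so if either operand can't be -0, the sum can't be
   0 -  0  // +0
  -0 -  0  // -0: a subtraction yields -0 only for (-0) - (+0)
  -0 * -0  // +0: a number multiplied by itself is never -0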
void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
// Depending on the operation we can propagate this node's type down the
// AST nodes.
switch (node->op()) {
Token::Value op = node->op();
switch (op) {
case Token::COMMA:
case Token::OR:
node->left()->set_no_negative_zero(true);
@ -503,23 +549,54 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
node->left()->set_no_negative_zero(true);
node->right()->set_no_negative_zero(true);
break;
case Token::MUL: {
VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
if (lvar_proxy != NULL && rvar_proxy != NULL) {
Variable* lvar = lvar_proxy->AsVariable();
Variable* rvar = rvar_proxy->AsVariable();
if (lvar != NULL && rvar != NULL) {
if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
Slot* lslot = lvar->slot();
Slot* rslot = rvar->slot();
if (lslot->type() == rslot->type() &&
(lslot->type() == Slot::PARAMETER ||
lslot->type() == Slot::LOCAL) &&
lslot->index() == rslot->index()) {
// A number squared doesn't give negative zero.
node->set_no_negative_zero(true);
}
}
}
}
}
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
case Token::MOD: {
if (node->type()->IsLikelySmi()) {
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
}
node->left()->set_no_negative_zero(node->no_negative_zero());
node->right()->set_no_negative_zero(node->no_negative_zero());
if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
!CouldBeNegativeZero(node->right()))) {
node->left()->set_no_negative_zero(true);
node->right()->set_no_negative_zero(true);
} else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
!CouldBePositiveZero(node->right()))) {
node->left()->set_no_negative_zero(true);
node->right()->set_no_negative_zero(true);
} else {
node->left()->set_no_negative_zero(node->no_negative_zero());
node->right()->set_no_negative_zero(node->no_negative_zero());
}
if (node->op() == Token::DIV) {
node->right()->set_no_negative_zero(false);
} else if (node->op() == Token::MOD) {
node->right()->set_no_negative_zero(true);
}
break;
}
default:
UNREACHABLE();
break;
@ -530,7 +607,7 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
// After visiting the operand nodes we have to check if this node's type
// can be updated. If it does, then we can push that information down
// towards the leafs again if the new information is an upgrade over the
// towards the leaves again if the new information is an upgrade over the
// previous type of the operand nodes.
if (node->type()->IsUnknown()) {
if (node->left()->type()->IsLikelySmi() ||
@ -590,7 +667,7 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
if (node->type()->IsKnown()) {
// Propagate useful information down towards the leafs.
// Propagate useful information down towards the leaves.
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
}
@ -604,7 +681,7 @@ void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
// After visiting the operand nodes we have to check if this node's type
// can be updated. If it does, then we can push that information down
// towards the leafs again if the new information is an upgrade over the
// towards the leaves again if the new information is an upgrade over the
// previous type of the operand nodes.
if (node->type()->IsUnknown()) {
if (node->left()->type()->IsLikelySmi() ||

157
deps/v8/src/runtime.cc

@ -678,6 +678,12 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
}
static Object* Runtime_PreventExtensions(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
return obj->PreventExtensions();
}
static Object* Runtime_IsExtensible(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
@ -2279,6 +2285,134 @@ static Object* StringReplaceRegExpWithString(String* subject,
}
template <typename ResultSeqString>
static Object* StringReplaceRegExpWithEmptyString(String* subject,
JSRegExp* regexp,
JSArray* last_match_info) {
ASSERT(subject->IsFlat());
HandleScope handles;
Handle<String> subject_handle(subject);
Handle<JSRegExp> regexp_handle(regexp);
Handle<JSArray> last_match_info_handle(last_match_info);
Handle<Object> match = RegExpImpl::Exec(regexp_handle,
subject_handle,
0,
last_match_info_handle);
if (match.is_null()) return Failure::Exception();
if (match->IsNull()) return *subject_handle;
ASSERT(last_match_info_handle->HasFastElements());
HandleScope loop_scope;
int start, end;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
FixedArray* match_info_array =
FixedArray::cast(last_match_info_handle->elements());
start = RegExpImpl::GetCapture(match_info_array, 0);
end = RegExpImpl::GetCapture(match_info_array, 1);
}
int length = subject->length();
int new_length = length - (end - start);
if (new_length == 0) {
return Heap::empty_string();
}
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer =
Handle<ResultSeqString>::cast(Factory::NewRawAsciiString(new_length));
} else {
answer =
Handle<ResultSeqString>::cast(Factory::NewRawTwoByteString(new_length));
}
// If the regexp isn't global, only match once.
if (!regexp_handle->GetFlags().is_global()) {
if (start > 0) {
String::WriteToFlat(*subject_handle,
answer->GetChars(),
0,
start);
}
if (end < length) {
String::WriteToFlat(*subject_handle,
answer->GetChars() + start,
end,
length);
}
return *answer;
}
int prev = 0; // Index of end of last match.
int next = 0; // Start of next search (prev unless last match was empty).
int position = 0;
do {
if (prev < start) {
// Add substring subject[prev;start] to answer string.
String::WriteToFlat(*subject_handle,
answer->GetChars() + position,
prev,
start);
position += start - prev;
}
prev = end;
next = end;
// Continue from where the match ended, unless it was an empty match.
if (start == end) {
next++;
if (next > length) break;
}
match = RegExpImpl::Exec(regexp_handle,
subject_handle,
next,
last_match_info_handle);
if (match.is_null()) return Failure::Exception();
if (match->IsNull()) break;
ASSERT(last_match_info_handle->HasFastElements());
HandleScope loop_scope;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
FixedArray* match_info_array =
FixedArray::cast(last_match_info_handle->elements());
start = RegExpImpl::GetCapture(match_info_array, 0);
end = RegExpImpl::GetCapture(match_info_array, 1);
}
} while (true);
if (prev < length) {
// Add substring subject[prev;length] to answer string.
String::WriteToFlat(*subject_handle,
answer->GetChars() + position,
prev,
length);
position += length - prev;
}
if (position == 0) {
return Heap::empty_string();
}
// Shorten the string and fill the unused tail with a filler object.
int string_size = ResultSeqString::SizeFor(position);
int allocated_string_size = ResultSeqString::SizeFor(new_length);
int delta = allocated_string_size - string_size;
answer->set_length(position);
if (delta == 0) return *answer;
Address end_of_string = answer->address() + string_size;
Heap::CreateFillerObjectAt(end_of_string, delta);
return *answer;
}
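The new specialization covers the common "delete every match" pattern
without assembling replacement pieces. JavaScript-level behavior it must
preserve (illustrative):

  "a1b2c3".replace(/\d/g, "");  // "abc"
  "aba".replace(/a/, "");       // "ba"  (non-global: first match only)
  "abc".replace(/x/g, "");      // "abc" (no match: subject returned as is)
  "aaa".replace(/a/g, "");      // ""    (everything removed)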
static Object* Runtime_StringReplaceRegExpWithString(Arguments args) {
ASSERT(args.length() == 4);
@ -2305,6 +2439,16 @@ static Object* Runtime_StringReplaceRegExpWithString(Arguments args) {
ASSERT(last_match_info->HasFastElements());
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
subject, regexp, last_match_info);
} else {
return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
subject, regexp, last_match_info);
}
}
return StringReplaceRegExpWithString(subject,
regexp,
replacement,
@ -5362,9 +5506,6 @@ static Object* Runtime_NumberToInteger(Arguments args) {
}
static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@ -5434,7 +5575,7 @@ static Object* Runtime_NumberAdd(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
return Heap::AllocateHeapNumber(x + y);
return Heap::NumberFromDouble(x + y);
}
@ -5444,7 +5585,7 @@ static Object* Runtime_NumberSub(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
return Heap::AllocateHeapNumber(x - y);
return Heap::NumberFromDouble(x - y);
}
@ -5454,7 +5595,7 @@ static Object* Runtime_NumberMul(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
return Heap::AllocateHeapNumber(x * y);
return Heap::NumberFromDouble(x * y);
}
@ -5463,7 +5604,7 @@ static Object* Runtime_NumberUnaryMinus(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
return Heap::AllocateHeapNumber(-x);
return Heap::NumberFromDouble(-x);
}
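Heap::NumberFromDouble returns a Smi when the value is a small integer,
whereas AllocateHeapNumber always allocated. The switch is invisible at the
JavaScript level but avoids allocating a heap number for integral results
in Smi range, e.g.:

  2.5 + 1.5  // 4: now representable as a Smi
  0.1 + 0.2  // 0.30000000000000004: still a heap number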
@ -6065,7 +6206,7 @@ static Object* Runtime_Math_pow(Arguments args) {
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
int y = Smi::cast(args[1])->value();
return Heap::AllocateHeapNumber(powi(x, y));
return Heap::NumberFromDouble(powi(x, y));
}
CONVERT_DOUBLE_CHECKED(y, args[1]);

1
deps/v8/src/runtime.h

@ -72,6 +72,7 @@ namespace internal {
F(GetOwnProperty, 2, 1) \
\
F(IsExtensible, 1, 1) \
F(PreventExtensions, 1, 1)\
\
/* Utilities */ \
F(GetFunctionDelegate, 1, 1) \

4
deps/v8/src/spaces.cc

@ -2305,8 +2305,8 @@ void PagedSpace::CollectCodeStatistics() {
}
ASSERT(code->instruction_start() <= prev_pc &&
prev_pc <= code->relocation_start());
delta += static_cast<int>(code->relocation_start() - prev_pc);
prev_pc <= code->instruction_end());
delta += static_cast<int>(code->instruction_end() - prev_pc);
EnterComment("NoComment", delta);
}
}

124
deps/v8/src/stub-cache.cc

@ -94,6 +94,7 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
// If no global objects are present in the prototype chain, the load
// nonexistent IC stub can be shared for all names for a given map
// and we use the empty string for the map cache in that case. If
@ -129,14 +130,16 @@ Object* StubCache::ComputeLoadField(String* name,
JSObject* receiver,
JSObject* holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -148,14 +151,16 @@ Object* StubCache::ComputeLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -166,15 +171,17 @@ Object* StubCache::ComputeLoadConstant(String* name,
JSObject* receiver,
JSObject* holder,
Object* value) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -184,21 +191,23 @@ Object* StubCache::ComputeLoadConstant(String* name,
Object* StubCache::ComputeLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
}
Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
Object* StubCache::ComputeLoadNormal() {
return Builtins::builtin(Builtins::LoadIC_Normal);
}
@ -208,8 +217,10 @@ Object* StubCache::ComputeLoadGlobal(String* name,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
bool is_dont_delete) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadGlobal(receiver,
@ -219,7 +230,7 @@ Object* StubCache::ComputeLoadGlobal(String* name,
is_dont_delete);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -230,14 +241,16 @@ Object* StubCache::ComputeKeyedLoadField(String* name,
JSObject* receiver,
JSObject* holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -248,15 +261,17 @@ Object* StubCache::ComputeKeyedLoadConstant(String* name,
JSObject* receiver,
JSObject* holder,
Object* value) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -266,15 +281,17 @@ Object* StubCache::ComputeKeyedLoadConstant(String* name,
Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -285,15 +302,17 @@ Object* StubCache::ComputeKeyedLoadCallback(String* name,
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -305,13 +324,15 @@ Object* StubCache::ComputeKeyedLoadArrayLength(String* name,
JSArray* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
Object* code = receiver->map()->FindInCodeCache(name, flags);
ASSERT(receiver->IsJSObject());
Map* map = receiver->map();
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -322,13 +343,14 @@ Object* StubCache::ComputeKeyedLoadStringLength(String* name,
String* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Map* map = receiver->map();
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadStringLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -339,13 +361,14 @@ Object* StubCache::ComputeKeyedLoadFunctionPrototype(String* name,
JSFunction* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Map* map = receiver->map();
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -371,6 +394,11 @@ Object* StubCache::ComputeStoreField(String* name,
}
Object* StubCache::ComputeStoreNormal() {
return Builtins::builtin(Builtins::StoreIC_Normal);
}
Object* StubCache::ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell) {
@ -380,7 +408,7 @@ Object* StubCache::ComputeStoreGlobal(String* name,
StoreStubCompiler compiler;
code = compiler.CompileStoreGlobal(receiver, cell, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@ -451,7 +479,9 @@ Object* StubCache::ComputeCallConstant(int argc,
JSObject* holder,
JSFunction* function) {
// Compute the check type and the map.
Map* map = IC::GetCodeCacheMapForObject(object);
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
Map* map = IC::GetCodeCacheMap(object, cache_holder);
// Compute check type based on receiver/holder.
StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
@ -466,6 +496,7 @@ Object* StubCache::ComputeCallConstant(int argc,
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
CONSTANT_FUNCTION,
cache_holder,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
@ -476,7 +507,7 @@ Object* StubCache::ComputeCallConstant(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
CallStubCompiler compiler(argc, in_loop, kind);
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallConstant(object, holder, function, name, check);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
@ -497,7 +528,9 @@ Object* StubCache::ComputeCallField(int argc,
JSObject* holder,
int index) {
// Compute the check type and the map.
Map* map = IC::GetCodeCacheMapForObject(object);
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
Map* map = IC::GetCodeCacheMap(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@ -508,11 +541,12 @@ Object* StubCache::ComputeCallField(int argc,
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
cache_holder,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, in_loop, kind);
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallField(JSObject::cast(object),
holder,
index,
@ -534,8 +568,9 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Object* object,
JSObject* holder) {
// Compute the check type and the map.
// If the object is a value, we use the prototype map for the cache.
Map* map = IC::GetCodeCacheMapForObject(object);
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
Map* map = IC::GetCodeCacheMap(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@ -547,11 +582,12 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
cache_holder,
NOT_IN_LOOP,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, NOT_IN_LOOP, kind);
CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
code = compiler.CompileCallInterceptor(JSObject::cast(object),
holder,
name);
@ -585,25 +621,29 @@ Object* StubCache::ComputeCallGlobal(int argc,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function) {
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(receiver, holder);
Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
NORMAL,
cache_holder,
in_loop,
argc);
Object* code = receiver->map()->FindInCodeCache(name, flags);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
CallStubCompiler compiler(argc, in_loop, kind);
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@ -1203,6 +1243,17 @@ Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
}
CallStubCompiler::CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
InlineCacheHolderFlag cache_holder)
: arguments_(argc)
, in_loop_(in_loop)
, kind_(kind)
, cache_holder_(cache_holder) {
}
Object* CallStubCompiler::CompileCustomCall(int generator_id,
Object* object,
JSObject* holder,
@ -1230,6 +1281,7 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
cache_holder_,
in_loop_,
argc);
return GetCodeWithFlags(flags, name);

36
deps/v8/src/stub-cache.h

@ -77,7 +77,7 @@ class StubCache : public AllStatic {
JSObject* receiver,
JSObject* holder);
static Object* ComputeLoadNormal(String* name, JSObject* receiver);
static Object* ComputeLoadNormal();
static Object* ComputeLoadGlobal(String* name,
@ -121,6 +121,8 @@ class StubCache : public AllStatic {
int field_index,
Map* transition = NULL);
static Object* ComputeStoreNormal();
static Object* ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell);
@ -407,8 +409,21 @@ class StubCompiler BASE_EMBEDDED {
static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
// Check the integrity of the prototype chain to make sure that the
// current IC is still valid.
// Generates code that verifies that the property holder has not changed
// (checking maps of objects in the prototype chain for fast and global
// objects, or doing a negative lookup for slow objects, and ensuring that
// the property cells for global objects are still empty) and checks that
// the map of the holder has not changed. If necessary the function also
// generates code for a security check in case of global object holders.
// Helps to make sure that the current IC is still valid.
//
// The scratch and holder registers are always clobbered, but the object
// register is only clobbered if it is the same as the holder register.
// The function returns a register containing the holder - either
// object_reg or holder_reg.
// The function can optionally (when save_at_depth != kInvalidProtoDepth)
// save the object at the given depth by moving it to [esp + kPointerSize].
Register CheckPrototypes(JSObject* object,
Register object_reg,
@ -416,9 +431,10 @@ class StubCompiler BASE_EMBEDDED {
Register holder_reg,
Register scratch,
String* name,
Label* miss) {
Label* miss,
Register extra = no_reg) {
return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
name, kInvalidProtoDepth, miss);
name, kInvalidProtoDepth, miss, extra);
}
Register CheckPrototypes(JSObject* object,
@ -428,7 +444,8 @@ class StubCompiler BASE_EMBEDDED {
Register scratch,
String* name,
int save_at_depth,
Label* miss);
Label* miss,
Register extra = no_reg);
protected:
Object* GetCodeWithFlags(Code::Flags flags, const char* name);
@ -611,8 +628,10 @@ class CallStubCompiler: public StubCompiler {
kNumCallGenerators
};
CallStubCompiler(int argc, InLoopFlag in_loop, Code::Kind kind)
: arguments_(argc), in_loop_(in_loop), kind_(kind) { }
CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
InlineCacheHolderFlag cache_holder);
Object* CompileCallField(JSObject* object,
JSObject* holder,
@ -653,6 +672,7 @@ class CallStubCompiler: public StubCompiler {
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
const InlineCacheHolderFlag cache_holder_;
const ParameterCount& arguments() { return arguments_; }

4
deps/v8/src/v8-counters.h

@ -153,6 +153,8 @@ namespace internal {
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
SC(store_normal_miss, V8.StoreNormalMiss) \
SC(store_normal_hit, V8.StoreNormalHit) \
SC(call_miss, V8.CallMiss) \
SC(keyed_call_miss, V8.KeyedCallMiss) \
SC(load_miss, V8.LoadMiss) \
@ -166,6 +168,8 @@ namespace internal {
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(negative_lookups, V8.NegativeLookups) \
SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \

39
deps/v8/src/v8dll-main.cc

@ -0,0 +1,39 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <windows.h>
#include "../include/v8.h"
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL,
DWORD dwReason,
LPVOID lpvReserved) {
// Do nothing.
return TRUE;
}
}

25
deps/v8/src/v8natives.js

@ -745,6 +745,27 @@ function ObjectDefineProperties(obj, properties) {
}
// ES5 section 15.2.3.10
function ObjectPreventExtension(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
%PreventExtensions(obj);
return obj;
}
// ES5 section 15.2.3.13
function ObjectIsExtensible(obj) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["isExtensible"]);
}
return %IsExtensible(obj);
}
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@ -780,7 +801,9 @@ function SetupObject() {
"defineProperties", ObjectDefineProperties,
"getPrototypeOf", ObjectGetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
"isExtensible", ObjectIsExtensible,
"preventExtensions", ObjectPreventExtension
));
}
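A sketch of the behavior the two new entries expose (the TypeError text
comes from the object_not_extensible message added in messages.js; per
JSObject::AddProperty and SetElementWithoutInterceptor above, the throw is
unconditional in this revision):

  var o = { a: 1 };
  Object.isExtensible(o);       // true
  Object.preventExtensions(o);  // returns o
  Object.isExtensible(o);       // false
  o.b = 2;   // TypeError: Can't add property b, object is not extensible
  o[0] = 1;  // also throws: elements were forced into dictionary mode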

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 21
#define BUILD_NUMBER 23
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

4
deps/v8/src/x64/builtins-x64.cc

@ -1238,10 +1238,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movq(rbx, r8);
#endif // _WIN64
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
__ movq(kRootRegister, roots_address);
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function

322
deps/v8/src/x64/codegen-x64.cc

@ -592,7 +592,6 @@ bool CodeGenerator::HasValidEntryRegisters() {
&& (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
&& (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
&& (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
&& (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
&& (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif
@ -1600,11 +1599,133 @@ void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
}
void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
// A fast smi loop is a for loop with an initializer
// that is a simple assignment of a smi to a stack variable,
// a test that is a simple test of that variable against a smi constant,
// and a step that is an increment or decrement of the variable, where
// the variable isn't modified in the loop body.
// This guarantees that the variable is always a smi.
Variable* loop_var = node->loop_variable();
Smi* initial_value = *Handle<Smi>::cast(node->init()
->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
Smi* limit_value = *Handle<Smi>::cast(
node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
Token::Value compare_op =
node->cond()->AsCompareOperation()->op();
bool increments =
node->next()->StatementAsCountOperation()->op() == Token::INC;
// Check that the condition isn't initially false.
bool initially_false = false;
int initial_int_value = initial_value->value();
int limit_int_value = limit_value->value();
switch (compare_op) {
case Token::LT:
initially_false = initial_int_value >= limit_int_value;
break;
case Token::LTE:
initially_false = initial_int_value > limit_int_value;
break;
case Token::GT:
initially_false = initial_int_value <= limit_int_value;
break;
case Token::GTE:
initially_false = initial_int_value < limit_int_value;
break;
default:
UNREACHABLE();
}
if (initially_false) return;
// Only check loop condition at the end.
Visit(node->init());
JumpTarget loop(JumpTarget::BIDIRECTIONAL);
// Set type and stack height of BreakTargets.
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
IncrementLoopNesting();
loop.Bind();
// Set number type of the loop variable to smi.
CheckStack(); // TODO(1222600): ignore if body contains calls.
SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
Visit(node->body());
if (node->continue_target()->is_linked()) {
node->continue_target()->Bind();
}
if (has_valid_frame()) {
CodeForStatementPosition(node);
Slot* loop_var_slot = loop_var->slot();
if (loop_var_slot->type() == Slot::LOCAL) {
frame_->PushLocalAt(loop_var_slot->index());
} else {
ASSERT(loop_var_slot->type() == Slot::PARAMETER);
frame_->PushParameterAt(loop_var_slot->index());
}
Result loop_var_result = frame_->Pop();
if (!loop_var_result.is_register()) {
loop_var_result.ToRegister();
}
if (increments) {
__ SmiAddConstant(loop_var_result.reg(),
loop_var_result.reg(),
Smi::FromInt(1));
} else {
__ SmiSubConstant(loop_var_result.reg(),
loop_var_result.reg(),
Smi::FromInt(1));
}
{
__ SmiCompare(loop_var_result.reg(), limit_value);
Condition condition;
switch (compare_op) {
case Token::LT:
condition = less;
break;
case Token::LTE:
condition = less_equal;
break;
case Token::GT:
condition = greater;
break;
case Token::GTE:
condition = greater_equal;
break;
default:
condition = never;
UNREACHABLE();
}
loop.Branch(condition);
}
loop_var_result.Unuse();
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
DecrementLoopNesting();
}
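As the comment above spells out, the fast path only fires for a very constrained loop shape. A sketch of loops on either side of the line (names are illustrative only):

  var sum = 0;
  for (var i = 0; i < 100; i++) {  // qualifies: smi init, smi limit, i untouched in the body
    sum += i;
  }
  for (var j = 0; j < 100; j++) {
    j = j + 0.5;                   // writing j in the body disqualifies the loop
  }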
void CodeGenerator::VisitForStatement(ForStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
if (node->is_fast_smi_loop()) {
GenerateFastSmiLoop(node);
return;
}
// Compile the init expression if present.
if (node->init() != NULL) {
Visit(node->init());
@ -1694,16 +1815,6 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
CheckStack(); // TODO(1222600): ignore if body contains calls.
// We know that the loop index is a smi if it is not modified in the
// loop body and it is checked against a constant limit in the loop
// condition. In this case, we reset the static type information of the
// loop index to smi before compiling the body, the update expression, and
// the bottom check of the loop condition.
if (node->is_fast_smi_loop()) {
// Set number type of the loop variable to smi.
SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
}
Visit(node->body());
// If there is an update expression, compile it if necessary.
@ -1723,13 +1834,6 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
}
}
// Set the type of the loop variable to smi before compiling the test
// expression if we are in a fast smi loop condition.
if (node->is_fast_smi_loop() && has_valid_frame()) {
// Set number type of the loop variable to smi.
SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
}
// Based on the condition analysis, compile the backward jump as
// necessary.
switch (info) {
@ -3269,9 +3373,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
} else {
bool overwrite =
bool can_overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
Load(node->expression());
switch (op) {
case Token::NOT:
@ -3281,7 +3388,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
GenericUnaryOpStub stub(Token::SUB, overwrite);
GenericUnaryOpStub stub(
Token::SUB,
overwrite,
no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
answer.set_type_info(TypeInfo::Number());
@ -3501,17 +3611,16 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
}
if (is_increment) {
__ SmiAddConstant(kScratchRegister,
__ SmiAddConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
} else {
__ SmiSubConstant(kScratchRegister,
__ SmiSubConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
}
__ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
// Postfix count operations return their input converted to
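The rewrite lets SmiAddConstant/SmiSubConstant write straight into new_value's register, dropping the extra movq through kScratchRegister. The prefix/postfix semantics being preserved are the usual ones:

  var i = 5;
  var a = i++;   // postfix: a gets the old value 5, then i becomes 6
  var b = ++i;   // prefix: i becomes 7 first, b gets 7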
@ -8403,6 +8512,11 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label try_float;
__ JumpIfNotSmi(rax, &try_float);
if (negative_zero_ == kIgnoreNegativeZero) {
__ SmiCompare(rax, Smi::FromInt(0));
__ j(equal, &done);
}
// Enter runtime system if the value of the smi is zero
// to make sure that we switch between 0 and -0.
// Also enter it if the value of the smi is Smi::kMinValue.
@ -8410,10 +8524,14 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
// Either zero or Smi::kMinValue, neither of which becomes a smi when
// negated.
__ SmiCompare(rax, Smi::FromInt(0));
__ j(not_equal, &slow);
__ Move(rax, Factory::minus_zero_value());
__ jmp(&done);
if (negative_zero_ == kStrictNegativeZero) {
__ SmiCompare(rax, Smi::FromInt(0));
__ j(not_equal, &slow);
__ Move(rax, Factory::minus_zero_value());
__ jmp(&done);
} else {
__ jmp(&slow);
}
// Try floating point case.
__ bind(&try_float);
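The kIgnoreNegativeZero fast path is sound only where -0 cannot be observed; otherwise negating the smi 0 must fall through to produce a heap-number -0. A sketch of why (plain JavaScript):

  var x = 0;
  var y = -x;    // -0: not representable as a smi, needs a heap number
  1 / y;         // -Infinity, distinguishable from 1 / 0 === Infinity
  var a = [7];
  a[-x];         // 7: as an array index, -0 behaves exactly like 0, the kind of
                 // context where the AST node can be marked no_negative_zero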
@ -8426,7 +8544,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ shl(kScratchRegister, Immediate(63));
__ xor_(rdx, kScratchRegister); // Flip sign.
// rdx is value to store.
if (overwrite_) {
if (overwrite_ == UNARY_OVERWRITE) {
__ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
} else {
__ AllocateHeapNumber(rcx, rbx, &slow);
@ -8622,26 +8740,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&seq_ascii_string);
// rax: subject string (sequential ascii)
// rcx: RegExp data (FixedArray)
__ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
__ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
__ Set(rdi, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
// rax: subject string (flat two-byte)
// rcx: RegExp data (FixedArray)
__ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
__ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
__ Set(rdi, 0); // Type is two byte.
__ bind(&check_code);
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it contains
// the hole.
__ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
__ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// rax: subject string
// rdi: encoding of subject string (1 if ascii, 0 if two_byte);
// r12: code
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
@ -8649,7 +8767,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
// r12: code
// r11: code
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
@ -8699,7 +8817,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
// r12: code
// r11: code
// Argument 4: End of string data
// Argument 3: Start of string data
@ -8723,8 +8841,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(arg1, rax);
// Locate the code entry and call it.
__ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ CallCFunction(r12, kRegExpExecuteArguments);
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ CallCFunction(r11, kRegExpExecuteArguments);
// rsi is caller save, as it is used to pass parameter.
__ pop(rsi);
@ -8938,7 +9056,7 @@ static int NegativeComparisonResult(Condition cc) {
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
Label check_unequal_objects, done;
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
// NOTICE! This code is only reached after a smi-fast-case check, so
@ -8975,14 +9093,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
// If it's not a heap number, then return equal for (in)equality operator.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Factory::heap_number_map());
if (cc_ == equal) {
__ j(equal, &heap_number);
__ Set(rax, EQUAL);
__ ret(0);
} else {
// Identical objects must still be converted to primitive for < and >.
__ j(not_equal, &not_identical);
__ j(equal, &heap_number);
if (cc_ != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
__ j(above_equal, &not_identical);
}
__ Set(rax, EQUAL);
__ ret(0);
__ bind(&heap_number);
// It is a heap number, so return equal if it's not NaN.
@ -9113,7 +9231,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&check_for_strings);
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
__ JumpIfNotBothSequentialAsciiStrings(
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@ -9128,7 +9247,40 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Abort("Unexpected fall-through from string comparison");
#endif
__ bind(&call_builtin);
__ bind(&check_unequal_objects);
if (cc_ == equal && !strict_) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
Label not_both_objects, return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
__ lea(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
__ j(below, &not_both_objects);
__ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
__ j(below, &not_both_objects);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(zero, &return_unequal);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(zero, &return_unequal);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
__ Set(rax, EQUAL);
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in rax,
// or return equal if we fell through to here.
__ ret(2 * kPointerSize); // rax, rdx were pushed
__ bind(&not_both_objects);
}
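This lets == on two distinct JSObjects be decided without calling the builtin. Undetectable objects cannot be created from pure JavaScript; the sketch below assumes a host-provided one such as document.all in browsers:

  ({}) == ({});        // false: distinct ordinary objects, handled inline now
  // With an undetectable host object u (e.g. document.all):
  //   u == undefined  // true: an undetectable compares as the value undefined
  //   u == null       // true, for the same reason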
// must swap argument order
__ pop(rcx);
__ pop(rdx);
@ -9488,7 +9640,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
// r15: pointer to the first argument (C callee-saved).
// r12: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
@ -9529,7 +9681,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
// Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
__ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
__ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
__ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
@ -9545,7 +9697,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
__ movq(rsi, r12); // argv.
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
@ -9747,7 +9899,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// rbp: frame pointer of exit frame (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
// r15: argv pointer (C callee-saved).
// r12: argv pointer (C callee-saved).
Label throw_normal_exception;
Label throw_termination_exception;
@ -9807,24 +9959,38 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
__ Push(Smi::FromInt(marker)); // context slot
__ Push(Smi::FromInt(marker)); // function slot
// Save callee-saved registers (X64 calling conventions).
// Scratch register is neither callee-save nor an argument register on any
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
RelocInfo::NONE);
__ push(kScratchRegister); // context slot
__ push(kScratchRegister); // function slot
// Save callee-saved registers (X64/Win64 calling conventions).
__ push(r12);
__ push(r13);
__ push(r14);
__ push(r15);
__ push(rdi);
__ push(rsi);
#ifdef _WIN64
__ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
__ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
__ push(rbx);
// TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
// callee-save in JS code as well.
// TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
// callee save as well.
// Save copies of the top frame descriptor on the stack.
ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
__ load_rax(c_entry_fp);
__ push(rax);
// Set up the roots and smi constant registers.
// Needs to be done before any further smi loads.
ExternalReference roots_address = ExternalReference::roots_address();
__ movq(kRootRegister, roots_address);
__ InitializeSmiConstantRegister();
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
@ -9895,8 +10061,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore callee-saved registers (X64 conventions).
__ pop(rbx);
#ifdef _WIN64
// Callee save in Win64 ABI, arguments/volatile in AMD64 ABI.
__ pop(rsi);
__ pop(rdi);
#endif
__ pop(r15);
__ pop(r14);
__ pop(r13);
@ -10442,6 +10611,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// the four basic operations. The stub stays in the DEFAULT state
// forever for all other operations (also if smi code is skipped).
GenerateTypeTransition(masm);
break;
}
Label not_floats;
@ -10759,31 +10929,13 @@ void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
// Keep a copy of operands on the stack and make sure they are also in
// rdx, rax.
// Ensure the operands are on the stack.
if (HasArgsInRegisters()) {
GenerateRegisterArgsPush(masm);
} else {
GenerateLoadArguments(masm);
}
// Internal frame is necessary to handle exceptions properly.
__ EnterInternalFrame();
// Push arguments on stack if the stub expects them there.
if (!HasArgsInRegisters()) {
__ push(rdx);
__ push(rax);
}
// Call the stub proper to get the result in rax.
__ call(&get_result);
__ LeaveInternalFrame();
// Left and right arguments are already on stack.
__ pop(rcx);
// Push the operation result. The tail call to BinaryOp_Patch will
// return it to the original caller.
__ push(rax);
__ pop(rcx); // Save the return address.
// Push this stub's key.
__ Push(Smi::FromInt(MinorKey()));
@ -10794,17 +10946,13 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ Push(Smi::FromInt(runtime_operands_type_));
__ push(rcx);
__ push(rcx); // The return address.
// Perform patching to an appropriate fast case and return the result.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
6,
5,
1);
// The entry point for the result calculation is assumed to be immediately
// after this sequence.
__ bind(&get_result);
}
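The slimmed-down transition code just hands the operands, the stub's MinorKey, and the current operand-type state to BinaryOp_Patch, which installs a better-specialized stub. A sketch of the kind of type feedback that drives this (hypothetical function, states paraphrased):

  function add(a, b) { return a + b; }
  add(1, 2);        // smi operands: the smi fast case keeps winning
  add(1.5, 2.25);   // heap numbers: stub is patched to a more general case
  add("a", "b");    // strings: patched again; DEFAULT handles everything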
@ -11130,7 +11278,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Check that both strings are non-external ascii strings.
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
&string_add_runtime);
&string_add_runtime);
// Get the two characters forming the sub string.
__ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@ -11140,7 +11288,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@ -11232,7 +11380,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&make_flat_ascii_string);
// Both strings are ascii strings. As they are short they are both flat.
__ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
__ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
@ -11269,7 +11417,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
__ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
__ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.

3
deps/v8/src/x64/codegen-x64.h

@ -393,6 +393,9 @@ class CodeGenerator: public AstVisitor {
// target (which cannot be done more than once).
void GenerateReturnSequence(Result* return_value);
// Generate code for a fast smi loop.
void GenerateFastSmiLoop(ForStatement* node);
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();

4
deps/v8/src/x64/frames-x64.h

@ -56,7 +56,11 @@ class StackHandlerConstants : public AllStatic {
class EntryFrameConstants : public AllStatic {
public:
#ifdef _WIN64
static const int kCallerFPOffset = -10 * kPointerSize;
#else
static const int kCallerFPOffset = -8 * kPointerSize;
#endif
static const int kArgvOffset = 6 * kPointerSize;
};
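The Win64 value follows from JSEntryStub above: rdi and rsi are callee save only in the Win64 ABI, so two extra slots sit between the saved frame pointer and the saved frame descriptor. A back-of-the-envelope check (slot counts read off the stub, as a sketch):

  // Slots pushed below the saved rbp in JSEntryStub::GenerateBody:
  var markers = 2;                 // context and function slots
  var calleeSaved = 4;             // r12, r13, r14, r15
  var win64Extra = 2;              // rdi, rsi (Win64 only)
  var rbx = 1, cEntryFp = 1;
  markers + calleeSaved + rbx + cEntryFp;               // 8,  so -8 * kPointerSize
  markers + calleeSaved + win64Extra + rbx + cEntryFp;  // 10, so -10 * kPointerSize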

8
deps/v8/src/x64/full-codegen-x64.cc

@ -2807,9 +2807,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
bool overwrite =
bool can_overwrite =
(expr->expression()->AsBinaryOperation() != NULL &&
expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register rax.
@ -2821,9 +2823,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
bool overwrite =
bool can_overwrite =
(expr->expression()->AsBinaryOperation() != NULL &&
expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register rax.

194
deps/v8/src/x64/ic-x64.cc

@ -61,11 +61,11 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register r0,
Register r1,
Label* miss) {
static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
Register receiver,
Register r0,
Register r1,
Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
@ -98,34 +98,17 @@ static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
}
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
// This function is safe to call if name is not a symbol, and will jump to
// the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register r0,
Register r1,
Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
//
// name - holds the name of the property on entry and is unchanged.
//
// r0 - used to hold the capacity of the property dictionary.
//
// r1 - used to hold the index into the property dictionary.
//
// result - holds the result on exit if the load succeeded.
Label done;
// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r1|. Jump to the |miss| label
// otherwise.
static void GenerateStringDictionaryProbes(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register r0,
Register r1) {
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@ -157,14 +140,58 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ cmpq(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
__ j(equal, &done);
__ j(equal, done);
} else {
__ j(not_equal, miss_label);
__ j(not_equal, miss);
}
}
}
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
// This function is safe to call if name is not a symbol, and will jump to
// the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register r0,
Register r1,
Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
//
// name - holds the name of the property on entry and is unchanged.
//
// r0 - used to hold the capacity of the property dictionary.
//
// r1 - used to hold the index into the property dictionary.
//
// result - holds the result on exit if the load succeeded.
Label done;
// Check that the value is a normal property.
// Probe the dictionary.
GenerateStringDictionaryProbes(masm,
miss_label,
&done,
elements,
name,
r0,
r1);
// If probing finds an entry in the dictionary, r1 contains the
// index into the dictionary. Check that the value is a normal
// property.
__ bind(&done);
const int kElementsStartOffset =
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ Test(Operand(elements, r1, times_pointer_size,
kDetailsOffset - kHeapObjectTag),
@ -179,6 +206,75 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Helper function used to store a property to a dictionary backing
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
// call if name is not a symbol, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register value,
Register scratch0,
Register scratch1) {
// Register use:
//
// elements - holds the property dictionary on entry and is clobbered.
//
// name - holds the name of the property on entry and is unchanged.
//
// value - holds the value to store and is unchanged.
//
// scratch0 - used to hold the capacity of the property dictionary and is
// clobbered.
//
// scratch1 - used to hold the index into the property dictionary and is
// clobbered.
Label done;
// Probe the dictionary.
GenerateStringDictionaryProbes(masm,
miss_label,
&done,
elements,
name,
scratch0,
scratch1);
// If probing finds an entry in the dictionary, scratch1 contains the
// index into the dictionary. Check that the value is a normal
// property that is not read only.
__ bind(&done);
const int kElementsStartOffset =
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask
= (PropertyDetails::TypeField::mask() |
PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
__ Test(Operand(elements,
scratch1,
times_pointer_size,
kDetailsOffset - kHeapObjectTag),
Smi::FromInt(kTypeAndReadOnlyMask));
__ j(not_zero, miss_label);
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ lea(scratch1, Operand(elements,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
__ movq(Operand(scratch1, 0), value);
// Update write barrier. Make sure not to clobber the value.
__ movq(scratch0, value);
__ RecordWrite(elements, scratch1, scratch0);
}
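Both the load and the new store path funnel through GenerateStringDictionaryProbes, which inlines the first kProbes steps of an open-addressed lookup. A sketch of the probe sequence, assuming quadratic probing of the form (n*n + n)/2 over a power-of-two capacity (the exact offset formula is an assumption here):

  function probeSequence(hash, capacity, kProbes) {
    var mask = capacity - 1;            // capacity is a power of two
    var indices = [];
    for (var n = 0; n < kProbes; n++) {
      indices.push((hash + (n * n + n) / 2) & mask);
    }
    return indices;                     // slots checked before jumping to miss
  }
  probeSequence(0x1234, 64, 4);         // [52, 53, 55, 58]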
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
@ -1332,7 +1428,7 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
GenerateDictionaryLoadReceiverCheck(masm, rdx, rax, rbx, &miss);
GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
// rax: elements
// Search the dictionary placing the result in rdi.
@ -1616,7 +1712,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
GenerateDictionaryLoadReceiverCheck(masm, rax, rdx, rbx, &miss);
GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
// rdx: elements
// Search the dictionary placing the result in rax.
@ -1760,6 +1856,28 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss, restore_miss;
GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
__ IncrementCounter(&Counters::store_normal_hit, 1);
__ ret(0);
__ bind(&miss);
__ IncrementCounter(&Counters::store_normal_miss, 1);
GenerateMiss(masm);
}
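This gives x64 an inline fast path for stores to receivers with dictionary (slow) properties, matching the existing load path. A sketch of code that would exercise it; note that the dictionary-mode transition is a heuristic, not a guarantee:

  var o = { a: 1, b: 2 };
  delete o.a;   // typically flips o to slow (dictionary) properties
  o.b = 3;      // hits the new path: probe the dictionary, check the entry is a
                // plain writable property, store, then write barrier
  o.c = 4;      // adding a property still misses to the runtime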
#undef __

232
deps/v8/src/x64/macro-assembler-x64.cc

@ -105,12 +105,6 @@ void MacroAssembler::RecordWriteHelper(Register object,
}
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the index register contains the array index into
// the elements array represented as a zero-extended int32. Otherwise it can be
// used as a scratch register.
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
@ -141,6 +135,35 @@ void MacroAssembler::RecordWrite(Register object,
}
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
JumpIfSmi(value, &done);
InNewSpace(object, value, equal, &done);
RecordWriteHelper(object, address, value);
bind(&done);
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
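The new overload takes a precomputed slot address rather than an offset, but filters the barrier the same way: smi values and new-space stores need no bookkeeping. A sketch of the store kinds involved (assuming the target object has been promoted out of new space):

  var target = {};          // assume promoted to old space
  var boxed = { x: 1 };
  target.ref = boxed;       // heap-pointer store: the dirty region is recorded
  target.count = 42;        // smi store: JumpIfSmi(value) skips the barrier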
void MacroAssembler::RecordWriteNonSmi(Register object,
int offset,
Register scratch,
@ -444,7 +467,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xor_(dst, dst);
xorl(dst, dst);
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
@ -454,7 +477,6 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
}
void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
@ -469,6 +491,78 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
static int kSmiShift = kSmiTagSize + kSmiShiftSize;
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
xorl(kScratchRegister, kScratchRegister);
return kScratchRegister;
}
if (value == 1) {
return kSmiConstantRegister;
}
LoadSmiConstant(kScratchRegister, source);
return kScratchRegister;
}
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (FLAG_debug_code) {
movq(dst,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
RelocInfo::NONE);
cmpq(dst, kSmiConstantRegister);
if (allow_stub_calls()) {
Assert(equal, "Uninitialized kSmiConstantRegister");
} else {
Label ok;
j(equal, &ok);
int3();
bind(&ok);
}
}
if (source->value() == 0) {
xorl(dst, dst);
return;
}
int value = source->value();
bool negative = value < 0;
unsigned int uvalue = negative ? -value : value;
switch (uvalue) {
case 9:
lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
movq(dst, kSmiConstantRegister);
break;
case 0:
UNREACHABLE();
return;
default:
movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
return;
}
if (negative) {
neg(dst);
}
}
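These lea tricks work because smi tagging is linear: a tagged smi on this port is the value shifted left by kSmiShift, so Smi(a) plus Smi(b) has the bit pattern of Smi(a + b), and scaling a tagged value scales the untagged one. With Smi(1) preloaded in kSmiConstantRegister, the identities used above are (untagged values standing in for tagged ones):

  var one = 1;                 // kSmiConstantRegister holds Smi(1)
  var two = one + 1 * one;     // lea dst, [r15 + r15*1]
  var three = one + 2 * one;   // lea dst, [r15 + r15*2]
  var five = one + 4 * one;    // lea dst, [r15 + r15*4]
  var nine = one + 8 * one;    // lea dst, [r15 + r15*8]
  var four = 0 + 4 * one;      // xorl dst, dst; lea dst, [dst + r15*4]
  var eight = 0 + 8 * one;     // xorl dst, dst; lea dst, [dst + r15*8]
  // Negative constants build the absolute value, then neg(dst).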
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
@ -629,9 +723,10 @@ Condition MacroAssembler::CheckSmi(Register src) {
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
// Make mask 0x8000000000000001 and test that both bits are zero.
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(0x03));
testb(kScratchRegister, Immediate(3));
return zero;
}
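The check folds the sign test and the tag test into one: rotating left by one bit moves the sign bit into bit 0 and the smi tag into bit 1, so "positive smi" becomes "low two bits clear". An 8-bit analogue for illustration (the real code rotates 64-bit words):

  function rol8(x) { return ((x << 1) | (x >>> 7)) & 0xff; }
  function isPositiveSmi8(x) { return (rol8(x) & 3) === 0; }
  isPositiveSmi8(0x40);   // true: sign bit clear, tag bit clear
  isPositiveSmi8(0x41);   // false: tag bit set (not a smi)
  isPositiveSmi8(0x80);   // false: sign bit set (negative)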
@ -660,7 +755,6 @@ Condition MacroAssembler::CheckBothPositiveSmi(Register first,
}
Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
@ -673,11 +767,10 @@ Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
cmpq(kScratchRegister, Immediate(1));
return equal;
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
cmpq(src, kSmiConstantRegister);
return overflow;
}
@ -690,8 +783,8 @@ Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
// An unsigned 32-bit integer value is valid as long as the high bit
// is not set.
testq(src, Immediate(0x80000000));
return zero;
testl(src, src);
return positive;
}
@ -784,10 +877,10 @@ void MacroAssembler::SmiSub(Register dst,
}
Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
movq(kScratchRegister, src1);
subq(kScratchRegister, src2);
movq(kScratchRegister, src2);
cmpq(src1, kScratchRegister);
j(overflow, on_not_smi_result);
movq(src1, kScratchRegister);
subq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
@ -860,7 +953,7 @@ void MacroAssembler::SmiTryAddConstant(Register dst,
JumpIfNotSmi(src, on_not_smi_result);
Register tmp = (dst.is(src) ? kScratchRegister : dst);
Move(tmp, constant);
LoadSmiConstant(tmp, constant);
addq(tmp, src);
j(overflow, on_not_smi_result);
if (dst.is(src)) {
@ -874,14 +967,46 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (!dst.is(src)) {
movq(dst, src);
}
return;
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
addq(dst, kScratchRegister);
switch (constant->value()) {
case 1:
addq(dst, kSmiConstantRegister);
return;
case 2:
lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
addq(dst, constant_reg);
return;
}
} else {
Move(dst, constant);
addq(dst, src);
switch (constant->value()) {
case 1:
lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
addq(dst, src);
return;
}
}
}
@ -904,12 +1029,12 @@ void MacroAssembler::SmiAddConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
addq(kScratchRegister, dst);
LoadSmiConstant(kScratchRegister, constant);
addq(kScratchRegister, src);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
} else {
Move(dst, constant);
LoadSmiConstant(dst, constant);
addq(dst, src);
j(overflow, on_not_smi_result);
}
@ -923,19 +1048,17 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
subq(dst, kScratchRegister);
Register constant_reg = GetSmiConstant(constant);
subq(dst, constant_reg);
} else {
// Subtract by adding the negative, to do it in two operations.
if (constant->value() == Smi::kMinValue) {
Move(dst, constant);
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
Move(dst, Smi::FromInt(-constant->value()));
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
addq(dst, src);
}
}
@ -957,11 +1080,11 @@ void MacroAssembler::SmiSubConstant(Register dst,
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
Move(kScratchRegister, constant);
LoadSmiConstant(kScratchRegister, constant);
subq(dst, kScratchRegister);
} else {
// Subtract by adding the negation.
Move(kScratchRegister, Smi::FromInt(-constant->value()));
LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
addq(kScratchRegister, dst);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
@ -972,13 +1095,13 @@ void MacroAssembler::SmiSubConstant(Register dst,
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
Move(dst, constant);
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
Move(dst, Smi::FromInt(-(constant->value())));
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
j(overflow, on_not_smi_result);
}
@ -1132,10 +1255,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
xor_(dst, dst);
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
and_(dst, kScratchRegister);
Register constant_reg = GetSmiConstant(constant);
and_(dst, constant_reg);
} else {
Move(dst, constant);
LoadSmiConstant(dst, constant);
and_(dst, src);
}
}
@ -1152,10 +1275,10 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
or_(dst, kScratchRegister);
Register constant_reg = GetSmiConstant(constant);
or_(dst, constant_reg);
} else {
Move(dst, constant);
LoadSmiConstant(dst, constant);
or_(dst, src);
}
}
@ -1172,10 +1295,10 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
xor_(dst, kScratchRegister);
Register constant_reg = GetSmiConstant(constant);
xor_(dst, constant_reg);
} else {
Move(dst, constant);
LoadSmiConstant(dst, constant);
xor_(dst, src);
}
}
@ -1343,6 +1466,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@ -1568,8 +1692,8 @@ void MacroAssembler::Push(Smi* source) {
if (is_int32(smi)) {
push(Immediate(static_cast<int32_t>(smi)));
} else {
Set(kScratchRegister, smi);
push(kScratchRegister);
Register constant = GetSmiConstant(source);
push(constant);
}
}
@ -2109,10 +2233,10 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
movq(rax, rsi);
store_rax(context_address);
// Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r15, Operand(rbp, r14, times_pointer_size, offset));
lea(r12, Operand(rbp, r14, times_pointer_size, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
@ -2158,7 +2282,7 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
// Registers:
// r15 : argv
// r12 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
@ -2178,7 +2302,7 @@ void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
// Pop everything up to and including the arguments and the receiver
// from the caller stack.
lea(rsp, Operand(r15, 1 * kPointerSize));
lea(rsp, Operand(r12, 1 * kPointerSize));
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Top::k_context_address);

47
deps/v8/src/x64/macro-assembler-x64.h

@ -47,8 +47,11 @@ enum AllocationFlags {
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = { 10 }; // r10.
static const Register kRootRegister = { 13 }; // r13
static const Register kScratchRegister = { 10 }; // r10.
static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
static const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
@ -93,16 +96,27 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* branch);
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
// All registers are clobbered by the operation.
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
// object being stored. If |offset| is zero, then the |scratch|
// register contains the array index into the elements array
// represented as a Smi. All registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
// For page containing |object| mark region covering [address]
// dirty. |object| is the object being stored into, |value| is the
// object being stored. All registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update
// the write barrier if the value is a smi.
void RecordWrite(Register object,
Register address,
Register value);
// For page containing |object| mark region covering [object+offset] dirty.
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
@ -191,6 +205,12 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
void InitializeSmiConstantRegister() {
movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
RelocInfo::NONE);
}
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
@ -458,11 +478,12 @@ class MacroAssembler: public Assembler {
// Basic Smi operations.
void Move(Register dst, Smi* source) {
Set(dst, reinterpret_cast<int64_t>(source));
LoadSmiConstant(dst, source);
}
void Move(const Operand& dst, Smi* source) {
Set(dst, reinterpret_cast<int64_t>(source));
Register constant = GetSmiConstant(source);
movq(dst, constant);
}
void Push(Smi* smi);
@ -809,6 +830,14 @@ class MacroAssembler: public Assembler {
private:
bool generating_stub_;
bool allow_stub_calls_;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;

11
deps/v8/src/x64/register-allocator-x64-inl.h

@ -38,7 +38,8 @@ namespace internal {
bool RegisterAllocator::IsReserved(Register reg) {
return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
reg.is(kScratchRegister) || reg.is(kRootRegister);
reg.is(kScratchRegister) || reg.is(kRootRegister) ||
reg.is(kSmiConstantRegister);
}
@ -58,11 +59,11 @@ int RegisterAllocator::ToNumber(Register reg) {
5, // r8
6, // r9
-1, // r10 Scratch register.
9, // r11
10, // r12
8, // r11
9, // r12
-1, // r13 Roots array. This is callee saved.
7, // r14
8 // r15
-1 // r15 Smi constant register.
};
return kNumbers[reg.code()];
}
@ -71,7 +72,7 @@ int RegisterAllocator::ToNumber(Register reg) {
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] =
{ rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
{ rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
return kRegisters[num];
}

2
deps/v8/src/x64/register-allocator-x64.h

@ -33,7 +33,7 @@ namespace internal {
class RegisterAllocatorConstants : public AllStatic {
public:
static const int kNumRegisters = 11;
static const int kNumRegisters = 10;
static const int kInvalidRegister = -1;
};

3
deps/v8/src/x64/stub-cache-x64.cc

@ -2125,7 +2125,8 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register scratch,
String* name,
int save_at_depth,
Label* miss) {
Label* miss,
Register extra) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object,

7
deps/v8/src/x64/virtual-frame-x64.h

@ -388,6 +388,13 @@ class VirtualFrame : public ZoneObject {
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(element_count() - 1); }
// Duplicate the n'th element from the top of the frame.
// Dup(1) is equivalent to Dup().
void Dup(int n) {
ASSERT(n > 0);
PushFrameSlotAt(element_count() - n);
}
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();

84
deps/v8/test/cctest/test-api.cc

@ -3335,6 +3335,42 @@ THREADED_TEST(UndetectableObject) {
}
THREADED_TEST(ExtensibleOnUndetectable) {
v8::HandleScope scope;
LocalContext env;
Local<v8::FunctionTemplate> desc =
v8::FunctionTemplate::New(0, v8::Handle<Value>());
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
env->Global()->Set(v8_str("undetectable"), obj);
Local<String> source = v8_str("undetectable.x = 42;"
"undetectable.x");
Local<Script> script = Script::Compile(source);
CHECK_EQ(v8::Integer::New(42), script->Run());
ExpectBoolean("Object.isExtensible(undetectable)", true);
source = v8_str("Object.preventExtensions(undetectable);");
script = Script::Compile(source);
script->Run();
ExpectBoolean("Object.isExtensible(undetectable)", false);
source = v8_str("undetectable.y = 2000;");
script = Script::Compile(source);
v8::TryCatch try_catch;
Local<Value> result = script->Run();
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
}
THREADED_TEST(UndetectableString) {
v8::HandleScope scope;
LocalContext env;
@ -8521,6 +8557,54 @@ TEST(PreCompileDeserializationError) {
}
// Attempts to deserialize bad data.
TEST(PreCompileInvalidPreparseDataError) {
v8::V8::Initialize();
v8::HandleScope scope;
LocalContext context;
const char* script = "function foo(){ return 5;}\n"
"function bar(){ return 6 + 7;} foo();";
v8::ScriptData* sd =
v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK(!sd->HasError());
// ScriptDataImpl private implementation details
const int kUnsignedSize = sizeof(unsigned);
const int kHeaderSize = 4;
const int kFunctionEntrySize = 4;
const int kFunctionEntryStartOffset = 0;
const int kFunctionEntryEndOffset = 1;
unsigned* sd_data =
reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
CHECK_EQ(sd->Length(),
(kHeaderSize + 2 * kFunctionEntrySize) * kUnsignedSize);
// Overwrite function bar's end position with 0.
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryEndOffset] = 0;
v8::TryCatch try_catch;
Local<String> source = String::New(script);
Local<Script> compiled_script = Script::New(source, NULL, sd);
CHECK(try_catch.HasCaught());
String::AsciiValue exception_value(try_catch.Message()->Get());
CHECK_EQ("Uncaught SyntaxError: Invalid preparser data for function bar",
*exception_value);
try_catch.Reset();
// Overwrite function bar's start position with 200. The function entry
// will not be found when searching for it by position.
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
compiled_script = Script::New(source, NULL, sd);
CHECK(try_catch.HasCaught());
String::AsciiValue second_exception_value(try_catch.Message()->Get());
CHECK_EQ("Uncaught SyntaxError: Invalid preparser data for function bar",
*second_exception_value);
delete sd;
}
// Verifies that the Handle<String> and const char* versions of the API produce
// the same results (at least for one trivial case).
TEST(PreCompileAPIVariationsAreSame) {

35
deps/v8/test/cctest/test-debug.cc

@ -2075,6 +2075,39 @@ TEST(ScriptBreakPointLineTopLevel) {
}
// Test that it is possible to add and remove break points in a top level
// function which has no references but has not been collected yet.
TEST(ScriptBreakPointTopLevelCrash) {
v8::HandleScope scope;
DebugLocalContext env;
env.ExposeDebug();
v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
v8::Undefined());
v8::Local<v8::String> script_source = v8::String::New(
"function f() {\n"
" return 0;\n"
"}\n"
"f()");
int sbp1 = SetScriptBreakPointByNameFromJS("test.html", 3, -1);
{
v8::HandleScope scope;
break_point_hit_count = 0;
v8::Script::Compile(script_source, v8::String::New("test.html"))->Run();
CHECK_EQ(1, break_point_hit_count);
}
int sbp2 = SetScriptBreakPointByNameFromJS("test.html", 3, -1);
ClearBreakPointFromJS(sbp1);
ClearBreakPointFromJS(sbp2);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
}
// Test that it is possible to remove the last break point for a function
// inside the break handling of that break point.
TEST(RemoveBreakPointInBreak) {
@ -2129,7 +2162,7 @@ TEST(DebuggerStatement) {
}
// Test setting a breakpoint on the debugger statement.
TEST(DebuggerStatementBreakpoint) {
break_point_hit_count = 0;
v8::HandleScope scope;

5
deps/v8/test/cctest/test-disasm-arm.cc

@ -408,6 +408,11 @@ TEST(Vfp) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
COMPARE(vmov(d0, d1),
"eeb00b41 vmov.f64 d0, d1");
COMPARE(vmov(d3, d3, eq),
"0eb03b43 vmov.f64eq d3, d3");
COMPARE(vadd(d0, d1, d2),
"ee310b02 vadd.f64 d0, d1, d2");
COMPARE(vadd(d3, d4, d5, mi),

153
deps/v8/test/cctest/test-macro-assembler-x64.cc

@ -57,10 +57,9 @@ using v8::internal::rsp;
using v8::internal::r8;
using v8::internal::r9;
using v8::internal::r11;
using v8::internal::r12; // Remember: r12..r15 are callee save!
using v8::internal::r12;
using v8::internal::r13;
using v8::internal::r14;
using v8::internal::r15;
using v8::internal::times_pointer_size;
using v8::internal::FUNCTION_CAST;
using v8::internal::CodeDesc;
@ -92,6 +91,24 @@ typedef int (*F0)();
#define __ masm->
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ push(v8::internal::kSmiConstantRegister);
__ InitializeSmiConstantRegister();
}
static void ExitCode(MacroAssembler* masm) {
// Return -1 if kSmiConstantRegister was clobbered during the test.
__ Move(rdx, Smi::FromInt(1));
__ cmpq(rdx, v8::internal::kSmiConstantRegister);
__ movq(rdx, Immediate(-1));
__ cmovq(not_equal, rax, rdx);
__ pop(v8::internal::kSmiConstantRegister);
}
TEST(Smi) {
// Check that C++ Smi operations work as expected.
int64_t test_numbers[] = {
@ -139,6 +156,7 @@ TEST(SmiMove) {
MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
@ -156,6 +174,7 @@ TEST(SmiMove) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -225,6 +244,7 @@ TEST(SmiCompare) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiCompare(masm, &exit, 0x10, 0, 0);
@ -249,6 +269,7 @@ TEST(SmiCompare) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -272,6 +293,7 @@ TEST(Integer32ToSmi) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
__ movq(rax, Immediate(1)); // Test number.
@ -349,6 +371,7 @@ TEST(Integer32ToSmi) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -397,6 +420,7 @@ TEST(Integer64PlusConstantToSmi) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
int64_t twice_max = static_cast<int64_t>(Smi::kMaxValue) * 2;
@ -416,6 +440,7 @@ TEST(Integer64PlusConstantToSmi) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -438,6 +463,7 @@ TEST(SmiCheck) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
Condition cond;
@ -613,6 +639,7 @@ TEST(SmiCheck) {
__ xor_(rax, rax);
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -683,6 +710,7 @@ TEST(SmiNeg) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiNeg(masm, &exit, 0x10, 0);
@ -696,6 +724,7 @@ TEST(SmiNeg) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -768,6 +797,7 @@ TEST(SmiAdd) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
// No-overflow tests.
@ -782,6 +812,7 @@ TEST(SmiAdd) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -955,6 +986,7 @@ TEST(SmiSub) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
SmiSubTest(masm, &exit, 0x10, 1, 2);
@ -977,6 +1009,7 @@ TEST(SmiSub) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1042,6 +1075,7 @@ TEST(SmiMul) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiMul(masm, &exit, 0x10, 0, 0);
@ -1061,6 +1095,7 @@ TEST(SmiMul) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1081,51 +1116,51 @@ void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
#endif
bool fraction = !division_by_zero && !overflow && (x % y != 0);
__ Move(r11, Smi::FromInt(x));
__ Move(r12, Smi::FromInt(y));
__ Move(r14, Smi::FromInt(y));
if (!fraction && !overflow && !negative_zero && !division_by_zero) {
// Division succeeds
__ movq(rcx, r11);
__ movq(r15, Immediate(id));
__ movq(r12, Immediate(id));
int result = x / y;
__ Move(r8, Smi::FromInt(result));
__ SmiDiv(r9, rcx, r12, exit);
// Might have destroyed rcx and r12.
__ incq(r15);
__ SmiDiv(r9, rcx, r14, exit);
// Might have destroyed rcx and r14.
__ incq(r12);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(r15);
__ incq(r12);
__ movq(rcx, r11);
__ Move(r12, Smi::FromInt(y));
__ Move(r14, Smi::FromInt(y));
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
__ incq(r15);
__ SmiDiv(rcx, rcx, r12, exit);
__ incq(r12);
__ SmiDiv(rcx, rcx, r14, exit);
__ incq(r15);
__ incq(r12);
__ SmiCompare(rcx, r8);
__ j(not_equal, exit);
} else {
// Division fails.
__ movq(r15, Immediate(id + 8));
__ movq(r12, Immediate(id + 8));
Label fail_ok, fail_ok2;
__ movq(rcx, r11);
__ SmiDiv(r9, rcx, r12, &fail_ok);
__ SmiDiv(r9, rcx, r14, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
__ incq(r15);
__ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
__ incq(r15);
__ SmiDiv(rcx, rcx, r12, &fail_ok2);
__ incq(r12);
__ SmiDiv(rcx, rcx, r14, &fail_ok2);
__ jmp(exit);
__ bind(&fail_ok2);
__ incq(r15);
__ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
}
@ -1145,10 +1180,11 @@ TEST(SmiDiv) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
__ push(r14);
__ push(r12);
__ push(r15);
TestSmiDiv(masm, &exit, 0x10, 1, 1);
TestSmiDiv(masm, &exit, 0x20, 1, 0);
TestSmiDiv(masm, &exit, 0x30, -1, 0);
@ -1170,11 +1206,12 @@ TEST(SmiDiv) {
TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
__ xor_(r15, r15); // Success.
__ xor_(r12, r12); // Success.
__ bind(&exit);
__ movq(rax, r15);
__ pop(r15);
__ movq(rax, r12);
__ pop(r12);
__ pop(r14);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1192,47 +1229,47 @@ void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
bool negative_zero = (!fraction && x < 0);
__ Move(rcx, Smi::FromInt(x));
__ movq(r11, rcx);
__ Move(r12, Smi::FromInt(y));
__ Move(r14, Smi::FromInt(y));
if (!division_overflow && !negative_zero && !division_by_zero) {
// Modulo succeeds
__ movq(r15, Immediate(id));
__ movq(r12, Immediate(id));
int result = x % y;
__ Move(r8, Smi::FromInt(result));
__ SmiMod(r9, rcx, r12, exit);
__ SmiMod(r9, rcx, r14, exit);
__ incq(r15);
__ incq(r12);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(r15);
__ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
__ incq(r15);
__ SmiMod(rcx, rcx, r12, exit);
__ incq(r12);
__ SmiMod(rcx, rcx, r14, exit);
__ incq(r15);
__ incq(r12);
__ SmiCompare(rcx, r8);
__ j(not_equal, exit);
} else {
// Modulo fails.
__ movq(r15, Immediate(id + 8));
__ movq(r12, Immediate(id + 8));
Label fail_ok, fail_ok2;
__ SmiMod(r9, rcx, r12, &fail_ok);
__ SmiMod(r9, rcx, r14, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
__ incq(r15);
__ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
__ incq(r15);
__ SmiMod(rcx, rcx, r12, &fail_ok2);
__ incq(r12);
__ SmiMod(rcx, rcx, r14, &fail_ok2);
__ jmp(exit);
__ bind(&fail_ok2);
__ incq(r15);
__ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
}
@ -1252,10 +1289,11 @@ TEST(SmiMod) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
__ push(r14);
__ push(r12);
__ push(r15);
TestSmiMod(masm, &exit, 0x10, 1, 1);
TestSmiMod(masm, &exit, 0x20, 1, 0);
TestSmiMod(masm, &exit, 0x30, -1, 0);
@ -1277,11 +1315,12 @@ TEST(SmiMod) {
TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
__ xor_(r15, r15); // Success.
__ xor_(r12, r12); // Success.
__ bind(&exit);
__ movq(rax, r15);
__ pop(r15);
__ movq(rax, r12);
__ pop(r12);
__ pop(r14);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1336,7 +1375,7 @@ TEST(SmiIndex) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
&actual_size,
true));
CHECK(buffer);
@ -1345,6 +1384,7 @@ TEST(SmiIndex) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiIndex(masm, &exit, 0x10, 0);
@ -1355,6 +1395,7 @@ TEST(SmiIndex) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1411,6 +1452,7 @@ TEST(SmiSelectNonSmi) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false); // Avoid inline checks.
EntryCode(masm);
Label exit;
TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
@ -1425,6 +1467,7 @@ TEST(SmiSelectNonSmi) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1487,6 +1530,7 @@ TEST(SmiAnd) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiAnd(masm, &exit, 0x10, 0, 0);
@ -1503,6 +1547,7 @@ TEST(SmiAnd) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1565,6 +1610,7 @@ TEST(SmiOr) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiOr(masm, &exit, 0x10, 0, 0);
@ -1583,6 +1629,7 @@ TEST(SmiOr) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1645,6 +1692,7 @@ TEST(SmiXor) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiXor(masm, &exit, 0x10, 0, 0);
@ -1663,6 +1711,7 @@ TEST(SmiXor) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1709,6 +1758,7 @@ TEST(SmiNot) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiNot(masm, &exit, 0x10, 0);
@ -1722,6 +1772,7 @@ TEST(SmiNot) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1793,7 +1844,7 @@ TEST(SmiShiftLeft) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
&actual_size,
true));
CHECK(buffer);
@ -1802,6 +1853,7 @@ TEST(SmiShiftLeft) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiShiftLeft(masm, &exit, 0x10, 0);
@ -1814,6 +1866,7 @@ TEST(SmiShiftLeft) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1896,7 +1949,7 @@ TEST(SmiShiftLogicalRight) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
&actual_size,
true));
CHECK(buffer);
@ -1905,6 +1958,7 @@ TEST(SmiShiftLogicalRight) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiShiftLogicalRight(masm, &exit, 0x10, 0);
@ -1917,6 +1971,7 @@ TEST(SmiShiftLogicalRight) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -1971,6 +2026,7 @@ TEST(SmiShiftArithmeticRight) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestSmiShiftArithmeticRight(masm, &exit, 0x10, 0);
@ -1983,6 +2039,7 @@ TEST(SmiShiftArithmeticRight) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -2032,6 +2089,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
@ -2046,6 +2104,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
CodeDesc desc;
@ -2074,8 +2133,9 @@ TEST(OperandOffset) {
masm->set_allow_stub_calls(false);
Label exit;
__ push(r12);
EntryCode(masm);
__ push(r13);
__ push(r14);
__ push(rbx);
__ push(rbp);
__ push(Immediate(0x100)); // <-- rbp
@ -2093,7 +2153,7 @@ TEST(OperandOffset) {
// r12 = rsp[3]
// rbx = rsp[5]
// r13 = rsp[7]
__ lea(r12, Operand(rsp, 3 * kPointerSize));
__ lea(r14, Operand(rsp, 3 * kPointerSize));
__ lea(r13, Operand(rbp, -3 * kPointerSize));
__ lea(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
@ -2396,8 +2456,9 @@ TEST(OperandOffset) {
__ lea(rsp, Operand(rbp, kPointerSize));
__ pop(rbp);
__ pop(rbx);
__ pop(r14);
__ pop(r13);
__ pop(r12);
ExitCode(masm);
__ ret(0);

11
deps/v8/test/es5conform/es5conform.status

@ -44,7 +44,6 @@ chapter11/11.1/11.1.5: UNIMPLEMENTED
chapter11/11.4/11.4.1//11.4.1-4.a-5: FAIL
chapter11/11.4/11.4.1//11.4.1-4.a-7: FAIL
# We do not have a global object called 'global' as required by tests.
chapter15/15.1: FAIL_OK
@ -52,14 +51,10 @@ chapter15/15.1: FAIL_OK
chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
# NOT IMPLEMENTED: freeze
chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
# NOT IMPLEMENTED: preventExtensions
chapter15/15.2/15.2.3/15.2.3.10: UNIMPLEMENTED
# NOT IMPLEMENTED: isSealed
chapter15/15.2/15.2.3/15.2.3.11: UNIMPLEMENTED
# NOT IMPLEMENTED: isFrozen
chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
# NOT IMPLEMENTED: isExtensible
chapter15/15.2/15.2.3/15.2.3.13: UNIMPLEMENTED
# NOT IMPLEMENTED: seal
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: UNIMPLEMENTED
@ -67,18 +62,12 @@ chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: UNIMPLEMENTED
# NOT IMPLEMENTED: freeze
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-21: UNIMPLEMENTED
# NOT IMPLEMENTED: preventExtensions
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-22: UNIMPLEMENTED
# NOT IMPLEMENTED: isSealed
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-23: UNIMPLEMENTED
# NOT IMPLEMENTED: isFrozen
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-24: UNIMPLEMENTED
# NOT IMPLEMENTED: isExtensible
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-25: UNIMPLEMENTED
# NOT IMPLEMENTED: bind
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: UNIMPLEMENTED

51
deps/v8/test/mjsunit/call-stub.js

@ -0,0 +1,51 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function Hash() {
for (var i = 0; i < 100; i++) {
this['a' + i] = i;
}
delete this.a50; // Ensure it's a normal object.
}
Hash.prototype.m = function() {
return 1;
};
var h = new Hash();
for (var i = 1; i < 100; i++) {
if (i == 50) {
h.m = function() {
return 2;
};
} else if (i == 70) {
delete h.m;
}
assertEquals(i < 50 || i >= 70 ? 1 : 2, h.m());
}
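
The delete above is what pushes the receiver into dictionary (normalized) mode, which is the property layout this call stub targets. A minimal sketch of the same trick, assuming the fast/slow-mode behavior of this V8 vintage:

var o = { m: function() { return 1; }, pad: 0 };
delete o.pad;                     // deleting an own property normalizes o
for (var j = 0; j < 3; j++) {
  assertEquals(1, o.m());         // warm the call IC on the dictionary case
}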

9
deps/v8/test/mjsunit/date.js

@ -154,6 +154,15 @@ function testToLocaleTimeString() {
testToLocaleTimeString();
// Test that -0 is treated correctly in MakeDay.
var d = new Date();
assertDoesNotThrow("d.setDate(-0)");
assertDoesNotThrow("new Date(-0, -0, -0, -0, -0, -0. -0)");
assertDoesNotThrow("new Date(0x40000000, 0x40000000, 0x40000000," +
"0x40000000, 0x40000000, 0x40000000, 0x40000000)")
assertDoesNotThrow("new Date(-0x40000001, -0x40000001, -0x40000001," +
"-0x40000001, -0x40000001, -0x40000001, -0x40000001)")
// Modified test from WebKit
// LayoutTests/fast/js/script-tests/date-utc-timeclip.js:
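
The -0 assertions above exercise MakeDay's argument coercion: a negative zero must select exactly the same day as a positive zero. A hedged illustration of the expected invariant (dates chosen arbitrarily):

var a = new Date(2010, 0, 15);
var b = new Date(2010, 0, 15);
a.setDate(0);
b.setDate(-0);
assertEquals(a.getTime(), b.getTime());  // -0 behaves like +0 in MakeDay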

3
deps/v8/test/mjsunit/debug-liveedit-3.js

@ -57,7 +57,8 @@ var new_source = script.source.replace(function_z_text, "function Intermediate()
print("new source: " + new_source);
var change_log = new Array();
Debug.LiveEdit.SetScriptSource(script, new_source, change_log);
var result = Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
print("Result: " + JSON.stringify(result) + "\n");
print("Change log: " + JSON.stringify(change_log) + "\n");
assertEquals(8, z6());
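
The extra `false` reflects the new SetScriptSource signature, which gained a third parameter; it appears to be a preview/dry-run flag in this V8 revision (an assumption here, since the parameter name is not shown in this excerpt). Under that assumption, a dry run that only reports what would change would look roughly like:

var preview = Debug.LiveEdit.SetScriptSource(script, new_source,
                                             true,  // assumed preview-only flag
                                             change_log);
print("Would change: " + JSON.stringify(preview));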

3
deps/v8/test/mjsunit/debug-liveedit-breakpoints.js

@ -72,7 +72,8 @@ var new_source = script.source.replace(function_z_text, "");
print("new source: " + new_source);
var change_log = new Array();
Debug.LiveEdit.SetScriptSource(script, new_source, change_log);
var result = Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
print("Result: " + JSON.stringify(result) + "\n");
print("Change log: " + JSON.stringify(change_log) + "\n");
var breaks = Debug.scriptBreakPoints();

3
deps/v8/test/mjsunit/debug-liveedit-newsource.js

@ -57,7 +57,8 @@ var new_source = new_source.replace("17", "18");
print("new source: " + new_source);
var change_log = new Array();
Debug.LiveEdit.SetScriptSource(script, new_source, change_log);
var result = Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
print("Result: " + JSON.stringify(result) + "\n");
print("Change log: " + JSON.stringify(change_log) + "\n");
assertEquals("Capybara", ChooseAnimal());

2
deps/v8/test/mjsunit/fuzz-natives.js

@ -63,7 +63,7 @@ function testArgumentCount(name, argc) {
try {
func = makeFunction(name, i);
} catch (e) {
if (e != "SyntaxError: illegal access") throw e;
if (e != "SyntaxError: Illegal access") throw e;
}
if (func === null && i == argc) {
throw "unexpected exception";

11
deps/v8/test/mjsunit/math-min-max.js

@ -42,7 +42,16 @@ assertEquals(1.1, Math.min(2.2, 3.3, 1.1));
// Prepare a non-Smi zero value.
function returnsNonSmi(){ return 0.25; }
var ZERO = returnsNonSmi() - returnsNonSmi();
var ZERO = (function() {
var z;
// We need a loop here because the first time around the result comes back
// from the runtime system as a Smi. After a while the binary op IC settles
// down and we get a non-Smi from the generated code.
for (var i = 0; i < 10; i++) {
z = returnsNonSmi() - returnsNonSmi();
}
return z;
})();
assertEquals(0, ZERO);
assertEquals(Infinity, 1/ZERO);
assertEquals(-Infinity, 1/-ZERO);
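
Only a heap number can represent a signed zero (a Smi zero is just 0), which is why the loop above insists on a non-Smi result; the sign of a zero is observable only through division, as the assertions do. A self-contained sketch of that check:

function zeroSign(z) { return 1 / z < 0 ? "-0" : "+0"; }
assertEquals("+0", zeroSign(0));
assertEquals("-0", zeroSign(-0));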

129
deps/v8/test/mjsunit/math-pow.js

@ -0,0 +1,129 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Tests the special cases specified by ES 15.8.2.13
// Simple sanity check
assertEquals(4, Math.pow(2, 2));
assertEquals(2147483648, Math.pow(2, 31));
assertEquals(0.25, Math.pow(2, -2));
assertEquals(0.0625, Math.pow(2, -4));
assertEquals(1, Math.pow(1, 100));
assertEquals(0, Math.pow(0, 1000));
// Spec tests
assertEquals(NaN, Math.pow(2, NaN));
assertEquals(NaN, Math.pow(+0, NaN));
assertEquals(NaN, Math.pow(-0, NaN));
assertEquals(NaN, Math.pow(Infinity, NaN));
assertEquals(NaN, Math.pow(-Infinity, NaN));
assertEquals(1, Math.pow(NaN, +0));
assertEquals(1, Math.pow(NaN, -0));
assertEquals(NaN, Math.pow(NaN, NaN));
assertEquals(NaN, Math.pow(NaN, 2.2));
assertEquals(NaN, Math.pow(NaN, 1));
assertEquals(NaN, Math.pow(NaN, -1));
assertEquals(NaN, Math.pow(NaN, -2.2));
assertEquals(NaN, Math.pow(NaN, Infinity));
assertEquals(NaN, Math.pow(NaN, -Infinity));
assertEquals(Infinity, Math.pow(1.1, Infinity));
assertEquals(Infinity, Math.pow(-1.1, Infinity));
assertEquals(Infinity, Math.pow(2, Infinity));
assertEquals(Infinity, Math.pow(-2, Infinity));
assertEquals(+0, Math.pow(1.1, -Infinity));
assertEquals(+0, Math.pow(-1.1, -Infinity));
assertEquals(+0, Math.pow(2, -Infinity));
assertEquals(+0, Math.pow(-2, -Infinity));
assertEquals(NaN, Math.pow(1, Infinity));
assertEquals(NaN, Math.pow(1, -Infinity));
assertEquals(NaN, Math.pow(-1, Infinity));
assertEquals(NaN, Math.pow(-1, -Infinity));
assertEquals(+0, Math.pow(0.1, Infinity));
assertEquals(+0, Math.pow(-0.1, Infinity));
assertEquals(+0, Math.pow(0.999, Infinity));
assertEquals(+0, Math.pow(-0.999, Infinity));
assertEquals(Infinity, Math.pow(0.1, -Infinity));
assertEquals(Infinity, Math.pow(-0.1, -Infinity));
assertEquals(Infinity, Math.pow(0.999, -Infinity));
assertEquals(Infinity, Math.pow(-0.999, -Infinity));
assertEquals(Infinity, Math.pow(Infinity, 0.1));
assertEquals(Infinity, Math.pow(Infinity, 2));
assertEquals(+0, Math.pow(Infinity, -0.1));
assertEquals(+0, Math.pow(Infinity, -2));
assertEquals(-Infinity, Math.pow(-Infinity, 3));
assertEquals(-Infinity, Math.pow(-Infinity, 13));
assertEquals(Infinity, Math.pow(-Infinity, 3.1));
assertEquals(Infinity, Math.pow(-Infinity, 2));
assertEquals(-0, Math.pow(-Infinity, -3));
assertEquals(-0, Math.pow(-Infinity, -13));
assertEquals(+0, Math.pow(-Infinity, -3.1));
assertEquals(+0, Math.pow(-Infinity, -2));
assertEquals(+0, Math.pow(+0, 1.1));
assertEquals(+0, Math.pow(+0, 2));
assertEquals(Infinity, Math.pow(+0, -1.1));
assertEquals(Infinity, Math.pow(+0, -2));
assertEquals(-0, Math.pow(-0, 3));
assertEquals(-0, Math.pow(-0, 13));
assertEquals(+0, Math.pow(-0, 3.1));
assertEquals(+0, Math.pow(-0, 2));
assertEquals(-Infinity, Math.pow(-0, -3));
assertEquals(-Infinity, Math.pow(-0, -13));
assertEquals(Infinity, Math.pow(-0, -3.1));
assertEquals(Infinity, Math.pow(-0, -2));
assertEquals(NaN, Math.pow(-0.00001, 1.1));
assertEquals(NaN, Math.pow(-0.00001, -1.1));
assertEquals(NaN, Math.pow(-1.1, 1.1));
assertEquals(NaN, Math.pow(-1.1, -1.1));
assertEquals(NaN, Math.pow(-2, 1.1));
assertEquals(NaN, Math.pow(-2, -1.1));
assertEquals(NaN, Math.pow(-1000, 1.1));
assertEquals(NaN, Math.pow(-1000, -1.1));
// Tests from Sputnik S8.5_A13_T1.
assertTrue((1*((Math.pow(2,53))-1)*(Math.pow(2,-1074))) === 4.4501477170144023e-308);
assertTrue((1*(Math.pow(2,52))*(Math.pow(2,-1074))) === 2.2250738585072014e-308);
assertTrue((-1*(Math.pow(2,52))*(Math.pow(2,-1074))) === -2.2250738585072014e-308);
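
One of the less obvious rows above, worked out: an odd positive exponent preserves the sign of zero, so pow(-0, 3) is -0, and the negative-exponent case is its reciprocal:

assertEquals(-Infinity, 1 / Math.pow(-0, 3));  // pow(-0, 3) === -0
assertEquals(-Infinity, Math.pow(-0, -3));     // i.e. 1 / -0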

157
deps/v8/test/mjsunit/object-prevent-extensions.js

@ -0,0 +1,157 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Tests the Object.preventExtensions method - ES 15.2.3.10
var obj1 = {};
// Extensible defaults to true.
assertTrue(Object.isExtensible(obj1));
Object.preventExtensions(obj1);
// Make sure the is_extensible flag is set.
assertFalse(Object.isExtensible(obj1));
// Try adding a new property.
try {
obj1.x = 42;
assertUnreachable();
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
assertEquals(undefined, obj1.x);
// Try adding a new element.
try {
obj1[1] = 42;
assertUnreachable();
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
assertEquals(undefined, obj1[1]);
// Try when the object has an existing property.
var obj2 = {};
assertTrue(Object.isExtensible(obj2));
obj2.x = 42;
assertEquals(42, obj2.x);
assertTrue(Object.isExtensible(obj2));
Object.preventExtensions(obj2);
assertEquals(42, obj2.x);
try {
obj2.y = 42;
assertUnreachable();
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
// obj2.y should still be undefined.
assertEquals(undefined, obj2.y);
// Make sure we can still write values to obj.x.
obj2.x = 43;
assertEquals(43, obj2.x);
try {
obj2.y = new function() { return 42; };
assertUnreachable();
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
// obj2.y should still be undefined.
assertEquals(undefined, obj2.y);
assertEquals(43, obj2.x);
try {
Object.defineProperty(obj2, "y", {value: 42});
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
// obj2.y should still be undefined.
assertEquals(undefined, obj2.y);
assertEquals(43, obj2.x);
try {
obj2[1] = 42;
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
assertEquals(undefined, obj2[1]);
var arr = new Array();
arr[1] = 10;
Object.preventExtensions(arr);
try {
arr[2] = 42;
assertUnreachable();
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
assertEquals(10, arr[1]);
// We should still be able to change existing elements.
arr[1] = 42;
assertEquals(42, arr[1]);
// Test that the extensible flag is not inherited.
var parent = {};
parent.x = 42;
Object.preventExtensions(parent);
var child = Object.create(parent);
// We should be able to add new properties to the child object.
child.y = 42;
// This should have no influence on the parent object.
try {
parent.y = 29;
assertUnreachable();
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
// Test that attributes on functions are also handled correctly.
function foo() {
return 42;
}
Object.preventExtensions(foo);
try {
foo.x = 29;
assertUnreachable();
} catch (e) {
assertTrue(/object is not extensible/.test(e));
}
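
Object.preventExtensions also returns the object it was given, so the flag can be applied inline at construction time; existing properties remain writable unless changed separately. A small usage sketch:

var config = Object.preventExtensions({ retries: 3 });
assertFalse(Object.isExtensible(config));
config.retries = 5;               // existing properties are unaffected
assertEquals(5, config.retries);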

65
deps/v8/test/mjsunit/store-dictionary.js

@ -0,0 +1,65 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test dictionary store ICs.
// Function that stores property 'x' on an object.
function store(obj) { obj.x = 42; }
// Create object and force it to dictionary mode by deleting property.
var o = { x: 32, y: 33 };
delete o.y;
// Make the store ic in the 'store' function go into dictionary store
// case.
for (var i = 0; i < 3; i++) {
store(o);
}
assertEquals(42, o.x);
// Test that READ_ONLY property attribute is respected. Make 'x'
// READ_ONLY.
Object.defineProperty(o, 'x', { value: 32, writable: false });
// Attempt to store using the store ic in the 'store' function.
store(o);
// Check that the store did not change the value.
assertEquals(32, o.x);
// Check that bail-out code works.
// Smi.
store(1);
// Fast case object.
o = new Object();
store(o);
assertEquals(42, o.x);
// Slow case object without x property.
delete o.x;
store(o);
assertEquals(42, o.x);
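
The READ_ONLY part of this test leans on sloppy-mode semantics: assigning to a non-writable property fails silently rather than throwing, so the only way to detect the failed store is to read the value back. In isolation:

var obj = {};
Object.defineProperty(obj, 'x', { value: 1, writable: false });
obj.x = 2;                   // silently ignored outside strict mode
assertEquals(1, obj.x);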

57
deps/v8/test/mjsunit/string-replace-with-empty.js

@ -0,0 +1,57 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-externalize-string
assertEquals("0123", "aa0bb1cc2dd3".replace(/[a-z]/g, ""));
assertEquals("0123", "\u1234a0bb1cc2dd3".replace(/[\u1234a-z]/g, ""));
var expected = "0123";
var cons = "a0b1c2d3";
for (var i = 0; i < 5; i++) {
expected += expected;
cons += cons;
}
assertEquals(expected, cons.replace(/[a-z]/g, ""));
cons = "\u12340b1c2d3";
for (var i = 0; i < 5; i++) {
cons += cons;
}
assertEquals(expected, cons.replace(/[\u1234a-z]/g, ""));
cons = "a0b1c2d3";
for (var i = 0; i < 5; i++) {
cons += cons;
}
externalizeString(cons, true/* force two-byte */);
assertEquals(expected, cons.replace(/[a-z]/g, ""));
cons = "\u12340b1c2d3";
for (var i = 0; i < 5; i++) {
cons += cons;
}
externalizeString(cons);
assertEquals(expected, cons.replace(/[\u1234a-z]/g, ""));
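
The doubling loops grow an 8-character seed into a 256-character cons (rope) string (8 * 2^5), so replace() has to flatten a non-sequential representation before scanning it; externalizeString then re-runs the same path over externally backed strings. The length arithmetic:

var s = "a0b1c2d3";
for (var i = 0; i < 5; i++) s += s;   // 8 -> 16 -> 32 -> 64 -> 128 -> 256
assertEquals(256, s.length);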

33
deps/v8/test/mjsunit/value-of.js

@ -0,0 +1,33 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function MyException() { }
var o = new Object();
o.valueOf = function() { throw new MyException(); }
assertThrows(function() { o + 1 }, MyException);
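
The `o + 1` expression reaches valueOf because the addition operator applies ToPrimitive with no hint, which tries valueOf before toString; an exception thrown there propagates out of the arithmetic. The non-throwing counterpart:

var p = { valueOf: function() { return 41; } };
assertEquals(42, p + 1);       // number addition after ToPrimitive
assertEquals("411", p + "1");  // still valueOf first, then concatenation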

57
deps/v8/tools/gyp/v8.gyp

@ -75,7 +75,14 @@
'msvs_settings': {
'VCCLCompilerTool': {
'Optimizations': '0',
'RuntimeLibrary': '1',
'conditions': [
['OS=="win" and component=="shared_library"', {
'RuntimeLibrary': '3', # /MDd
}, {
'RuntimeLibrary': '1', # /MTd
}],
],
},
'VCLinkerTool': {
'LinkIncremental': '2',
@ -129,13 +136,20 @@
},
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '0',
'Optimizations': '2',
'InlineFunctionExpansion': '2',
'EnableIntrinsicFunctions': 'true',
'FavorSizeOrSpeed': '0',
'OmitFramePointers': 'true',
'StringPooling': 'true',
'conditions': [
['OS=="win" and component=="shared_library"', {
'RuntimeLibrary': '2', #/MD
}, {
'RuntimeLibrary': '0', #/MT
}],
],
},
'VCLinkerTool': {
'LinkIncremental': '1',
@ -152,7 +166,6 @@
'targets': [
{
'target_name': 'v8',
'type': 'none',
'conditions': [
['v8_use_snapshot=="true"', {
'dependencies': ['v8_snapshot'],
@ -160,6 +173,18 @@
{
'dependencies': ['v8_nosnapshot'],
}],
['OS=="win" and component=="shared_library"', {
'type': '<(component)',
'sources': [
'../../src/v8dll-main.cc',
],
'defines': [
'BUILDING_V8_SHARED'
],
},
{
'type': 'none',
}],
],
'direct_dependent_settings': {
'include_dirs': [
@ -170,6 +195,13 @@
{
'target_name': 'v8_snapshot',
'type': '<(library)',
'conditions': [
['OS=="win" and component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
],
}],
],
'dependencies': [
'mksnapshot#host',
'js2c#host',
@ -216,7 +248,12 @@
['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
}],
['OS=="win" and component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
],
}],
]
},
{
@ -614,6 +651,11 @@
'libraries': [ '-lwinmm.lib' ],
},
}],
['OS=="win" and component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED'
],
}],
],
},
{
@ -692,10 +734,15 @@
'../../samples/shell.cc',
],
'conditions': [
[ 'OS=="win"', {
['OS=="win"', {
# This could be achieved by not setting chromium_code, if that's OK.
'defines': ['_CRT_SECURE_NO_WARNINGS'],
}],
['OS=="win" and component=="shared_library"', {
'defines': [
'USING_V8_SHARED',
],
}],
],
},
],

30
deps/v8/tools/js2c.py

@ -104,19 +104,22 @@ def Validate(lines, file):
def ExpandConstants(lines, constants):
for key, value in constants.items():
lines = lines.replace(key, str(value))
for key, value in constants:
lines = key.sub(str(value), lines)
return lines
def ExpandMacros(lines, macros):
for name, macro in macros.items():
start = lines.find(name + '(', 0)
while start != -1:
# We allow macros to depend on the previously declared macros, but
# we don't allow self-dependencies or recursion.
for name_pattern, macro in reversed(macros):
pattern_match = name_pattern.search(lines, 0)
while pattern_match is not None:
# Scan over the arguments
assert lines[start + len(name)] == '('
height = 1
end = start + len(name) + 1
start = pattern_match.start()
end = pattern_match.end()
assert lines[end - 1] == '('
last_match = end
arg_index = 0
mapping = { }
@ -139,7 +142,7 @@ def ExpandMacros(lines, macros):
result = macro.expand(mapping)
# Replace the occurrence of the macro with the expansion
lines = lines[:start] + result + lines[end:]
start = lines.find(name + '(', end)
pattern_match = name_pattern.search(lines, start + len(result))
return lines
class TextMacro:
@ -166,9 +169,10 @@ CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
def ReadMacros(lines):
constants = { }
macros = { }
constants = []
macros = []
for line in lines:
hash = line.find('#')
if hash != -1: line = line[:hash]
@ -178,14 +182,14 @@ def ReadMacros(lines):
if const_match:
name = const_match.group(1)
value = const_match.group(2).strip()
constants[name] = value
constants.append((re.compile("\\b%s\\b" % name), value))
else:
macro_match = MACRO_PATTERN.match(line)
if macro_match:
name = macro_match.group(1)
args = map(string.strip, macro_match.group(2).split(','))
body = macro_match.group(3).strip()
macros[name] = TextMacro(args, body)
macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
else:
python_match = PYTHON_MACRO_PATTERN.match(line)
if python_match:
@ -193,7 +197,7 @@ def ReadMacros(lines):
args = map(string.strip, python_match.group(2).split(','))
body = python_match.group(3).strip()
fun = eval("lambda " + ",".join(args) + ': ' + body)
macros[name] = PythonMacro(args, fun)
macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun)))
else:
raise ("Illegal line: " + line)
return (constants, macros)
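
Replacing the dicts with ordered lists of (compiled pattern, value) pairs makes expansion deterministic, and the \b word-boundary anchors stop a macro or constant name from being rewritten inside a longer identifier; iterating the macros in reverse declaration order lets a later macro's body invoke earlier ones. The boundary point, illustrated in JavaScript rather than the Python above (macro names hypothetical):

var source = "if (IS_NULL(a) || THIS_IS_NULL(b)) return;";
assertEquals(2, source.match(/IS_NULL\(/g).length);    // a plain substring search hits both
assertEquals(1, source.match(/\bIS_NULL\(/g).length);  // \b restricts it to the real macro call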
