
deps: upgrade v8 to 3.18.4

v0.11.2-release
Ben Noordhuis, 12 years ago
commit 2f75785c01
100 changed files (changed lines in parentheses):

  1. deps/v8/.gitignore (1)
  2. deps/v8/AUTHORS (1)
  3. deps/v8/ChangeLog (32)
  4. deps/v8/build/README.txt (69)
  5. deps/v8/build/common.gypi (9)
  6. deps/v8/build/gyp_v8 (6)
  7. deps/v8/include/v8-profiler.h (5)
  8. deps/v8/include/v8.h (254)
  9. deps/v8/src/accessors.cc (7)
  10. deps/v8/src/accessors.h (2)
  11. deps/v8/src/api.cc (304)
  12. deps/v8/src/api.h (4)
  13. deps/v8/src/arm/assembler-arm.cc (6)
  14. deps/v8/src/arm/builtins-arm.cc (39)
  15. deps/v8/src/arm/code-stubs-arm.cc (873)
  16. deps/v8/src/arm/code-stubs-arm.h (163)
  17. deps/v8/src/arm/deoptimizer-arm.cc (8)
  18. deps/v8/src/arm/full-codegen-arm.cc (176)
  19. deps/v8/src/arm/ic-arm.cc (8)
  20. deps/v8/src/arm/lithium-arm.cc (22)
  21. deps/v8/src/arm/lithium-arm.h (16)
  22. deps/v8/src/arm/lithium-codegen-arm.cc (198)
  23. deps/v8/src/arm/lithium-codegen-arm.h (17)
  24. deps/v8/src/arm/macro-assembler-arm.cc (280)
  25. deps/v8/src/arm/macro-assembler-arm.h (99)
  26. deps/v8/src/arm/regexp-macro-assembler-arm.cc (16)
  27. deps/v8/src/arm/regexp-macro-assembler-arm.h (2)
  28. deps/v8/src/arm/simulator-arm.cc (10)
  29. deps/v8/src/arm/stub-cache-arm.cc (131)
  30. deps/v8/src/assembler.cc (17)
  31. deps/v8/src/assembler.h (4)
  32. deps/v8/src/ast.cc (18)
  33. deps/v8/src/ast.h (35)
  34. deps/v8/src/bignum-dtoa.cc (2)
  35. deps/v8/src/bootstrapper.cc (23)
  36. deps/v8/src/builtins.cc (14)
  37. deps/v8/src/builtins.h (4)
  38. deps/v8/src/cached-powers.cc (2)
  39. deps/v8/src/code-stubs-hydrogen.cc (253)
  40. deps/v8/src/code-stubs.cc (90)
  41. deps/v8/src/code-stubs.h (280)
  42. deps/v8/src/code.h (4)
  43. deps/v8/src/compiler.cc (24)
  44. deps/v8/src/compiler.h (10)
  45. deps/v8/src/contexts.h (2)
  46. deps/v8/src/conversions-inl.h (6)
  47. deps/v8/src/conversions.cc (7)
  48. deps/v8/src/counters.cc (33)
  49. deps/v8/src/counters.h (99)
  50. deps/v8/src/cpu-profiler.cc (18)
  51. deps/v8/src/cpu-profiler.h (4)
  52. deps/v8/src/d8.cc (382)
  53. deps/v8/src/d8.h (13)
  54. deps/v8/src/debug.cc (1)
  55. deps/v8/src/deoptimizer.h (1)
  56. deps/v8/src/dtoa.cc (2)
  57. deps/v8/src/execution.cc (9)
  58. deps/v8/src/execution.h (1)
  59. deps/v8/src/extensions/gc-extension.cc (13)
  60. deps/v8/src/extensions/gc-extension.h (4)
  61. deps/v8/src/factory.cc (12)
  62. deps/v8/src/factory.h (2)
  63. deps/v8/src/fixed-dtoa.cc (2)
  64. deps/v8/src/flag-definitions.h (11)
  65. deps/v8/src/full-codegen.cc (41)
  66. deps/v8/src/full-codegen.h (5)
  67. deps/v8/src/generator.js (18)
  68. deps/v8/src/global-handles.cc (176)
  69. deps/v8/src/global-handles.h (148)
  70. deps/v8/src/handles-inl.h (53)
  71. deps/v8/src/handles.cc (4)
  72. deps/v8/src/handles.h (20)
  73. deps/v8/src/heap-inl.h (30)
  74. deps/v8/src/heap-profiler.cc (5)
  75. deps/v8/src/heap-profiler.h (2)
  76. deps/v8/src/heap-snapshot-generator.cc (55)
  77. deps/v8/src/heap-snapshot-generator.h (1)
  78. deps/v8/src/heap.cc (73)
  79. deps/v8/src/heap.h (69)
  80. deps/v8/src/hydrogen-instructions.cc (106)
  81. deps/v8/src/hydrogen-instructions.h (200)
  82. deps/v8/src/hydrogen.cc (587)
  83. deps/v8/src/hydrogen.h (123)
  84. deps/v8/src/ia32/assembler-ia32-inl.h (16)
  85. deps/v8/src/ia32/assembler-ia32.cc (6)
  86. deps/v8/src/ia32/assembler-ia32.h (4)
  87. deps/v8/src/ia32/builtins-ia32.cc (36)
  88. deps/v8/src/ia32/code-stubs-ia32.cc (282)
  89. deps/v8/src/ia32/code-stubs-ia32.h (4)
  90. deps/v8/src/ia32/codegen-ia32.cc (2)
  91. deps/v8/src/ia32/deoptimizer-ia32.cc (8)
  92. deps/v8/src/ia32/full-codegen-ia32.cc (177)
  93. deps/v8/src/ia32/lithium-codegen-ia32.cc (246)
  94. deps/v8/src/ia32/lithium-codegen-ia32.h (17)
  95. deps/v8/src/ia32/lithium-ia32.cc (74)
  96. deps/v8/src/ia32/lithium-ia32.h (17)
  97. deps/v8/src/ia32/macro-assembler-ia32.cc (22)
  98. deps/v8/src/ia32/macro-assembler-ia32.h (2)
  99. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (20)
  100. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (3)

deps/v8/.gitignore (1)

@@ -19,6 +19,7 @@
 *~
 .cpplint-cache
 .d8_history
+bsuite
 d8
 d8_g
 shell

deps/v8/AUTHORS (1)

@@ -9,6 +9,7 @@ ARM Ltd.
 Hewlett-Packard Development Company, LP
 Igalia, S.L.
 Joyent, Inc.
+Bloomberg Finance L.P.
 Akinori MUSHA <knu@FreeBSD.org>
 Alexander Botero-Lowry <alexbl@FreeBSD.org>

deps/v8/ChangeLog (32)

@@ -1,3 +1,35 @@
+2013-04-26: Version 3.18.4
+
+        Added a preliminary API for ES6 ArrayBuffers
+
+        Replaced qsort with std::sort. (Chromium issue 2639)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-04-24: Version 3.18.3
+
+        Exposed the GC under a name that is less collision prone than
+        window.gc. (issue 2641)
+
+        Do not emit double values at their use sites. (Chromium issue 234101)
+
+        Added methods to allow resuming execution after calling
+        TerminateExecution(). (issue 2361)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-04-22: Version 3.18.2
+
+        OS::MemMove/OS::MemCopy: Don't call through to generated code when
+        size == 0 to avoid prefetching invalid memory (Chromium issue 233500)
+
+        Removed heap snapshot size limit. (Chromium issue 232305)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-04-18: Version 3.18.1
 
 Removed SCons related files and deprecated test suite configurations.

deps/v8/build/README.txt (69)

@@ -1,66 +1,9 @@
-This directory contains the V8 GYP files used to generate actual project files
-for different build systems.
-
-This is currently work in progress but this is expected to replace the SCons
-based build system.
-
-To use this a checkout of GYP is needed inside this directory. From the root of
-the V8 project do the following:
-
-$ svn co http://gyp.googlecode.com/svn/trunk build/gyp
-
-Note for the command lines below that Debug is the default configuration,
-so specifying that on the command lines is not required.
-
-To generate Makefiles on Linux:
--------------------------------
-
-$ build/gyp_v8
-
-This will build makefiles for ia32, x64 and the ARM simulator with names
-Makefile-ia32, Makefile-x64 and Makefile-armu respectively.
-
-To build and run for ia32 in debug and release version do:
-
-$ make -f Makefile-ia32
-$ out/Debug/shell
-$ make -f Makefile-ia32 BUILDTYPE=Release
-$ out/Release/shell
-
-Change the makefile to build and run for the other architectures.
-
-To generate Xcode project files on Mac OS:
-------------------------------------------
-
-$ build/gyp_v8
-
-This will make an Xcode project for the ia32 architecture. To build and run do:
-
-$ xcodebuild -project build/all.xcodeproj
-$ samples/build/Debug/shell
-$ xcodebuild -project build/all.xcodeproj -configuration Release
-$ samples/build/Release/shell
-
-To generate Visual Studio solution and project files on Windows:
-----------------------------------------------------------------
-
-On Windows an additional third party component is required. This is cygwin in
-the same version as is used by the Chromium project. This can be checked out
-from the Chromium repository. From the root of the V8 project do the following:
-
-> svn co http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844 third_party/cygwin
-
-To run GYP Python is required and it is recommended to use the same version as
-is used by the Chromium project. This can also be checked out from the Chromium
-repository. From the root of the V8 project do the following:
-
-> svn co http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111 third_party/python_26
-
-Now generate Visual Studio solution and project files for the ia32 architecture:
-
-> third_party\python_26\python build/gyp_v8
-
-Now open build\All.sln in Visual Studio.
+For build instructions, please refer to:
+
+https://code.google.com/p/v8/wiki/BuildingWithGYP
+
+TL;DR version on *nix:
+$ make dependencies      # Only needed once.
+$ make ia32.release -j8
+$ make ia32.release.check  # Optionally: run tests.

deps/v8/build/common.gypi (9)

@@ -454,6 +454,15 @@
     }],
     ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
        or OS=="android"', {
+      'cflags!': [
+        '-O2',
+        '-Os',
+      ],
+      'cflags': [
+        '-fdata-sections',
+        '-ffunction-sections',
+        '-O3',
+      ],
       'conditions': [
         [ 'gcc_version==44 and clang==0', {
           'cflags': [

deps/v8/build/gyp_v8 (6)

@@ -32,6 +32,7 @@
 import glob
 import os
+import platform
 import shlex
 import sys
@@ -43,9 +44,6 @@ if __name__ == '__main__':
   script_dir = os.path.dirname(__file__)
   v8_root = '.'
 
-  sys.path.insert(0, os.path.join(v8_root, 'tools'))
-  import utils
-
   sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
   import gyp
@@ -164,6 +162,6 @@ if __name__ == '__main__':
 
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
-  if utils.GuessOS() == 'linux':
+  if platform.system() == 'Linux':
     gyp_args.append('--generator-output=out')
   run_gyp(gyp_args)

deps/v8/include/v8-profiler.h (5)

@@ -554,6 +554,11 @@ class V8EXPORT HeapProfiler {
   /** Returns memory used for profiler internal data and snapshots. */
   size_t GetProfilerMemorySize();
 
+  /**
+   * Sets a RetainedObjectInfo for an object group (see V8::SetObjectGroupId).
+   */
+  void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+
  private:
   HeapProfiler();
  ~HeapProfiler();
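
Note: this hook pairs with the Isolate::SetObjectGroupId / SetReferenceFromGroup methods added to include/v8.h later in this diff. A minimal sketch of embedder usage, with hypothetical wrapper handles and group id, and assuming the isolate exposes a GetHeapProfiler() accessor in this V8 version:

    // Sketch: register two wrapper handles as one object group so the GC
    // treats them as live-or-dead together. Object groups are cleared after
    // every collection, so an embedder re-registers them in a GC prologue
    // callback. 'info' is an embedder-provided RetainedObjectInfo subclass.
    void RegisterWrapperGroup(v8::Isolate* isolate,
                              const v8::Persistent<v8::Value>& wrapper_a,
                              const v8::Persistent<v8::Value>& wrapper_b,
                              v8::RetainedObjectInfo* info) {
      v8::UniqueId group(reinterpret_cast<intptr_t>(&wrapper_a));  // any stable id
      isolate->SetObjectGroupId(wrapper_a, group);
      isolate->SetObjectGroupId(wrapper_b, group);
      // Optional: label the group in heap snapshots via the new profiler hook.
      isolate->GetHeapProfiler()->SetRetainedObjectInfo(group, info);
    }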

deps/v8/include/v8.h (254)

@@ -92,6 +92,14 @@
 #define V8_DEPRECATED(declarator) declarator
 #endif
 
+#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+#define V8_UNLIKELY(condition) __builtin_expect((condition), 0)
+#define V8_LIKELY(condition) __builtin_expect((condition), 1)
+#else
+#define V8_UNLIKELY(condition) (condition)
+#define V8_LIKELY(condition) (condition)
+#endif
+
 /**
  * The v8 JavaScript engine.
  */
@@ -145,6 +153,31 @@ class Object;
 }
 
+/**
+ * General purpose unique identifier.
+ */
+class UniqueId {
+ public:
+  explicit UniqueId(intptr_t data)
+      : data_(data) {}
+
+  bool operator==(const UniqueId& other) const {
+    return data_ == other.data_;
+  }
+
+  bool operator!=(const UniqueId& other) const {
+    return data_ != other.data_;
+  }
+
+  bool operator<(const UniqueId& other) const {
+    return data_ < other.data_;
+  }
+
+ private:
+  intptr_t data_;
+};
+
+
 // --- Weak Handles ---
@@ -376,6 +409,14 @@ template <class T> class Persistent : public Handle<T> {
   template <class S> V8_INLINE(Persistent(S* that)) : Handle<T>(that) { }
 
+  /**
+   * A constructor that creates a new global cell pointing to that. In contrast
+   * to the copy constructor, this creates a new persistent handle which needs
+   * to be separately disposed.
+   */
+  template <class S> V8_INLINE(Persistent(Isolate* isolate, Handle<S> that))
+      : Handle<T>(New(isolate, that)) { }
+
   /**
    * "Casts" a plain handle which is known to be a persistent handle
    * to a persistent handle.
@@ -1142,12 +1183,10 @@ class V8EXPORT String : public Primitive {
   int Utf8Length() const;
 
   /**
-   * A fast conservative check for non-ASCII characters. May
-   * return true even for ASCII strings, but if it returns
-   * false you can be sure that all characters are in the range
-   * 0-127.
+   * This function is no longer useful.
    */
-  bool MayContainNonAscii() const;
+  // TODO(dcarney): deprecate
+  V8_INLINE(bool MayContainNonAscii()) const { return true; }
 
   /**
    * Returns whether this string contains only one byte data.
@@ -1326,22 +1365,48 @@ class V8EXPORT String : public Primitive {
   V8_INLINE(static String* Cast(v8::Value* obj));
 
+  // TODO(dcarney): deprecate
   /**
    * Allocates a new string from either UTF-8 encoded or ASCII data.
    * The second parameter 'length' gives the buffer length. If omitted,
    * the function calls 'strlen' to determine the buffer length.
    */
-  static Local<String> New(const char* data, int length = -1);
+  V8_INLINE(static Local<String> New(const char* data, int length = -1));
 
+  // TODO(dcarney): deprecate
   /** Allocates a new string from 16-bit character codes.*/
-  static Local<String> New(const uint16_t* data, int length = -1);
+  V8_INLINE(static Local<String> New(const uint16_t* data, int length = -1));
 
+  // TODO(dcarney): deprecate
   /**
    * Creates an internalized string (historically called a "symbol",
    * not to be confused with ES6 symbols). Returns one if it exists already.
-   * TODO(rossberg): Deprecate me when the new string API is here.
    */
-  static Local<String> NewSymbol(const char* data, int length = -1);
+  V8_INLINE(static Local<String> NewSymbol(const char* data, int length = -1));
+
+  enum NewStringType {
+    kNormalString, kInternalizedString, kUndetectableString
+  };
+
+  /** Allocates a new string from UTF-8 data.*/
+  static Local<String> NewFromUtf8(Isolate* isolate,
+                                   const char* data,
+                                   NewStringType type = kNormalString,
+                                   int length = -1);
+
+  /** Allocates a new string from Latin-1 data.*/
+  static Local<String> NewFromOneByte(
+      Isolate* isolate,
+      const uint8_t* data,
+      NewStringType type = kNormalString,
+      int length = -1);
+
+  /** Allocates a new string from UTF-16 data.*/
+  static Local<String> NewFromTwoByte(
+      Isolate* isolate,
+      const uint16_t* data,
+      NewStringType type = kNormalString,
+      int length = -1);
 
   /**
    * Creates a new string by concatenating the left and the right strings
@@ -1396,11 +1461,15 @@ class V8EXPORT String : public Primitive {
    */
   bool CanMakeExternal();
 
+  // TODO(dcarney): deprecate
   /** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
-  static Local<String> NewUndetectable(const char* data, int length = -1);
+  V8_INLINE(
+      static Local<String> NewUndetectable(const char* data, int length = -1));
 
+  // TODO(dcarney): deprecate
   /** Creates an undetectable string from the supplied 16-bit character codes.*/
-  static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
+  V8_INLINE(static Local<String> NewUndetectable(
+      const uint16_t* data, int length = -1));
 
   /**
    * Converts an object to a UTF-8-encoded character array. Useful if
@@ -1940,6 +2009,43 @@ class V8EXPORT Function : public Object {
 };
 
+
+/**
+ * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
+ * This API is experimental and may change significantly.
+ */
+class V8EXPORT ArrayBuffer : public Object {
+ public:
+  /**
+   * Data length in bytes.
+   */
+  size_t ByteLength() const;
+  /**
+   * Raw pointer to the array buffer data
+   */
+  void* Data() const;
+
+  /**
+   * Create a new ArrayBuffer. Allocate |byte_length| bytes.
+   * Allocated memory will be owned by a created ArrayBuffer and
+   * will be deallocated when it is garbage-collected.
+   */
+  static Local<ArrayBuffer> New(size_t byte_length);
+
+  /**
+   * Create a new ArrayBuffer over an existing memory block.
+   * The memory block will not be reclaimed when a created ArrayBuffer
+   * is garbage-collected.
+   */
+  static Local<ArrayBuffer> New(void* data, size_t byte_length);
+
+  V8_INLINE(static ArrayBuffer* Cast(Value* obj));
+
+ private:
+  ArrayBuffer();
+  static void CheckCast(Value* obj);
+};
+
+
 /**
  * An instance of the built-in Date constructor (ECMA-262, 15.9).
  */
@@ -2953,7 +3059,8 @@ enum GCType {
 enum GCCallbackFlags {
   kNoGCCallbackFlags = 0,
-  kGCCallbackFlagCompacted = 1 << 0
+  kGCCallbackFlagCompacted = 1 << 0,
+  kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1
 };
 
 typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
@@ -3110,6 +3217,39 @@ class V8EXPORT Isolate {
   /** Returns the context that is on the top of the stack. */
   Local<Context> GetCurrentContext();
 
+  /**
+   * Allows the host application to group objects together. If one
+   * object in the group is alive, all objects in the group are alive.
+   * After each garbage collection, object groups are removed. It is
+   * intended to be used in the before-garbage-collection callback
+   * function, for instance to simulate DOM tree connections among JS
+   * wrapper objects. Object groups for all dependent handles need to
+   * be provided for kGCTypeMarkSweepCompact collections, for all other
+   * garbage collection types it is sufficient to provide object groups
+   * for partially dependent handles only.
+   */
+  void SetObjectGroupId(const Persistent<Value>& object,
+                        UniqueId id);
+
+  /**
+   * Allows the host application to declare implicit references from an object
+   * group to an object. If the objects of the object group are alive, the child
+   * object is alive too. After each garbage collection, all implicit references
+   * are removed. It is intended to be used in the before-garbage-collection
+   * callback function.
+   */
+  void SetReferenceFromGroup(UniqueId id,
+                             const Persistent<Value>& child);
+
+  /**
+   * Allows the host application to declare implicit references from an object
+   * to another object. If the parent object is alive, the child object is alive
+   * too. After each garbage collection, all implicit references are removed. It
+   * is intended to be used in the before-garbage-collection callback function.
+   */
+  void SetReference(const Persistent<Object>& parent,
+                    const Persistent<Value>& child);
+
  private:
   Isolate();
   Isolate(const Isolate&);
@@ -3514,6 +3654,8 @@ class V8EXPORT V8 {
    * for partially dependent handles only.
    * See v8-profiler.h for RetainedObjectInfo interface description.
    */
+  // TODO(marja): deprecate AddObjectGroup. Use Isolate::SetObjectGroupId and
+  // HeapProfiler::SetRetainedObjectInfo instead.
   static void AddObjectGroup(Persistent<Value>* objects,
                              size_t length,
                              RetainedObjectInfo* info = NULL);
@@ -3529,6 +3671,8 @@ class V8EXPORT V8 {
    * are removed. It is intended to be used in the before-garbage-collection
    * callback function.
    */
+  // TODO(marja): Deprecate AddImplicitReferences. Use
+  // Isolate::SetReferenceFromGroup instead.
   static void AddImplicitReferences(Persistent<Object> parent,
                                     Persistent<Value>* children,
                                     size_t length);
@@ -3675,6 +3819,24 @@ class V8EXPORT V8 {
    */
   static bool IsExecutionTerminating(Isolate* isolate = NULL);
 
+  /**
+   * Resume execution capability in the given isolate, whose execution
+   * was previously forcefully terminated using TerminateExecution().
+   *
+   * When execution is forcefully terminated using TerminateExecution(),
+   * the isolate can not resume execution until all JavaScript frames
+   * have propagated the uncatchable exception which is generated. This
+   * method allows the program embedding the engine to handle the
+   * termination event and resume execution capability, even if
+   * JavaScript frames remain on the stack.
+   *
+   * This method can be used by any thread even if that thread has not
+   * acquired the V8 lock with a Locker object.
+   *
+   * \param isolate The isolate in which to resume execution capability.
+   */
+  static void CancelTerminateExecution(Isolate* isolate);
+
   /**
    * Releases any resources used by v8 and stops any utility threads
    * that may be running. Note that disposing v8 is permanent, it
@@ -3785,20 +3947,29 @@ class V8EXPORT TryCatch {
   bool HasCaught() const;
 
   /**
-   * For certain types of exceptions, it makes no sense to continue
-   * execution.
-   *
-   * Currently, the only type of exception that can be caught by a
-   * TryCatch handler and for which it does not make sense to continue
-   * is termination exception. Such exceptions are thrown when the
-   * TerminateExecution methods are called to terminate a long-running
-   * script.
+   * For certain types of exceptions, it makes no sense to continue execution.
    *
-   * If CanContinue returns false, the correct action is to perform
-   * any C++ cleanup needed and then return.
+   * If CanContinue returns false, the correct action is to perform any C++
+   * cleanup needed and then return. If CanContinue returns false and
+   * HasTerminated returns true, it is possible to call
+   * CancelTerminateExecution in order to continue calling into the engine.
    */
   bool CanContinue() const;
 
+  /**
+   * Returns true if an exception has been caught due to script execution
+   * being terminated.
+   *
+   * There is no JavaScript representation of an execution termination
+   * exception. Such exceptions are thrown when the TerminateExecution
+   * methods are called to terminate a long-running script.
+   *
+   * If such an exception has been thrown, HasTerminated will return true,
+   * indicating that it is possible to call CancelTerminateExecution in order
+   * to continue calling into the engine.
+   */
+  bool HasTerminated() const;
+
   /**
    * Throws the exception caught by this TryCatch in a way that avoids
    * it being caught again by this same TryCatch. As with ThrowException
@@ -3874,6 +4045,7 @@ class V8EXPORT TryCatch {
   bool can_continue_ : 1;
   bool capture_message_ : 1;
   bool rethrow_ : 1;
+  bool has_terminated_ : 1;
 
   friend class v8::internal::Isolate;
 };
@@ -4371,7 +4543,7 @@ class Internals {
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
   static const int kContextHeaderSize = 2 * kApiPointerSize;
-  static const int kContextEmbedderDataIndex = 55;
+  static const int kContextEmbedderDataIndex = 56;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kStringEncodingMask = 0x4;
   static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -4813,7 +4985,7 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
   O* obj = *reinterpret_cast<O**>(this);
   // Fast path: If the object is a plain JSObject, which is the common case, we
   // know where to find the internal fields and can return the value directly.
-  if (I::GetInstanceType(obj) == I::kJSObjectType) {
+  if (V8_LIKELY(I::GetInstanceType(obj) == I::kJSObjectType)) {
     int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
     return I::ReadField<void*>(obj, offset);
   }
@@ -4839,6 +5011,32 @@ Local<String> String::Empty(Isolate* isolate) {
 }
 
+
+Local<String> String::New(const char* data, int length) {
+  return NewFromUtf8(Isolate::GetCurrent(), data, kNormalString, length);
+}
+
+
+Local<String> String::New(const uint16_t* data, int length) {
+  return NewFromTwoByte(Isolate::GetCurrent(), data, kNormalString, length);
+}
+
+
+Local<String> String::NewSymbol(const char* data, int length) {
+  return NewFromUtf8(Isolate::GetCurrent(), data, kInternalizedString, length);
+}
+
+
+Local<String> String::NewUndetectable(const char* data, int length) {
+  return NewFromUtf8(Isolate::GetCurrent(), data, kUndetectableString, length);
+}
+
+
+Local<String> String::NewUndetectable(const uint16_t* data, int length) {
+  return NewFromTwoByte(
+      Isolate::GetCurrent(), data, kUndetectableString, length);
+}
+
+
 String::ExternalStringResource* String::GetExternalStringResource() const {
   typedef internal::Object O;
   typedef internal::Internals I;
@@ -5018,6 +5216,14 @@ Array* Array::Cast(v8::Value* value) {
 }
 
+
+ArrayBuffer* ArrayBuffer::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<ArrayBuffer*>(value);
+}
+
+
 Function* Function::Cast(v8::Value* value) {
 #ifdef V8_ENABLE_CHECKS
   CheckCast(value);
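
Note: a minimal sketch of how an embedder might call the APIs declared above (assumes an entered isolate and context on a V8 3.18-era build; all names here are illustrative, not part of this diff):

    #include <v8.h>

    void Demo(v8::Isolate* isolate) {
      v8::HandleScope scope;

      // New-style string creation: explicit isolate, encoding and string type.
      v8::Local<v8::String> tag = v8::String::NewFromUtf8(
          isolate, "example", v8::String::kInternalizedString);

      // Experimental ArrayBuffer API: a V8-owned backing store...
      v8::Local<v8::ArrayBuffer> owned = v8::ArrayBuffer::New(1024);
      size_t size = owned->ByteLength();  // 1024
      void* data = owned->Data();         // freed when the buffer is GC'd

      // ...or an embedder-owned block that the GC will not reclaim.
      static char block[256];
      v8::Local<v8::ArrayBuffer> external =
          v8::ArrayBuffer::New(block, sizeof(block));

      (void)tag; (void)size; (void)data; (void)external;
    }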

deps/v8/src/accessors.cc (7)

@@ -441,6 +441,13 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
 //
 
+Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) {
+  Isolate* isolate = Isolate::Current();
+  CALL_HEAP_FUNCTION(
+      isolate, Accessors::FunctionGetPrototype(*object, 0), Object);
+}
+
+
 MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
   Isolate* isolate = Isolate::Current();
   JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);

deps/v8/src/accessors.h (2)

@@ -79,6 +79,8 @@ class Accessors : public AllStatic {
   // Accessor functions called directly from the runtime system.
   MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
                                                            void*);
+  static Handle<Object> FunctionGetPrototype(Handle<Object> object);
+
   MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
                                                            Object* value,
                                                            void*);

deps/v8/src/api.cc (304)

@@ -27,8 +27,8 @@
 #include "api.h"
 
-#include <math.h>  // For isnan.
 #include <string.h>  // For memcpy, strlen.
+#include <cmath>  // For isnan.
 #include "../include/v8-debug.h"
 #include "../include/v8-profiler.h"
 #include "../include/v8-testing.h"
@@ -52,6 +52,7 @@
 #include "profile-generator-inl.h"
 #include "property-details.h"
 #include "property.h"
+#include "runtime.h"
 #include "runtime-profiler.h"
 #include "scanner-character-streams.h"
 #include "snapshot.h"
@@ -65,9 +66,7 @@
 #define ENTER_V8(isolate)                                        \
   ASSERT((isolate)->IsInitialized());                            \
-  i::VMState __state__((isolate), i::OTHER)
-#define LEAVE_V8(isolate) \
-  i::VMState __state__((isolate), i::EXTERNAL)
+  i::VMState<i::OTHER> __state__((isolate))
 
 namespace v8 {
@@ -131,7 +130,7 @@ static void DefaultFatalErrorHandler(const char* location,
                                      const char* message) {
   i::Isolate* isolate = i::Isolate::Current();
   if (isolate->IsInitialized()) {
-    i::VMState __state__(isolate, i::OTHER);
+    i::VMState<i::OTHER> state(isolate);
     API_Fatal(location, message);
   } else {
     API_Fatal(location, message);
@@ -216,14 +215,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
   i::V8::SetFatalError();
   FatalErrorCallback callback = GetFatalErrorHandler();
   const char* message = "Allocation failed - process out of memory";
-  {
-    if (isolate->IsInitialized()) {
-      LEAVE_V8(isolate);
-      callback(location, message);
-    } else {
-      callback(location, message);
-    }
-  }
+  callback(location, message);
   // If the callback returns, we stop execution.
   UNREACHABLE();
 }
@@ -1909,7 +1901,8 @@ v8::TryCatch::TryCatch()
       is_verbose_(false),
       can_continue_(true),
       capture_message_(true),
-      rethrow_(false) {
+      rethrow_(false),
+      has_terminated_(false) {
   isolate_->RegisterTryCatchHandler(this);
 }
@@ -1937,6 +1930,11 @@ bool v8::TryCatch::CanContinue() const {
 }
 
+
+bool v8::TryCatch::HasTerminated() const {
+  return has_terminated_;
+}
+
 v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
   if (!HasCaught()) return v8::Local<v8::Value>();
   rethrow_ = true;
@@ -2748,6 +2746,15 @@ void v8::Array::CheckCast(Value* that) {
 }
 
+
+void v8::ArrayBuffer::CheckCast(Value* that) {
+  if (IsDeadCheck(i::Isolate::Current(), "v8::ArrayBuffer::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsJSArrayBuffer(),
+           "v8::ArrayBuffer::Cast()",
+           "Could not convert to ArrayBuffer");
+}
+
+
 void v8::Date::CheckCast(v8::Value* that) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
@@ -2984,7 +2991,7 @@ bool Value::StrictEquals(Handle<Value> that) const {
     double x = obj->Number();
     double y = other->Number();
     // Must check explicitly for NaN:s on Windows, but -0 works fine.
-    return x == y && !isnan(x) && !isnan(y);
+    return x == y && !std::isnan(x) && !std::isnan(y);
   } else if (*obj == *other) {  // Also covers Booleans.
     return true;
   } else if (obj->IsSmi()) {
@@ -4048,14 +4055,6 @@ int String::Length() const {
   return str->length();
 }
 
-
-bool String::MayContainNonAscii() const {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
-    return false;
-  }
-  return !str->HasOnlyAsciiChars();
-}
 
 bool String::IsOneByte() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
@@ -4509,25 +4508,6 @@ int String::WriteAscii(char* buffer,
     FlattenString(str);  // Flatten the string for efficiency.
   }
 
-  if (str->HasOnlyAsciiChars()) {
-    // WriteToFlat is faster than using the StringCharacterStream.
-    if (length == -1) length = str->length() + 1;
-    int len = i::Min(length, str->length() - start);
-    i::String::WriteToFlat(*str,
-                           reinterpret_cast<uint8_t*>(buffer),
-                           start,
-                           start + len);
-    if (!(options & PRESERVE_ASCII_NULL)) {
-      for (int i = 0; i < len; i++) {
-        if (buffer[i] == '\0') buffer[i] = ' ';
-      }
-    }
-    if (!(options & NO_NULL_TERMINATION) && length > len) {
-      buffer[len] = '\0';
-    }
-    return len;
-  }
-
   int end = length;
   if ((length == -1) || (length > str->length() - start)) {
     end = str->length() - start;
@@ -5283,78 +5263,131 @@ Local<String> v8::String::Empty() {
 }
 
-Local<String> v8::String::New(const char* data, int length) {
-  i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::New()");
-  LOG_API(isolate, "String::New(char)");
-  if (length == 0) return Empty();
-  ENTER_V8(isolate);
-  if (length == -1) length = i::StrLength(data);
-  i::Handle<i::String> result =
-      isolate->factory()->NewStringFromUtf8(
-          i::Vector<const char>(data, length));
-  return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
-  i::Handle<i::String> left_string = Utils::OpenHandle(*left);
-  i::Isolate* isolate = left_string->GetIsolate();
-  EnsureInitializedForIsolate(isolate, "v8::String::New()");
-  LOG_API(isolate, "String::New(char)");
-  ENTER_V8(isolate);
-  i::Handle<i::String> right_string = Utils::OpenHandle(*right);
-  i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
-                                                                  right_string);
-  return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewUndetectable(const char* data, int length) {
-  i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
-  LOG_API(isolate, "String::NewUndetectable(char)");
-  ENTER_V8(isolate);
-  if (length == -1) length = i::StrLength(data);
-  i::Handle<i::String> result =
-      isolate->factory()->NewStringFromUtf8(
-          i::Vector<const char>(data, length));
-  result->MarkAsUndetectable();
-  return Utils::ToLocal(result);
-}
-
-
-static int TwoByteStringLength(const uint16_t* data) {
-  int length = 0;
-  while (data[length] != '\0') length++;
-  return length;
-}
-
-
-Local<String> v8::String::New(const uint16_t* data, int length) {
-  i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::New()");
-  LOG_API(isolate, "String::New(uint16_)");
-  if (length == 0) return Empty();
-  ENTER_V8(isolate);
-  if (length == -1) length = TwoByteStringLength(data);
-  i::Handle<i::String> result =
-      isolate->factory()->NewStringFromTwoByte(
-          i::Vector<const uint16_t>(data, length));
-  return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
-  i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
-  LOG_API(isolate, "String::NewUndetectable(uint16_)");
-  ENTER_V8(isolate);
-  if (length == -1) length = TwoByteStringLength(data);
-  i::Handle<i::String> result =
-      isolate->factory()->NewStringFromTwoByte(
-          i::Vector<const uint16_t>(data, length));
-  result->MarkAsUndetectable();
-  return Utils::ToLocal(result);
-}
+// anonymous namespace for string creation helper functions
+namespace {
+
+inline int StringLength(const char* string) {
+  return i::StrLength(string);
+}
+
+
+inline int StringLength(const uint8_t* string) {
+  return i::StrLength(reinterpret_cast<const char*>(string));
+}
+
+
+inline int StringLength(const uint16_t* string) {
+  int length = 0;
+  while (string[length] != '\0')
+    length++;
+  return length;
+}
+
+
+inline i::Handle<i::String> NewString(i::Factory* factory,
+                                      String::NewStringType type,
+                                      i::Vector<const char> string) {
+  if (type == String::kInternalizedString) {
+    return factory->InternalizeUtf8String(string);
+  }
+  return factory->NewStringFromUtf8(string);
+}
+
+
+inline i::Handle<i::String> NewString(i::Factory* factory,
+                                      String::NewStringType type,
+                                      i::Vector<const uint8_t> string) {
+  if (type == String::kInternalizedString) {
+    return factory->InternalizeOneByteString(string);
+  }
+  return factory->NewStringFromOneByte(string);
+}
+
+
+inline i::Handle<i::String> NewString(i::Factory* factory,
+                                      String::NewStringType type,
+                                      i::Vector<const uint16_t> string) {
+  if (type == String::kInternalizedString) {
+    return factory->InternalizeTwoByteString(string);
+  }
+  return factory->NewStringFromTwoByte(string);
+}
+
+
+template<typename Char>
+inline Local<String> NewString(Isolate* v8_isolate,
+                               const char* location,
+                               const char* env,
+                               const Char* data,
+                               String::NewStringType type,
+                               int length) {
+  i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
+  EnsureInitializedForIsolate(isolate, location);
+  LOG_API(isolate, env);
+  if (length == 0 && type != String::kUndetectableString) {
+    return String::Empty();
+  }
+  ENTER_V8(isolate);
+  if (length == -1) length = StringLength(data);
+  i::Handle<i::String> result = NewString(
+      isolate->factory(), type, i::Vector<const Char>(data, length));
+  if (type == String::kUndetectableString) {
+    result->MarkAsUndetectable();
+  }
+  return Utils::ToLocal(result);
+}
+
+}  // anonymous namespace
+
+
+Local<String> String::NewFromUtf8(Isolate* isolate,
+                                  const char* data,
+                                  NewStringType type,
+                                  int length) {
+  return NewString(isolate,
+                   "v8::String::NewFromUtf8()",
+                   "String::NewFromUtf8",
+                   data,
+                   type,
+                   length);
+}
+
+
+Local<String> String::NewFromOneByte(Isolate* isolate,
+                                     const uint8_t* data,
+                                     NewStringType type,
+                                     int length) {
+  return NewString(isolate,
+                   "v8::String::NewFromOneByte()",
+                   "String::NewFromOneByte",
+                   data,
+                   type,
+                   length);
+}
+
+
+Local<String> String::NewFromTwoByte(Isolate* isolate,
+                                     const uint16_t* data,
+                                     NewStringType type,
+                                     int length) {
+  return NewString(isolate,
+                   "v8::String::NewFromTwoByte()",
+                   "String::NewFromTwoByte",
+                   data,
+                   type,
+                   length);
+}
+
+
+Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
+  i::Handle<i::String> left_string = Utils::OpenHandle(*left);
+  i::Isolate* isolate = left_string->GetIsolate();
+  EnsureInitializedForIsolate(isolate, "v8::String::New()");
+  LOG_API(isolate, "String::New(char)");
+  ENTER_V8(isolate);
+  i::Handle<i::String> right_string = Utils::OpenHandle(*right);
+  i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
+                                                                  right_string);
+  return Utils::ToLocal(result);
+}
@@ -5568,7 +5601,7 @@ Local<v8::Value> v8::Date::New(double time) {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::Date::New()");
   LOG_API(isolate, "Date::New");
-  if (isnan(time)) {
+  if (std::isnan(time)) {
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
     time = i::OS::nan_value();
   }
@@ -5733,15 +5766,43 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
 }
 
-Local<String> v8::String::NewSymbol(const char* data, int length) {
-  i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()");
-  LOG_API(isolate, "String::NewSymbol(char)");
-  ENTER_V8(isolate);
-  if (length == -1) length = i::StrLength(data);
-  i::Handle<i::String> result = isolate->factory()->InternalizeUtf8String(
-      i::Vector<const char>(data, length));
-  return Utils::ToLocal(result);
-}
+size_t v8::ArrayBuffer::ByteLength() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
+  i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+  return static_cast<size_t>(obj->byte_length()->Number());
+}
+
+
+void* v8::ArrayBuffer::Data() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  if (IsDeadCheck(isolate, "v8::ArrayBuffer::Data()")) return 0;
+  i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+  return obj->backing_store();
+}
+
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(size_t)");
+  LOG_API(isolate, "v8::ArrayBuffer::New(size_t)");
+  ENTER_V8(isolate);
+  i::Handle<i::JSArrayBuffer> obj =
+      isolate->factory()->NewJSArrayBuffer();
+  i::Runtime::SetupArrayBufferAllocatingData(isolate, obj, byte_length);
+  return Utils::ToLocal(obj);
+}
+
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(void*, size_t)");
+  LOG_API(isolate, "v8::ArrayBuffer::New(void*, size_t)");
+  ENTER_V8(isolate);
+  i::Handle<i::JSArrayBuffer> obj =
+      isolate->factory()->NewJSArrayBuffer();
+  i::Runtime::SetupArrayBuffer(isolate, obj, data, byte_length);
+  return Utils::ToLocal(obj);
+}
@@ -5772,7 +5833,7 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
 Local<Number> v8::Number::New(double value) {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::Number::New()");
-  if (isnan(value)) {
+  if (std::isnan(value)) {
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
     value = i::OS::nan_value();
   }
@@ -5981,6 +6042,31 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() {
 }
 
+
+void Isolate::SetObjectGroupId(const Persistent<Value>& object,
+                               UniqueId id) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+  internal_isolate->global_handles()->SetObjectGroupId(
+      reinterpret_cast<i::Object**>(*object), id);
+}
+
+
+void Isolate::SetReferenceFromGroup(UniqueId id,
+                                    const Persistent<Value>& object) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+  internal_isolate->global_handles()
+      ->SetReferenceFromGroup(id, reinterpret_cast<i::Object**>(*object));
+}
+
+
+void Isolate::SetReference(const Persistent<Object>& parent,
+                           const Persistent<Value>& child) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+  internal_isolate->global_handles()->SetReference(
+      i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
+      reinterpret_cast<i::Object**>(*child));
+}
+
+
 void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
@@ -6116,6 +6202,12 @@ bool V8::IsExecutionTerminating(Isolate* isolate) {
 }
 
+
+void V8::CancelTerminateExecution(Isolate* isolate) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i_isolate->stack_guard()->CancelTerminateExecution();
+}
+
+
 Isolate* Isolate::GetCurrent() {
   i::Isolate* isolate = i::Isolate::UncheckedCurrent();
   return reinterpret_cast<Isolate*>(isolate);
@@ -7174,6 +7266,12 @@ size_t HeapProfiler::GetProfilerMemorySize() {
 }
 
+
+void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
+                                         RetainedObjectInfo* info) {
+  reinterpret_cast<i::HeapProfiler*>(this)->SetRetainedObjectInfo(id, info);
+}
+
+
 v8::Testing::StressType internal::Testing::stress_type_ =
     v8::Testing::kStressTypeOpt;
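
Note: a sketch of the resume-after-termination flow these implementations enable (hypothetical embedder code; assumes a compiled script in an entered context and that another thread may call V8::TerminateExecution on this isolate):

    bool RunWithTerminationRecovery(v8::Isolate* isolate,
                                    v8::Handle<v8::Script> script) {
      v8::TryCatch try_catch;
      v8::Handle<v8::Value> result = script->Run();
      if (!result.IsEmpty()) return true;
      if (!try_catch.CanContinue() && try_catch.HasTerminated()) {
        // Execution was forcefully terminated. Without this call the isolate
        // keeps propagating the uncatchable termination exception; the new
        // API re-arms execution even though JS frames were unwound.
        v8::V8::CancelTerminateExecution(isolate);
      }
      return false;  // termination or an ordinary JS exception
    }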

deps/v8/src/api.h (4)

@@ -170,6 +170,7 @@ class RegisteredExtension {
   V(RegExp, JSRegExp)                          \
   V(Object, JSObject)                          \
   V(Array, JSArray)                            \
+  V(ArrayBuffer, JSArrayBuffer)                \
   V(String, String)                            \
   V(Symbol, Symbol)                            \
   V(Script, Object)                            \
@@ -205,6 +206,8 @@ class Utils {
       v8::internal::Handle<v8::internal::JSObject> obj);
   static inline Local<Array> ToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
+  static inline Local<ArrayBuffer> ToLocal(
+      v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
   static inline Local<Message> MessageToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<StackTrace> StackTraceToLocal(
@@ -275,6 +278,7 @@ MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
 MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
 MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
 MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)

deps/v8/src/arm/assembler-arm.cc (6)

@@ -305,10 +305,14 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
 // See assembler-arm-inl.h for inlined constructors
 
 Operand::Operand(Handle<Object> handle) {
+#ifdef DEBUG
+  Isolate* isolate = Isolate::Current();
+#endif
+  ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address");
   rm_ = no_reg;
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
-  ASSERT(!HEAP->InNewSpace(obj));
+  ASSERT(!isolate->heap()->InNewSpace(obj));
   if (obj->IsHeapObject()) {
     imm32_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;

deps/v8/src/arm/builtins-arm.cc (39)

@@ -306,8 +306,7 @@ static void AllocateJSArray(MacroAssembler* masm,
 // entering the generic code. In both cases argc in r0 needs to be preserved.
 // Both registers are preserved by this code so no need to differentiate between
 // construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
-                            Label* call_generic_code) {
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
   Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
       has_non_smi_element, finish, cant_transition_map, not_double;
@@ -532,7 +531,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
 }
 
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
@@ -550,40 +549,7 @@ void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
     __ Assert(ne, "Unexpected initial map for Array function");
     __ CompareObjectType(r3, r3, r4, MAP_TYPE);
     __ Assert(eq, "Unexpected initial map for Array function");
-
-    if (FLAG_optimize_constructed_arrays) {
-      // We should either have undefined in r2 or a valid jsglobalpropertycell
-      Label okay_here;
-      Handle<Object> undefined_sentinel(
-          masm->isolate()->heap()->undefined_value(), masm->isolate());
-      Handle<Map> global_property_cell_map(
-          masm->isolate()->heap()->global_property_cell_map());
-      __ cmp(r2, Operand(undefined_sentinel));
-      __ b(eq, &okay_here);
-      __ ldr(r3, FieldMemOperand(r2, 0));
-      __ cmp(r3, Operand(global_property_cell_map));
-      __ Assert(eq, "Expected property cell in register ebx");
-      __ bind(&okay_here);
-    }
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    Label not_zero_case, not_one_case;
-    __ tst(r0, r0);
-    __ b(ne, &not_zero_case);
-    ArrayNoArgumentConstructorStub no_argument_stub;
-    __ TailCallStub(&no_argument_stub);
-
-    __ bind(&not_zero_case);
-    __ cmp(r0, Operand(1));
-    __ b(gt, &not_one_case);
-    ArraySingleArgumentConstructorStub single_argument_stub;
-    __ TailCallStub(&single_argument_stub);
-
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub n_argument_stub;
-    __ TailCallStub(&n_argument_stub);
-  } else {
   Label generic_constructor;
   // Run the native code for the Array function called as a constructor.
   ArrayNativeCode(masm, &generic_constructor);
@@ -595,7 +561,6 @@ void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
       masm->isolate()->builtins()->JSConstructStubGeneric();
   __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-  }
 }
 
 
 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {

deps/v8/src/arm/code-stubs-arm.cc (873)

File diff suppressed because it is too large

163
deps/v8/src/arm/code-stubs-arm.h

@ -34,6 +34,9 @@ namespace v8 {
namespace internal { namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
// Compute a transcendental math function natively, or call the // Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function. // TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub { class TranscendentalCacheStub: public PlatformCodeStub {
@ -469,34 +472,14 @@ class RecordWriteStub: public PlatformCodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) { if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time. masm->SaveFPRegs(sp, scratch0_);
ASSERT(!Serializer::enabled());
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
// Save all VFP registers except d0.
// TODO(hans): We should probably save d0 too. And maybe use vstm.
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
} }
} }
inline void RestoreCallerSaveRegisters(MacroAssembler*masm, inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) { SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) { if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time. masm->RestoreFPRegs(sp, scratch0_);
ASSERT(!Serializer::enabled());
// Restore all VFP registers except d0.
// TODO(hans): We should probably restore d0 too. And maybe use vldm.
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
masm->add(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
} }
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
} }
@ -608,142 +591,6 @@ class DirectCEntryStub: public PlatformCodeStub {
}; };
class FloatingPointHelper : public AllStatic {
public:
enum Destination {
kVFPRegisters,
kCoreRegisters
};
// Loads smis from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values ends up
// either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
// floating point registers VFP3 must be supported. If core registers are
// requested when VFP3 is supported d6 and d7 will be scratched.
static void LoadSmis(MacroAssembler* masm,
Destination destination,
Register scratch1,
Register scratch2);
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
static void ConvertNumberToInt32(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
Label* not_int32);
// Converts the integer (untagged smi) in |int_scratch| to a double, storing
// the result either in |double_dst| or |dst2:dst1|, depending on
// |destination|.
// Warning: The value in |int_scratch| will be changed in the process!
static void ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
DwVfpRegister double_dst,
Register dst1,
Register dst2,
Register scratch2,
SwVfpRegister single_scratch);
// Load the number from object into double_dst in the double format.
// Control will jump to not_int32 if the value cannot be exactly represented
// by a 32-bit integer.
// Floating point value in the 32-bit integer range that are not exact integer
// won't be loaded.
static void LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DwVfpRegister double_dst,
DwVfpRegister double_scratch,
Register dst1,
Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
SwVfpRegister single_scratch,
Label* not_int32);
// Loads the number from object into dst as a 32-bit integer.
// Control will jump to not_int32 if the object cannot be exactly represented
// by a 32-bit integer.
// Floating point value in the 32-bit integer range that are not exact integer
// won't be converted.
// scratch3 is not used when VFP3 is supported.
static void LoadNumberAsInt32(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch0,
DwVfpRegister double_scratch1,
Label* not_int32);
// Generate non VFP3 code to check if a double can be exactly represented by a
// 32-bit integer. This does not check for 0 or -0, which need
// to be checked for separately.
// Control jumps to not_int32 if the value is not a 32-bit integer, and falls
// through otherwise.
// src1 and src2 will be cloberred.
//
// Expected input:
// - src1: higher (exponent) part of the double value.
// - src2: lower (mantissa) part of the double value.
// Output status:
// - dst: 32 higher bits of the mantissa. (mantissa[51:20])
// - src2: contains 1.
// - other registers are clobbered.
static void DoubleIs32BitInteger(MacroAssembler* masm,
Register src1,
Register src2,
Register dst,
Register scratch,
Label* not_int32);
// Generates code to call a C function to do a double operation using core
// registers. (Used when VFP3 is not supported.)
// This code never falls through, but returns with a heap number containing
// the result in r0.
// Register heapnumber_result must be a heap number in which the
// result of the operation will be stored.
// Requires the following layout on entry:
// r0: Left value (least significant part of mantissa).
// r1: Left value (sign, exponent, top of mantissa).
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
static void CallCCodeForDoubleOperation(MacroAssembler* masm,
Token::Value op,
Register heap_number_result,
Register scratch);
// Loads the number from |object| into floating point registers.
// Depending on |destination| the value ends up either in |dst| or
// in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
// must be supported. If kCoreRegisters are requested and VFP3 is
// supported, |dst| will be scratched. If |object| is neither smi nor
// heap number, |not_number| is jumped to with |object| still intact.
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
DwVfpRegister dst,
Register dst1,
Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number);
};
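The helpers above assume the pre-VFP3 (soft-float) register convention. As a
point of reference, the split of a double into the core-register pairs named
in CallCCodeForDoubleOperation can be sketched in portable C++ (illustrative
only, not V8 code; RegisterPair and both function names are invented here):

    #include <cstdint>
    #include <cstring>

    struct RegisterPair { uint32_t lo, hi; };  // lo -> r0/r2, hi -> r1/r3

    // On little-endian ARM the low word holds the least significant half of
    // the mantissa; the high word holds sign, exponent, and mantissa[51:20],
    // which is exactly where DoubleIs32BitInteger looks for them.
    RegisterPair SplitDouble(double d) {
      RegisterPair p;
      std::memcpy(&p, &d, sizeof d);  // reinterpret the 64-bit pattern
      return p;
    }

    uint32_t ExponentBits(const RegisterPair& p) {
      return (p.hi >> 20) & 0x7FF;  // the 11 biased exponent bits
    }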
class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

8
deps/v8/src/arm/deoptimizer-arm.cc

@@ -604,8 +604,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
-  Isolate* isolate = masm()->isolate();
-
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
@@ -665,12 +663,12 @@ void Deoptimizer::EntryGenerator::Generate() {
   // r2: bailout id already loaded.
   // r3: code address or 0 already loaded.
   __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
-  __ mov(r5, Operand(ExternalReference::isolate_address()));
+  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
   __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
   {
     AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
   }
   // Preserve "deoptimizer" object in register r0 and get the input
@@ -731,7 +729,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   {
     AllowExternalCallThatCantCauseGC scope(masm());
     __ CallCFunction(
-        ExternalReference::compute_output_frames_function(isolate), 1);
+        ExternalReference::compute_output_frames_function(isolate()), 1);
   }
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).

176
deps/v8/src/arm/full-codegen-arm.cc

@@ -1922,6 +1922,158 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
 }
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
case Yield::INITIAL:
case Yield::SUSPEND: {
VisitForStackValue(expr->generator_object());
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ b(ne, &resume);
__ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
// TODO(wingo): Box into { value: VALUE, done: false }.
}
EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
break;
}
case Yield::FINAL: {
VisitForAccumulatorValue(expr->generator_object());
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
__ pop(result_register());
// TODO(wingo): Box into { value: VALUE, done: true }.
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
EmitReturnSequence();
break;
}
case Yield::DELEGATING:
UNIMPLEMENTED();
}
}
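The suspend paths above leave the result of Runtime::kSuspendJSGeneratorObject
in the accumulator: the hole value signals an actual suspension (fall through
to the return sequence), while anything else means the generator was resumed
and execution continues at the resume label. EmitGeneratorResume below also
depends on the continuation field's sign convention, which its STATIC_ASSERTs
pin down: kGeneratorExecuting and kGeneratorClosed are non-positive, so only a
positive (smi-tagged) code offset is resumable. A minimal sketch of that
convention (the concrete values here are invented; only the sign rule comes
from the asserts):

    // Hypothetical mirror of JSGeneratorObject's continuation field.
    enum Continuation : int {
      kClosed = 0,      // assumed value; the code only guarantees <= 0
      kExecuting = -1,  // assumed value; the code only guarantees <= 0
      // any value > 0 is a smi-tagged offset into the generator's code
    };

    bool IsResumable(int continuation) { return continuation > 0; }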
void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in r0, and is ultimately read by the resumed generator, as
// if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. r1
// will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(r1);
// Check generator state.
Label wrong_state, done;
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
__ cmp(r3, Operand(Smi::FromInt(0)));
__ b(le, &wrong_state);
// Load suspended function and context.
__ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
// Load receiver and store as the first argument.
__ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ push(r2);
// Push holes for the rest of the arguments to the generator function.
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
__ sub(r3, r3, Operand(1), SetCC);
__ b(mi, &push_frame);
__ push(r2);
__ jmp(&push_argument_holes);
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
Label resume_frame;
__ bind(&push_frame);
__ bl(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
__ push(lr); // Return address.
__ push(fp); // Caller's frame pointer.
__ mov(fp, sp);
__ push(cp); // Callee's context.
__ push(r4); // Callee's JS Function.
// Load the operand stack size.
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
__ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ SmiUntag(r3);
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
if (resume_mode == JSGeneratorObject::SEND) {
Label slow_resume;
__ cmp(r3, Operand(0));
__ b(ne, &slow_resume);
__ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
__ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
__ SmiUntag(r2);
__ add(r3, r3, r2);
__ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
__ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
__ Jump(r3);
__ bind(&slow_resume);
}
// Otherwise, we push holes for the operand stack and call the runtime to fix
// up the stack and the handlers.
Label push_operand_holes, call_resume;
__ bind(&push_operand_holes);
__ sub(r3, r3, Operand(1), SetCC);
__ b(mi, &call_resume);
__ push(r2);
__ b(&push_operand_holes);
__ bind(&call_resume);
__ push(r1);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(r1);
__ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
}
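When the operand stack is empty and the resume is a plain send, the code above
jumps straight back into the generator: it loads the code entry, untags the
saved continuation offset, and adds the two. Assuming the 32-bit smi encoding
(integer shifted left by one, tag bit zero), the address computation reduces
to (a sketch, not V8 code):

    #include <cstdint>

    uintptr_t ResumeAddress(uintptr_t code_entry, int32_t continuation_smi) {
      return code_entry + (continuation_smi >> 1);  // SmiUntag, then add
    }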
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
  SetSourcePosition(prop->position());
  Literal* key = prop->key()->AsLiteral();
@@ -4383,28 +4535,22 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  EqualityKind kind = expr->op() == Token::EQ_STRICT
-      ? kStrictEquality : kNonStrictEquality;
-  if (kind == kStrictEquality) {
   Heap::RootListIndex nil_value = nil == kNullValue ?
       Heap::kNullValueRootIndex :
       Heap::kUndefinedValueRootIndex;
   __ LoadRoot(r1, nil_value);
   __ cmp(r0, r1);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(eq, if_true, if_false, fall_through);
   } else {
-    Heap::RootListIndex other_nil_value = nil == kNullValue ?
-        Heap::kUndefinedValueRootIndex :
-        Heap::kNullValueRootIndex;
-    __ b(eq, if_true);
-    __ LoadRoot(r1, other_nil_value);
-    __ cmp(r0, r1);
-    __ b(eq, if_true);
-    __ JumpIfSmi(r0, if_false);
-    // It can be an undetectable object.
-    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
-    __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
-    __ cmp(r1, Operand(1 << Map::kIsUndetectable));
-    Split(eq, if_true, if_false, fall_through);
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
+                                                         kNonStrictEquality,
+                                                         nil);
+    CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+    __ cmp(r0, Operand(0));
+    Split(ne, if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }

8
deps/v8/src/arm/ic-arm.cc

@@ -1340,13 +1340,7 @@ static void KeyedStoreGenerateGenericHelper(
     __ b(ne, slow);
   }
   __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 elements,  // Overwritten.
-                                 r3,        // Scratch regs...
-                                 r4,
-                                 r5,
-                                 r6,
+  __ StoreNumberToDoubleElements(value, key, elements, r3,
                                  &transition_double_elements);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.

22
deps/v8/src/arm/lithium-arm.cc

@@ -192,6 +192,11 @@ const char* LArithmeticT::Mnemonic() const {
 }
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
 void LGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", block_id());
 }
@@ -989,12 +994,14 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
   LOperand* value = UseRegister(instr->value());
   return DefineAsRegister(new(zone()) LArgumentsLength(value));
 }
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
   return DefineAsRegister(new(zone()) LArgumentsElements);
 }
@@ -2424,7 +2431,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
     ASSERT(info()->IsStub());
     CodeStubInterfaceDescriptor* descriptor =
         info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
-    Register reg = descriptor->register_params_[instr->index()];
+    int index = static_cast<int>(instr->index());
+    Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
     return DefineFixed(result, reg);
   }
 }
@@ -2456,9 +2464,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
 LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
   LOperand* args = UseRegister(instr->arguments());
-  LOperand* length = UseTempRegister(instr->length());
-  LOperand* index = UseRegister(instr->index());
+  LOperand* length;
+  LOperand* index;
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    length = UseRegisterOrConstant(instr->length());
+    index = UseOrConstant(instr->index());
+  } else {
+    length = UseTempRegister(instr->length());
+    index = Use(instr->index());
+  }
   return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
 }

16
deps/v8/src/arm/lithium-arm.h

@@ -282,6 +282,8 @@ class LInstruction: public ZoneObject {
   LOperand* FirstInput() { return InputAt(0); }
   LOperand* Output() { return HasResult() ? result() : NULL; }
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
 #ifdef DEBUG
   void VerifyCall();
 #endif
@@ -381,6 +383,10 @@ class LInstructionGap: public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+  virtual bool HasInterestingComment(LCodeGen* gen) const {
+    return !IsRedundant();
+  }
   DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
 };
@@ -389,6 +395,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(int block_id) : block_id_(block_id) { }
+  virtual bool HasInterestingComment(LCodeGen* gen) const;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
   virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
@@ -436,12 +443,14 @@ class LLabel: public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
   virtual void PrintDataTo(StringStream* stream);
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
   Label* label() { return &label_; }
   LLabel* replacement() const { return replacement_; }
   void set_replacement(LLabel* label) { replacement_ = label; }
@@ -455,6 +464,7 @@ class LLabel: public LGap {
 class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
@@ -472,6 +482,7 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
 class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
  public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
@@ -1843,7 +1854,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
   virtual void PrintDataTo(StringStream* stream);
   int arity() const { return hydrogen()->argument_count() - 1; }
-  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };
@@ -1911,7 +1921,6 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
   virtual void PrintDataTo(StringStream* stream);
-  Handle<JSFunction> target() const { return hydrogen()->target(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2488,8 +2497,6 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
   DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
 };
@@ -2566,6 +2573,7 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry();
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
   LOperand** SpilledRegisterArray() { return register_spills_; }

198
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -238,7 +238,12 @@ bool LCodeGen::GeneratePrologue() {
         __ str(r0, target);
         // Update the write barrier. This clobbers r3 and r0.
         __ RecordWriteContextSlot(
-            cp, target.offset(), r0, r3, GetLinkRegisterState(), kSaveFPRegs);
+            cp,
+            target.offset(),
+            r0,
+            r3,
+            GetLinkRegisterState(),
+            kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -259,39 +264,22 @@ bool LCodeGen::GenerateBody() {
        !is_aborted() && current_instruction_ < instructions_->length();
        current_instruction_++) {
     LInstruction* instr = instructions_->at(current_instruction_);
+    // Don't emit code for basic blocks with a replacement.
     if (instr->IsLabel()) {
-      LLabel* label = LLabel::cast(instr);
-      emit_instructions = !label->HasReplacement();
-    }
-    if (emit_instructions) {
-      if (FLAG_code_comments) {
-        HValue* hydrogen = instr->hydrogen_value();
-        if (hydrogen != NULL) {
-          if (hydrogen->IsChange()) {
-            HValue* changed_value = HChange::cast(hydrogen)->value();
-            int use_id = 0;
-            const char* use_mnemo = "dead";
-            if (hydrogen->UseCount() >= 1) {
-              HValue* use_value = hydrogen->uses().value();
-              use_id = use_value->id();
-              use_mnemo = use_value->Mnemonic();
-            }
-            Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
-                    current_instruction_, instr->Mnemonic(),
-                    changed_value->id(), changed_value->Mnemonic(),
-                    use_id, use_mnemo);
-          } else {
-            Comment(";;; @%d: %s. <#%d>", current_instruction_,
-                    instr->Mnemonic(), hydrogen->id());
-          }
-        } else {
-          Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
-        }
+      emit_instructions = !LLabel::cast(instr)->HasReplacement();
     }
+    if (!emit_instructions) continue;
+    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
+      Comment(";;; <@%d,#%d> %s",
+              current_instruction_,
+              instr->hydrogen_value()->id(),
+              instr->Mnemonic());
     }
     instr->CompileToNative(this);
   }
-  }
   EnsureSpaceForLazyDeopt();
   return !is_aborted();
 }
@@ -302,11 +290,14 @@ bool LCodeGen::GenerateDeferredCode() {
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
-      __ bind(code->entry());
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred build frame @%d: %s.",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
         ASSERT(!frame_is_built_);
         ASSERT(info()->IsStub());
         frame_is_built_ = true;
@@ -314,15 +305,11 @@ bool LCodeGen::GenerateDeferredCode() {
         __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
         __ push(scratch0());
         __ add(fp, sp, Operand(2 * kPointerSize));
+        Comment(";;; Deferred code");
       }
-      Comment(";;; Deferred code @%d: %s.",
-              code->instruction_index(),
-              code->instr()->Mnemonic());
       code->Generate();
       if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred destroy frame @%d: %s.",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
+        Comment(";;; Destroy frame");
         ASSERT(frame_is_built_);
         __ pop(ip);
         __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
@@ -353,7 +340,9 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     Abort("Generated code is too large");
   }
-  __ RecordComment("[ Deoptimisation jump table");
+  if (deopt_jump_table_.length() > 0) {
+    Comment(";;; -------------------- Jump table --------------------");
+  }
   Label table_start;
   __ bind(&table_start);
   Label needs_frame_not_call;
@@ -414,7 +403,6 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     }
     masm()->CheckConstPool(false, false);
   }
-  __ RecordComment("]");
   // Force constant pool emission at the end of the deopt jump table to make
   // sure that no constant pools are emitted after.
@@ -607,7 +595,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                                 pushed_arguments_index,
                                 pushed_arguments_count);
   bool has_closure_id = !info()->closure().is_null() &&
-      *info()->closure() != *environment->closure();
+      !info()->closure().is_identical_to(environment->closure());
   int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
@@ -923,10 +911,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   Handle<FixedArray> literals =
       factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  { ALLOW_HANDLE_DEREF(isolate(),
+                       "copying a ZoneList of handles into a FixedArray");
     for (int i = 0; i < deoptimization_literals_.length(); i++) {
       literals->set(i, *deoptimization_literals_[i]);
     }
     data->SetLiteralArray(*literals);
+  }
   data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
   data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
@@ -1042,10 +1033,19 @@ void LCodeGen::RecordPosition(int position) {
 }
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
 void LCodeGen::DoLabel(LLabel* label) {
-  Comment(";;; -------------------- B%d%s --------------------",
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
           label->block_id(),
-          label->is_loop_header() ? " (loop header)" : "");
+          LabelType(label));
   __ bind(label->label());
   current_block_ = label->block_id();
   DoGap(label);
@@ -1904,6 +1904,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
 void LCodeGen::DoConstantT(LConstantT* instr) {
   Handle<Object> value = instr->value();
+  ALLOW_HANDLE_DEREF(isolate(), "smi check");
   if (value->IsSmi()) {
     __ mov(ToRegister(instr->result()), Operand(value));
   } else {
@@ -2170,17 +2171,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
 }
-int LCodeGen::GetNextEmittedBlock(int block) {
-  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
-    LLabel* label = chunk_->GetLabel(i);
-    if (!label->HasReplacement()) return i;
+int LCodeGen::GetNextEmittedBlock() const {
+  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
   }
   return -1;
 }
 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
-  int next_block = GetNextEmittedBlock(current_block_);
+  int next_block = GetNextEmittedBlock();
   right_block = chunk_->LookupDestination(right_block);
   left_block = chunk_->LookupDestination(left_block);
@@ -2317,10 +2317,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
 void LCodeGen::EmitGoto(int block) {
-  block = chunk_->LookupDestination(block);
-  int next_block = GetNextEmittedBlock(current_block_);
-  if (block != next_block) {
-    __ jmp(chunk_->GetAssemblyLabel(block));
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
   }
 }
@@ -2944,7 +2942,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
   if (NeedsEagerFrame()) {
     __ mov(sp, fp);
     __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  }
   if (instr->has_constant_parameter_count()) {
     int parameter_count = ToInteger32(instr->constant_parameter_count());
     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
@@ -2953,10 +2951,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
     }
   } else {
     Register reg = ToRegister(instr->parameter_count());
-    __ add(reg, reg, Operand(1));
+    // The argument count parameter is a smi
+    __ SmiUntag(reg);
     __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
   }
+  }
   __ Jump(lr);
 }
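The new else-branch relies on the argument count arriving as a smi. On 32-bit
ARM a smi stores a 31-bit integer shifted left by one with a zero tag bit, so
SmiUntag is a single arithmetic shift; a sketch of the encoding (not V8's
actual helpers):

    #include <cstdint>

    int32_t SmiTag(int32_t value) { return value << 1; }  // 31-bit payloads
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // arithmetic shift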
@@ -3274,15 +3273,23 @@ void LCodeGen::DoLoadExternalArrayPointer(
 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   Register arguments = ToRegister(instr->arguments());
   Register result = ToRegister(instr->result());
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    int index = (const_length - const_index) + 1;
+    __ ldr(result, MemOperand(arguments, index * kPointerSize));
+  } else {
     Register length = ToRegister(instr->length());
     Register index = ToRegister(instr->index());
     // There are two words between the frame pointer and the last argument.
    // Subtracting the index from the length accounts for one of them; add
    // one more.
     __ sub(length, length, index);
     __ add(length, length, Operand(1));
     __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
   }
 }
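The constant fast path folds the whole address computation at compile time:
with two words (saved fp and return address) between the frame pointer and the
last argument, argument index of length sits (length - index + 1) pointers
above the arguments pointer. The arithmetic, as a standalone sketch:

    // Offset, in bytes, of an argument slot relative to the arguments
    // pointer (kPointerSize is 4 on 32-bit ARM).
    int ArgumentSlotOffset(int length, int index) {
      const int kPointerSize = 4;
      return (length - index + 1) * kPointerSize;
    }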
 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
@@ -3703,12 +3710,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count,
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind,
                                  R1State r1_state) {
-  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
-      function->shared()->formal_parameter_count() == arity;
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -3723,7 +3733,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
     // Set r0 to arguments count if adaption is not needed. Assumes that r0
     // is available to write to at this point.
-    if (!function->NeedsArgumentsAdaption()) {
+    if (dont_adapt_arguments) {
       __ mov(r0, Operand(arity));
     }
@@ -3737,7 +3747,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   } else {
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(arity);
-    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(
+        function, expected, count, CALL_FUNCTION, generator, call_kind);
   }
   // Restore context.
@@ -3747,7 +3759,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  CallKnownFunction(instr->function(),
+  CallKnownFunction(instr->hydrogen()->function(),
+                    instr->hydrogen()->formal_parameter_count(),
                     instr->arity(),
                     instr,
                     CALL_AS_METHOD,
@@ -4119,7 +4132,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(instr->HasPointerMap());
-  if (instr->known_function().is_null()) {
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     RecordPosition(pointers->position());
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -4127,7 +4141,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
     __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
-    CallKnownFunction(instr->known_function(),
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
                       instr->arity(),
                       instr,
                       CALL_AS_METHOD,
@@ -4187,7 +4202,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  CallKnownFunction(instr->target(),
+  CallKnownFunction(instr->hydrogen()->target(),
+                    instr->hydrogen()->formal_parameter_count(),
                     instr->arity(),
                     instr,
                     CALL_AS_FUNCTION,
@@ -4218,10 +4234,18 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   __ mov(r0, Operand(instr->arity()));
   __ mov(r2, Operand(instr->hydrogen()->property_cell()));
-  Handle<Code> array_construct_code =
-      isolate()->builtins()->ArrayConstructCode();
-
-  CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
+  Object* cell_value = instr->hydrogen()->property_cell()->value();
+  ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  } else if (instr->arity() == 1) {
+    ArraySingleArgumentConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  } else {
+    ArrayNArgumentsConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  }
 }
@@ -5038,8 +5062,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
     __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
     __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
-    __ ECMAToInt32(input_reg, double_scratch2, double_scratch,
-                   scratch1, scratch2, scratch3);
+    __ ECMAToInt32(input_reg, double_scratch2,
+                   scratch1, scratch2, scratch3, double_scratch);
   } else {
     // Deoptimize if we don't have a heap number.
@@ -5136,8 +5160,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   if (instr->truncating()) {
     Register scratch3 = ToRegister(instr->temp2());
-    __ ECMAToInt32(result_reg, double_input, double_scratch,
-                   scratch1, scratch2, scratch3);
+    __ ECMAToInt32(result_reg, double_input,
+                   scratch1, scratch2, scratch3, double_scratch);
   } else {
     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
     // Deoptimize if the input wasn't an int32 (inside a double).
@@ -5207,6 +5231,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
   Register reg = ToRegister(instr->value());
   Handle<JSFunction> target = instr->hydrogen()->target();
+  ALLOW_HANDLE_DEREF(isolate(), "smi check");
   if (isolate()->heap()->InNewSpace(*target)) {
     Register reg = ToRegister(instr->value());
     Handle<JSGlobalPropertyCell> cell =
@@ -5348,16 +5373,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
   Register scratch = ToRegister(instr->temp());
   Register scratch2 = ToRegister(instr->temp2());
   Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
   int instance_size = initial_map->instance_size();
   ASSERT(initial_map->pre_allocated_property_fields() +
          initial_map->unused_property_fields() -
          initial_map->inobject_properties() == 0);
-  // Allocate memory for the object. The initial map might change when
-  // the constructor's prototype changes, but instance size and property
-  // counts remain unchanged (if slack tracking finished).
-  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
   __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
               TAG_OBJECT);
@@ -5392,8 +5413,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
   Register result = ToRegister(instr->result());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
   int instance_size = initial_map->instance_size();
   // TODO(3095996): Get rid of this. For now, we need to make the
@@ -5476,7 +5496,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> literals = instr->hydrogen()->literals();
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate_elements_kind();
   AllocationSiteMode allocation_site_mode =
@@ -5531,7 +5551,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> literals = instr->hydrogen()->literals();
   Handle<FixedArray> constant_properties =
       instr->hydrogen()->constant_properties();
@@ -5545,7 +5565,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   __ mov(r0, Operand(Smi::FromInt(flags)));
   // Pick the right runtime function or stub to call.
-  int properties_count = constant_properties->length() / 2;
+  int properties_count = instr->hydrogen()->constant_properties_length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     __ Push(r3, r2, r1, r0);
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
@@ -5614,18 +5634,16 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
-  if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(shared_info->language_mode(),
-                            shared_info->is_generator());
-    __ mov(r1, Operand(shared_info));
+  if (!pretenure && instr->hydrogen()->has_no_literals()) {
+    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+                            instr->hydrogen()->is_generator());
+    __ mov(r1, Operand(instr->hydrogen()->shared_info()));
     __ push(r1);
     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   } else {
-    __ mov(r2, Operand(shared_info));
-    __ mov(r1, Operand(pretenure
-                       ? factory()->true_value()
+    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
+    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                  : factory()->false_value()));
     __ Push(cp, r2, r1);
     CallRuntime(Runtime::kNewClosure, 3, instr);

17
deps/v8/src/arm/lithium-codegen-arm.h

@@ -80,10 +80,20 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
+  // TODO(svenpanne) Use this consistently.
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
   bool NeedsEagerFrame() const {
     return GetStackSlotCount() > 0 ||
         info()->is_non_deferred_calling() ||
-        !info()->IsStub();
+        !info()->IsStub() ||
+        info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -195,12 +205,12 @@ class LCodeGen BASE_EMBEDDED {
   LPlatformChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
-  HGraph* graph() const { return chunk_->graph(); }
+  HGraph* graph() const { return chunk()->graph(); }
   Register scratch0() { return r9; }
   DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
-  int GetNextEmittedBlock(int block);
+  int GetNextEmittedBlock() const;
   LInstruction* GetNextInstruction();
   void EmitClassOfTest(Label* if_true,
@@ -266,6 +276,7 @@ class LCodeGen BASE_EMBEDDED {
   // Generate a direct call to a known function. Expects the function
   // to be in r1.
   void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count,
                          int arity,
                          LInstruction* instr,
                          CallKind call_kind,

280
deps/v8/src/arm/macro-assembler-arm.cc

@@ -74,6 +74,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   // 'code' is always generated ARM code, never THUMB code
+  ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
 }
@@ -162,6 +163,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
                              RelocInfo::Mode rmode,
                              TypeFeedbackId ast_id,
                              Condition cond) {
+  ALLOW_HANDLE_DEREF(isolate(), "using raw address");
   return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
 }
@@ -179,6 +181,7 @@ void MacroAssembler::Call(Handle<Code> code,
     rmode = RelocInfo::CODE_TARGET_WITH_ID;
   }
   // 'code' is always generated ARM code, never THUMB code
+  ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
   Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
 }
@@ -395,6 +398,7 @@ void MacroAssembler::StoreRoot(Register source,
 void MacroAssembler::LoadHeapObject(Register result,
                                     Handle<HeapObject> object) {
+  ALLOW_HANDLE_DEREF(isolate(), "using raw address");
   if (isolate()->heap()->InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
         isolate()->factory()->NewJSGlobalPropertyCell(object);
@@ -790,6 +794,116 @@ void MacroAssembler::Vmov(const DwVfpRegister dst,
 }
void MacroAssembler::ConvertNumberToInt32(Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
Label* not_number) {
Label done;
UntagAndJumpIfSmi(dst, object, &done);
JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset));
ECMAToInt32(dst, double_scratch1,
scratch1, scratch2, scratch3, double_scratch2);
bind(&done);
}
void MacroAssembler::LoadNumber(Register object,
DwVfpRegister dst,
Register heap_number_map,
Register scratch,
Label* not_number) {
Label is_smi, done;
UntagAndJumpIfSmi(scratch, object, &is_smi);
JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
b(&done);
// Handle loading a double from a smi.
bind(&is_smi);
vmov(dst.high(), scratch);
vcvt_f64_s32(dst, dst.high());
bind(&done);
}
void MacroAssembler::LoadNumberAsInt32Double(Register object,
DwVfpRegister double_dst,
Register heap_number_map,
Register scratch,
DwVfpRegister double_scratch,
Label* not_int32) {
ASSERT(!scratch.is(object));
ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
Label done, obj_is_not_smi;
UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
vmov(double_scratch.low(), scratch);
vcvt_f64_s32(double_dst, double_scratch.low());
b(&done);
bind(&obj_is_not_smi);
JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
// Load the number.
// Load the double value.
vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
TestDoubleIsInt32(double_dst, double_scratch);
// Jump to not_int32 if the operation did not succeed.
b(ne, not_int32);
bind(&done);
}
void MacroAssembler::LoadNumberAsInt32(Register object,
Register dst,
Register heap_number_map,
Register scratch,
DwVfpRegister double_scratch0,
DwVfpRegister double_scratch1,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch.is(object));
Label done, maybe_undefined;
UntagAndJumpIfSmi(dst, object, &done);
JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
// Load the double value.
vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
// Jump to not_int32 if the operation did not succeed.
b(ne, not_int32);
b(&done);
bind(&maybe_undefined);
CompareRoot(object, Heap::kUndefinedValueRootIndex);
b(ne, not_int32);
// |undefined| is truncated to 0.
mov(dst, Operand(Smi::FromInt(0)));
// Fall through.
bind(&done);
}
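Both LoadNumberAsInt32Double and LoadNumberAsInt32 above branch to not_int32
when the VFP comparison reports an inexact conversion. What TestDoubleIsInt32
and TryDoubleToInt32Exact establish is equivalent to a round-trip test,
sketched here in portable C++ (for illustration only; V8 performs this with
vcvt/vcmp instructions, not with this code):

    #include <cmath>
    #include <cstdint>

    bool DoubleIsInt32(double d) {
      if (!(d >= INT32_MIN && d <= INT32_MAX)) return false;  // avoid UB cast
      int32_t i = static_cast<int32_t>(d);  // truncates toward zero
      // Exact iff converting back reproduces the input; -0.0 is rejected
      // because it has no int32 representation.
      return static_cast<double>(i) == d && !(i == 0 && std::signbit(d));
    }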
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   // r0-r3: preserved
   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
@@ -837,14 +951,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // Optionally save all double registers.
   if (save_doubles) {
-    // Check CPU flags for number of registers, setting the Z condition flag.
-    CheckFor32DRegs(ip);
-    // Push registers d0-d15, and possibly d16-d31, on the stack.
-    // If d16-d31 are not pushed, decrease the stack pointer instead.
-    vstm(db_w, sp, d16, d31, ne);
-    sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
-    vstm(db_w, sp, d0, d15);
+    SaveFPRegs(sp, ip);
     // Note that d0 will be accessible at
     //   fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
     // since the sp slot and code slot were pushed after the fp.
@@ -905,15 +1012,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
     const int offset = 2 * kPointerSize;
     sub(r3, fp,
         Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
-    // Check CPU flags for number of registers, setting the Z condition flag.
-    CheckFor32DRegs(ip);
-    // Pop registers d0-d15, and possibly d16-d31, from r3.
-    // If d16-d31 are not popped, increase r3 instead.
-    vldm(ia_w, r3, d0, d15);
-    vldm(ia_w, r3, d16, d31, ne);
-    add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq);
+    RestoreFPRegs(r3, ip);
   }
   // Clear top frame.
@@ -1132,6 +1231,7 @@ void MacroAssembler::InvokeFunction(Register fun,
 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+                                    const ParameterCount& expected,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
@@ -1143,7 +1243,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
   LoadHeapObject(r1, function);
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-  ParameterCount expected(function->shared()->formal_parameter_count());
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
@@ -1945,14 +2044,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                  Register key_reg,
                                                  Register elements_reg,
                                                  Register scratch1,
-                                                 Register scratch2,
-                                                 Register scratch3,
-                                                 Register scratch4,
                                                  Label* fail,
                                                  int elements_offset) {
   Label smi_value, store;
-  Register mantissa_reg = scratch2;
-  Register exponent_reg = scratch3;
   // Handle smi values specially.
   JumpIfSmi(value_reg, &smi_value);
@@ -1977,9 +2071,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   bind(&smi_value);
   Register untagged_value = scratch1;
   SmiUntag(untagged_value, value_reg);
-  FloatingPointHelper::ConvertIntToDouble(
-      this, untagged_value, FloatingPointHelper::kVFPRegisters, d0,
-      mantissa_reg, exponent_reg, scratch4, s2);
+  vmov(s2, untagged_value);
+  vcvt_f64_s32(d0, s2);
   bind(&store);
   add(scratch1, elements_reg,
@@ -2171,8 +2264,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
   if (FLAG_log_timer_events) {
     FrameScope frame(this, StackFrame::MANUAL);
     PushSafepointRegisters();
-    PrepareCallCFunction(0, r0);
-    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+    PrepareCallCFunction(1, r0);
+    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
     PopSafepointRegisters();
   }
@@ -2185,8 +2279,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
   if (FLAG_log_timer_events) {
     FrameScope frame(this, StackFrame::MANUAL);
     PushSafepointRegisters();
-    PrepareCallCFunction(0, r0);
-    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+    PrepareCallCFunction(1, r0);
+    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
     PopSafepointRegisters();
   }
@@ -2238,7 +2333,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
   str(r5, MemOperand(r7, kLimitOffset));
   mov(r4, r0);
   PrepareCallCFunction(1, r5);
-  mov(r0, Operand(ExternalReference::isolate_address()));
+  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
   CallCFunction(
       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   mov(r0, r4);
@@ -2401,34 +2496,21 @@ void MacroAssembler::TryInt32Floor(Register result,
 }
-void MacroAssembler::ECMAConvertNumberToInt32(Register source,
-                                              Register result,
-                                              Register input_low,
-                                              Register input_high,
-                                              Register scratch,
-                                              DwVfpRegister double_scratch1,
-                                              DwVfpRegister double_scratch2) {
-  vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
-  ECMAToInt32(result, double_scratch1, double_scratch2,
-              scratch, input_high, input_low);
-}
 void MacroAssembler::ECMAToInt32(Register result,
                                  DwVfpRegister double_input,
-                                 DwVfpRegister double_scratch,
                                  Register scratch,
-                                 Register input_high,
-                                 Register input_low) {
-  ASSERT(!input_high.is(result));
-  ASSERT(!input_low.is(result));
-  ASSERT(!input_low.is(input_high));
+                                 Register scratch_high,
+                                 Register scratch_low,
+                                 DwVfpRegister double_scratch) {
+  ASSERT(!scratch_high.is(result));
+  ASSERT(!scratch_low.is(result));
+  ASSERT(!scratch_low.is(scratch_high));
   ASSERT(!scratch.is(result) &&
-         !scratch.is(input_high) &&
-         !scratch.is(input_low));
+         !scratch.is(scratch_high) &&
+         !scratch.is(scratch_low));
   ASSERT(!double_input.is(double_scratch));
-  Label out_of_range, negate, done;
+  Label out_of_range, only_low, negate, done;
   vcvt_s32_f64(double_scratch.low(), double_input);
   vmov(result, double_scratch.low());
@@ -2438,8 +2520,8 @@ void MacroAssembler::ECMAToInt32(Register result,
   cmp(scratch, Operand(0x7ffffffe));
   b(lt, &done);
-  vmov(input_low, input_high, double_input);
-  Ubfx(scratch, input_high,
+  vmov(scratch_low, scratch_high, double_input);
+  Ubfx(scratch, scratch_high,
        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
   // Load scratch with exponent - 1. This is faster than loading
   // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
@@ -2454,59 +2536,45 @@ void MacroAssembler::ECMAToInt32(Register result,
   // If we reach this code, 31 <= exponent <= 83.
   // So, we don't have to handle cases where 0 <= exponent <= 20 for
   // which we would need to shift right the high part of the mantissa.
-  ECMAToInt32Tail(result, scratch, input_high, input_low,
-                  &out_of_range, &negate, &done);
-}
-void MacroAssembler::ECMAToInt32Tail(Register result,
-                                     Register scratch,
-                                     Register input_high,
-                                     Register input_low,
-                                     Label* out_of_range,
-                                     Label* negate,
-                                     Label* done) {
-  Label only_low;
-  // On entry, scratch contains exponent - 1.
+  // Scratch contains exponent - 1.
   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
   rsb(scratch, scratch, Operand(51), SetCC);
   b(ls, &only_low);
-  // 21 <= exponent <= 51, shift input_low and input_high
+  // 21 <= exponent <= 51, shift scratch_low and scratch_high
   // to generate the result.
-  mov(input_low, Operand(input_low, LSR, scratch));
+  mov(scratch_low, Operand(scratch_low, LSR, scratch));
   // Scratch contains: 52 - exponent.
   // We need: exponent - 20.
   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
   rsb(scratch, scratch, Operand(32));
-  Ubfx(result, input_high,
+  Ubfx(result, scratch_high,
        0, HeapNumber::kMantissaBitsInTopWord);
-  // Set the implicit 1 before the mantissa part in input_high.
+  // Set the implicit 1 before the mantissa part in scratch_high.
   orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
-  orr(result, input_low, Operand(result, LSL, scratch));
-  b(negate);
+  orr(result, scratch_low, Operand(result, LSL, scratch));
+  b(&negate);
-  bind(out_of_range);
+  bind(&out_of_range);
   mov(result, Operand::Zero());
-  b(done);
+  b(&done);
   bind(&only_low);
-  // 52 <= exponent <= 83, shift only input_low.
+  // 52 <= exponent <= 83, shift only scratch_low.
   // On entry, scratch contains: 52 - exponent.
   rsb(scratch, scratch, Operand::Zero());
-  mov(result, Operand(input_low, LSL, scratch));
+  mov(result, Operand(scratch_low, LSL, scratch));
   bind(&negate);
-  // If input was positive, input_high ASR 31 equals 0 and
-  // input_high LSR 31 equals zero.
+  // If input was positive, scratch_high ASR 31 equals 0 and
+  // scratch_high LSR 31 equals zero.
   // New result = (result eor 0) + 0 = result.
   // If the input was negative, we have to negate the result.
-  // Input_high ASR 31 equals 0xffffffff and input_high LSR 31 equals 1.
+  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
   // New result = (result eor 0xffffffff) + 1 = 0 - result.
-  eor(result, result, Operand(input_high, ASR, 31));
-  add(result, result, Operand(input_high, LSR, 31));
+  eor(result, result, Operand(scratch_high, ASR, 31));
+  add(result, result, Operand(scratch_high, LSR, 31));
bind(done); bind(&done);
} }
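For orientation, the truncation ECMAToInt32 implements corresponds roughly to the portable C++ below. This is a sketch only; the function and variable names are illustrative and not code from this commit. It mirrors the stub's bit manipulation: pull the exponent out of the high word, shift the mantissa into the low 32 bits, then negate with the eor/add trick described in the comments above. NaN and infinity fall into the exponent > 83 case and yield 0, as ToInt32 requires.

    #include <cstdint>
    #include <cstring>

    int32_t EcmaToInt32(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));           // view the double's bits
      uint32_t high = static_cast<uint32_t>(bits >> 32);  // sign, exponent, mantissa top
      uint32_t low  = static_cast<uint32_t>(bits);        // mantissa low word
      int exponent = static_cast<int>((high >> 20) & 0x7ff) - 1023;
      if (exponent < 31) {
        // Fits in int32: plain truncation works (the stub's vcvt fast path).
        return static_cast<int32_t>(input);
      }
      if (exponent > 83) return 0;  // every mantissa bit lands above bit 31
      uint32_t mantissa_high = (high & 0xfffff) | 0x100000;  // add the implicit 1
      uint32_t result;
      if (exponent <= 51) {
        // 31 <= exponent <= 51: both mantissa words contribute.
        result = (mantissa_high << (exponent - 20)) | (low >> (52 - exponent));
      } else {
        // 52 <= exponent <= 83: only the low word contributes.
        result = low << (exponent - 52);
      }
      // Negate via (result eor mask) + carry, as the stub does with eor/add.
      uint32_t sign_mask =
          static_cast<uint32_t>(static_cast<int32_t>(high) >> 31);  // 0 or ~0
      return static_cast<int32_t>((result ^ sign_mask) + (sign_mask & 1));
    }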
@@ -2688,16 +2756,6 @@ void MacroAssembler::Assert(Condition cond, const char* msg) {
 }


-void MacroAssembler::AssertRegisterIsRoot(Register reg,
-                                          Heap::RootListIndex index) {
-  if (emit_debug_code()) {
-    LoadRoot(ip, index);
-    cmp(reg, ip);
-    Check(eq, "Register did not match expected root");
-  }
-}
-
-
 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
     ASSERT(!elements.is(ip));
@@ -2991,12 +3049,10 @@ void MacroAssembler::AssertName(Register object) {
-void MacroAssembler::AssertRootValue(Register src,
-                                     Heap::RootListIndex root_value_index,
-                                     const char* message) {
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
   if (emit_debug_code()) {
-    CompareRoot(src, root_value_index);
-    Check(eq, message);
+    CompareRoot(reg, index);
+    Check(eq, "HeapNumberMap register clobbered.");
   }
 }
@@ -3006,7 +3062,7 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                          Register scratch,
                                          Label* on_not_heap_number) {
   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   cmp(scratch, heap_number_map);
   b(ne, on_not_heap_number);
 }
@@ -3063,7 +3119,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);

   // Store heap number map in the allocated object.
-  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   if (tagging_mode == TAG_RESULT) {
     str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
   } else {
@@ -3183,6 +3239,22 @@ void MacroAssembler::CheckFor32DRegs(Register scratch) {
 }


+void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
+  CheckFor32DRegs(scratch);
+  vstm(db_w, location, d16, d31, ne);
+  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+  vstm(db_w, location, d0, d15);
+}
+
+
+void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
+  CheckFor32DRegs(scratch);
+  vldm(ia_w, location, d0, d15);
+  vldm(ia_w, location, d16, d31, ne);
+  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+}
+
+
 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
     Register first,
     Register second,
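A note on the new SaveFPRegs/RestoreFPRegs pair: CheckFor32DRegs leaves the Z flag set when only d0-d15 exist, so the vstm/vldm for d16-d31 executes under the ne condition and the compensating pointer adjustment under eq. Either way the block spans 32 double slots. In rough C++ terms (an illustrative sketch of the save side, not the stub itself):

    #include <cstring>

    // Sketch: the saved block always covers 32 double slots; d16-d31 are
    // written only when the CPU has them, otherwise their slots are skipped.
    void SaveFPRegs(double*& sp, const double regs[32], bool has_32_dregs) {
      sp -= 16;                                           // vstm db_w / sub, eq
      if (has_32_dregs) {
        std::memcpy(sp, regs + 16, 16 * sizeof(double));  // executed under ne
      }
      sp -= 16;
      std::memcpy(sp, regs, 16 * sizeof(double));         // d0-d15, unconditional
    }

RestoreFPRegs is the mirror image: it always loads d0-d15, then either loads d16-d31 or steps the pointer past their reserved slots.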

99
deps/v8/src/arm/macro-assembler-arm.h

@@ -178,6 +178,7 @@ class MacroAssembler: public Assembler {
   void LoadHeapObject(Register dst, Handle<HeapObject> object);

   void LoadObject(Register result, Handle<Object> object) {
+    ALLOW_HANDLE_DEREF(isolate(), "heap object check");
     if (object->IsHeapObject()) {
       LoadHeapObject(result, Handle<HeapObject>::cast(object));
     } else {
@@ -495,6 +496,54 @@ class MacroAssembler: public Assembler {
                 const double imm,
                 const Register scratch = no_reg);

+  // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1.
+  void ConvertNumberToInt32(Register object,
+                            Register dst,
+                            Register heap_number_map,
+                            Register scratch1,
+                            Register scratch2,
+                            Register scratch3,
+                            DwVfpRegister double_scratch1,
+                            DwVfpRegister double_scratch2,
+                            Label* not_int32);
+
+  // Loads the number from object into dst register.
+  // If |object| is neither smi nor heap number, |not_number| is jumped to
+  // with |object| still intact.
+  void LoadNumber(Register object,
+                  DwVfpRegister dst,
+                  Register heap_number_map,
+                  Register scratch,
+                  Label* not_number);
+
+  // Loads the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point value in the 32-bit integer range that are not exact integer
+  // won't be loaded.
+  void LoadNumberAsInt32Double(Register object,
+                               DwVfpRegister double_dst,
+                               Register heap_number_map,
+                               Register scratch,
+                               DwVfpRegister double_scratch,
+                               Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point value in the 32-bit integer range that are not exact integer
+  // won't be converted.
+  void LoadNumberAsInt32(Register object,
+                         Register dst,
+                         Register heap_number_map,
+                         Register scratch,
+                         DwVfpRegister double_scratch0,
+                         DwVfpRegister double_scratch1,
+                         Label* not_int32);
+
   // Enter exit frame.
   // stack_space - extra stack space, used for alignment before call to C.
   void EnterExitFrame(bool save_doubles, int stack_space = 0);
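The LoadNumberAsInt32* contracts above ("cannot be exactly represented by a 32-bit integer") boil down to a convert, convert back, and compare test. A portable sketch of that predicate (illustrative; the stub does this with vcvt_s32_f64/vcvt_f64_s32, and v8's Smi paths treat -0 separately, which this sketch ignores):

    #include <cstdint>

    bool IsExactInt32(double value, int32_t* out) {
      if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
      int32_t truncated = static_cast<int32_t>(value);            // vcvt_s32_f64
      if (static_cast<double>(truncated) != value) return false;  // round trip
      *out = truncated;
      return true;
    }

NaN fails the range check (both comparisons are false), so it never reaches the cast.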
@@ -573,6 +622,7 @@ class MacroAssembler: public Assembler {
                       CallKind call_kind);

   void InvokeFunction(Handle<JSFunction> function,
+                      const ParameterCount& expected,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -831,16 +881,11 @@ class MacroAssembler: public Assembler {
   // Check to see if maybe_number can be stored as a double in
   // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail, in which
-  // case scratch2, scratch3 and scratch4 are unmodified.
+  // the FastDoubleElements array elements. Otherwise jump to fail.
   void StoreNumberToDoubleElements(Register value_reg,
                                    Register key_reg,
-                                   // All regs below here overwritten.
                                    Register elements_reg,
                                    Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   Register scratch4,
                                    Label* fail,
                                    int elements_offset = 0);
@@ -972,31 +1017,28 @@ class MacroAssembler: public Assembler {
                      Label* done,
                      Label* exact);

-  // Performs a truncating conversion of a heap floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
-  // Exits with 'result' holding the answer.
-  void ECMAConvertNumberToInt32(Register source,
-                                Register result,
-                                Register input_low,
-                                Register input_high,
-                                Register scratch,
-                                DwVfpRegister double_scratch1,
-                                DwVfpRegister double_scratch2);
-
   // Performs a truncating conversion of a floating point number as used by
   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+  // Double_scratch must be between d0 and d15.
   // Exits with 'result' holding the answer and all other registers clobbered.
   void ECMAToInt32(Register result,
                    DwVfpRegister double_input,
-                   DwVfpRegister double_scratch,
                    Register scratch,
-                   Register input_high,
-                   Register input_low);
+                   Register scratch_high,
+                   Register scratch_low,
+                   DwVfpRegister double_scratch);

   // Check whether d16-d31 are available on the CPU. The result is given by the
   // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
   void CheckFor32DRegs(Register scratch);

+  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
+  // values to location, saving [d0..(d15|d31)].
+  void SaveFPRegs(Register location, Register scratch);
+
+  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
+  // values to location, restoring [d0..(d15|d31)].
+  void RestoreFPRegs(Register location, Register scratch);
+
   // ---------------------------------------------------------------------------
   // Runtime calls
@@ -1120,7 +1162,6 @@ class MacroAssembler: public Assembler {
   // Calls Abort(msg) if the condition cond is not satisfied.
   // Use --debug_code to enable.
   void Assert(Condition cond, const char* msg);
-  void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
   void AssertFastElements(Register elements);

   // Like Assert(), but always enabled.
@@ -1230,11 +1271,9 @@ class MacroAssembler: public Assembler {
   // Abort execution if argument is not a name, enabled via --debug-code.
   void AssertName(Register object);

-  // Abort execution if argument is not the root value with the given index,
+  // Abort execution if reg is not the root value with the given index,
   // enabled via --debug-code.
-  void AssertRootValue(Register src,
-                       Heap::RootListIndex root_value_index,
-                       const char* message);
+  void AssertIsRoot(Register reg, Heap::RootListIndex index);

   // ---------------------------------------------------------------------------
   // HeapNumber utilities
@@ -1365,16 +1404,6 @@ class MacroAssembler: public Assembler {
   // it. See the implementation for register usage.
   void JumpToHandlerEntry();

-  // Helper for ECMAToInt32VFP and ECMAToInt32NoVFP.
-  // It is expected that 31 <= exponent <= 83, and scratch is exponent - 1.
-  void ECMAToInt32Tail(Register result,
-                       Register scratch,
-                       Register input_high,
-                       Register input_low,
-                       Label* out_of_range,
-                       Label* negate,
-                       Label* done);
-
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);

16
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -380,12 +380,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
     // Address of current input position.
     __ add(r1, current_input_offset(), Operand(end_of_input_address()));
     // Isolate.
-    __ mov(r3, Operand(ExternalReference::isolate_address()));
+    __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));

     {
       AllowExternalCallThatCantCauseGC scope(masm_);
       ExternalReference function =
-          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+          ExternalReference::re_case_insensitive_compare_uc16(isolate());
       __ CallCFunction(function, argument_count);
     }
@@ -682,7 +682,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   Label stack_ok;

   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm_->isolate());
+      ExternalReference::address_of_stack_limit(isolate());
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ sub(r0, sp, r0, SetCC);
@@ -893,9 +893,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
     __ PrepareCallCFunction(num_arguments, r0);
     __ mov(r0, backtrack_stackpointer());
     __ add(r1, frame_pointer(), Operand(kStackHighEnd));
-    __ mov(r2, Operand(ExternalReference::isolate_address()));
+    __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
     ExternalReference grow_stack =
-        ExternalReference::re_grow_stack(masm_->isolate());
+        ExternalReference::re_grow_stack(isolate());
     __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
@@ -1111,7 +1111,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
   __ mov(r1, Operand(masm_->CodeObject()));

   // r0 becomes return address pointer.
   ExternalReference stack_guard_check =
-      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+      ExternalReference::re_check_stack_guard_state(isolate());
   CallCFunctionUsingStub(stack_guard_check, num_arguments);
 }
@@ -1292,7 +1292,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
 void RegExpMacroAssemblerARM::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm_->isolate());
+      ExternalReference::address_of_stack_limit(isolate());
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ cmp(sp, r0);
@@ -1302,7 +1302,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
 void RegExpMacroAssemblerARM::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+      ExternalReference::address_of_regexp_stack_limit(isolate());
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ cmp(backtrack_stackpointer(), Operand(r0));

2
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -30,6 +30,7 @@
 #include "arm/assembler-arm.h"
 #include "arm/assembler-arm-inl.h"
+#include "macro-assembler.h"

 namespace v8 {
 namespace internal {
@@ -223,6 +224,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   inline void CallCFunctionUsingStub(ExternalReference function,
                                      int num_arguments);

+  Isolate* isolate() const { return masm_->isolate(); }

   MacroAssembler* masm_;

10
deps/v8/src/arm/simulator-arm.cc

@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h> #include <stdlib.h>
#include <math.h> #include <cmath>
#include <cstdarg> #include <cstdarg>
#include "v8.h" #include "v8.h"
@ -331,7 +331,7 @@ void ArmDebugger::Debug() {
PrintF("\n"); PrintF("\n");
} }
} }
for (int i = 0; i < kNumVFPDoubleRegisters; i++) { for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i); dvalue = GetVFPDoubleRegisterValue(i);
uint64_t as_words = BitCast<uint64_t>(dvalue); uint64_t as_words = BitCast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n", PrintF("%3s: %f 0x%08x %08x\n",
@ -1297,7 +1297,7 @@ bool Simulator::OverflowFrom(int32_t alu_out,
// Support for VFP comparisons. // Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) { void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
if (isnan(val1) || isnan(val2)) { if (std::isnan(val1) || std::isnan(val2)) {
n_flag_FPSCR_ = false; n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false; z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true; c_flag_FPSCR_ = true;
@ -1866,7 +1866,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
double Simulator::canonicalizeNaN(double value) { double Simulator::canonicalizeNaN(double value) {
return (FPSCR_default_NaN_mode_ && isnan(value)) ? return (FPSCR_default_NaN_mode_ && std::isnan(value)) ?
FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value; FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
} }
@ -2947,7 +2947,7 @@ void Simulator::DecodeVCMP(Instruction* instr) {
// Raise exceptions for quiet NaNs if necessary. // Raise exceptions for quiet NaNs if necessary.
if (instr->Bit(7) == 1) { if (instr->Bit(7) == 1) {
if (isnan(dd_value)) { if (std::isnan(dd_value)) {
inv_op_vfp_flag_ = true; inv_op_vfp_flag_ = true;
} }
} }
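The <math.h> to <cmath> moves in this file (and below in assembler.cc, ast.cc, bignum-dtoa.cc, and cached-powers.cc) swap the C99 classification macros for the std:: overload set; <cmath> is permitted to remove the macro versions, so the calls must be qualified. A minimal sketch of the canonicalizeNaN call site above:

    #include <cmath>

    double CanonicalizeNaN(double value, double canonical_nan) {
      // std::isnan is an overload set in <cmath>; the unqualified C99 macro
      // may not exist once <math.h> is no longer included.
      return std::isnan(value) ? canonical_nan : value;
    }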

131
deps/v8/src/arm/stub-cache-arm.cc

@@ -726,7 +726,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
   __ push(holder);
   __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
-  __ mov(scratch, Operand(ExternalReference::isolate_address()));
+  __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
   __ push(scratch);
 }
@@ -798,7 +798,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   } else {
     __ Move(r6, call_data);
   }
-  __ mov(r7, Operand(ExternalReference::isolate_address()));
+  __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
   // Store JS function, call data and isolate.
   __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
@@ -954,7 +954,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
-      __ InvokeFunction(optimization.constant_function(), arguments_,
+      Handle<JSFunction> function = optimization.constant_function();
+      ParameterCount expected(function);
+      __ InvokeFunction(function, expected, arguments_,
                         JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
@@ -1165,7 +1167,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
   }

   // Log the check depth.
-  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));

   if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
     // Check the holder map.
@@ -1293,11 +1295,11 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
     __ ldr(scratch3(), FieldMemOperand(scratch3(),
                                        ExecutableAccessorInfo::kDataOffset));
   } else {
-    __ Move(scratch3(), Handle<Object>(callback->data(),
-                                       callback->GetIsolate()));
+    __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
   }
   __ Push(reg, scratch3());
-  __ mov(scratch3(), Operand(ExternalReference::isolate_address()));
+  __ mov(scratch3(),
+         Operand(ExternalReference::isolate_address(isolate())));
   __ Push(scratch3(), name());
   __ mov(r0, sp);  // r0 = Handle<Name>
@@ -1313,10 +1315,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
   const int kStackUnwindSpace = 5;
   Address getter_address = v8::ToCData<Address>(callback->getter());
   ApiFunction fun(getter_address);
-  ExternalReference ref =
-      ExternalReference(&fun,
-                        ExternalReference::DIRECT_GETTER_CALL,
-                        masm()->isolate());
+  ExternalReference ref = ExternalReference(
+      &fun, ExternalReference::DIRECT_GETTER_CALL, isolate());
   __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
@@ -1404,7 +1404,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
     ExternalReference ref =
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
-                          masm()->isolate());
+                          isolate());
     __ TailCallExternalReference(ref, 6, 1);
   }
 }
@@ -1620,8 +1620,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ b(gt, &call_builtin);

       __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ StoreNumberToDoubleElements(
-          r4, r0, elements, r5, r2, r3, r9,
-          &call_builtin, argc * kDoubleSize);
+      __ StoreNumberToDoubleElements(r4, r0, elements, r5,
+                                     &call_builtin, argc * kDoubleSize);

       // Save new length.
@@ -1715,11 +1714,10 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ CheckFastObjectElements(r7, r7, &call_builtin);

       __ bind(&no_fast_elements_check);

-      Isolate* isolate = masm()->isolate();
       ExternalReference new_space_allocation_top =
-          ExternalReference::new_space_allocation_top_address(isolate);
+          ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
-          ExternalReference::new_space_allocation_limit_address(isolate);
+          ExternalReference::new_space_allocation_limit_address(isolate());

       const int kAllocationDelta = 4;
       // Load top and check if it is the end of elements.
@@ -1758,10 +1756,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
     __ Ret();
   }
   __ bind(&call_builtin);
-  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
-                                                 masm()->isolate()),
-                               argc + 1,
-                               1);
+  __ TailCallExternalReference(
+      ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
 }
@@ -1845,10 +1841,8 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
   __ Ret();

   __ bind(&call_builtin);
-  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
-                                                 masm()->isolate()),
-                               argc + 1,
-                               1);
+  __ TailCallExternalReference(
+      ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);

   // Handle call cache miss.
   __ bind(&miss);
@@ -2085,8 +2079,9 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  ParameterCount expected(function);
+  __ InvokeFunction(function, expected, arguments(),
+                    JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);

   __ bind(&miss);
   // r2: function name.
@@ -2196,8 +2191,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  ParameterCount expected(function);
+  __ InvokeFunction(function, expected, arguments(),
+                    JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);

   __ bind(&miss);
   // r2: function name.
@@ -2295,8 +2291,9 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  ParameterCount expected(function);
+  __ InvokeFunction(function, expected, arguments(),
+                    JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);

   __ bind(&miss);
   // r2: function name.
@@ -2384,8 +2381,7 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
   switch (check) {
     case RECEIVER_MAP_CHECK:
-      __ IncrementCounter(masm()->isolate()->counters()->call_const(),
-                          1, r0, r3);
+      __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3);

       // Check that the maps haven't changed.
       CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
@@ -2470,8 +2466,9 @@ void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+  ParameterCount expected(function);
+  __ InvokeFunction(function, expected, arguments(),
+                    JUMP_FUNCTION, NullCallWrapper(), call_kind);
 }
@@ -2574,7 +2571,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

   // Jump to the cached code (tail call).
-  Counters* counters = masm()->isolate()->counters();
+  Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
   ParameterCount expected(function->shared()->formal_parameter_count());
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2617,8 +2614,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
-      ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
-                        masm()->isolate());
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
   __ TailCallExternalReference(store_callback_property, 4, 1);

   // Handle store cache miss.
@@ -2653,8 +2649,9 @@ void StoreStubCompiler::GenerateStoreViaSetter(
     // Call the JavaScript setter with receiver and value on the stack.
     __ Push(r1, r0);
     ParameterCount actual(1);
-    __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
-                      CALL_AS_METHOD);
+    ParameterCount expected(setter);
+    __ InvokeFunction(setter, expected, actual,
+                      CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
   } else {
     // If we generate a global code snippet for deoptimization only, remember
     // the place to continue after deoptimization.
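Across these stub-compiler hunks the InvokeFunction calls gain an explicit `expected` ParameterCount built from the callee, alongside the existing `actual` count. The invoke prologue compares the two and routes through the arguments adaptor when they differ. Schematically (a sketch of the decision only, with placeholder names, not the generated code):

    void InvokePrologue(int expected, int actual) {
      if (expected == actual) {
        // Direct call: jump straight to the function's code.
      } else {
        // Mismatch: go through the arguments adaptor trampoline, which
        // pads missing arguments with undefined or drops extras.
      }
    }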
@@ -2700,8 +2697,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
-      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
-                        masm()->isolate());
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
   __ TailCallExternalReference(store_ic_property, 4, 1);

   // Handle store cache miss.
@@ -2740,7 +2736,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
          FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));

   // Cells are always rescanned, so no write barrier here.
-  Counters* counters = masm()->isolate()->counters();
+  Counters* counters = isolate()->counters();
   __ IncrementCounter(
       counters->named_store_global_inline(), 1, scratch1(), scratch2());
   __ Ret();
@@ -2838,8 +2834,9 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
     // Call the JavaScript getter with the receiver on the stack.
     __ push(r0);
     ParameterCount actual(0);
-    __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
-                      CALL_AS_METHOD);
+    ParameterCount expected(getter);
+    __ InvokeFunction(getter, expected, actual,
+                      CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
   } else {
     // If we generate a global code snippet for deoptimization only, remember
     // the place to continue after deoptimization.
@@ -2884,7 +2881,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
   HandlerFrontendFooter(&success, &miss);
   __ bind(&success);

-  Counters* counters = masm()->isolate()->counters();
+  Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
   __ mov(r0, r4);
   __ Ret();
@@ -3088,7 +3085,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
   // Remove caller arguments and receiver from the stack and return.
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
   __ add(sp, sp, Operand(kPointerSize));
-  Counters* counters = masm()->isolate()->counters();
+  Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
   __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
   __ Jump(lr);
@@ -3096,7 +3093,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
+  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
   __ Jump(code, RelocInfo::CODE_TARGET);

   // Return the generated code.
@@ -3246,14 +3243,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       StoreIntAsFloat(masm, r3, r4, r5, r7);
       break;
     case EXTERNAL_DOUBLE_ELEMENTS:
+      __ vmov(s2, r5);
+      __ vcvt_f64_s32(d0, s2);
       __ add(r3, r3, Operand(key, LSL, 2));
       // r3: effective address of the double element
-      FloatingPointHelper::Destination destination;
-      destination = FloatingPointHelper::kVFPRegisters;
-      FloatingPointHelper::ConvertIntToDouble(
-          masm, r5, destination,
-          d0, r6, r7,  // These are: double_dst, dst_mantissa, dst_exponent.
-          r4, s2);  // These are: scratch2, single_scratch.
       __ vstr(d0, r3, 0);
       break;
     case FAST_ELEMENTS:
@@ -3303,7 +3296,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     // not include -kHeapObjectTag into it.
     __ sub(r5, value, Operand(kHeapObjectTag));
     __ vldr(d0, r5, HeapNumber::kValueOffset);
-    __ ECMAToInt32(r5, d0, d1, r6, r7, r9);
+    __ ECMAToInt32(r5, d0, r6, r7, r9, d1);

     switch (elements_kind) {
       case EXTERNAL_BYTE_ELEMENTS:
@@ -3537,9 +3530,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   //  -- r3    : scratch (elements backing store)
   //  -- r4    : scratch
   //  -- r5    : scratch
-  //  -- r6    : scratch
-  //  -- r7    : scratch
-  //  -- r9    : scratch
   // -----------------------------------
   Label miss_force_generic, transition_elements_kind, grow, slow;
   Label finish_store, check_capacity;
@@ -3550,9 +3540,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   Register elements_reg = r3;
   Register scratch1 = r4;
   Register scratch2 = r5;
-  Register scratch3 = r6;
-  Register scratch4 = r7;
-  Register scratch5 = r9;
   Register length_reg = r7;

   // This stub is meant to be tail-jumped to, the receiver must already
@@ -3581,15 +3568,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   }

   __ bind(&finish_store);
-  __ StoreNumberToDoubleElements(value_reg,
-                                 key_reg,
-                                 // All registers after this are overwritten.
-                                 elements_reg,
-                                 scratch1,
-                                 scratch3,
-                                 scratch4,
-                                 scratch2,
-                                 &transition_elements_kind);
+  __ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
+                                 scratch1, &transition_elements_kind);
   __ Ret();

   // Handle store cache miss, replacing the ic with the generic stub.
@@ -3636,15 +3616,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
            FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));

     __ mov(scratch1, elements_reg);
-    __ StoreNumberToDoubleElements(value_reg,
-                                   key_reg,
-                                   // All registers after this are overwritten.
-                                   scratch1,
-                                   scratch2,
-                                   scratch3,
-                                   scratch4,
-                                   scratch5,
-                                   &transition_elements_kind);
+    __ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
+                                   scratch2, &transition_elements_kind);

     __ mov(scratch1, Operand(kHoleNanLower32));
     __ mov(scratch2, Operand(kHoleNanUpper32));

17
deps/v8/src/assembler.cc

@@ -34,7 +34,7 @@
 #include "assembler.h"

-#include <math.h>  // For cos, log, pow, sin, tan, etc.
+#include <cmath>
 #include "api.h"
 #include "builtins.h"
 #include "counters.h"
@@ -969,8 +969,8 @@ ExternalReference::ExternalReference(const Runtime::Function* f,
     : address_(Redirect(isolate, f->entry)) {}


-ExternalReference ExternalReference::isolate_address() {
-  return ExternalReference(Isolate::Current());
+ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
+  return ExternalReference(isolate);
 }
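This hunk is the API change driving the many isolate_address() call-site edits above: the Isolate becomes an explicit argument instead of a hidden Isolate::Current() lookup in thread-local storage. An illustrative analogue of the before/after shape (not v8 code; all names here are invented for the example):

    struct Isolate;  // opaque in this sketch

    struct ExternalRef {
      explicit ExternalRef(void* raw) : address_(raw) {}
      // After: the caller supplies the isolate it already holds...
      static ExternalRef isolate_address(Isolate* isolate) {
        return ExternalRef(isolate);
      }
      // ...instead of the old shape, which hid a TLS lookup:
      //   static ExternalRef isolate_address() {
      //     return ExternalRef(CurrentIsolateFromTLS());
      //   }
      void* address_;
    };

Making the dependency explicit keeps code generation callable from threads that are not the isolate's current thread, which is the recurring theme of this upgrade's isolate plumbing.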
@@ -1459,10 +1459,11 @@ double power_helper(double x, double y) {
     return power_double_int(x, y_int);  // Returns 1 if exponent is 0.
   }
   if (y == 0.5) {
-    return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0);  // Convert -0 to +0.
+    return (std::isinf(x)) ? V8_INFINITY
+                           : fast_sqrt(x + 0.0);  // Convert -0 to +0.
   }
   if (y == -0.5) {
-    return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0);  // Convert -0 to +0.
+    return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0);  // Convert -0 to +0.
   }
   return power_double_double(x, y);
 }
@@ -1492,7 +1493,7 @@ double power_double_double(double x, double y) {
     (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
   // MinGW64 has a custom implementation for pow. This handles certain
   // special cases that are different.
-  if ((x == 0.0 || isinf(x)) && isfinite(y)) {
+  if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
     double f;
     if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
   }
@@ -1505,7 +1506,9 @@ double power_double_double(double x, double y) {
   // The checks for special cases can be dropped in ia32 because it has already
   // been done in generated code before bailing out here.
-  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
+  if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
+    return OS::nan_value();
+  }
   return pow(x, y);
 }
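The `x + 0.0` in the 0.5 and -0.5 fast paths above is deliberate: under the default rounding mode, -0.0 + 0.0 evaluates to +0.0, so the addition normalizes a negative zero before the square root, and the isinf check covers pow(-inf, ±0.5), where sqrt alone would return NaN. A sketch with std::sqrt standing in for v8's fast_sqrt:

    #include <cmath>

    double PowHalf(double x) {
      if (std::isinf(x)) return INFINITY;  // pow(-inf, 0.5) is +inf as well
      return std::sqrt(x + 0.0);           // -0.0 + 0.0 == +0.0
    }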

4
deps/v8/src/assembler.h

@@ -50,7 +50,7 @@ class ApiFunction;
 namespace internal {

-struct StatsCounter;
+class StatsCounter;
 // -----------------------------------------------------------------------------
 // Platform independent assembler base class.
@@ -681,7 +681,7 @@ class ExternalReference BASE_EMBEDDED {
   explicit ExternalReference(const SCTableReference& table_ref);

   // Isolate::Current() as an external reference.
-  static ExternalReference isolate_address();
+  static ExternalReference isolate_address(Isolate* isolate);

   // One-of-a-kind references. These references are not part of a general
   // pattern. This means that they have to be added to the

18
deps/v8/src/ast.cc

@@ -27,7 +27,7 @@
 #include "ast.h"

-#include <math.h>  // For isfinite.
+#include <cmath>  // For isfinite.
 #include "builtins.h"
 #include "code-stubs.h"
 #include "conversions.h"
@@ -70,6 +70,11 @@ bool Expression::IsNullLiteral() {
 }


+bool Expression::IsUndefinedLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsUndefined();
+}
+
+
 VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
     : Expression(isolate),
       name_(var->name()),
@@ -241,8 +246,8 @@ bool IsEqualNumber(void* first, void* second) {
   if (h2->IsSmi()) return false;
   Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
   Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
-  ASSERT(isfinite(n1->value()));
-  ASSERT(isfinite(n2->value()));
+  ASSERT(std::isfinite(n1->value()));
+  ASSERT(std::isfinite(n2->value()));
   return n1->value() == n2->value();
 }
@@ -352,7 +357,8 @@ static bool IsVoidOfLiteral(Expression* expr) {
 }


-// Check for the pattern: void <literal> equals <expression>
+// Check for the pattern: void <literal> equals <expression> or
+// undefined equals <expression>
 static bool MatchLiteralCompareUndefined(Expression* left,
                                          Token::Value op,
                                          Expression* right,
@@ -361,6 +367,10 @@ static bool MatchLiteralCompareUndefined(Expression* left,
     *expr = right;
     return true;
   }
+  if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) {
+    *expr = right;
+    return true;
+  }
   return false;
 }
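With IsUndefinedLiteral wired in, the matcher treats both spellings of an undefined comparison the same way: a comparison `X op Y` reduces to an undefined check on Y when op is an equality operator and X is either a `void <literal>` expression or the undefined literal. As a compact boolean sketch of the matcher's shape (illustrative, with invented parameter names):

    bool MatchesUndefinedCompare(bool x_is_void_of_literal,
                                 bool x_is_undefined_literal,
                                 bool op_is_equality) {
      return op_is_equality &&
             (x_is_void_of_literal || x_is_undefined_literal);
    }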

35
deps/v8/src/ast.h

@@ -339,6 +339,9 @@ class Expression: public AstNode {
   // True iff the expression is the null literal.
   bool IsNullLiteral();

+  // True iff the expression is the undefined literal.
+  bool IsUndefinedLiteral();
+
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
@@ -939,15 +942,18 @@ class WithStatement: public Statement {
  public:
   DECLARE_NODE_TYPE(WithStatement)

+  Scope* scope() { return scope_; }
   Expression* expression() const { return expression_; }
   Statement* statement() const { return statement_; }

  protected:
-  WithStatement(Expression* expression, Statement* statement)
-      : expression_(expression),
+  WithStatement(Scope* scope, Expression* expression, Statement* statement)
+      : scope_(scope),
+        expression_(expression),
         statement_(statement) { }

  private:
+  Scope* scope_;
   Expression* expression_;
   Statement* statement_;
 };
@@ -1964,27 +1970,34 @@ class Yield: public Expression {
  public:
   DECLARE_NODE_TYPE(Yield)

+  enum Kind {
+    INITIAL,     // The initial yield that returns the unboxed generator object.
+    SUSPEND,     // A normal yield: { value: EXPRESSION, done: false }
+    DELEGATING,  // A yield*.
+    FINAL        // A return: { value: EXPRESSION, done: true }
+  };
+
   Expression* generator_object() const { return generator_object_; }
   Expression* expression() const { return expression_; }
-  bool is_delegating_yield() const { return is_delegating_yield_; }
+  Kind yield_kind() const { return yield_kind_; }
   virtual int position() const { return pos_; }

  protected:
   Yield(Isolate* isolate,
         Expression* generator_object,
         Expression* expression,
-        bool is_delegating_yield,
+        Kind yield_kind,
         int pos)
       : Expression(isolate),
         generator_object_(generator_object),
         expression_(expression),
-        is_delegating_yield_(is_delegating_yield),
+        yield_kind_(yield_kind),
         pos_(pos) { }

  private:
   Expression* generator_object_;
   Expression* expression_;
-  bool is_delegating_yield_;
+  Kind yield_kind_;
   int pos_;
 };
@@ -2777,9 +2790,11 @@ class AstNodeFactory BASE_EMBEDDED {
     VISIT_AND_RETURN(ReturnStatement, stmt)
   }

-  WithStatement* NewWithStatement(Expression* expression,
+  WithStatement* NewWithStatement(Scope* scope,
+                                  Expression* expression,
                                   Statement* statement) {
-    WithStatement* stmt = new(zone_) WithStatement(expression, statement);
+    WithStatement* stmt = new(zone_) WithStatement(
+        scope, expression, statement);
     VISIT_AND_RETURN(WithStatement, stmt)
   }
@@ -2966,10 +2981,10 @@ class AstNodeFactory BASE_EMBEDDED {
   Yield* NewYield(Expression *generator_object,
                   Expression* expression,
-                  bool is_delegating_yield,
+                  Yield::Kind yield_kind,
                   int pos) {
     Yield* yield = new(zone_) Yield(
-        isolate_, generator_object, expression, is_delegating_yield, pos);
+        isolate_, generator_object, expression, yield_kind, pos);
     VISIT_AND_RETURN(Yield, yield)
   }
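Hypothetical parser-side usage of the new factory signature, matching the Yield::Kind enum introduced above. The names `factory`, `generator`, `expr`, and `pos` are placeholders for this sketch, not identifiers from the diff:

    Yield* initial    = factory->NewYield(generator, generator, Yield::INITIAL, pos);
    Yield* suspend    = factory->NewYield(generator, expr, Yield::SUSPEND, pos);     // yield expr
    Yield* delegating = factory->NewYield(generator, expr, Yield::DELEGATING, pos);  // yield* expr
    Yield* returning  = factory->NewYield(generator, expr, Yield::FINAL, pos);       // return expr

Replacing the old boolean with a four-way kind lets codegen distinguish the implicit first suspension and generator returns from ordinary yields without extra AST flags.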

2
deps/v8/src/bignum-dtoa.cc

@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-#include <math.h>
+#include <cmath>

 #include "../include/v8stdint.h"
 #include "checks.h"

23
deps/v8/src/bootstrapper.cc

@@ -43,6 +43,7 @@
 #include "extensions/externalize-string-extension.h"
 #include "extensions/gc-extension.h"
 #include "extensions/statistics-extension.h"
+#include "code-stubs.h"

 namespace v8 {
 namespace internal {
@@ -862,8 +863,6 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
         InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
                         isolate->initial_object_prototype(),
                         Builtins::kArrayCode, true);
-    array_function->shared()->set_construct_stub(
-        isolate->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();

     // This seems a bit hackish, but we need to make sure Array.length
@@ -890,6 +889,17 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
     // as the constructor. 'Array' property on a global object can be
     // overwritten by JS code.
     native_context()->set_array_function(*array_function);
+
+    if (FLAG_optimize_constructed_arrays) {
+      // Cache the array maps, needed by ArrayConstructorStub
+      CacheInitialJSArrayMaps(native_context(), initial_map);
+      ArrayConstructorStub array_constructor_stub(isolate);
+      Handle<Code> code = array_constructor_stub.GetCode(isolate);
+      array_function->shared()->set_construct_stub(*code);
+    } else {
+      array_function->shared()->set_construct_stub(
+          isolate->builtins()->builtin(Builtins::kCommonArrayConstructCode));
+    }
   }

   {  // --- N u m b e r ---
@@ -1303,10 +1313,12 @@ void Genesis::InitializeExperimentalGlobal() {
   if (FLAG_harmony_typed_arrays) {
     {  // -- A r r a y B u f f e r
+      Handle<JSFunction> array_buffer_fun =
           InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
                           JSArrayBuffer::kSize,
                           isolate()->initial_object_prototype(),
                           Builtins::kIllegal, true);
+      native_context()->set_array_buffer_fun(*array_buffer_fun);
     }
     {
       // -- T y p e d A r r a y s
@@ -1533,13 +1545,8 @@ Handle<JSFunction> Genesis::InstallInternalArray(
       factory()->NewJSObject(isolate()->object_function(), TENURED);
   SetPrototype(array_function, prototype);

-  // TODO(mvstanton): For performance reasons, this code would have to
-  // be changed to successfully run with FLAG_optimize_constructed_arrays.
-  // The next checkin to enable FLAG_optimize_constructed_arrays by
-  // default will address this.
-  CHECK(!FLAG_optimize_constructed_arrays);
   array_function->shared()->set_construct_stub(
-      isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
+      isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));

   array_function->shared()->DontAdaptArguments();

14
deps/v8/src/builtins.cc

@@ -192,9 +192,8 @@ BUILTIN(EmptyFunction) {
 RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
   CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
-  // ASSERT(args.length() == 3);
-  Handle<JSFunction> function = args.at<JSFunction>(1);
-  Handle<Object> type_info = args.at<Object>(2);
+  ASSERT(args.length() == 2);
+  Handle<Object> type_info = args.at<Object>(1);

   JSArray* array = NULL;
   bool holey = false;
@@ -226,8 +225,7 @@ RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
     }
   }

-  ASSERT(function->has_initial_map());
-  ElementsKind kind = function->initial_map()->elements_kind();
+  ElementsKind kind = GetInitialFastElementsKind();
   if (holey) {
     kind = GetHoleyElementsKind(kind);
   }
@@ -934,7 +932,7 @@ BUILTIN(ArraySplice) {
     if (start < kMinInt || start > kMaxInt) {
      return CallJsBuiltin(isolate, "ArraySplice", args);
     }
-    relative_start = static_cast<int>(start);
+    relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
   } else if (!arg1->IsUndefined()) {
     return CallJsBuiltin(isolate, "ArraySplice", args);
   }
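The std::isnan guard in the ArraySplice hunk is not cosmetic: NaN compares false against both bounds, so it survives the kMinInt/kMaxInt range check above it, and casting NaN to an integer type is undefined behavior in C++. ECMA-262's ToInteger maps NaN to 0, which the new code implements explicitly. Sketch of the call site's logic:

    #include <cmath>
    #include <cstdint>

    int32_t RelativeStart(double start) {
      // NaN < kMinInt and NaN > kMaxInt are both false, so NaN reaches this
      // point; map it to 0 per ToInteger instead of invoking UB in the cast.
      return std::isnan(start) ? 0 : static_cast<int32_t>(start);
    }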
@@ -1321,7 +1319,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
   v8::Handle<v8::Value> value;
   {
     // Leaving JavaScript.
-    VMState state(isolate, EXTERNAL);
+    VMState<EXTERNAL> state(isolate);
     ExternalCallbackScope call_scope(isolate,
                                      v8::ToCData<Address>(callback_obj));
     value = callback(new_args);
@@ -1398,7 +1396,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
   v8::Handle<v8::Value> value;
   {
     // Leaving JavaScript.
-    VMState state(isolate, EXTERNAL);
+    VMState<EXTERNAL> state(isolate);
     ExternalCallbackScope call_scope(isolate,
                                      v8::ToCData<Address>(callback_obj));
     value = callback(new_args);
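In this release VMState becomes a class template over the state tag, so `VMState state(isolate, EXTERNAL)` turns into `VMState<EXTERNAL> state(isolate)` and the enter/leave bookkeeping can be specialized, and largely inlined, per tag. An illustrative RAII analogue of the shape (not the real v8 declaration):

    class Isolate;  // opaque in this sketch

    enum StateTag { JS, GC, EXTERNAL };

    template <StateTag Tag>
    class VMState {
     public:
      explicit VMState(Isolate* isolate) : isolate_(isolate) {
        // enter state Tag, remembering the previous one
      }
      ~VMState() {
        // restore the previous state on scope exit
      }
     private:
      Isolate* isolate_;
    };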

4
deps/v8/src/builtins.h

@@ -199,7 +199,7 @@ enum BuiltinExtraArguments {
                                     Code::kNoExtraICState)              \
   V(ArrayCode,                      BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
-  V(ArrayConstructCode,             BUILTIN, UNINITIALIZED,             \
+  V(CommonArrayConstructCode,       BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
                                                                         \
   V(StringConstructCode,            BUILTIN, UNINITIALIZED,             \
@@ -388,7 +388,7 @@ class Builtins {
   static void Generate_InternalArrayCode(MacroAssembler* masm);
   static void Generate_ArrayCode(MacroAssembler* masm);
-  static void Generate_ArrayConstructCode(MacroAssembler* masm);
+  static void Generate_CommonArrayConstructCode(MacroAssembler* masm);
   static void Generate_StringConstructCode(MacroAssembler* masm);
   static void Generate_OnStackReplacement(MacroAssembler* masm);

2
deps/v8/src/cached-powers.cc

@@ -26,8 +26,8 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdarg.h>
-#include <math.h>
 #include <limits.h>
+#include <cmath>
 
 #include "../include/v8stdint.h"
 #include "globals.h"

253
deps/v8/src/code-stubs-hydrogen.cc

@@ -61,11 +61,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
         arguments_length_(NULL),
         info_(stub, isolate),
         context_(NULL) {
-    int major_key = stub->MajorKey();
-    descriptor_ = isolate->code_stub_interface_descriptor(major_key);
-    if (descriptor_->register_param_count_ < 0) {
-      stub->InitializeInterfaceDescriptor(isolate, descriptor_);
-    }
+    descriptor_ = stub->GetInterfaceDescriptor(isolate);
     parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
   }
   virtual bool BuildGraph();
@ -96,6 +92,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
bool CodeStubGraphBuilderBase::BuildGraph() { bool CodeStubGraphBuilderBase::BuildGraph() {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
if (FLAG_trace_hydrogen) { if (FLAG_trace_hydrogen) {
const char* name = CodeStub::MajorName(stub()->MajorKey(), false); const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
PrintF("-----------------------------------------------------------\n"); PrintF("-----------------------------------------------------------\n");
@ -130,9 +129,10 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
stack_parameter_count = new(zone) HParameter(param_count, stack_parameter_count = new(zone) HParameter(param_count,
HParameter::REGISTER_PARAMETER, HParameter::REGISTER_PARAMETER,
Representation::Integer32()); Representation::Integer32());
stack_parameter_count->set_type(HType::Smi());
// it's essential to bind this value to the environment in case of deopt // it's essential to bind this value to the environment in case of deopt
start_environment->Bind(param_count, stack_parameter_count);
AddInstruction(stack_parameter_count); AddInstruction(stack_parameter_count);
start_environment->Bind(param_count, stack_parameter_count);
arguments_length_ = stack_parameter_count; arguments_length_ = stack_parameter_count;
} else { } else {
ASSERT(descriptor_->environment_length() == param_count); ASSERT(descriptor_->environment_length() == param_count);
@@ -154,17 +154,26 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
   // arguments above
   HInstruction* stack_pop_count = stack_parameter_count;
   if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
+    if (!stack_parameter_count->IsConstant() &&
+        descriptor_->hint_stack_parameter_count_ < 0) {
       HInstruction* amount = graph()->GetConstant1();
       stack_pop_count = AddInstruction(
           HAdd::New(zone, context_, stack_parameter_count, amount));
       stack_pop_count->ChangeRepresentation(Representation::Integer32());
      stack_pop_count->ClearFlag(HValue::kCanOverflow);
+    } else {
+      int count = descriptor_->hint_stack_parameter_count_;
+      stack_pop_count = AddInstruction(new(zone)
+          HConstant(count, Representation::Integer32()));
+    }
   }
 
+  if (!current_block()->IsFinished()) {
     HReturn* hreturn_instruction = new(zone) HReturn(return_value,
                                                      context_,
                                                      stack_pop_count);
     current_block()->Finish(hreturn_instruction);
+  }
   return true;
 }
@@ -176,23 +185,96 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
       : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
 
  protected:
-  virtual HValue* BuildCodeStub();
+  virtual HValue* BuildCodeStub() {
+    if (casted_stub()->IsMiss()) {
+      return BuildCodeInitializedStub();
+    } else {
+      return BuildCodeUninitializedStub();
+    }
+  }
+
+  virtual HValue* BuildCodeInitializedStub() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  virtual HValue* BuildCodeUninitializedStub() {
+    // Force a deopt that falls back to the runtime.
+    HValue* undefined = graph()->GetConstantUndefined();
+    IfBuilder builder(this);
+    builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
+    builder.Then();
+    builder.ElseDeopt();
+    return undefined;
+  }
+
   Stub* casted_stub() { return static_cast<Stub*>(stub()); }
 };
 
 
+Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
+  Factory* factory = isolate->factory();
+
+  // Generate the new code.
+  MacroAssembler masm(isolate, NULL, 256);
+
+  {
+    // Update the static counter each time a new code stub is generated.
+    isolate->counters()->code_stubs()->Increment();
+
+    // Nested stubs are not allowed for leaves.
+    AllowStubCallsScope allow_scope(&masm, false);
+
+    // Generate the code for the stub.
+    masm.set_generating_stub(true);
+    NoCurrentFrameScope scope(&masm);
+    GenerateLightweightMiss(&masm);
+  }
+
+  // Create the code object.
+  CodeDesc desc;
+  masm.GetCode(&desc);
+
+  // Copy the generated code into a heap object.
+  Code::Flags flags = Code::ComputeFlags(
+      GetCodeKind(),
+      GetICState(),
+      GetExtraICState(),
+      GetStubType(), -1);
+  Handle<Code> new_object = factory->NewCode(
+      desc, flags, masm.CodeObject(), NeedsImmovableCode());
+  return new_object;
+}
+
+
 template <class Stub>
 static Handle<Code> DoGenerateCode(Stub* stub) {
+  Isolate* isolate = Isolate::Current();
+  CodeStub::Major major_key =
+      static_cast<HydrogenCodeStub*>(stub)->MajorKey();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate->code_stub_interface_descriptor(major_key);
+  if (descriptor->register_param_count_ < 0) {
+    stub->InitializeInterfaceDescriptor(isolate, descriptor);
+  }
+  // The miss case without stack parameters can use a light-weight stub to
+  // enter the runtime that is significantly faster than using the standard
+  // stub-failure deopt mechanism.
+  if (stub->IsMiss() && descriptor->stack_parameter_count_ == NULL) {
+    return stub->GenerateLightweightMissCode(isolate);
+  } else {
     CodeStubGraphBuilder<Stub> builder(stub);
     LChunk* chunk = OptimizeGraph(builder.CreateGraph());
     return chunk->Codegen();
+  }
 }
 
 
 template <>
 HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
   Zone* zone = this->zone();
   Factory* factory = isolate()->factory();
+  HValue* undefined = graph()->GetConstantUndefined();
   AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
   FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
   int length = casted_stub()->length();
@@ -203,8 +285,9 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
                                           NULL,
                                           FAST_ELEMENTS));
 
-  CheckBuilder builder(this);
-  builder.CheckNotUndefined(boilerplate);
+  IfBuilder checker(this);
+  checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate, undefined);
+  checker.Then();
 
   if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
     HValue* elements =
@@ -243,14 +326,14 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
                                           length));
   }
 
-  return environment()->Pop();
+  HValue* result = environment()->Pop();
+  checker.ElseDeopt();
+  return result;
 }
 
 
 Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
-  CodeStubGraphBuilder<FastCloneShallowArrayStub> builder(this);
-  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  return chunk->Codegen();
+  return DoGenerateCode(this);
 }
@@ -258,6 +341,7 @@ template <>
 HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
   Zone* zone = this->zone();
   Factory* factory = isolate()->factory();
+  HValue* undefined = graph()->GetConstantUndefined();
 
   HInstruction* boilerplate =
       AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
@@ -265,8 +349,9 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
                                           NULL,
                                           FAST_ELEMENTS));
 
-  CheckBuilder builder(this);
-  builder.CheckNotUndefined(boilerplate);
+  IfBuilder checker(this);
+  checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate, undefined);
+  checker.And();
 
   int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
   HValue* boilerplate_size =
@@ -274,7 +359,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
   HValue* size_in_words =
       AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2,
                                          Representation::Integer32()));
-  builder.CheckIntegerEq(boilerplate_size, size_in_words);
+  checker.IfCompare(boilerplate_size, size_in_words, Token::EQ);
+  checker.Then();
 
   HValue* size_in_bytes =
       AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
@@ -298,7 +384,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
                                                  true, i));
   }
 
-  builder.End();
+  checker.ElseDeopt();
   return object;
 }
@@ -401,10 +487,18 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
 
 template <>
 HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
-  HInstruction* deopt = new(zone()) HSoftDeoptimize();
-  AddInstruction(deopt);
-  current_block()->MarkAsDeoptimizing();
-  return GetParameter(0);
+  // ----------- S t a t e -------------
+  //  -- Parameter 1 : type info cell
+  //  -- Parameter 0 : constructor
+  // -----------------------------------
+  // Get the right map
+  // Should be a constant
+  JSArrayBuilder array_builder(
+      this,
+      casted_stub()->elements_kind(),
+      GetParameter(ArrayConstructorStubBase::kPropertyCell),
+      casted_stub()->mode());
+  return array_builder.AllocateEmptyArray();
 }
@@ -416,10 +510,49 @@ Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
 template <>
 HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
     BuildCodeStub() {
-  HInstruction* deopt = new(zone()) HSoftDeoptimize();
-  AddInstruction(deopt);
-  current_block()->MarkAsDeoptimizing();
-  return GetParameter(0);
+  // Smi check and range check on the input arg.
+  HValue* constant_one = graph()->GetConstant1();
+  HValue* constant_zero = graph()->GetConstant0();
+
+  HInstruction* elements = AddInstruction(
+      new(zone()) HArgumentsElements(false));
+  HInstruction* argument = AddInstruction(
+      new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero));
+
+  HConstant* max_alloc_length =
+      new(zone()) HConstant(JSObject::kInitialMaxFastElementArray,
+                            Representation::Tagged());
+  AddInstruction(max_alloc_length);
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  HConstant* initial_capacity_node =
+      new(zone()) HConstant(initial_capacity, Representation::Tagged());
+  AddInstruction(initial_capacity_node);
+
+  // Since we're forcing Integer32 representation for this HBoundsCheck,
+  // there's no need to Smi-check the index.
+  HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length,
+                                             ALLOW_SMI_KEY,
+                                             Representation::Tagged());
+  IfBuilder if_builder(this);
+  if_builder.IfCompare(checked_arg, constant_zero, Token::EQ);
+  if_builder.Then();
+  Push(initial_capacity_node);  // capacity
+  Push(constant_zero);  // length
+  if_builder.Else();
+  Push(checked_arg);  // capacity
+  Push(checked_arg);  // length
+  if_builder.End();
+
+  // Figure out total size
+  HValue* length = Pop();
+  HValue* capacity = Pop();
+
+  JSArrayBuilder array_builder(
+      this,
+      casted_stub()->elements_kind(),
+      GetParameter(ArrayConstructorStubBase::kPropertyCell),
+      casted_stub()->mode());
+  return array_builder.AllocateArray(capacity, length, true);
 }
@@ -430,10 +563,46 @@ Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
 template <>
 HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
-  HInstruction* deopt = new(zone()) HSoftDeoptimize();
-  AddInstruction(deopt);
-  current_block()->MarkAsDeoptimizing();
-  return GetParameter(0);
+  ElementsKind kind = casted_stub()->elements_kind();
+  HValue* length = GetArgumentsLength();
+
+  JSArrayBuilder array_builder(
+      this,
+      kind,
+      GetParameter(ArrayConstructorStubBase::kPropertyCell),
+      casted_stub()->mode());
+
+  // We need to fill with the hole if it's a smi array in the multi-argument
+  // case because we might have to bail out while copying arguments into
+  // the array because they aren't compatible with a smi array.
+  // If it's a double array, no problem, and if it's fast then no
+  // problem either because doubles are boxed.
+  bool fill_with_hole = IsFastSmiElementsKind(kind);
+  HValue* new_object = array_builder.AllocateArray(length,
+                                                   length,
+                                                   fill_with_hole);
+  HValue* elements = array_builder.GetElementsLocation();
+  ASSERT(elements != NULL);
+
+  // Now populate the elements correctly.
+  LoopBuilder builder(this,
+                      context(),
+                      LoopBuilder::kPostIncrement);
+  HValue* start = graph()->GetConstant0();
+  HValue* key = builder.BeginBody(start, length, Token::LT);
+  HInstruction* argument_elements = AddInstruction(
+      new(zone()) HArgumentsElements(false));
+  HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt(
+      argument_elements, length, key));
+
+  // Checks to prevent incompatible stores
+  if (IsFastSmiElementsKind(kind)) {
+    AddInstruction(new(zone()) HCheckSmi(argument));
+  }
+
+  AddInstruction(new(zone()) HStoreKeyed(elements, key, argument, kind));
+  builder.EndBody();
+  return new_object;
 }
@@ -441,4 +610,30 @@ Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
   return DoGenerateCode(this);
 }
 
+
+template <>
+HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeUninitializedStub() {
+  CompareNilICStub* stub = casted_stub();
+  HIfContinuation continuation;
+  Handle<Map> sentinel_map(graph()->isolate()->heap()->meta_map());
+  BuildCompareNil(GetParameter(0), stub->GetKind(),
+                  stub->GetTypes(), sentinel_map,
+                  RelocInfo::kNoPosition, &continuation);
+  IfBuilder if_nil(this, &continuation);
+  if_nil.Then();
+  if (continuation.IsFalseReachable()) {
+    if_nil.Else();
+    if_nil.Return(graph()->GetConstantSmi0());
+  }
+  if_nil.End();
+  return continuation.IsTrueReachable()
+      ? graph()->GetConstantSmi1()
+      : graph()->GetConstantUndefined();
+}
+
+
+Handle<Code> CompareNilICStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
 } }  // namespace v8::internal
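
The CheckBuilder-to-IfBuilder migration running through this file replaces flat "check or die" assertions with explicit control flow: the fast path is built under Then(), and ElseDeopt() routes everything else back to the runtime. A toy sketch of that shape in plain C++ (a deliberately loose analogy: Hydrogen's real IfBuilder emits graph nodes, while this models the bailout as an exception):

    #include <cstdio>
    #include <functional>
    #include <stdexcept>

    // Toy control-flow builder loosely shaped like Hydrogen's IfBuilder.
    class IfBuilderSketch {
     public:
      explicit IfBuilderSketch(bool condition) : condition_(condition) {}
      IfBuilderSketch& Then(const std::function<void()>& body) {
        if (condition_) body();  // fast path
        return *this;
      }
      void ElseDeopt() {
        // Modeled bailout; a real deopt re-enters the runtime instead.
        if (!condition_) throw std::runtime_error("deopt to runtime");
      }
     private:
      bool condition_;
    };

    int main() {
      bool boilerplate_defined = true;
      IfBuilderSketch checker(boilerplate_defined);
      checker.Then([] { std::puts("fast path: clone boilerplate"); });
      checker.ElseDeopt();  // no-op here since the condition held
      return 0;
    }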

90
deps/v8/src/code-stubs.cc

@@ -37,6 +37,17 @@
 namespace v8 {
 namespace internal {
 
+
+CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
+    : register_param_count_(-1),
+      stack_parameter_count_(NULL),
+      hint_stack_parameter_count_(-1),
+      function_mode_(NOT_JS_FUNCTION_STUB_MODE),
+      register_params_(NULL),
+      deoptimization_handler_(NULL),
+      miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()) { }
+
+
 bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
   UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
   int index = stubs->FindEntry(GetKey());
@@ -397,6 +408,42 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
 }
 
 
+CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags(
+    Code::ExtraICState extra_ic_state,
+    Handle<Object> object,
+    bool* already_monomorphic) {
+  Types types = TypesField::decode(extra_ic_state);
+  NilValue nil = NilValueField::decode(extra_ic_state);
+  EqualityKind kind = EqualityKindField::decode(extra_ic_state);
+  ASSERT(types != CompareNilICStub::kFullCompare);
+  *already_monomorphic =
+      (types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0;
+  if (kind == kStrictEquality) {
+    if (nil == kNullValue) {
+      return CompareNilICStub::kCompareAgainstNull;
+    } else {
+      return CompareNilICStub::kCompareAgainstUndefined;
+    }
+  } else {
+    if (object->IsNull()) {
+      types = static_cast<CompareNilICStub::Types>(
+          types | CompareNilICStub::kCompareAgainstNull);
+    } else if (object->IsUndefined()) {
+      types = static_cast<CompareNilICStub::Types>(
+          types | CompareNilICStub::kCompareAgainstUndefined);
+    } else if (object->IsUndetectableObject() || !object->IsHeapObject()) {
+      types = CompareNilICStub::kFullCompare;
+    } else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
+      types = CompareNilICStub::kFullCompare;
+    } else {
+      types = static_cast<CompareNilICStub::Types>(
+          types | CompareNilICStub::kCompareAgainstMonomorphicMap);
+    }
+  }
+  return types;
+}
+
+
 void InstanceofStub::PrintName(StringStream* stream) {
   const char* args = "";
   if (HasArgsInRegisters()) {
@@ -557,7 +604,7 @@ bool ToBooleanStub::Types::Record(Handle<Object> object) {
     ASSERT(!object->IsUndetectableObject());
     Add(HEAP_NUMBER);
     double value = HeapNumber::cast(*object)->value();
-    return value != 0 && !isnan(value);
+    return value != 0 && !std::isnan(value);
   } else {
     // We should never see an internal object at runtime here!
     UNREACHABLE();
@@ -647,4 +694,45 @@ bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
 }
 
 
+static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) {
+  int major_key = stub->MajorKey();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate->code_stub_interface_descriptor(major_key);
+  if (!descriptor->initialized()) {
+    stub->InitializeInterfaceDescriptor(isolate, descriptor);
+  }
+}
+
+
+void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
+  ArrayNoArgumentConstructorStub stub1(GetInitialFastElementsKind());
+  InstallDescriptor(isolate, &stub1);
+  ArraySingleArgumentConstructorStub stub2(GetInitialFastElementsKind());
+  InstallDescriptor(isolate, &stub2);
+  ArrayNArgumentsConstructorStub stub3(GetInitialFastElementsKind());
+  InstallDescriptor(isolate, &stub3);
+}
+
+
+ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
+    : argument_count_(ANY) {
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+}
+
+
+ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
+                                           int argument_count) {
+  if (argument_count == 0) {
+    argument_count_ = NONE;
+  } else if (argument_count == 1) {
+    argument_count_ = ONE;
+  } else if (argument_count >= 2) {
+    argument_count_ = MORE_THAN_ONE;
+  } else {
+    UNREACHABLE();
+  }
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+}
+
+
 } }  // namespace v8::internal
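
GetPatchedICFlags above widens the IC's record of observed comparisons one step at a time until it gives up and goes fully generic. A rough standalone model of that flag accumulation (flag names and values here are illustrative, not v8's):

    #include <cstdio>

    enum NilCompareTypes {
      kSeenNull        = 1 << 0,
      kSeenUndefined   = 1 << 1,
      kSeenMonomorphic = 1 << 2,
      kFull            = kSeenNull | kSeenUndefined | kSeenMonomorphic
    };

    // One observation widens the set; a second distinct non-nil object
    // goes fully generic, mirroring the monomorphic-map step above.
    static int RecordObservation(int types, bool is_null, bool is_undefined) {
      if (is_null) return types | kSeenNull;
      if (is_undefined) return types | kSeenUndefined;
      return (types & kSeenMonomorphic) ? kFull : (types | kSeenMonomorphic);
    }

    int main() {
      int types = 0;
      types = RecordObservation(types, true, false);   // saw null
      types = RecordObservation(types, false, false);  // saw an object
      types = RecordObservation(types, false, false);  // second object: full
      std::printf("types = 0x%x (full = 0x%x)\n", types, kFull);
      return 0;
    }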

280
deps/v8/src/code-stubs.h

@@ -29,6 +29,7 @@
 #define V8_CODE_STUBS_H_
 
 #include "allocation.h"
+#include "assembler.h"
 #include "globals.h"
 #include "codegen.h"
@@ -46,6 +47,7 @@ namespace internal {
   V(StringCompare)                       \
   V(Compare)                             \
   V(CompareIC)                           \
+  V(CompareNilIC)                        \
   V(MathPow)                             \
   V(StringLength)                        \
   V(FunctionPrototype)                   \
@@ -82,6 +84,7 @@ namespace internal {
   V(TransitionElementsKind)              \
   V(StoreArrayLiteralElement)            \
   V(StubFailureTrampoline)               \
+  V(ArrayConstructor)                    \
   V(ProfileEntryHook)                    \
   /* IC Handler stubs */                 \
   V(LoadField)
@@ -260,17 +263,18 @@ class PlatformCodeStub : public CodeStub {
 enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
 
 struct CodeStubInterfaceDescriptor {
-  CodeStubInterfaceDescriptor()
-      : register_param_count_(-1),
-        stack_parameter_count_(NULL),
-        function_mode_(NOT_JS_FUNCTION_STUB_MODE),
-        register_params_(NULL) { }
+  CodeStubInterfaceDescriptor();
   int register_param_count_;
   const Register* stack_parameter_count_;
+  // if hint_stack_parameter_count_ > 0, the code stub can optimize the
+  // return sequence. Default value is -1, which means it is ignored.
+  int hint_stack_parameter_count_;
   StubFunctionMode function_mode_;
   Register* register_params_;
   Address deoptimization_handler_;
+  ExternalReference miss_handler_;
 
   int environment_length() const {
     if (stack_parameter_count_ != NULL) {
@@ -278,13 +282,28 @@ struct CodeStubInterfaceDescriptor {
     }
     return register_param_count_;
   }
+
+  bool initialized() const { return register_param_count_ >= 0; }
 };
 
+// A helper to make up for the fact that type Register is not fully
+// defined outside of the platform directories
+#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \
+  ((index) == (descriptor)->register_param_count_)           \
+      ? *((descriptor)->stack_parameter_count_)              \
+      : (descriptor)->register_params_[(index)]
+
 
 class HydrogenCodeStub : public CodeStub {
  public:
-  // Retrieve the code for the stub. Generate the code if needed.
-  virtual Handle<Code> GenerateCode() = 0;
+  enum InitializationState {
+    CODE_STUB_IS_NOT_MISS,
+    CODE_STUB_IS_MISS
+  };
+
+  explicit HydrogenCodeStub(InitializationState state) {
+    is_miss_ = (state == CODE_STUB_IS_MISS);
+  }
 
   virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@ -292,9 +311,36 @@ class HydrogenCodeStub : public CodeStub {
return isolate->code_stub_interface_descriptor(MajorKey()); return isolate->code_stub_interface_descriptor(MajorKey());
} }
bool IsMiss() { return is_miss_; }
template<class SubClass>
static Handle<Code> GetUninitialized(Isolate* isolate) {
SubClass::GenerateAheadOfTime(isolate);
return SubClass().GetCode(isolate);
}
virtual void InitializeInterfaceDescriptor( virtual void InitializeInterfaceDescriptor(
Isolate* isolate, Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) = 0; CodeStubInterfaceDescriptor* descriptor) = 0;
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode() = 0;
virtual int NotMissMinorKey() = 0;
Handle<Code> GenerateLightweightMissCode(Isolate* isolate);
private:
class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {};
class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {};
void GenerateLightweightMiss(MacroAssembler* masm);
virtual int MinorKey() {
return IsMissBits::encode(is_miss_) |
MinorKeyBits::encode(NotMissMinorKey());
}
bool is_miss_;
}; };
@@ -467,7 +513,8 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
   FastCloneShallowArrayStub(Mode mode,
                             AllocationSiteMode allocation_site_mode,
                             int length)
-      : mode_(mode),
+      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
+        mode_(mode),
         allocation_site_mode_(allocation_site_mode),
         length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
     ASSERT_GE(length_, 0);
@@ -513,7 +560,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
   STATIC_ASSERT(kFastCloneModeCount < 16);
   STATIC_ASSERT(kMaximumClonedLength < 16);
   Major MajorKey() { return FastCloneShallowArray; }
-  int MinorKey() {
+  int NotMissMinorKey() {
     return AllocationSiteModeBits::encode(allocation_site_mode_)
         | ModeBits::encode(mode_)
         | LengthBits::encode(length_);
@@ -526,7 +573,9 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
   // Maximum number of properties in copied object.
   static const int kMaximumClonedProperties = 6;
 
-  explicit FastCloneShallowObjectStub(int length) : length_(length) {
+  explicit FastCloneShallowObjectStub(int length)
+      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
+        length_(length) {
     ASSERT_GE(length_, 0);
     ASSERT_LE(length_, kMaximumClonedProperties);
   }
@@ -543,7 +592,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
   int length_;
 
   Major MajorKey() { return FastCloneShallowObject; }
-  int MinorKey() { return length_; }
+  int NotMissMinorKey() { return length_; }
 
   DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
 };
@@ -587,6 +636,22 @@ class InstanceofStub: public PlatformCodeStub {
 };
 
 
+class ArrayConstructorStub: public PlatformCodeStub {
+ public:
+  enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
+  ArrayConstructorStub(Isolate* isolate, int argument_count);
+  explicit ArrayConstructorStub(Isolate* isolate);
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
+  virtual int MinorKey() { return argument_count_; }
+
+  ArgumentCountKey argument_count_;
+};
+
+
 class MathPowStub: public PlatformCodeStub {
  public:
   enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
@@ -911,6 +976,102 @@ class ICCompareStub: public PlatformCodeStub {
 };
 
 
+class CompareNilICStub : public HydrogenCodeStub {
+ public:
+  enum Types {
+    kCompareAgainstNull = 1 << 0,
+    kCompareAgainstUndefined = 1 << 1,
+    kCompareAgainstMonomorphicMap = 1 << 2,
+    kCompareAgainstUndetectable = 1 << 3,
+    kFullCompare = kCompareAgainstNull | kCompareAgainstUndefined |
+        kCompareAgainstUndetectable
+  };
+
+  CompareNilICStub(EqualityKind kind, NilValue nil, Types types)
+      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), bit_field_(0) {
+    bit_field_ = EqualityKindField::encode(kind) |
+        NilValueField::encode(nil) |
+        TypesField::encode(types);
+  }
+
+  virtual InlineCacheState GetICState() {
+    Types types = GetTypes();
+    if (types == kFullCompare) {
+      return MEGAMORPHIC;
+    } else if ((types & kCompareAgainstMonomorphicMap) != 0) {
+      return MONOMORPHIC;
+    } else {
+      return PREMONOMORPHIC;
+    }
+  }
+
+  virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
+
+  Handle<Code> GenerateCode();
+
+  static Handle<Code> GetUninitialized(Isolate* isolate,
+                                       EqualityKind kind,
+                                       NilValue nil) {
+    return CompareNilICStub(kind, nil).GetCode(isolate);
+  }
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+  static void InitializeForIsolate(Isolate* isolate) {
+    CompareNilICStub compare_stub(kStrictEquality, kNullValue);
+    compare_stub.InitializeInterfaceDescriptor(
+        isolate,
+        isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
+  }
+
+  virtual Code::ExtraICState GetExtraICState() {
+    return bit_field_;
+  }
+
+  EqualityKind GetKind() { return EqualityKindField::decode(bit_field_); }
+  NilValue GetNilValue() { return NilValueField::decode(bit_field_); }
+  Types GetTypes() { return TypesField::decode(bit_field_); }
+
+  static Types TypesFromExtraICState(
+      Code::ExtraICState state) {
+    return TypesField::decode(state);
+  }
+  static EqualityKind EqualityKindFromExtraICState(
+      Code::ExtraICState state) {
+    return EqualityKindField::decode(state);
+  }
+  static NilValue NilValueFromExtraICState(Code::ExtraICState state) {
+    return NilValueField::decode(state);
+  }
+
+  static Types GetPatchedICFlags(Code::ExtraICState extra_ic_state,
+                                 Handle<Object> object,
+                                 bool* already_monomorphic);
+
+ private:
+  friend class CompareNilIC;
+
+  class EqualityKindField : public BitField<EqualityKind, 0, 1> {};
+  class NilValueField : public BitField<NilValue, 1, 1> {};
+  class TypesField : public BitField<Types, 3, 4> {};
+
+  CompareNilICStub(EqualityKind kind, NilValue nil)
+      : HydrogenCodeStub(CODE_STUB_IS_MISS), bit_field_(0) {
+    bit_field_ = EqualityKindField::encode(kind) |
+        NilValueField::encode(nil);
+  }
+
+  virtual CodeStub::Major MajorKey() { return CompareNilIC; }
+  virtual int NotMissMinorKey() { return bit_field_; }
+
+  int bit_field_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompareNilICStub);
+};
+
+
 class CEntryStub : public PlatformCodeStub {
  public:
   explicit CEntryStub(int result_size,
@@ -1291,19 +1452,20 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
  public:
   KeyedLoadDictionaryElementStub() {}
 
-  Major MajorKey() { return KeyedLoadElement; }
-  int MinorKey() { return DICTIONARY_ELEMENTS; }
-
   void Generate(MacroAssembler* masm);
 
  private:
+  Major MajorKey() { return KeyedLoadElement; }
+  int MinorKey() { return DICTIONARY_ELEMENTS; }
+
   DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
 };
 
 
 class KeyedLoadFastElementStub : public HydrogenCodeStub {
  public:
-  KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+  KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind)
+      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
     bit_field_ = ElementsKindBits::encode(elements_kind) |
         IsJSArrayBits::encode(is_js_array);
   }
@@ -1323,12 +1485,12 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
       CodeStubInterfaceDescriptor* descriptor);
 
  private:
-  class IsJSArrayBits: public BitField<bool, 8, 1> {};
   class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+  class IsJSArrayBits: public BitField<bool, 8, 1> {};
   uint32_t bit_field_;
 
   Major MajorKey() { return KeyedLoadElement; }
-  int MinorKey() { return bit_field_; }
+  int NotMissMinorKey() { return bit_field_; }
 
   DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
 };
@@ -1338,15 +1500,13 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
  public:
   KeyedStoreFastElementStub(bool is_js_array,
                             ElementsKind elements_kind,
-                            KeyedAccessStoreMode mode) {
+                            KeyedAccessStoreMode mode)
+      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
     bit_field_ = ElementsKindBits::encode(elements_kind) |
         IsJSArrayBits::encode(is_js_array) |
         StoreModeBits::encode(mode);
   }
 
-  Major MajorKey() { return KeyedStoreElement; }
-  int MinorKey() { return bit_field_; }
-
   bool is_js_array() const {
     return IsJSArrayBits::decode(bit_field_);
   }
@@ -1371,6 +1531,9 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
   class IsJSArrayBits: public BitField<bool, 12, 1> {};
   uint32_t bit_field_;
 
+  Major MajorKey() { return KeyedStoreElement; }
+  int NotMissMinorKey() { return bit_field_; }
+
   DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub);
 };
@@ -1378,7 +1541,8 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
 class TransitionElementsKindStub : public HydrogenCodeStub {
  public:
   TransitionElementsKindStub(ElementsKind from_kind,
-                             ElementsKind to_kind) {
+                             ElementsKind to_kind)
+      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
     bit_field_ = FromKindBits::encode(from_kind) |
         ToKindBits::encode(to_kind);
   }
@@ -1403,19 +1567,55 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
   uint32_t bit_field_;
 
   Major MajorKey() { return TransitionElementsKind; }
-  int MinorKey() { return bit_field_; }
+  int NotMissMinorKey() { return bit_field_; }
 
   DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
 };
 
 
-class ArrayNoArgumentConstructorStub : public HydrogenCodeStub {
+class ArrayConstructorStubBase : public HydrogenCodeStub {
  public:
-  ArrayNoArgumentConstructorStub() {
+  ArrayConstructorStubBase(ElementsKind kind, AllocationSiteMode mode)
+      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+    bit_field_ = ElementsKindBits::encode(kind) |
+        AllocationSiteModeBits::encode(mode == TRACK_ALLOCATION_SITE);
   }
 
-  Major MajorKey() { return ArrayNoArgumentConstructor; }
-  int MinorKey() { return 0; }
+  ElementsKind elements_kind() const {
+    return ElementsKindBits::decode(bit_field_);
+  }
+
+  AllocationSiteMode mode() const {
+    return AllocationSiteModeBits::decode(bit_field_)
+        ? TRACK_ALLOCATION_SITE
+        : DONT_TRACK_ALLOCATION_SITE;
+  }
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateStubsAheadOfTime(Isolate* isolate);
+  static void InstallDescriptors(Isolate* isolate);
+
+  // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+  static const int kPropertyCell = 0;
+
+ private:
+  int NotMissMinorKey() { return bit_field_; }
+
+  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+  class AllocationSiteModeBits: public BitField<bool, 8, 1> {};
+  uint32_t bit_field_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
+};
+
+
+class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
+ public:
+  ArrayNoArgumentConstructorStub(
+      ElementsKind kind,
+      AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
+      : ArrayConstructorStubBase(kind, mode) {
+  }
 
   virtual Handle<Code> GenerateCode();
 
@@ -1424,18 +1624,20 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
       CodeStubInterfaceDescriptor* descriptor);
 
  private:
+  Major MajorKey() { return ArrayNoArgumentConstructor; }
+
   DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
 };
 
 
-class ArraySingleArgumentConstructorStub : public HydrogenCodeStub {
+class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
  public:
-  ArraySingleArgumentConstructorStub() {
+  ArraySingleArgumentConstructorStub(
+      ElementsKind kind,
+      AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
+      : ArrayConstructorStubBase(kind, mode) {
   }
 
-  Major MajorKey() { return ArraySingleArgumentConstructor; }
-  int MinorKey() { return 0; }
-
   virtual Handle<Code> GenerateCode();
 
   virtual void InitializeInterfaceDescriptor(
@@ -1443,18 +1645,20 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
       CodeStubInterfaceDescriptor* descriptor);
 
  private:
+  Major MajorKey() { return ArraySingleArgumentConstructor; }
+
   DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
 };
 
 
-class ArrayNArgumentsConstructorStub : public HydrogenCodeStub {
+class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
  public:
-  ArrayNArgumentsConstructorStub() {
+  ArrayNArgumentsConstructorStub(
+      ElementsKind kind,
+      AllocationSiteMode mode = TRACK_ALLOCATION_SITE) :
+      ArrayConstructorStubBase(kind, mode) {
   }
 
-  Major MajorKey() { return ArrayNArgumentsConstructor; }
-  int MinorKey() { return 0; }
-
   virtual Handle<Code> GenerateCode();
 
   virtual void InitializeInterfaceDescriptor(
@@ -1462,6 +1666,8 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
       CodeStubInterfaceDescriptor* descriptor);
 
  private:
+  Major MajorKey() { return ArrayNArgumentsConstructor; }
+
   DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
 };

4
deps/v8/src/code.h

@@ -29,6 +29,8 @@
 #define V8_CODE_H_
 
 #include "allocation.h"
+#include "handles.h"
+#include "objects.h"
 
 namespace v8 {
 namespace internal {
@@ -44,6 +46,8 @@ class ParameterCount BASE_EMBEDDED {
       : reg_(reg), immediate_(0) { }
   explicit ParameterCount(int immediate)
       : reg_(no_reg), immediate_(immediate) { }
+  explicit ParameterCount(Handle<JSFunction> f)
+      : reg_(no_reg), immediate_(f->shared()->formal_parameter_count()) { }
 
   bool is_reg() const { return !reg_.is(no_reg); }
   bool is_immediate() const { return !is_reg(); }

24
deps/v8/src/compiler.cc

@@ -125,12 +125,9 @@ CompilationInfo::~CompilationInfo() {
 
 int CompilationInfo::num_parameters() const {
-  if (IsStub()) {
-    return 0;
-  } else {
-    return scope()->num_parameters();
-  }
+  ASSERT(!IsStub());
+  return scope()->num_parameters();
 }
 
 
 int CompilationInfo::num_heap_slots() const {
@@ -147,8 +144,7 @@ Code::Flags CompilationInfo::flags() const {
     return Code::ComputeFlags(code_stub()->GetCodeKind(),
                               code_stub()->GetICState(),
                               code_stub()->GetExtraICState(),
-                              Code::NORMAL,
-                              0);
+                              Code::NORMAL, -1);
   } else {
     return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
   }
@@ -425,6 +421,12 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
   Timer timer(this, &time_taken_to_codegen_);
   ASSERT(chunk_ != NULL);
   ASSERT(graph_ != NULL);
+  // Deferred handles reference objects that were accessible during
+  // graph creation.  To make sure that we don't encounter inconsistencies
+  // between graph creation and code generation, we disallow accessing
+  // objects through deferred handles during the latter, with exceptions.
+  HandleDereferenceGuard no_deref_deferred(
+      isolate(), HandleDereferenceGuard::DISALLOW_DEFERRED);
   Handle<Code> optimized_code = chunk_->Codegen();
   if (optimized_code.is_null()) {
     info()->set_bailout_reason("code generation failed");
@@ -622,7 +624,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
   isolate->counters()->total_compile_size()->Increment(source_length);
 
   // The VM is in the COMPILER state until exiting this function.
-  VMState state(isolate, COMPILER);
+  VMState<COMPILER> state(isolate);
 
   CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -696,7 +698,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
   isolate->counters()->total_compile_size()->Increment(source_length);
 
   // The VM is in the COMPILER state until exiting this function.
-  VMState state(isolate, COMPILER);
+  VMState<COMPILER> state(isolate);
 
   // Do a lookup in the compilation cache; if the entry is not there, invoke
   // the compiler and add the result to the cache.
@@ -859,7 +861,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
 
   // The VM is in the COMPILER state until exiting this function.
-  VMState state(isolate, COMPILER);
+  VMState<COMPILER> state(isolate);
 
   PostponeInterruptsScope postpone(isolate);
@@ -923,7 +925,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
   }
 
   SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
-  VMState state(isolate, PARALLEL_COMPILER);
+  VMState<COMPILER> state(isolate);
   PostponeInterruptsScope postpone(isolate);
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
@@ -998,7 +1000,7 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
   }
   Isolate* isolate = info->isolate();
-  VMState state(isolate, PARALLEL_COMPILER);
+  VMState<COMPILER> state(isolate);
   Logger::TimerEventScope timer(
       isolate, Logger::TimerEventScope::v8_recompile_synchronous);
   // If crankshaft succeeded, install the optimized code else install

10
deps/v8/src/compiler.h

@@ -143,6 +143,14 @@ class CompilationInfo {
     return SavesCallerDoubles::decode(flags_);
   }
 
+  void MarkAsRequiresFrame() {
+    flags_ |= RequiresFrame::encode(true);
+  }
+
+  bool requires_frame() const {
+    return RequiresFrame::decode(flags_);
+  }
+
   void SetParseRestriction(ParseRestriction restriction) {
     flags_ = ParseRestricitonField::update(flags_, restriction);
   }
@@ -300,6 +308,8 @@ class CompilationInfo {
   class SavesCallerDoubles: public BitField<bool, 12, 1> {};
   // If the set of valid statements is restricted.
   class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {};
+  // If the function requires a frame (for unspecified reasons)
+  class RequiresFrame: public BitField<bool, 14, 1> {};
 
   unsigned flags_;

2
deps/v8/src/contexts.h

@@ -123,6 +123,7 @@ enum BindingFlags {
   V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                        \
   V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun)                        \
   V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun)          \
+  V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun)                      \
   V(FUNCTION_MAP_INDEX, Map, function_map)                                     \
   V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map)             \
   V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
@@ -276,6 +277,7 @@ class Context: public FixedArray {
     GLOBAL_EVAL_FUN_INDEX,
     INSTANTIATE_FUN_INDEX,
     CONFIGURE_INSTANCE_FUN_INDEX,
+    ARRAY_BUFFER_FUN_INDEX,
     MESSAGE_LISTENERS_INDEX,
     MAKE_MESSAGE_FUN_INDEX,
     GET_STACK_TRACE_LINE_INDEX,
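
The same entry is added twice here because the V(...) list that drives accessor generation and the hand-maintained index enum must stay in sync. A small self-contained example of how such an X-macro list expands (field names are illustrative only):

    #include <cstdio>

    #define FOR_EACH_FIELD(V)                   \
      V(GLOBAL_EVAL_FUN_INDEX, global_eval_fun) \
      V(ARRAY_BUFFER_FUN_INDEX, array_buffer_fun)

    enum FieldIndex {
    #define DECLARE_INDEX(index, name) index,
      FOR_EACH_FIELD(DECLARE_INDEX)
    #undef DECLARE_INDEX
      kFieldCount
    };

    int main() {
    #define PRINT_FIELD(index, name) std::printf(#name " -> %d\n", index);
      FOR_EACH_FIELD(PRINT_FIELD)
    #undef PRINT_FIELD
      return 0;
    }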

6
deps/v8/src/conversions-inl.h

@@ -29,9 +29,9 @@
 #define V8_CONVERSIONS_INL_H_
 
 #include <limits.h>        // Required for INT_MAX etc.
-#include <math.h>
 #include <float.h>         // Required for DBL_MAX and on Win32 for finite()
 #include <stdarg.h>
+#include <cmath>
 
 #include "globals.h"       // Required for V8_INFINITY
 
 // ----------------------------------------------------------------------------
@@ -86,8 +86,8 @@ inline unsigned int FastD2UI(double x) {
 
 inline double DoubleToInteger(double x) {
-  if (isnan(x)) return 0;
-  if (!isfinite(x) || x == 0) return x;
+  if (std::isnan(x)) return 0;
+  if (!std::isfinite(x) || x == 0) return x;
   return (x >= 0) ? floor(x) : ceil(x);
 }
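
DoubleToInteger now uses the std::-qualified classification functions from <cmath>; after switching from <math.h>, the unqualified isnan/isfinite spellings are not guaranteed to be available, so the qualification is the portable form. A standalone copy of the same logic, runnable as plain C++11:

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Same truncate-toward-zero logic as DoubleToInteger above.
    inline double DoubleToIntegerSketch(double x) {
      if (std::isnan(x)) return 0;
      if (!std::isfinite(x) || x == 0) return x;
      return (x >= 0) ? std::floor(x) : std::ceil(x);
    }

    int main() {
      std::printf("%g\n", DoubleToIntegerSketch(3.9));   // 3
      std::printf("%g\n", DoubleToIntegerSketch(-3.9));  // -3
      double nan = std::numeric_limits<double>::quiet_NaN();
      std::printf("%g\n", DoubleToIntegerSketch(nan));   // 0
      return 0;
    }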

7
deps/v8/src/conversions.cc

@@ -26,14 +26,19 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdarg.h>
-#include <math.h>
 #include <limits.h>
+#include <cmath>
 
 #include "conversions-inl.h"
 #include "dtoa.h"
 #include "strtod.h"
 #include "utils.h"
 
+#ifndef _STLP_VENDOR_CSTD
+// STLPort doesn't import fpclassify into the std namespace.
+using std::fpclassify;
+#endif
+
 namespace v8 {
 namespace internal {

33
deps/v8/src/counters.cc

@@ -45,57 +45,38 @@ int* StatsCounter::FindLocationInStatsTable() const {
 }
 
 
-// Start the timer.
-void StatsCounterTimer::Start() {
-  if (!counter_.Enabled())
-    return;
-  stop_time_ = 0;
-  start_time_ = OS::Ticks();
-}
-
-// Stop the timer and record the results.
-void StatsCounterTimer::Stop() {
-  if (!counter_.Enabled())
-    return;
-  stop_time_ = OS::Ticks();
-
-  // Compute the delta between start and stop, in milliseconds.
-  int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
-  counter_.Increment(milliseconds);
-}
-
 void Histogram::AddSample(int sample) {
   if (Enabled()) {
-    Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample);
+    isolate()->stats_table()->AddHistogramSample(histogram_, sample);
   }
 }
 
 void* Histogram::CreateHistogram() const {
-  return Isolate::Current()->stats_table()->
+  return isolate()->stats_table()->
       CreateHistogram(name_, min_, max_, num_buckets_);
 }
 
 // Start the timer.
 void HistogramTimer::Start() {
-  if (histogram_.Enabled()) {
+  if (Enabled()) {
     stop_time_ = 0;
     start_time_ = OS::Ticks();
   }
   if (FLAG_log_internal_timer_events) {
-    LOG(Isolate::Current(), TimerEvent(Logger::START, histogram_.name_));
+    LOG(isolate(), TimerEvent(Logger::START, name()));
   }
 }
 
 // Stop the timer and record the results.
 void HistogramTimer::Stop() {
-  if (histogram_.Enabled()) {
+  if (Enabled()) {
     stop_time_ = OS::Ticks();
     // Compute the delta between start and stop, in milliseconds.
     int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
-    histogram_.AddSample(milliseconds);
+    AddSample(milliseconds);
   }
   if (FLAG_log_internal_timer_events) {
-    LOG(Isolate::Current(), TimerEvent(Logger::END, histogram_.name_));
+    LOG(isolate(), TimerEvent(Logger::END, name()));
   }
 }

99
deps/v8/src/counters.h

@@ -113,14 +113,11 @@ class StatsTable {
 // The row has a 32bit value for each process/thread in the table and also
 // a name (stored in the table metadata).  Since the storage location can be
 // thread-specific, this class cannot be shared across threads.
-//
-// This class is designed to be POD initialized.  It will be registered with
-// the counter system on first use.  For example:
-//   StatsCounter c = { "c:myctr", NULL, false };
-struct StatsCounter {
-  const char* name_;
-  int* ptr_;
-  bool lookup_done_;
+class StatsCounter {
+ public:
+  StatsCounter() { }
+  explicit StatsCounter(const char* name)
+      : name_(name), ptr_(NULL), lookup_done_(false) { }
 
   // Sets the counter to a specific value.
   void Set(int value) {
@@ -177,39 +174,29 @@ class StatsCounter {
  private:
   int* FindLocationInStatsTable() const;
-};
-
-// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
-struct StatsCounterTimer {
-  StatsCounter counter_;
-
-  int64_t start_time_;
-  int64_t stop_time_;
-
-  // Start the timer.
-  void Start();
-
-  // Stop the timer and record the results.
-  void Stop();
-
-  // Returns true if the timer is running.
-  bool Running() {
-    return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0;
-  }
+
+  const char* name_;
+  int* ptr_;
+  bool lookup_done_;
 };
 
 // A Histogram represents a dynamically created histogram in the StatsTable.
-//
-// This class is designed to be POD initialized.  It will be registered with
-// the histogram system on first use.  For example:
-//   Histogram h = { "myhist", 0, 10000, 50, NULL, false };
-struct Histogram {
-  const char* name_;
-  int min_;
-  int max_;
-  int num_buckets_;
-  void* histogram_;
-  bool lookup_done_;
+// It will be registered with the histogram system on first use.
+class Histogram {
+ public:
+  Histogram() { }
+  Histogram(const char* name,
+            int min,
+            int max,
+            int num_buckets,
+            Isolate* isolate)
+      : name_(name),
+        min_(min),
+        max_(max),
+        num_buckets_(num_buckets),
+        histogram_(NULL),
+        lookup_done_(false),
+        isolate_(isolate) { }
 
   // Add a single sample to this histogram.
   void AddSample(int sample);
@@ -234,17 +221,33 @@ class Histogram {
     return histogram_;
   }
 
+  const char* name() { return name_; }
+  Isolate* isolate() const { return isolate_; }
+
  private:
   void* CreateHistogram() const;
-};
 
-// A HistogramTimer allows distributions of results to be created
-// HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 };
-struct HistogramTimer {
-  Histogram histogram_;
+  const char* name_;
+  int min_;
+  int max_;
+  int num_buckets_;
+  void* histogram_;
+  bool lookup_done_;
+  Isolate* isolate_;
+};
 
-  int64_t start_time_;
-  int64_t stop_time_;
+// A HistogramTimer allows distributions of results to be created.
+class HistogramTimer : public Histogram {
+ public:
+  HistogramTimer() { }
+  HistogramTimer(const char* name,
+                 int min,
+                 int max,
+                 int num_buckets,
+                 Isolate* isolate)
+      : Histogram(name, min, max, num_buckets, isolate),
+        start_time_(0),
+        stop_time_(0) { }
 
   // Start the timer.
   void Start();
@@ -254,12 +257,12 @@ class HistogramTimer : public Histogram {
 
   // Returns true if the timer is running.
   bool Running() {
-    return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0);
+    return Enabled() && (start_time_ != 0) && (stop_time_ == 0);
   }
 
-  void Reset() {
-    histogram_.Reset();
-  }
+ private:
+  int64_t start_time_;
+  int64_t stop_time_;
 };
 
 // Helper class for scoping a HistogramTimer.
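
The reworked HistogramTimer is now a constructor-initialized class instead of a brace-initialized POD, but the timing arithmetic is unchanged: two tick readings, with the delta divided down to milliseconds. A toy version under that reading (TicksNow() is a fake stand-in for OS::Ticks(), assumed to report microseconds to match the divide-by-1000 above):

    #include <cstdint>
    #include <cstdio>

    // Fake microsecond clock: pretends 1500 us pass between readings.
    static int64_t TicksNow() {
      static int64_t fake_ticks = 0;
      return fake_ticks += 1500;
    }

    class HistogramTimerSketch {
     public:
      HistogramTimerSketch() : start_time_(0), stop_time_(0) {}
      void Start() { stop_time_ = 0; start_time_ = TicksNow(); }
      void Stop() {
        stop_time_ = TicksNow();
        // Microsecond delta converted to a millisecond sample.
        int ms = static_cast<int>(stop_time_ - start_time_) / 1000;
        std::printf("sample: %d ms\n", ms);
      }
      bool Running() const { return start_time_ != 0 && stop_time_ == 0; }
     private:
      int64_t start_time_;
      int64_t stop_time_;
    };

    int main() {
      HistogramTimerSketch t;
      t.Start();
      t.Stop();  // prints "sample: 1 ms" with the fake clock
      return 0;
    }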

18
deps/v8/src/cpu-profiler.cc

@ -44,9 +44,11 @@ static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB; static const int kProfilerStackSize = 64 * KB;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) ProfilerEventsProcessor::ProfilerEventsProcessor(
ProfileGenerator* generator, CpuProfilesCollection* profiles)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator), generator_(generator),
profiles_(profiles),
running_(true), running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord), ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize, kTickSamplesBufferChunkSize,
@@ -65,7 +67,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
+ rec->entry = profiles_->NewCodeEntry(tag, prefix, name);
  rec->size = 1;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
@@ -85,7 +87,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
+ rec->entry = profiles_->NewCodeEntry(tag, name, resource_name, line_number);
  rec->size = size;
  rec->shared = shared;
  events_buffer_.Enqueue(evt_rec);
@@ -102,7 +104,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name);
+ rec->entry = profiles_->NewCodeEntry(tag, name);
  rec->size = size;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
@@ -119,7 +121,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, args_count);
+ rec->entry = profiles_->NewCodeEntry(tag, args_count);
  rec->size = size;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
@@ -162,7 +164,7 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
+ rec->entry = profiles_->NewCodeEntry(tag, prefix, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
@@ -443,7 +445,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
    saved_logging_nesting_ = isolate_->logger()->logging_nesting_;
    isolate_->logger()->logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
-   processor_ = new ProfilerEventsProcessor(generator_);
+   processor_ = new ProfilerEventsProcessor(generator_, profiles_);
    is_profiling_ = true;
    processor_->StartSynchronously();
    // Enumerate stuff we already have in the heap.
@@ -458,7 +460,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
      isolate_->logger()->LogAccessorCallbacks();
    }
    // Enable stack sampling.
-   Sampler* sampler = reinterpret_cast<Sampler*>(isolate_->logger()->ticker_);
+   Sampler* sampler = isolate_->logger()->sampler();
    sampler->IncreaseProfilingDepth();
    if (!sampler->IsActive()) {
      sampler->Start();

4
deps/v8/src/cpu-profiler.h

@@ -125,7 +125,8 @@ class TickSampleEventRecord {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ ProfilerEventsProcessor(ProfileGenerator* generator,
+                         CpuProfilesCollection* profiles);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
@@ -178,6 +179,7 @@ class ProfilerEventsProcessor : public Thread {
  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));

  ProfileGenerator* generator_;
+ CpuProfilesCollection* profiles_;
  bool running_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  SamplingCircularQueue ticks_buffer_;

382
deps/v8/src/d8.cc

@@ -42,6 +42,13 @@
#ifdef V8_SHARED
#include <assert.h>
+ #endif  // V8_SHARED
+
+ #ifndef V8_SHARED
+ #include <algorithm>
+ #endif  // !V8_SHARED
+
+ #ifdef V8_SHARED
#include "../include/v8-testing.h"
#endif  // V8_SHARED
@@ -83,7 +90,7 @@ const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
const char kArrayMarkerPropName[] = "d8::_is_typed_array_";

- #define FOR_EACH_SYMBOL(V) \
+ #define FOR_EACH_STRING(V) \
  V(ArrayBuffer, "ArrayBuffer") \
  V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \
  V(ArrayMarkerPropName, kArrayMarkerPropName) \
@@ -94,36 +101,58 @@ const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
  V(length, "length")

- class Symbols {
+ class PerIsolateData {
 public:
- explicit Symbols(Isolate* isolate) : isolate_(isolate) {
+ explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
    HandleScope scope(isolate);
- #define INIT_SYMBOL(name, value) \
-   name##_ = Persistent<String>::New(isolate, String::NewSymbol(value));
-   FOR_EACH_SYMBOL(INIT_SYMBOL)
- #undef INIT_SYMBOL
+ #define INIT_STRING(name, value) \
+   name##_string_ = Persistent<String>::New(isolate, String::NewSymbol(value));
+   FOR_EACH_STRING(INIT_STRING)
+ #undef INIT_STRING
    isolate->SetData(this);
  }

- ~Symbols() {
- #define DISPOSE_SYMBOL(name, value) name##_.Dispose(isolate_);
-   FOR_EACH_SYMBOL(DISPOSE_SYMBOL)
- #undef DISPOSE_SYMBOL
+ ~PerIsolateData() {
+ #define DISPOSE_STRING(name, value) name##_string_.Dispose(isolate_);
+   FOR_EACH_STRING(DISPOSE_STRING)
+ #undef DISPOSE_STRING
    isolate_->SetData(NULL);  // Not really needed, just to be sure...
  }

- #define DEFINE_SYMBOL_GETTER(name, value) \
-   static Persistent<String> name(Isolate* isolate) { \
-     return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \
-   }
-   FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER)
- #undef DEFINE_SYMBOL_GETTER
+ inline static PerIsolateData* Get(Isolate* isolate) {
+   return reinterpret_cast<PerIsolateData*>(isolate->GetData());
+ }
+
+ #define DEFINE_STRING_GETTER(name, value) \
+   static Persistent<String> name##_string(Isolate* isolate) { \
+     return Get(isolate)->name##_string_; \
+   }
+   FOR_EACH_STRING(DEFINE_STRING_GETTER)
+ #undef DEFINE_STRING_GETTER
+
+ class RealmScope {
+  public:
+   explicit RealmScope(PerIsolateData* data);
+   ~RealmScope();
+  private:
+   PerIsolateData* data_;
+ };

 private:
+ friend class Shell;
+ friend class RealmScope;
  Isolate* isolate_;
- #define DEFINE_MEMBER(name, value) Persistent<String> name##_;
-   FOR_EACH_SYMBOL(DEFINE_MEMBER)
+ int realm_count_;
+ int realm_current_;
+ int realm_switch_;
+ Persistent<Context>* realms_;
+ Persistent<Value> realm_shared_;
+
+ #define DEFINE_MEMBER(name, value) Persistent<String> name##_string_;
+   FOR_EACH_STRING(DEFINE_MEMBER)
 #undef DEFINE_MEMBER
+
+ int RealmFind(Handle<Context> context);
};
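A note on the mechanism PerIsolateData rides on: the class stashes itself in the isolate's single embedder-data slot via SetData() and recovers itself in Get(), so the realm bookkeeping and interned strings follow the isolate into every callback. A minimal self-contained sketch of that pattern (illustrative names, 3.18-era single-slot API; not part of the commit):

    #include <v8.h>

    struct MyPerIsolateData {
      int counter;  // any embedder state that must travel with the isolate
    };

    // Stash the struct in the isolate's one void* slot...
    void Install(v8::Isolate* isolate, MyPerIsolateData* data) {
      isolate->SetData(data);
    }

    // ...and recover it from any callback that only receives the isolate.
    MyPerIsolateData* GetData(v8::Isolate* isolate) {
      return static_cast<MyPerIsolateData*>(isolate->GetData());
    }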
@@ -207,14 +236,20 @@ bool Shell::ExecuteString(Isolate* isolate,
    // When debugging make exceptions appear to be uncaught.
    try_catch.SetVerbose(true);
  }
- Handle<Script> script = Script::Compile(source, name);
+ Handle<Script> script = Script::New(source, name);
  if (script.IsEmpty()) {
    // Print errors that happened during compilation.
    if (report_exceptions && !FLAG_debugger)
      ReportException(isolate, &try_catch);
    return false;
  } else {
+   PerIsolateData* data = PerIsolateData::Get(isolate);
+   Local<Context> realm =
+       Local<Context>::New(data->realms_[data->realm_current_]);
+   realm->Enter();
    Handle<Value> result = script->Run();
+   realm->Exit();
+   data->realm_current_ = data->realm_switch_;
    if (result.IsEmpty()) {
      ASSERT(try_catch.HasCaught());
      // Print errors that happened during execution.
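The switch from Script::Compile to Script::New matters here: Compile binds the compiled script to the context entered at compile time, while New leaves it unbound, which is what lets ExecuteString enter whichever realm is current before running it. A condensed sketch of the resulting pattern (3.18-era API):

    // Compile once without binding to a context...
    v8::Handle<v8::Script> script = v8::Script::New(source, name);
    // ...then choose the context at run time (`realm` is some Local<Context>).
    realm->Enter();
    v8::Handle<v8::Value> result = script->Run();  // executes inside `realm`
    realm->Exit();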
@@ -255,6 +290,164 @@ bool Shell::ExecuteString(Isolate* isolate,
}
PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_count_ = 1;
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
data_->realms_ = new Persistent<Context>[1];
data_->realms_[0] =
Persistent<Context>::New(data_->isolate_, Context::GetEntered());
data_->realm_shared_.Clear();
}
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive.
for (int i = 0; i < data_->realm_count_; ++i)
data_->realms_[i].Dispose(data_->isolate_);
delete[] data_->realms_;
if (!data_->realm_shared_.IsEmpty())
data_->realm_shared_.Dispose(data_->isolate_);
}
int PerIsolateData::RealmFind(Handle<Context> context) {
for (int i = 0; i < realm_count_; ++i) {
if (realms_[i] == context) return i;
}
return -1;
}
// Realm.current() returns the index of the currently active realm.
Handle<Value> Shell::RealmCurrent(const Arguments& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
int index = data->RealmFind(Context::GetEntered());
if (index == -1) return Undefined(isolate);
return Number::New(index);
}
// Realm.owner(o) returns the index of the realm that created o.
Handle<Value> Shell::RealmOwner(const Arguments& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsObject()) {
return Throw("Invalid argument");
}
int index = data->RealmFind(args[0]->ToObject()->CreationContext());
if (index == -1) return Undefined(isolate);
return Number::New(index);
}
// Realm.global(i) returns the global object of realm i.
// (Note that properties of global objects cannot be read/written cross-realm.)
Handle<Value> Shell::RealmGlobal(const Arguments& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
if (args.Length() < 1 || !args[0]->IsNumber()) {
return Throw("Invalid argument");
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
return Throw("Invalid realm index");
}
return data->realms_[index]->Global();
}
// Realm.create() creates a new realm and returns its index.
Handle<Value> Shell::RealmCreate(const Arguments& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
Persistent<Context>* old_realms = data->realms_;
int index = data->realm_count_;
data->realms_ = new Persistent<Context>[++data->realm_count_];
for (int i = 0; i < index; ++i) data->realms_[i] = old_realms[i];
delete[] old_realms;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
data->realms_[index] = Persistent<Context>::New(
isolate, Context::New(isolate, NULL, global_template));
return Number::New(index);
}
// Realm.dispose(i) disposes the reference to the realm i.
Handle<Value> Shell::RealmDispose(const Arguments& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
return Throw("Invalid argument");
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
return Throw("Invalid realm index");
}
data->realms_[index].Dispose(isolate);
data->realms_[index].Clear();
return Undefined(isolate);
}
// Realm.switch(i) switches to the realm i for consecutive interactive inputs.
Handle<Value> Shell::RealmSwitch(const Arguments& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
return Throw("Invalid argument");
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
return Throw("Invalid realm index");
}
data->realm_switch_ = index;
return Undefined(isolate);
}
// Realm.eval(i, s) evaluates s in realm i and returns the result.
Handle<Value> Shell::RealmEval(const Arguments& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
return Throw("Invalid argument");
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
return Throw("Invalid realm index");
}
Handle<Script> script = Script::New(args[1]->ToString());
if (script.IsEmpty()) return Undefined(isolate);
Local<Context> realm = Local<Context>::New(data->realms_[index]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
return result;
}
// Realm.shared is an accessor for a single shared value across realms.
Handle<Value> Shell::RealmSharedGet(Local<String> property,
const AccessorInfo& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (data->realm_shared_.IsEmpty()) return Undefined(isolate);
return data->realm_shared_;
}
void Shell::RealmSharedSet(Local<String> property,
Local<Value> value,
const AccessorInfo& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose(isolate);
data->realm_shared_ = Persistent<Value>::New(isolate, value);
}
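Taken together, the handlers above give d8 a small multi-realm API on the global object (bound further down in CreateGlobalTemplate). A hypothetical shell session, with behavior inferred from the comments in this diff:

    d8> var r = Realm.create();            // new realm; returns its index
    d8> Realm.eval(r, "var x = 6 * 7; x")
    42
    d8> Realm.owner(Realm.global(r)) == r  // r created its own global object
    true
    d8> Realm.shared = "visible in every realm";
    d8> Realm.eval(r, "Realm.shared")
    "visible in every realm"
    d8> Realm.dispose(r)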
Handle<Value> Shell::Print(const Arguments& args) {
  Handle<Value> val = Write(args);
  printf("\n");
@@ -416,7 +609,8 @@ Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
  }
  memset(data, 0, length);

- buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
+ buffer->SetHiddenValue(
+     PerIsolateData::ArrayBufferMarkerPropName_string(isolate), True());
  Persistent<Object> persistent_array =
      Persistent<Object>::New(isolate, buffer);
  persistent_array.MakeWeak(isolate, data, ExternalArrayWeakCallback);
@@ -425,7 +619,7 @@ Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
  buffer->SetIndexedPropertiesToExternalArrayData(
      data, v8::kExternalByteArray, length);
- buffer->Set(Symbols::byteLength(isolate),
+ buffer->Set(PerIsolateData::byteLength_string(isolate),
             Int32::New(length, isolate),
             ReadOnly);
@@ -470,20 +664,20 @@ Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
  array->SetIndexedPropertiesToExternalArrayData(
      static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate),
+ array->SetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate),
                        Int32::New(type, isolate));
- array->Set(Symbols::byteLength(isolate),
+ array->Set(PerIsolateData::byteLength_string(isolate),
             Int32::New(byteLength, isolate),
             ReadOnly);
- array->Set(Symbols::byteOffset(isolate),
+ array->Set(PerIsolateData::byteOffset_string(isolate),
             Int32::New(byteOffset, isolate),
             ReadOnly);
- array->Set(Symbols::length(isolate),
+ array->Set(PerIsolateData::length_string(isolate),
             Int32::New(length, isolate),
             ReadOnly);
- array->Set(Symbols::BYTES_PER_ELEMENT(isolate),
+ array->Set(PerIsolateData::BYTES_PER_ELEMENT_string(isolate),
             Int32::New(element_size, isolate));
- array->Set(Symbols::buffer(isolate),
+ array->Set(PerIsolateData::buffer_string(isolate),
             buffer,
             ReadOnly);
@@ -524,11 +718,11 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
  }
  if (args[0]->IsObject() &&
      !args[0]->ToObject()->GetHiddenValue(
-         Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) {
+         PerIsolateData::ArrayBufferMarkerPropName_string(isolate)).IsEmpty()) {
    // Construct from ArrayBuffer.
    buffer = args[0]->ToObject();
-   int32_t bufferLength =
-       convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch);
+   int32_t bufferLength = convertToUint(
+       buffer->Get(PerIsolateData::byteLength_string(isolate)), &try_catch);
    if (try_catch.HasCaught()) return try_catch.ReThrow();

    if (args.Length() < 2 || args[1]->IsUndefined()) {
@@ -560,9 +754,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
    }
  } else {
    if (args[0]->IsObject() &&
-       args[0]->ToObject()->Has(Symbols::length(isolate))) {
+       args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) {
      // Construct from array.
-     Local<Value> value = args[0]->ToObject()->Get(Symbols::length(isolate));
+     Local<Value> value =
+         args[0]->ToObject()->Get(PerIsolateData::length_string(isolate));
      if (try_catch.HasCaught()) return try_catch.ReThrow();
      length = convertToUint(value, &try_catch);
      if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -576,7 +771,8 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
    byteOffset = 0;

    Handle<Object> global = Context::GetCurrent()->Global();
-   Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate));
+   Handle<Value> array_buffer =
+       global->Get(PerIsolateData::ArrayBuffer_string(isolate));
    ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
    Handle<Value> buffer_args[] = { Uint32::New(byteLength, isolate) };
    Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
@@ -611,14 +807,14 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
  Isolate* isolate = args.GetIsolate();
  Local<Object> self = args.This();

- Local<Value> marker =
-     self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate));
+ Local<Value> marker = self->GetHiddenValue(
+     PerIsolateData::ArrayBufferMarkerPropName_string(isolate));
  if (marker.IsEmpty()) {
    return Throw("'slice' invoked on wrong receiver type");
  }

- int32_t length =
-     convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch);
+ int32_t length = convertToUint(
+     self->Get(PerIsolateData::byteLength_string(isolate)), &try_catch);
  if (try_catch.HasCaught()) return try_catch.ReThrow();

  if (args.Length() == 0) {
@@ -667,21 +863,22 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) {
  Isolate* isolate = args.GetIsolate();
  Local<Object> self = args.This();
  Local<Value> marker =
-     self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
+     self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate));
  if (marker.IsEmpty()) {
    return Throw("'subarray' invoked on wrong receiver type");
  }

- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
+ Handle<Object> buffer =
+     self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
  if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t length =
-     convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
+ int32_t length = convertToUint(
+     self->Get(PerIsolateData::length_string(isolate)), &try_catch);
  if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t byteOffset =
-     convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
+ int32_t byteOffset = convertToUint(
+     self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
  if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size =
-     convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
+ int32_t element_size = convertToUint(
+     self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch);
  if (try_catch.HasCaught()) return try_catch.ReThrow();

  if (args.Length() == 0) {
@@ -726,27 +923,27 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
  Isolate* isolate = args.GetIsolate();
  Local<Object> self = args.This();
  Local<Value> marker =
-     self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
+     self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate));
  if (marker.IsEmpty()) {
    return Throw("'set' invoked on wrong receiver type");
  }
- int32_t length =
-     convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
+ int32_t length = convertToUint(
+     self->Get(PerIsolateData::length_string(isolate)), &try_catch);
  if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size =
-     convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
+ int32_t element_size = convertToUint(
+     self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch);
  if (try_catch.HasCaught()) return try_catch.ReThrow();

  if (args.Length() == 0) {
    return Throw("'set' must have at least one argument");
  }
  if (!args[0]->IsObject() ||
-     !args[0]->ToObject()->Has(Symbols::length(isolate))) {
+     !args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) {
    return Throw("'set' invoked with non-array argument");
  }
  Handle<Object> source = args[0]->ToObject();
- int32_t source_length =
-     convertToUint(source->Get(Symbols::length(isolate)), &try_catch);
+ int32_t source_length = convertToUint(
+     source->Get(PerIsolateData::length_string(isolate)), &try_catch);
  if (try_catch.HasCaught()) return try_catch.ReThrow();

  int32_t offset;
@@ -761,11 +958,12 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
  }

  int32_t source_element_size;
- if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) {
+ if (source->GetHiddenValue(
+         PerIsolateData::ArrayMarkerPropName_string(isolate)).IsEmpty()) {
    source_element_size = 0;
  } else {
-   source_element_size =
-       convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)),
+   source_element_size = convertToUint(
+       source->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)),
                      &try_catch);
    if (try_catch.HasCaught()) return try_catch.ReThrow();
  }
@@ -773,16 +971,17 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
  if (element_size == source_element_size &&
      self->GetConstructor()->StrictEquals(source->GetConstructor())) {
    // Use memmove on the array buffers.
-   Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
+   Handle<Object> buffer =
+       self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
    if (try_catch.HasCaught()) return try_catch.ReThrow();
    Handle<Object> source_buffer =
-       source->Get(Symbols::buffer(isolate))->ToObject();
+       source->Get(PerIsolateData::buffer_string(isolate))->ToObject();
    if (try_catch.HasCaught()) return try_catch.ReThrow();
-   int32_t byteOffset =
-       convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
+   int32_t byteOffset = convertToUint(
+       self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
    if (try_catch.HasCaught()) return try_catch.ReThrow();
-   int32_t source_byteOffset =
-       convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
+   int32_t source_byteOffset = convertToUint(
+       source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
    if (try_catch.HasCaught()) return try_catch.ReThrow();

    uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
@@ -798,21 +997,22 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
    }
  } else {
    // Need to copy element-wise to make the right conversions.
-   Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
+   Handle<Object> buffer =
+       self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
    if (try_catch.HasCaught()) return try_catch.ReThrow();
    Handle<Object> source_buffer =
-       source->Get(Symbols::buffer(isolate))->ToObject();
+       source->Get(PerIsolateData::buffer_string(isolate))->ToObject();
    if (try_catch.HasCaught()) return try_catch.ReThrow();

    if (buffer->StrictEquals(source_buffer)) {
      // Same backing store, need to handle overlap correctly.
      // This gets a bit tricky in the case of different element sizes
      // (which, of course, is extremely unlikely to ever occur in practice).
-     int32_t byteOffset =
-         convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
+     int32_t byteOffset = convertToUint(
+         self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
      if (try_catch.HasCaught()) return try_catch.ReThrow();
-     int32_t source_byteOffset =
-         convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
+     int32_t source_byteOffset = convertToUint(
+         source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
      if (try_catch.HasCaught()) return try_catch.ReThrow();

      // Copy as much as we can from left to right.
@@ -860,8 +1060,8 @@ void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
                                      Persistent<Value> object,
                                      void* data) {
  HandleScope scope(isolate);
- int32_t length =
-     object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value();
+ int32_t length = object->ToObject()->Get(
+     PerIsolateData::byteLength_string(isolate))->Uint32Value();
  isolate->AdjustAmountOfExternalAllocatedMemory(-length);
  delete[] static_cast<uint8_t*>(data);
  object.Dispose(isolate);
@@ -1238,10 +1438,30 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
  global_template->Set(String::New("disableProfiler"),
                       FunctionTemplate::New(DisableProfiler));
// Bind the Realm object.
Handle<ObjectTemplate> realm_template = ObjectTemplate::New();
realm_template->Set(String::New("current"),
FunctionTemplate::New(RealmCurrent));
realm_template->Set(String::New("owner"),
FunctionTemplate::New(RealmOwner));
realm_template->Set(String::New("global"),
FunctionTemplate::New(RealmGlobal));
realm_template->Set(String::New("create"),
FunctionTemplate::New(RealmCreate));
realm_template->Set(String::New("dispose"),
FunctionTemplate::New(RealmDispose));
realm_template->Set(String::New("switch"),
FunctionTemplate::New(RealmSwitch));
realm_template->Set(String::New("eval"),
FunctionTemplate::New(RealmEval));
realm_template->SetAccessor(String::New("shared"),
RealmSharedGet, RealmSharedSet);
global_template->Set(String::New("Realm"), realm_template);

  // Bind the handlers for external arrays.
  PropertyAttribute attr =
      static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(Symbols::ArrayBuffer(isolate),
+ global_template->Set(PerIsolateData::ArrayBuffer_string(isolate),
                       CreateArrayBufferTemplate(ArrayBuffer), attr);
  global_template->Set(String::New("Int8Array"),
                       CreateArrayTemplate(Int8Array), attr);
@@ -1360,9 +1580,8 @@ struct CounterAndKey {
};

- int CompareKeys(const void* a, const void* b) {
-   return strcmp(static_cast<const CounterAndKey*>(a)->key,
-                 static_cast<const CounterAndKey*>(b)->key);
+ inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
+   return strcmp(lhs.key, rhs.key) < 0;
}
#endif  // V8_SHARED
@@ -1382,7 +1601,7 @@ void Shell::OnExit() {
      counters[j].counter = i.CurrentValue();
      counters[j].key = i.CurrentKey();
    }
-   qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
+   std::sort(counters, counters + number_of_counters);
    printf("+----------------------------------------------------------------+"
           "-------------+\n");
    printf("| Name                                                           |"
@@ -1469,7 +1688,8 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
  }
  Isolate* isolate = args.GetIsolate();
  Handle<Object> buffer = Object::New();
- buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
+ buffer->SetHiddenValue(
+     PerIsolateData::ArrayBufferMarkerPropName_string(isolate), True());
  Persistent<Object> persistent_buffer =
      Persistent<Object>::New(isolate, buffer);
  persistent_buffer.MakeWeak(isolate, data, ExternalArrayWeakCallback);
@@ -1478,7 +1698,7 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
  buffer->SetIndexedPropertiesToExternalArrayData(
      data, kExternalUnsignedByteArray, length);
- buffer->Set(Symbols::byteLength(isolate),
+ buffer->Set(PerIsolateData::byteLength_string(isolate),
             Int32::New(static_cast<int32_t>(length), isolate), ReadOnly);
  return buffer;
}
@@ -1521,6 +1741,7 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
void Shell::RunShell(Isolate* isolate) {
  Locker locker(isolate);
  Context::Scope context_scope(evaluation_context_);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
  HandleScope outer_scope(isolate);
  Handle<String> name = String::New("(d8)");
  LineEditor* console = LineEditor::Get();
@@ -1573,6 +1794,7 @@ void ShellThread::Run() {
    Persistent<Context> thread_context =
        Shell::CreateEvaluationContext(isolate_);
    Context::Scope context_scope(thread_context);
+   PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));

    while ((ptr != NULL) && (*ptr != '\0')) {
      HandleScope inner_scope(isolate_);
@@ -1671,10 +1893,11 @@ void SourceGroup::ExecuteInThread() {
      Isolate::Scope iscope(isolate);
      Locker lock(isolate);
      HandleScope scope(isolate);
-     Symbols symbols(isolate);
+     PerIsolateData data(isolate);
      Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
      {
        Context::Scope cscope(context);
+       PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
        Execute(isolate);
      }
      context.Dispose(isolate);
@@ -1883,6 +2106,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
    }
    {
      Context::Scope cscope(context);
+     PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
      options.isolate_sources[0].Execute(isolate);
    }
    if (!options.last_run) {
@@ -1933,7 +2157,7 @@ int Shell::Main(int argc, char* argv[]) {
#ifdef ENABLE_VTUNE_JIT_INTERFACE
    vTune::InitilizeVtuneForV8();
#endif
-   Symbols symbols(isolate);
+   PerIsolateData data(isolate);
    InitializeDebugger(isolate);

    if (options.stress_opt || options.stress_deopt) {

13
deps/v8/src/d8.h

@@ -298,6 +298,19 @@ class Shell : public i::AllStatic {
#endif  // ENABLE_DEBUGGER_SUPPORT
#endif  // V8_SHARED
static Handle<Value> RealmCurrent(const Arguments& args);
static Handle<Value> RealmOwner(const Arguments& args);
static Handle<Value> RealmGlobal(const Arguments& args);
static Handle<Value> RealmCreate(const Arguments& args);
static Handle<Value> RealmDispose(const Arguments& args);
static Handle<Value> RealmSwitch(const Arguments& args);
static Handle<Value> RealmEval(const Arguments& args);
static Handle<Value> RealmSharedGet(Local<String> property,
const AccessorInfo& info);
static void RealmSharedSet(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
  static Handle<Value> Print(const Arguments& args);
  static Handle<Value> Write(const Arguments& args);
  static Handle<Value> Quit(const Arguments& args);

1
deps/v8/src/debug.cc

@@ -612,7 +612,6 @@ void ScriptCache::Add(Handle<Script> script) {
    ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
    return;
  }
-
  // Globalize the script object, make it weak and use the location of the
  // global handle as the value in the hash map.
  Handle<Script> script_ =

1
deps/v8/src/deoptimizer.h

@@ -309,6 +309,7 @@ class Deoptimizer : public Malloced {
 protected:
  MacroAssembler* masm() const { return masm_; }
  BailoutType type() const { return type_; }
+ Isolate* isolate() const { return masm_->isolate(); }

  virtual void GeneratePrologue() { }

2
deps/v8/src/dtoa.cc

@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

- #include <math.h>
+ #include <cmath>

#include "../include/v8stdint.h"
#include "checks.h"

9
deps/v8/src/execution.cc

@@ -76,7 +76,7 @@ static Handle<Object> Invoke(bool is_construct,
  Isolate* isolate = function->GetIsolate();

  // Entering JavaScript.
- VMState state(isolate, JS);
+ VMState<JS> state(isolate);

  // Placeholder for return value.
  MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
@@ -426,6 +426,13 @@ bool StackGuard::IsTerminateExecution() {
}

+ void StackGuard::CancelTerminateExecution() {
+   ExecutionAccess access(isolate_);
+   Continue(TERMINATE);
+   isolate_->CancelTerminateExecution();
+ }
+
void StackGuard::TerminateExecution() {
  ExecutionAccess access(isolate_);
  thread_local_.interrupt_flags_ |= TERMINATE;
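CancelTerminateExecution clears the TERMINATE interrupt that TerminateExecution raised, so an isolate can run scripts again once a termination has been caught. A hedged embedder sketch; the v8::V8-level wrapper names are assumed from the 3.18-era public API rather than shown in this hunk:

    // Watchdog thread: abort a runaway script.
    v8::V8::TerminateExecution(isolate);

    // Main thread, after the stack has unwound:
    if (v8::V8::IsExecutionTerminating(isolate)) {
      // Without a cancel, the pending termination keeps killing new scripts.
      v8::V8::CancelTerminateExecution(isolate);  // assumed public wrapper
    }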

1
deps/v8/src/execution.h

@@ -190,6 +190,7 @@ class StackGuard {
  void Interrupt();
  bool IsTerminateExecution();
  void TerminateExecution();
+ void CancelTerminateExecution();
#ifdef ENABLE_DEBUGGER_SUPPORT
  bool IsDebugBreak();
  void DebugBreak();

13
deps/v8/src/extensions/gc-extension.cc

@@ -26,12 +26,11 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "gc-extension.h"
+ #include "platform.h"

namespace v8 {
namespace internal {

- const char* const GCExtension::kSource = "native function gc();";

v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
    v8::Handle<v8::String> str) {
@@ -50,7 +49,15 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {

void GCExtension::Register() {
- static GCExtension gc_extension;
+ static char buffer[50];
+ Vector<char> temp_vector(buffer, sizeof(buffer));
+ if (FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0) {
+   OS::SNPrintF(temp_vector, "native function %s();", FLAG_expose_gc_as);
+ } else {
+   OS::SNPrintF(temp_vector, "native function gc();");
+ }
+
+ static GCExtension gc_extension(buffer);
  static v8::DeclareExtension declaration(&gc_extension);
}

4
deps/v8/src/extensions/gc-extension.h

@@ -35,13 +35,11 @@ namespace internal {

class GCExtension : public v8::Extension {
 public:
- GCExtension() : v8::Extension("v8/gc", kSource) {}
+ explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {}
  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
      v8::Handle<v8::String> name);
  static v8::Handle<v8::Value> GC(const v8::Arguments& args);
  static void Register();
- private:
-  static const char* const kSource;
};

} }  // namespace v8::internal

12
deps/v8/src/factory.cc

@@ -476,6 +476,8 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
    Handle<Object> value) {
+ ALLOW_HANDLE_DEREF(isolate(),
+                    "converting a handle into a global property cell");
  CALL_HEAP_FUNCTION(
      isolate(),
      isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
@@ -1044,6 +1046,16 @@ void Factory::EnsureCanContainElements(Handle<JSArray> array,
}

+ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
+   JSFunction* array_buffer_fun =
+       isolate()->context()->native_context()->array_buffer_fun();
+   CALL_HEAP_FUNCTION(
+       isolate(),
+       isolate()->heap()->AllocateJSObject(array_buffer_fun),
+       JSArrayBuffer);
+ }
+
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
                                    Handle<Object> prototype) {
  CALL_HEAP_FUNCTION(

2
deps/v8/src/factory.h

@@ -313,6 +313,8 @@ class Factory {
      uint32_t length,
      EnsureElementsMode mode);

+ Handle<JSArrayBuffer> NewJSArrayBuffer();
+
  Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);

  // Change the type of the argument into a JS object/function and reinitialize.

2
deps/v8/src/fixed-dtoa.cc

@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

- #include <math.h>
+ #include <cmath>

#include "../include/v8stdint.h"
#include "checks.h"

11
deps/v8/src/flag-definitions.h

@@ -230,6 +230,9 @@ DEFINE_bool(stress_environments, false, "environment for every instruction")
DEFINE_int(deopt_every_n_times,
           0,
           "deoptimize every n times a deopt point is passed")
+ DEFINE_int(deopt_every_n_garbage_collections,
+            0,
+            "deoptimize every n garbage collections")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
@@ -344,6 +347,10 @@ DEFINE_bool(enable_vldr_imm, false,
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
+ DEFINE_string(expose_gc_as,
+               NULL,
+               "expose gc extension under the specified name")
+ DEFINE_implication(expose_gc_as, expose_gc)
DEFINE_bool(expose_externalize_string, false,
            "expose externalize string extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
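Paired with the GCExtension change above, the new flag renames the injected GC hook, and the DEFINE_implication line makes --expose_gc_as switch --expose_gc on automatically. A hypothetical d8 session:

    $ d8 --expose_gc_as=__forceGC      # implies --expose_gc
    d8> typeof __forceGC
    "function"
    d8> __forceGC()                    # same entry point gc() would expose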
@@ -666,9 +673,6 @@ DEFINE_bool(collect_heap_spill_statistics, false,

DEFINE_bool(trace_isolates, false, "trace isolate state changes")

- // VM state
- DEFINE_bool(log_state_changes, false, "Log state changes.")
-
// Regexp
DEFINE_bool(regexp_possessive_quantifier,
            false,
@@ -716,6 +720,7 @@ DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
DEFINE_bool(log_timer_events, false,
            "Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
+ DEFINE_implication(log_internal_timer_events, prof)

//
// Disassembler only flags

41
deps/v8/src/full-codegen.cc

@@ -923,6 +923,20 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
}

+ void FullCodeGenerator::EmitGeneratorSend(CallRuntime* expr) {
+   ZoneList<Expression*>* args = expr->arguments();
+   ASSERT(args->length() == 2);
+   EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::SEND);
+ }
+
+ void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
+   ZoneList<Expression*>* args = expr->arguments();
+   ASSERT(args->length() == 2);
+   EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::THROW);
+ }
+
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
  switch (expr->op()) {
    case Token::COMMA:
@@ -1241,9 +1255,12 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
  __ CallRuntime(Runtime::kPushWithContext, 2);
  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());

+ Scope* saved_scope = scope();
+ scope_ = stmt->scope();
  { WithOrCatch body(this);
    Visit(stmt->statement());
  }
+ scope_ = saved_scope;

  // Pop context.
  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -1548,30 +1565,6 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
}

- void FullCodeGenerator::VisitYield(Yield* expr) {
-   if (expr->is_delegating_yield())
-     UNIMPLEMENTED();
-
-   Comment cmnt(masm_, "[ Yield");
-   // TODO(wingo): Actually update the iterator state.
-   VisitForEffect(expr->generator_object());
-   VisitForAccumulatorValue(expr->expression());
-
-   // TODO(wingo): Assert that the operand stack depth is 0, at least while
-   // general yield expressions are unimplemented.
-
-   // TODO(wingo): What follows is as in VisitReturnStatement. Replace it with a
-   // call to a builtin that will resume the generator.
-   NestedStatement* current = nesting_stack_;
-   int stack_depth = 0;
-   int context_length = 0;
-   while (current != NULL) {
-     current = current->Exit(&stack_depth, &context_length);
-   }
-   __ Drop(stack_depth);
-   EmitReturnSequence();
- }
-
void FullCodeGenerator::VisitThrow(Throw* expr) {
  Comment cmnt(masm_, "[ Throw");
  VisitForStackValue(expr->exception());

5
deps/v8/src/full-codegen.h

@@ -486,6 +486,11 @@ class FullCodeGenerator: public AstVisitor {
  INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL

+ // Platform-specific code for resuming generators.
+ void EmitGeneratorResume(Expression *generator,
+                          Expression *value,
+                          JSGeneratorObject::ResumeMode resume_mode);
+
  // Platform-specific code for loading variables.
  void EmitLoadGlobalCheckExtensions(Variable* var,
                                     TypeofState typeof_state,

18
deps/v8/src/generator.js

@@ -44,7 +44,7 @@ function GeneratorObjectNext() {
                        ['[Generator].prototype.next', this]);
  }

- // TODO(wingo): Implement.
+ return %_GeneratorSend(this, void 0);
}

function GeneratorObjectSend(value) {
@@ -53,7 +53,7 @@ function GeneratorObjectSend(value) {
                        ['[Generator].prototype.send', this]);
  }

- // TODO(wingo): Implement.
+ return %_GeneratorSend(this, value);
}

function GeneratorObjectThrow(exn) {
@@ -62,16 +62,7 @@ function GeneratorObjectThrow(exn) {
                        ['[Generator].prototype.throw', this]);
  }

- // TODO(wingo): Implement.
+ return %_GeneratorThrow(this, exn);
}

- function GeneratorObjectClose() {
-   if (!IS_GENERATOR(this)) {
-     throw MakeTypeError('incompatible_method_receiver',
-                         ['[Generator].prototype.close', this]);
-   }
-
-   // TODO(wingo): Implement.
- }

function SetUpGenerators() {
@@ -81,8 +72,7 @@ function SetUpGenerators() {
                  DONT_ENUM | DONT_DELETE | READ_ONLY,
                  ["next", GeneratorObjectNext,
                   "send", GeneratorObjectSend,
-                  "throw", GeneratorObjectThrow,
-                  "close", GeneratorObjectClose]);
+                  "throw", GeneratorObjectThrow]);
  %SetProperty(GeneratorObjectPrototype, "constructor",
               GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
  %SetPrototype(GeneratorFunctionPrototype, $Function.prototype);
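With %_GeneratorSend and %_GeneratorThrow wired up (and close() dropped, tracking the evolving ES6 draft), the JS surface behaves roughly like this hypothetical session (a harmony-generators flag is assumed):

    function* counter() {
      var delta = yield 1;        // send()'s argument arrives here
      yield 1 + delta;
    }
    var g = counter();
    g.next();                     // resumes via %_GeneratorSend(g, void 0)
    g.send(41);                   // resumes via %_GeneratorSend(g, 41)
    g.throw(new Error("stop"));   // injects via %_GeneratorThrow(g, ...)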

176
deps/v8/src/global-handles.cc

@@ -37,7 +37,13 @@ namespace internal {

ObjectGroup::~ObjectGroup() {
- if (info_ != NULL) info_->Dispose();
+ if (info != NULL) info->Dispose();
+ delete[] objects;
+ }
+
+
+ ImplicitRefGroup::~ImplicitRefGroup() {
+   delete[] children;
}
@@ -267,7 +273,7 @@ class GlobalHandles::Node {
      ASSERT(!object_->IsExternalTwoByteString() ||
             ExternalTwoByteString::cast(object_)->resource() != NULL);
      // Leaving V8.
-     VMState state(isolate, EXTERNAL);
+     VMState<EXTERNAL> state(isolate);
      if (near_death_callback_ != NULL) {
        if (IsWeakCallback::decode(flags_)) {
          WeakReferenceCallback callback =
@@ -438,7 +444,8 @@ GlobalHandles::GlobalHandles(Isolate* isolate)
      first_block_(NULL),
      first_used_block_(NULL),
      first_free_(NULL),
-     post_gc_processing_count_(0) {}
+     post_gc_processing_count_(0),
+     object_group_connections_(kObjectGroupConnectionsCapacity) {}

GlobalHandles::~GlobalHandles() {
@@ -578,15 +585,16 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {

bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
                                        WeakSlotCallbackWithHeap can_skip) {
+ ComputeObjectGroupsAndImplicitReferences();
  int last = 0;
  bool any_group_was_visited = false;
  for (int i = 0; i < object_groups_.length(); i++) {
    ObjectGroup* entry = object_groups_.at(i);
    ASSERT(entry != NULL);

-   Object*** objects = entry->objects_;
+   Object*** objects = entry->objects;
    bool group_should_be_visited = false;
-   for (size_t j = 0; j < entry->length_; j++) {
+   for (size_t j = 0; j < entry->length; j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject()) {
        if (!can_skip(isolate_->heap(), &object)) {
@@ -603,7 +611,7 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
      // An object in the group requires visiting, so iterate over all
      // objects in the group.
-     for (size_t j = 0; j < entry->length_; ++j) {
+     for (size_t j = 0; j < entry->length; ++j) {
        Object* object = *objects[j];
        if (object->IsHeapObject()) {
          v->VisitPointer(&object);
@@ -613,7 +621,7 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
      // Once the entire group has been iterated over, set the object
      // group to NULL so it won't be processed again.
-     entry->Dispose();
+     delete entry;
      object_groups_.at(i) = NULL;
    }
    object_groups_.Rewind(last);
@@ -824,7 +832,23 @@ void GlobalHandles::AddObjectGroup(Object*** handles,
    if (info != NULL) info->Dispose();
    return;
  }
- object_groups_.Add(ObjectGroup::New(handles, length, info));
+ ObjectGroup* group = new ObjectGroup(length);
+ for (size_t i = 0; i < length; ++i)
+   group->objects[i] = handles[i];
+ group->info = info;
+ object_groups_.Add(group);
+ }
+
+
+ void GlobalHandles::SetObjectGroupId(Object** handle,
+                                      UniqueId id) {
+   object_group_connections_.Add(ObjectGroupConnection(id, handle));
+ }
+
+
+ void GlobalHandles::SetRetainedObjectInfo(UniqueId id,
+                                           RetainedObjectInfo* info) {
+   retainer_infos_.Add(ObjectGroupRetainerInfo(id, info));
}
@@ -838,23 +862,45 @@ void GlobalHandles::AddImplicitReferences(HeapObject** parent,
  }
#endif
  if (length == 0) return;
- implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
+ ImplicitRefGroup* group = new ImplicitRefGroup(parent, length);
+ for (size_t i = 0; i < length; ++i)
+   group->children[i] = children[i];
+ implicit_ref_groups_.Add(group);
}


+ void GlobalHandles::SetReferenceFromGroup(UniqueId id, Object** child) {
+   ASSERT(!Node::FromLocation(child)->is_independent());
+   implicit_ref_connections_.Add(ObjectGroupConnection(id, child));
+ }
+
+
+ void GlobalHandles::SetReference(HeapObject** parent, Object** child) {
+   ASSERT(!Node::FromLocation(child)->is_independent());
+   ImplicitRefGroup* group = new ImplicitRefGroup(parent, 1);
+   group->children[0] = child;
+   implicit_ref_groups_.Add(group);
+ }
+
+
void GlobalHandles::RemoveObjectGroups() {
- for (int i = 0; i < object_groups_.length(); i++) {
-   object_groups_.at(i)->Dispose();
- }
+ for (int i = 0; i < object_groups_.length(); i++)
+   delete object_groups_.at(i);
  object_groups_.Clear();
+ for (int i = 0; i < retainer_infos_.length(); ++i)
+   retainer_infos_[i].info->Dispose();
+ retainer_infos_.Clear();
+ object_group_connections_.Clear();
+ object_group_connections_.Initialize(kObjectGroupConnectionsCapacity);
}


void GlobalHandles::RemoveImplicitRefGroups() {
  for (int i = 0; i < implicit_ref_groups_.length(); i++) {
-   implicit_ref_groups_.at(i)->Dispose();
+   delete implicit_ref_groups_.at(i);
  }
  implicit_ref_groups_.Clear();
+ implicit_ref_connections_.Clear();
}
@@ -863,4 +909,108 @@ void GlobalHandles::TearDown() {
}
void GlobalHandles::ComputeObjectGroupsAndImplicitReferences() {
if (object_group_connections_.length() == 0) {
for (int i = 0; i < retainer_infos_.length(); ++i)
retainer_infos_[i].info->Dispose();
retainer_infos_.Clear();
implicit_ref_connections_.Clear();
return;
}
object_group_connections_.Sort();
retainer_infos_.Sort();
implicit_ref_connections_.Sort();
int info_index = 0; // For iterating retainer_infos_.
UniqueId current_group_id(0);
int current_group_start = 0;
int current_implicit_refs_start = 0;
int current_implicit_refs_end = 0;
for (int i = 0; i <= object_group_connections_.length(); ++i) {
if (i == 0)
current_group_id = object_group_connections_[i].id;
if (i == object_group_connections_.length() ||
current_group_id != object_group_connections_[i].id) {
// Group detected: objects in indices [current_group_start, i[.
// Find out which implicit references are related to this group. (We want
// to ignore object groups which only have 1 object, but that object is
// needed as a representative object for the implicit reference group.)
while (current_implicit_refs_start < implicit_ref_connections_.length() &&
implicit_ref_connections_[current_implicit_refs_start].id <
current_group_id)
++current_implicit_refs_start;
current_implicit_refs_end = current_implicit_refs_start;
while (current_implicit_refs_end < implicit_ref_connections_.length() &&
implicit_ref_connections_[current_implicit_refs_end].id ==
current_group_id)
++current_implicit_refs_end;
if (current_implicit_refs_end > current_implicit_refs_start) {
// Find a representative object for the implicit references.
HeapObject** representative = NULL;
for (int j = current_group_start; j < i; ++j) {
Object** object = object_group_connections_[j].object;
if ((*object)->IsHeapObject()) {
representative = reinterpret_cast<HeapObject**>(object);
break;
}
}
if (representative) {
ImplicitRefGroup* group = new ImplicitRefGroup(
representative,
current_implicit_refs_end - current_implicit_refs_start);
for (int j = current_implicit_refs_start;
j < current_implicit_refs_end;
++j) {
group->children[j - current_implicit_refs_start] =
implicit_ref_connections_[j].object;
}
implicit_ref_groups_.Add(group);
}
current_implicit_refs_start = current_implicit_refs_end;
}
// Find a RetainedObjectInfo for the group.
RetainedObjectInfo* info = NULL;
while (info_index < retainer_infos_.length() &&
retainer_infos_[info_index].id < current_group_id) {
retainer_infos_[info_index].info->Dispose();
++info_index;
}
if (info_index < retainer_infos_.length() &&
retainer_infos_[info_index].id == current_group_id) {
// This object group has an associated ObjectGroupRetainerInfo.
info = retainer_infos_[info_index].info;
++info_index;
}
// Ignore groups which only contain one object.
if (i > current_group_start + 1) {
ObjectGroup* group = new ObjectGroup(i - current_group_start);
for (int j = current_group_start; j < i; ++j) {
group->objects[j - current_group_start] =
object_group_connections_[j].object;
}
group->info = info;
object_groups_.Add(group);
} else if (info) {
info->Dispose();
}
if (i < object_group_connections_.length()) {
current_group_id = object_group_connections_[i].id;
current_group_start = i;
}
}
}
object_group_connections_.Clear();
object_group_connections_.Initialize(kObjectGroupConnectionsCapacity);
retainer_infos_.Clear();
implicit_ref_connections_.Clear();
}
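// ComputeObjectGroupsAndImplicitReferences above is a single sorted-merge
// pass: sort the (id, handle) connections, then walk them once, cutting a
// group at every id boundary. A standalone sketch of that pattern follows;
// the types are hypothetical stand-ins, not V8 code.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Connection {
  uint64_t id;    // group id (V8's UniqueId wraps a comparable integer)
  void* object;   // stand-in for Object**
  bool operator<(const Connection& other) const { return id < other.id; }
};

std::vector<std::vector<void*> > BuildGroups(std::vector<Connection> conns) {
  std::vector<std::vector<void*> > groups;
  std::sort(conns.begin(), conns.end());
  size_t start = 0;
  for (size_t i = 1; i <= conns.size(); ++i) {
    if (i == conns.size() || conns[i].id != conns[start].id) {
      // Run [start, i) shares one id. Like the function above, a singleton
      // run could be skipped, since a one-object group retains nothing extra.
      std::vector<void*> group;
      for (size_t j = start; j < i; ++j) group.push_back(conns[j].object);
      groups.push_back(group);
      start = i;
    }
  }
  return groups;
}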
} }  // namespace v8::internal

148
deps/v8/src/global-handles.h

@@ -28,6 +28,7 @@
#ifndef V8_GLOBAL_HANDLES_H_
#define V8_GLOBAL_HANDLES_H_

#include "../include/v8.h"
#include "../include/v8-profiler.h"

#include "list.h"
@@ -46,70 +47,76 @@ class ObjectVisitor;
// At GC the destroyed global handles are removed from the free list
// and deallocated.

// Data structures for tracking object groups and implicit references.

// An object group is treated like a single JS object: if one of object in
// the group is alive, all objects in the same group are considered alive.
// An object group is used to simulate object relationship in a DOM tree.

// An implicit references group consists of two parts: a parent object and a
// list of children objects. If the parent is alive, all the children are alive
// too.

struct ObjectGroup {
  explicit ObjectGroup(size_t length)
      : info(NULL), length(length) {
    ASSERT(length > 0);
    objects = new Object**[length];
  }
  ~ObjectGroup();

  v8::RetainedObjectInfo* info;
  Object*** objects;
  size_t length;
};


struct ImplicitRefGroup {
  ImplicitRefGroup(HeapObject** parent, size_t length)
      : parent(parent), length(length) {
    ASSERT(length > 0);
    children = new Object**[length];
  }
  ~ImplicitRefGroup();

  HeapObject** parent;
  Object*** children;
  size_t length;
};


// For internal bookkeeping.
struct ObjectGroupConnection {
  ObjectGroupConnection(UniqueId id, Object** object)
      : id(id), object(object) {}

  bool operator==(const ObjectGroupConnection& other) const {
    return id == other.id;
  }

  bool operator<(const ObjectGroupConnection& other) const {
    return id < other.id;
  }

  UniqueId id;
  Object** object;
};


struct ObjectGroupRetainerInfo {
  ObjectGroupRetainerInfo(UniqueId id, RetainedObjectInfo* info)
      : id(id), info(info) {}

  bool operator==(const ObjectGroupRetainerInfo& other) const {
    return id == other.id;
  }

  bool operator<(const ObjectGroupRetainerInfo& other) const {
    return id < other.id;
  }

  UniqueId id;
  RetainedObjectInfo* info;
};
@@ -218,6 +225,16 @@ class GlobalHandles {
                      size_t length,
                      v8::RetainedObjectInfo* info);
// Associates handle with the object group represented by id.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a garbage collection.
void SetObjectGroupId(Object** handle, UniqueId id);
// Set RetainedObjectInfo for an object group. Should not be called more than
// once for a group. Should not be called for a group which contains no
// handles.
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
  // Add an implicit references' group.
  // Should be only used in GC callback function before a collection.
  // All groups are destroyed after a mark-compact collection.
@@ -225,11 +242,23 @@ class GlobalHandles {
                          Object*** children,
                          size_t length);
  // Adds an implicit reference from a group to an object. Should be only used
  // in GC callback function before a collection. All implicit references are
  // destroyed after a mark-compact collection.
  void SetReferenceFromGroup(UniqueId id, Object** child);

  // Adds an implicit reference from a parent object to a child object. Should
  // be only used in GC callback function before a collection. All implicit
  // references are destroyed after a mark-compact collection.
  void SetReference(HeapObject** parent, Object** child);

  // Returns the object groups.
  List<ObjectGroup*>* object_groups() {
    ComputeObjectGroupsAndImplicitReferences();
    return &object_groups_;
  }

  // Returns the implicit references' groups.
  List<ImplicitRefGroup*>* implicit_ref_groups() {
    ComputeObjectGroupsAndImplicitReferences();
    return &implicit_ref_groups_;
  }
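// Together with SetObjectGroupId and SetRetainedObjectInfo above, these
// calls let a GC prologue callback describe groups one handle at a time
// instead of handing over prebuilt arrays. A hedged usage sketch against the
// internal API declared here; the callback wiring, handle values, and the
// helper name are assumptions, not part of this diff.
void DescribeWrapperGroups(v8::internal::GlobalHandles* global_handles,
                           v8::internal::Object** wrapper_a,
                           v8::internal::Object** wrapper_b,
                           v8::internal::Object** retained_child,
                           v8::RetainedObjectInfo* info) {
  v8::UniqueId dom_tree_id(0x1234);  // any id that is unique per group
  // Two handles joined into one group: if either is alive, both survive.
  global_handles->SetObjectGroupId(wrapper_a, dom_tree_id);
  global_handles->SetObjectGroupId(wrapper_b, dom_tree_id);
  // Optional snapshot metadata; at most once per group, per the comment above.
  global_handles->SetRetainedObjectInfo(dom_tree_id, info);
  // An implicit reference: if the group is alive, the child is kept alive.
  global_handles->SetReferenceFromGroup(dom_tree_id, retained_child);
}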
@@ -250,6 +279,15 @@ class GlobalHandles {
 private:
  explicit GlobalHandles(Isolate* isolate);
// Migrates data from the internal representation (object_group_connections_,
// retainer_infos_ and implicit_ref_connections_) to the public and more
// efficient representation (object_groups_ and implicit_ref_groups_).
void ComputeObjectGroupsAndImplicitReferences();
// v8::internal::List is inefficient even for small number of elements, if we
// don't assign any initial capacity.
static const int kObjectGroupConnectionsCapacity = 20;
  // Internal node structures.
  class Node;
  class NodeBlock;
@@ -275,9 +313,17 @@ class GlobalHandles {
  int post_gc_processing_count_;
// Object groups and implicit references, public and more efficient
// representation.
  List<ObjectGroup*> object_groups_;
  List<ImplicitRefGroup*> implicit_ref_groups_;
// Object groups and implicit references, temporary representation while
// constructing the groups.
List<ObjectGroupConnection> object_group_connections_;
List<ObjectGroupRetainerInfo> retainer_infos_;
List<ObjectGroupConnection> implicit_ref_connections_;
  friend class Isolate;

  DISALLOW_COPY_AND_ASSIGN(GlobalHandles);

53
deps/v8/src/handles-inl.h

@@ -55,13 +55,8 @@ template <typename T>
inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
  ASSERT(location_ == NULL ||
         reinterpret_cast<Address>(*location_) != kZapValue);
  // Dereferencing deferred handles to check object equality is safe.
  SLOW_ASSERT(IsDereferenceAllowed(true) && other.IsDereferenceAllowed(true));
  return *location_ == *other.location_;
}
@@ -70,7 +65,7 @@ template <typename T>
inline T* Handle<T>::operator*() const {
  ASSERT(location_ != NULL);
  ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
  SLOW_ASSERT(IsDereferenceAllowed(false));
  return *BitCast<T**>(location_);
}
@@ -78,10 +73,44 @@ template <typename T>
inline T** Handle<T>::location() const {
  ASSERT(location_ == NULL ||
         reinterpret_cast<Address>(*location_) != kZapValue);
  SLOW_ASSERT(IsDereferenceAllowed(false));
  return location_;
}
#ifdef DEBUG
template <typename T>
bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
if (location_ == NULL) return true;
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
Isolate* isolate = heap_object->GetIsolate();
Object** handle = reinterpret_cast<Object**>(location_);
Object** roots_array_start = isolate->heap()->roots_array_start();
if (roots_array_start <= handle &&
handle < roots_array_start + Heap::kStrongRootListLength) {
return true;
}
if (isolate->optimizing_compiler_thread()->IsOptimizerThread() &&
!Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) {
return false;
}
switch (isolate->HandleDereferenceGuardState()) {
case HandleDereferenceGuard::ALLOW:
return true;
case HandleDereferenceGuard::DISALLOW:
return false;
case HandleDereferenceGuard::DISALLOW_DEFERRED:
// Accessing maps and internalized strings is safe.
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
return allow_deferred || !isolate->IsDeferredHandle(handle);
}
return false;
}
#endif
HandleScope::HandleScope(Isolate* isolate) {
  v8::ImplementationUtilities::HandleScopeData* current =

@@ -181,13 +210,13 @@ inline NoHandleAllocation::~NoHandleAllocation() {
HandleDereferenceGuard::HandleDereferenceGuard(Isolate* isolate, State state)
    : isolate_(isolate) {
  old_state_ = isolate_->HandleDereferenceGuardState();
  isolate_->SetHandleDereferenceGuardState(state);
}

HandleDereferenceGuard::~HandleDereferenceGuard() {
  isolate_->SetHandleDereferenceGuardState(old_state_);
}

#endif

4
deps/v8/src/handles.cc

@@ -565,7 +565,7 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
    LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
    {
      // Leaving JavaScript.
      VMState<EXTERNAL> state(isolate);
      result = enum_fun(info);
    }
  }
@@ -590,7 +590,7 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
    LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
    {
      // Leaving JavaScript.
      VMState<EXTERNAL> state(isolate);
      result = enum_fun(info);
#if ENABLE_EXTRA_CHECKS
      CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());

20
deps/v8/src/handles.h

@@ -73,8 +73,8 @@ class Handle {
  INLINE(T** location() const);

  template <class S> static Handle<T> cast(Handle<S> that) {
    T::cast(*reinterpret_cast<T**>(that.location_));
    return Handle<T>(reinterpret_cast<T**>(that.location_));
  }

  static Handle<T> null() { return Handle<T>(); }
@@ -84,6 +84,10 @@ class Handle {
  // implementation in api.h.
  inline Handle<T> EscapeFrom(v8::HandleScope* scope);
#ifdef DEBUG
bool IsDereferenceAllowed(bool allow_deferred) const;
#endif // DEBUG
 private:
  T** location_;
@@ -341,7 +345,7 @@ class NoHandleAllocation BASE_EMBEDDED {
class HandleDereferenceGuard BASE_EMBEDDED {
 public:
  enum State { ALLOW, DISALLOW, DISALLOW_DEFERRED };
#ifndef DEBUG
  HandleDereferenceGuard(Isolate* isolate, State state) { }
  ~HandleDereferenceGuard() { }
@@ -350,10 +354,18 @@ class HandleDereferenceGuard BASE_EMBEDDED {
  inline ~HandleDereferenceGuard();
 private:
  Isolate* isolate_;
  State old_state_;
#endif
};
#ifdef DEBUG
#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe) \
HandleDereferenceGuard allow_deref(isolate, \
HandleDereferenceGuard::ALLOW);
#else
#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe)
#endif // DEBUG
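// The macro gives debug builds a scoped escape hatch: it declares a
// HandleDereferenceGuard in ALLOW state, and compiles away in release
// builds. A hedged sketch of how a code path that must read through a handle
// under a DISALLOW guard would use it; the surrounding function, "isolate",
// and the handle are assumed context, not part of this diff.
void EmitConstant(Isolate* isolate, Handle<Object> constant) {
  // The justification string is documentation only; the macro ignores it.
  ALLOW_HANDLE_DEREF(isolate, "embedding an immutable constant is safe");
  Object* raw = *constant;  // would trip SLOW_ASSERT in debug without this
  USE(raw);
}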
} }  // namespace v8::internal

#endif  // V8_HANDLES_H_

30
deps/v8/src/heap-inl.h

@@ -211,6 +211,7 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
                               AllocationSpace space,
                               AllocationSpace retry_space) {
  SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread());
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  ASSERT(space != NEW_SPACE ||
         retry_space == OLD_POINTER_SPACE ||
@@ -577,14 +578,14 @@ Isolate* Heap::isolate() {
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.

#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
  do {                                                                    \
    GC_GREEDY_CHECK();                                                    \
    MaybeObject* __maybe_object__ = FUNCTION_CALL;                        \
    Object* __object__ = NULL;                                            \
    if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
    if (__maybe_object__->IsOutOfMemory()) {                              \
      OOM;                                                                \
    }                                                                     \
    if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
    ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)->     \
@@ -593,7 +594,7 @@ Isolate* Heap::isolate() {
    __maybe_object__ = FUNCTION_CALL;                                     \
    if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
    if (__maybe_object__->IsOutOfMemory()) {                              \
      OOM;                                                                \
    }                                                                     \
    if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
    ISOLATE->counters()->gc_last_resort_from_handles()->Increment();      \
@@ -603,30 +604,41 @@ Isolate* Heap::isolate() {
      __maybe_object__ = FUNCTION_CALL;                                   \
    }                                                                     \
    if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
    if (__maybe_object__->IsOutOfMemory()) {                              \
      OOM;                                                                \
    }                                                                     \
    if (__maybe_object__->IsRetryAfterGC()) {                             \
      /* TODO(1181417): Fix this. */                                      \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
    }                                                                     \
    RETURN_EMPTY;                                                         \
  } while (false)
#define CALL_AND_RETRY_OR_DIE( \
ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
CALL_AND_RETRY( \
ISOLATE, \
FUNCTION_CALL, \
RETURN_VALUE, \
RETURN_EMPTY, \
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
  CALL_AND_RETRY_OR_DIE(ISOLATE,                                              \
                        FUNCTION_CALL,                                        \
                        return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
                        return Handle<TYPE>())                                \


#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
#define CALL_HEAP_FUNCTION_PASS_EXCEPTION(ISOLATE, FUNCTION_CALL) \
  CALL_AND_RETRY(ISOLATE,                                         \
                 FUNCTION_CALL,                                   \
                 return __object__,                                \
                 return __maybe_object__,                          \
                 return __maybe_object__)
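// The new OOM parameter splits one hard-wired policy in two: CALL_HEAP_FUNCTION
// and CALL_HEAP_FUNCTION_VOID keep the old die-on-OOM behaviour through
// CALL_AND_RETRY_OR_DIE, while CALL_HEAP_FUNCTION_PASS_EXCEPTION now returns
// the failure object instead of aborting. A rough, self-contained sketch of
// the control flow the macro parameterizes; every name is a hypothetical
// stand-in for the macro's expansion context.
#include <functional>

enum class Status { kObject, kOutOfMemory, kRetryAfterGC };

Status RetryAllocation(std::function<Status()> try_allocate,
                       std::function<void()> collect_garbage,
                       std::function<Status()> on_oom) {
  for (int attempt = 0; attempt < 2; ++attempt) {
    Status s = try_allocate();
    if (s == Status::kObject) return s;                // success
    if (s == Status::kOutOfMemory) return on_oom();    // the new OOM argument
    collect_garbage();                                 // RetryAfterGC: GC, retry
  }
  // Last-resort attempt after GC failed too; the _OR_DIE wrapper would call
  // FatalProcessOutOfMemory here, the _PASS_EXCEPTION variant just returns.
  return on_oom();
}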

5
deps/v8/src/heap-profiler.cc

@@ -140,5 +140,10 @@ void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
  snapshots_->ObjectMoveEvent(from, to);
}
void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
RetainedObjectInfo* info) {
// TODO(yurus, marja): Don't route this information through GlobalHandles.
heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info);
}
} }  // namespace v8::internal

2
deps/v8/src/heap-profiler.h

@@ -80,6 +80,8 @@ class HeapProfiler {
    return snapshots_->is_tracking_objects();
  }
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
 private:
  Heap* heap() const { return snapshots_->heap(); }

55
deps/v8/src/heap-snapshot-generator.cc

@@ -190,7 +190,6 @@ template <> struct SnapshotSizeConstants<4> {
  static const int kExpectedHeapEntrySize = 24;
  static const int kExpectedHeapSnapshotsCollectionSize = 100;
  static const int kExpectedHeapSnapshotSize = 132;
static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};

template <> struct SnapshotSizeConstants<8> {

@@ -198,8 +197,6 @@ template <> struct SnapshotSizeConstants<8> {
  static const int kExpectedHeapEntrySize = 32;
  static const int kExpectedHeapSnapshotsCollectionSize = 152;
  static const int kExpectedHeapSnapshotSize = 160;
static const uint64_t kMaxSerializableSnapshotRawSize =
static_cast<uint64_t>(6000) * MB;
};

}  // namespace
@@ -1939,18 +1936,19 @@ void NativeObjectsExplorer::FillRetainedObjects() {
  Isolate* isolate = Isolate::Current();
  const GCType major_gc_type = kGCTypeMarkSweepCompact;
  // Record objects that are joined into ObjectGroups.
  isolate->heap()->CallGCPrologueCallbacks(
      major_gc_type, kGCCallbackFlagConstructRetainedObjectInfos);
  List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
  for (int i = 0; i < groups->length(); ++i) {
    ObjectGroup* group = groups->at(i);
    if (group->info == NULL) continue;
    List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info);
    for (size_t j = 0; j < group->length; ++j) {
      HeapObject* obj = HeapObject::cast(*group->objects[j]);
      list->Add(obj);
      in_groups_.Insert(obj);
    }
    group->info = NULL;  // Acquire info object ownership.
  }
  isolate->global_handles()->RemoveObjectGroups();
  isolate->heap()->CallGCEpilogueCallbacks(major_gc_type);
@@ -1966,12 +1964,12 @@ void NativeObjectsExplorer::FillImplicitReferences() {
      isolate->global_handles()->implicit_ref_groups();
  for (int i = 0; i < groups->length(); ++i) {
    ImplicitRefGroup* group = groups->at(i);
    HeapObject* parent = *group->parent;
    int parent_entry =
        filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
    ASSERT(parent_entry != HeapEntry::kNoEntry);
    Object*** children = group->children;
    for (size_t j = 0; j < group->length; ++j) {
      Object* child = *children[j];
      HeapEntry* child_entry =
          filler_->FindOrAddEntry(child, native_entries_allocator_);
@@ -2384,42 +2382,9 @@ const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
  ASSERT(writer_ == NULL);
  writer_ = new OutputStreamWriter(stream);
HeapSnapshot* original_snapshot = NULL;
if (snapshot_->RawSnapshotSize() >=
SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
// The snapshot is too big. Serialize a fake snapshot.
original_snapshot = snapshot_;
snapshot_ = CreateFakeSnapshot();
}
  SerializeImpl();

  delete writer_;
  writer_ = NULL;
if (original_snapshot != NULL) {
delete snapshot_;
snapshot_ = original_snapshot;
}
}
HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
snapshot_->title(),
snapshot_->uid());
result->AddRootEntry();
const char* text = snapshot_->collection()->names()->GetFormatted(
"The snapshot is too big. "
"Maximum snapshot size is %" V8_PTR_PREFIX "u MB. "
"Actual snapshot size is %" V8_PTR_PREFIX "u MB.",
SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
(snapshot_->RawSnapshotSize() + MB - 1) / MB);
HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
result->FillChildren();
return result;
} }

1
deps/v8/src/heap-snapshot-generator.h

@@ -655,7 +655,6 @@ class HeapSnapshotJSONSerializer {
        v8::internal::kZeroHashSeed);
  }
HeapSnapshot* CreateFakeSnapshot();
  int GetStringId(const char* s);
  int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
  void SerializeEdge(HeapGraphEdge* edge, bool first_edge);

73
deps/v8/src/heap.cc

@@ -157,12 +157,14 @@ Heap::Heap()
      ms_count_at_last_idle_notification_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
      no_weak_embedded_maps_verification_scope_depth_(0),
#endif
      promotion_queue_(this),
      configured_(false),
      chunks_queued_for_free_(NULL),
      relocation_mutex_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
@@ -487,6 +489,12 @@ void Heap::GarbageCollectionEpilogue() {
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
if (FLAG_deopt_every_n_garbage_collections > 0) {
if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
Deoptimizer::DeoptimizeAll(isolate());
gcs_since_last_deopt_ = 0;
}
}
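// The new --deopt-every-n-garbage-collections hook above is a plain modular
// counter. A standalone sketch of the pattern, with hypothetical names; a
// value of 0 (the flag's default) disables the action entirely.
struct EveryNth {
  explicit EveryNth(int n) : n_(n), count_(0) {}
  bool Tick() {  // returns true when the periodic action should run
    if (n_ <= 0) return false;
    if (++count_ == n_) { count_ = 0; return true; }
    return false;
  }
  int n_;
  int count_;
};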
  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));
@@ -599,7 +607,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
                          const char* gc_reason,
                          const char* collector_reason) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
@@ -885,8 +893,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
  }

  EnsureFromSpaceIsCommitted();
@@ -1007,7 +1015,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCEpilogueCallbacks(gc_type);
  }
@@ -1021,13 +1029,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
}


void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
    global_gc_prologue_callback_();
  }
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, flags);
    }
  }
}
@@ -1293,6 +1301,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
  RelocationLock relocation_lock(this);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@@ -2745,7 +2755,7 @@ bool Heap::CreateInitialObjects() {
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(HeapNumber::cast(obj));
  ASSERT(std::signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
@@ -3414,14 +3424,14 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
    return Failure::OutOfMemoryException(0x4);
  }

  bool is_one_byte_data_in_two_byte_string = false;
  if (!is_one_byte) {
    // At least one of the strings uses two-byte representation so we
    // can't use the fast case code for short ASCII strings below, but
    // we can try to save memory if all chars actually fit in ASCII.
    is_one_byte_data_in_two_byte_string =
        first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
    if (is_one_byte_data_in_two_byte_string) {
      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
    }
  }
@@ -3456,7 +3466,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
    for (int i = 0; i < second_length; i++) *dest++ = src[i];
    return result;
  } else {
    if (is_one_byte_data_in_two_byte_string) {
      Object* result;
      { MaybeObject* maybe_result = AllocateRawOneByteString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3481,7 +3491,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
    }
  }

  Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
      cons_ascii_string_map() : cons_string_map();

  Object* result;
@@ -3627,11 +3637,11 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
  // For small strings we check whether the resource contains only
  // one byte characters. If yes, we use a different string map.
  static const size_t kOneByteCheckLengthLimit = 32;
  bool is_one_byte = length <= kOneByteCheckLengthLimit &&
      String::IsOneByte(resource->data(), static_cast<int>(length));
  Map* map = is_one_byte ?
      external_string_with_one_byte_data_map() : external_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4967,14 +4977,14 @@ Map* Heap::InternalizedStringMapForString(String* string) {
    case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
    case EXTERNAL_ASCII_STRING_TYPE:
      return external_ascii_internalized_string_map();
    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
      return external_internalized_string_with_one_byte_data_map();
    case SHORT_EXTERNAL_STRING_TYPE:
      return short_external_internalized_string_map();
    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
      return short_external_ascii_internalized_string_map();
    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
      return short_external_internalized_string_with_one_byte_data_map();
    default: return NULL;  // No match found.
  }
}
@@ -6628,6 +6638,11 @@ bool Heap::SetUp() {
  store_buffer()->SetUp();
if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
#ifdef DEBUG
relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
  return true;
}
@@ -6730,6 +6745,8 @@ void Heap::TearDown() {
  incremental_marking()->TearDown();

  isolate_->memory_allocator()->TearDown();

  delete relocation_mutex_;
}
@@ -7689,7 +7706,8 @@ void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
      if (!getter_obj->IsJSFunction()) continue;
      getter_fun = JSFunction::cast(getter_obj);
      String* key = isolate->heap()->hidden_stack_trace_string();
      Object* value = getter_fun->GetHiddenProperty(key);
      if (key != value) continue;
    }

    budget--;
@@ -7859,4 +7877,15 @@ void Heap::CheckpointObjectStats() {
  ClearObjectStats();
}
Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
if (FLAG_parallel_recompilation) {
heap_->relocation_mutex_->Lock();
#ifdef DEBUG
heap_->relocation_mutex_locked_by_optimizer_thread_ =
heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
#endif // DEBUG
}
}
} }  // namespace v8::internal

69
deps/v8/src/heap.h

@@ -28,7 +28,7 @@
#ifndef V8_HEAP_H_
#define V8_HEAP_H_

#include <cmath>

#include "allocation.h"
#include "globals.h"
@@ -95,12 +95,14 @@ namespace internal {
  V(Map, sliced_string_map, SlicedStringMap)                                 \
  V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                      \
  V(Map, external_string_map, ExternalStringMap)                            \
  V(Map,                                                                    \
    external_string_with_one_byte_data_map,                                 \
    ExternalStringWithOneByteDataMap)                                       \
  V(Map, external_ascii_string_map, ExternalAsciiStringMap)                 \
  V(Map, short_external_string_map, ShortExternalStringMap)                 \
  V(Map,                                                                    \
    short_external_string_with_one_byte_data_map,                           \
    ShortExternalStringWithOneByteDataMap)                                  \
  V(Map, internalized_string_map, InternalizedStringMap)                    \
  V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap)         \
  V(Map, cons_internalized_string_map, ConsInternalizedStringMap)           \

@@ -109,8 +111,8 @@ namespace internal {
    external_internalized_string_map,                                       \
    ExternalInternalizedStringMap)                                          \
  V(Map,                                                                    \
    external_internalized_string_with_one_byte_data_map,                    \
    ExternalInternalizedStringWithOneByteDataMap)                           \
  V(Map,                                                                    \
    external_ascii_internalized_string_map,                                 \
    ExternalAsciiInternalizedStringMap)                                     \

@@ -118,8 +120,8 @@ namespace internal {
    short_external_internalized_string_map,                                 \
    ShortExternalInternalizedStringMap)                                     \
  V(Map,                                                                    \
    short_external_internalized_string_with_one_byte_data_map,              \
    ShortExternalInternalizedStringWithOneByteDataMap)                      \
  V(Map,                                                                    \
    short_external_ascii_internalized_string_map,                           \
    ShortExternalAsciiInternalizedStringMap)                                \
@@ -240,6 +242,8 @@ namespace internal {
  V(elements_field_string, "%elements")                                     \
  V(length_field_string, "%length")                                         \
  V(function_class_string, "Function")                                      \
V(properties_field_symbol, "%properties") \
V(payload_field_symbol, "%payload") \
  V(illegal_argument_string, "illegal argument")                            \
  V(MakeReferenceError_string, "MakeReferenceError")                        \
  V(MakeSyntaxError_string, "MakeSyntaxError")                              \
@@ -693,6 +697,12 @@ class Heap {
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
// Allocates a JS ArrayBuffer object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSArrayBuffer();
  // Allocates a Harmony proxy or function proxy.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
@@ -1543,7 +1553,8 @@ class Heap {
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
    const int divisor = FLAG_stress_compaction ? 10 :
        new_space_high_promotion_mode_active_ ? 1 : 3;
    intptr_t limit =
        Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
    limit += new_space_.Capacity();
@@ -1553,7 +1564,8 @@ class Heap {
  }

  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
    const int divisor = FLAG_stress_compaction ? 8 :
        new_space_high_promotion_mode_active_ ? 1 : 2;
    intptr_t limit =
        Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
    limit += new_space_.Capacity();
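// Dropping the divisor to 1 in high-promotion mode doubles how much the old
// generation may grow before the limit triggers. A worked example with
// assumed sizes (the new-space capacity term and the kMinimum* floors are
// ignored for clarity):
//   old_gen_size = 300 MB
//   normal mode (divisor 2):         limit = 300 + 300 / 2 = 450 MB
//   high-promotion mode (divisor 1): limit = 300 + 300 / 1 = 600 MB
// So while young objects are being aggressively tenured, the old generation
// is allowed to roughly double before the next old-generation GC.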
@@ -1753,7 +1765,7 @@ class Heap {
  inline Isolate* isolate();

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type);

  inline bool OldGenerationAllocationLimitReached();
@@ -1848,6 +1860,31 @@ class Heap {
  void CheckpointObjectStats();
// We don't use a ScopedLock here since we want to lock the heap
// only when FLAG_parallel_recompilation is true.
class RelocationLock {
public:
explicit RelocationLock(Heap* heap);
~RelocationLock() {
if (FLAG_parallel_recompilation) {
#ifdef DEBUG
heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
heap_->relocation_mutex_->Unlock();
}
}
#ifdef DEBUG
static bool IsLockedByOptimizerThread(Heap* heap) {
return heap->relocation_mutex_locked_by_optimizer_thread_;
}
#endif // DEBUG
private:
Heap* heap_;
};
 private:
  Heap();
@@ -2295,6 +2332,11 @@ class Heap {
  unsigned int gc_count_at_last_idle_gc_;
  int scavenges_since_last_idle_round_;
// If the --deopt_every_n_garbage_collections flag is set to a positive value,
// this variable holds the number of garbage collections since the last
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
#ifdef VERIFY_HEAP
  int no_weak_embedded_maps_verification_scope_depth_;
#endif
@@ -2317,6 +2359,11 @@ class Heap {
  MemoryChunk* chunks_queued_for_free_;
Mutex* relocation_mutex_;
#ifdef DEBUG
bool relocation_mutex_locked_by_optimizer_thread_;
#endif  // DEBUG
  friend class Factory;
  friend class GCTracer;
  friend class DisallowAllocationFailure;

106
deps/v8/src/hydrogen-instructions.cc

@@ -1310,20 +1310,18 @@ const char* HUnaryMathOperation::OpName() const {
  switch (op()) {
    case kMathFloor: return "floor";
    case kMathRound: return "round";
    case kMathAbs: return "abs";
    case kMathLog: return "log";
    case kMathSin: return "sin";
    case kMathCos: return "cos";
    case kMathTan: return "tan";
    case kMathExp: return "exp";
    case kMathSqrt: return "sqrt";
    case kMathPowHalf: return "pow-half";
    default:
      UNREACHABLE();
      return NULL;
  }
}
@@ -1453,7 +1451,7 @@ HValue* HSub::Canonicalize() {
HValue* HMul::Canonicalize() {
  if (IsIdentityOperation(left(), right(), 1)) return left();
  if (IsIdentityOperation(right(), left(), 1)) return right();
  return this;
}
@@ -1683,9 +1681,15 @@ void HInstanceOf::PrintDataTo(StringStream* stream) {
Range* HValue::InferRange(Zone* zone) {
  Range* result;
  if (type().IsSmi()) {
    result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
    result->set_can_be_minus_zero(false);
  } else {
    // Untagged integer32 cannot be -0, all other representations can.
    result = new(zone) Range();
    result->set_can_be_minus_zero(!representation().IsInteger32());
  }
  return result;
}
@@ -2139,7 +2143,7 @@ HConstant::HConstant(double double_value,
      has_int32_value_(IsInteger32(double_value)),
      has_double_value_(true),
      is_internalized_string_(false),
      boolean_value_(double_value != 0 && !std::isnan(double_value)),
      int32_value_(DoubleToInt32(double_value)),
      double_value_(double_value) {
  Initialize(r);
@@ -2194,13 +2198,6 @@ void HConstant::PrintDataTo(StringStream* stream) {
}
bool HArrayLiteral::IsCopyOnWrite() const {
if (!boilerplate_object_->IsJSObject()) return false;
return Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
HEAP->fixed_cow_array_map();
}
void HBinaryOperation::PrintDataTo(StringStream* stream) {
  left()->PrintNameTo(stream);
  stream->Add(" ");
@@ -2222,13 +2219,24 @@ void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
}
bool HBinaryOperation::IgnoreObservedOutputRepresentation(
Representation current_rep) {
return observed_output_representation_.IsDouble() &&
current_rep.IsInteger32() &&
// Mul in Integer32 mode would be too precise.
!this->IsMul() &&
// TODO(jkummerow): Remove blacklisting of Div when the Div
// instruction has learned not to deopt when the remainder is
// non-zero but all uses are truncating.
!this->IsDiv() &&
CheckUsesForFlag(kTruncatingToInt32);
}
Representation HBinaryOperation::RepresentationFromInputs() {
  // Determine the worst case of observed input representations and
  // the currently assumed output representation.
  Representation rep = representation();
  for (int i = 1; i <= 2; ++i) {
    Representation input_rep = observed_input_representation(i);
    if (input_rep.is_more_general_than(rep)) rep = input_rep;
@@ -2238,20 +2246,26 @@ Representation HBinaryOperation::RepresentationFromInputs() {
  Representation left_rep = left()->representation();
  Representation right_rep = right()->representation();

  if (left_rep.is_more_general_than(rep) && !left_rep.IsTagged()) {
    rep = left_rep;
  }
  if (right_rep.is_more_general_than(rep) && !right_rep.IsTagged()) {
    rep = right_rep;
  }
  // Consider observed output representation, but ignore it if it's Double,
  // this instruction is not a division, and all its uses are truncating
  // to Integer32.
  if (observed_output_representation_.is_more_general_than(rep) &&
      !IgnoreObservedOutputRepresentation(rep)) {
    rep = observed_output_representation_;
  }
  return rep;
}


void HBinaryOperation::AssumeRepresentation(Representation r) {
  set_observed_input_representation(1, r);
  set_observed_input_representation(2, r);
  HValue::AssumeRepresentation(r);
}
@@ -3176,7 +3190,7 @@ HInstruction* HStringCharFromCode::New(
    HConstant* c_code = HConstant::cast(char_code);
    Isolate* isolate = Isolate::Current();
    if (c_code->HasNumberValue()) {
      if (std::isfinite(c_code->DoubleValue())) {
        uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
        return new(zone) HConstant(LookupSingleCharacterStringFromCode(isolate,
                                                                       code),
@@ -3209,10 +3223,10 @@ HInstruction* HUnaryMathOperation::New(
    HConstant* constant = HConstant::cast(value);
    if (!constant->HasNumberValue()) break;
    double d = constant->DoubleValue();
    if (std::isnan(d)) {  // NaN poisons everything.
      return H_CONSTANT_DOUBLE(OS::nan_value());
    }
    if (std::isinf(d)) {  // +Infinity and -Infinity.
      switch (op) {
        case kMathSin:
        case kMathCos:
@@ -3276,7 +3290,7 @@ HInstruction* HPower::New(Zone* zone, HValue* left, HValue* right) {
    if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
      double result = power_helper(c_left->DoubleValue(),
                                   c_right->DoubleValue());
      return H_CONSTANT_DOUBLE(std::isnan(result) ? OS::nan_value() : result);
    }
  }
  return new(zone) HPower(left, right);
@@ -3449,6 +3463,42 @@ void HBitwise::PrintDataTo(StringStream* stream) {
}
void HPhi::SimplifyConstantInputs() {
// Convert constant inputs to integers when all uses are truncating.
// This must happen before representation inference takes place.
if (!CheckUsesForFlag(kTruncatingToInt32)) return;
for (int i = 0; i < OperandCount(); ++i) {
if (!OperandAt(i)->IsConstant()) return;
}
HGraph* graph = block()->graph();
for (int i = 0; i < OperandCount(); ++i) {
HConstant* operand = HConstant::cast(OperandAt(i));
if (operand->HasInteger32Value()) {
continue;
} else if (operand->HasDoubleValue()) {
HConstant* integer_input =
new(graph->zone()) HConstant(DoubleToInt32(operand->DoubleValue()),
Representation::Integer32());
integer_input->InsertAfter(operand);
SetOperandAt(i, integer_input);
} else if (operand == graph->GetConstantTrue()) {
SetOperandAt(i, graph->GetConstant1());
} else {
// This catches |false|, |undefined|, strings and objects.
SetOperandAt(i, graph->GetConstant0());
}
}
// Overwrite observed input representations because they are likely Tagged.
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->IsBinaryOperation()) {
HBinaryOperation::cast(use)->set_observed_input_representation(
it.index(), Representation::Integer32());
}
}
}
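// SimplifyConstantInputs above rewrites non-integer constant operands using
// JavaScript's ToInt32 truncation (via DoubleToInt32). A standalone sketch
// of those semantics, assumed to mirror DoubleToInt32: NaN and infinities
// map to 0, finite values are truncated toward zero and wrapped modulo 2^32.
// This is an illustration, not V8's implementation.
#include <cmath>
#include <cstdint>

int32_t ToInt32(double d) {
  if (!std::isfinite(d)) return 0;            // NaN, +/-Infinity -> 0
  double t = std::trunc(d);                   // truncate toward zero
  double m = std::fmod(t, 4294967296.0);      // wrap modulo 2^32
  if (m < 0) m += 4294967296.0;               // shift into [0, 2^32)
  uint32_t u = static_cast<uint32_t>(m);      // exact: m is an integer value
  return static_cast<int32_t>(u);             // reinterpret as signed
}
// Examples: ToInt32(3.7) == 3, ToInt32(-1.5) == -1, ToInt32(4294967296.5) == 0.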
void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
  ASSERT(CheckFlag(kFlexibleRepresentation));
  // If there are non-Phi uses, and all of them have observed the same

200
deps/v8/src/hydrogen-instructions.h

@@ -1757,12 +1757,16 @@ class HChange: public HUnaryOperation {
    ASSERT(!value->representation().IsNone() && !to.IsNone());
    ASSERT(!value->representation().Equals(to));
    set_representation(to);
    SetFlag(kUseGVN);
    if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
    if (is_truncating) SetFlag(kTruncatingToInt32);
    if (value->type().IsSmi()) {
      set_type(HType::Smi());
    } else {
      set_type(HType::TaggedNumber());
      if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
    }
  }

  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
  virtual HType CalculateInferredType();
@@ -2226,6 +2230,8 @@ class HInvokeFunction: public HBinaryCall {
                  int argument_count)
      : HBinaryCall(context, function, argument_count),
        known_function_(known_function) {
formal_parameter_count_ = known_function.is_null()
? 0 : known_function->shared()->formal_parameter_count();
  }

  virtual Representation RequiredInputRepresentation(int index) {
@@ -2235,20 +2241,25 @@ class HInvokeFunction: public HBinaryCall {
  HValue* context() { return first(); }
  HValue* function() { return second(); }
  Handle<JSFunction> known_function() { return known_function_; }
  int formal_parameter_count() const { return formal_parameter_count_; }

  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)

 private:
  Handle<JSFunction> known_function_;
  int formal_parameter_count_;
};


class HCallConstantFunction: public HCall<0> {
 public:
  HCallConstantFunction(Handle<JSFunction> function, int argument_count)
      : HCall<0>(argument_count),
        function_(function),
        formal_parameter_count_(function->shared()->formal_parameter_count()) {}

  Handle<JSFunction> function() const { return function_; }
  int formal_parameter_count() const { return formal_parameter_count_; }

  bool IsApplyFunction() const {
    return function_->code() ==
@@ -2265,6 +2276,7 @@ class HCallConstantFunction: public HCall<0> {
 private:
  Handle<JSFunction> function_;
  int formal_parameter_count_;
};
@@ -2349,11 +2361,14 @@ class HCallGlobal: public HUnaryCall {
class HCallKnownGlobal: public HCall<0> {
 public:
  HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
-      : HCall<0>(argument_count), target_(target) { }
+      : HCall<0>(argument_count),
+        target_(target),
+        formal_parameter_count_(target->shared()->formal_parameter_count()) { }

  virtual void PrintDataTo(StringStream* stream);

  Handle<JSFunction> target() const { return target_; }
+  int formal_parameter_count() const { return formal_parameter_count_; }

  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::None();

@@ -2363,6 +2378,7 @@ class HCallKnownGlobal: public HCall<0> {
 private:
  Handle<JSFunction> target_;
+  int formal_parameter_count_;
};
@@ -2587,24 +2603,26 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
    switch (op) {
      case kMathFloor:
      case kMathRound:
-      case kMathCeil:
        set_representation(Representation::Integer32());
        break;
      case kMathAbs:
        // Not setting representation here: it is None intentionally.
        SetFlag(kFlexibleRepresentation);
+        // TODO(svenpanne) This flag is actually only needed if representation()
+        // is tagged, and not when it is an unboxed double or unboxed integer.
        SetGVNFlag(kChangesNewSpacePromotion);
        break;
-      case kMathSqrt:
-      case kMathPowHalf:
      case kMathLog:
      case kMathSin:
      case kMathCos:
      case kMathTan:
        set_representation(Representation::Double());
+        // These operations use the TranscendentalCache, so they may allocate.
        SetGVNFlag(kChangesNewSpacePromotion);
        break;
      case kMathExp:
+      case kMathSqrt:
+      case kMathPowHalf:
        set_representation(Representation::Double());
        break;
      default:
@@ -2680,39 +2698,27 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
class HCheckMaps: public HTemplateInstruction<2> {
 public:
-  HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
-             HValue* typecheck = NULL)
-      : map_unique_ids_(0, zone) {
-    SetOperandAt(0, value);
-    // If callers don't depend on a typecheck, they can pass in NULL. In that
-    // case we use a copy of the |value| argument as a dummy value.
-    SetOperandAt(1, typecheck != NULL ? typecheck : value);
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-    SetFlag(kTrackSideEffectDominators);
-    SetGVNFlag(kDependsOnMaps);
-    SetGVNFlag(kDependsOnElementsKind);
-    map_set()->Add(map, zone);
+  static HCheckMaps* New(HValue* value, Handle<Map> map, Zone* zone,
+                         HValue *typecheck = NULL) {
+    HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
+    check_map->map_set_.Add(map, zone);
+    return check_map;
  }
-  HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone)
-      : map_unique_ids_(0, zone) {
-    SetOperandAt(0, value);
-    SetOperandAt(1, value);
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-    SetFlag(kTrackSideEffectDominators);
-    SetGVNFlag(kDependsOnMaps);
-    SetGVNFlag(kDependsOnElementsKind);
+
+  static HCheckMaps* New(HValue* value, SmallMapList* maps, Zone* zone,
+                         HValue *typecheck = NULL) {
+    HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
    for (int i = 0; i < maps->length(); i++) {
-      map_set()->Add(maps->at(i), zone);
+      check_map->map_set_.Add(maps->at(i), zone);
    }
-    map_set()->Sort();
+    check_map->map_set_.Sort();
+    return check_map;
  }

-  static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
+  static HCheckMaps* NewWithTransitions(HValue* value, Handle<Map> map,
                                        Zone* zone) {
-    HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
-    SmallMapList* map_set = check_map->map_set();
+    HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, value);
+    check_map->map_set_.Add(map, zone);
    // Since transitioned elements maps of the initial map don't fail the map
    // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.

@@ -2725,10 +2731,10 @@ class HCheckMaps: public HTemplateInstruction<2> {
      Map* transitioned_map =
          map->LookupElementsTransitionMap(kind);
      if (transitioned_map) {
-        map_set->Add(Handle<Map>(transitioned_map), zone);
+        check_map->map_set_.Add(Handle<Map>(transitioned_map), zone);
      }
    };
-    map_set->Sort();
+    check_map->map_set_.Sort();
    return check_map;
  }

@@ -2763,6 +2769,20 @@ class HCheckMaps: public HTemplateInstruction<2> {
  }

 private:
+  // Clients should use one of the static New* methods above.
+  HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
+      : map_unique_ids_(0, zone) {
+    SetOperandAt(0, value);
+    // Use the object value for the dependency if NULL is passed.
+    // TODO(titzer): do GVN flags already express this dependency?
+    SetOperandAt(1, typecheck != NULL ? typecheck : value);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kTrackSideEffectDominators);
+    SetGVNFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnElementsKind);
+  }
+
  SmallMapList map_set_;
  ZoneList<UniqueValueId> map_unique_ids_;
};
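The shape of this refactor: the public constructors become private, every creation path funnels through the single private constructor, and the map set is populated by static factories. Call sites change accordingly; the before/after below is lifted from the hydrogen.cc hunks further down:

    // Before: call sites invoked the constructor directly.
    HCheckMaps* check = new(zone()) HCheckMaps(obj, map, zone());

    // After: the static factory owns construction and map-set population.
    HCheckMaps* check = HCheckMaps::New(obj, map, zone());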
@@ -3123,6 +3143,8 @@ class HPhi: public HValue {
    return true;
  }

+  void SimplifyConstantInputs();
+
 protected:
  virtual void DeleteFromGraph();
  virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -3231,6 +3253,7 @@ class HConstant: public HTemplateInstruction<0> {
    if (handle_.is_null()) {
      handle_ = FACTORY->NewNumber(double_value_, TENURED);
    }
+    ALLOW_HANDLE_DEREF(Isolate::Current(), "smi check");
    ASSERT(has_int32_value_ || !handle_->IsSmi());
    return handle_;
  }
@@ -3239,7 +3262,7 @@ class HConstant: public HTemplateInstruction<0> {
    return has_double_value_ &&
        (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
         FixedDoubleArray::is_the_hole_nan(double_value_) ||
-         isnan(double_value_));
+         std::isnan(double_value_));
  }

  bool ImmortalImmovable() const {
@@ -3254,8 +3277,6 @@ class HConstant: public HTemplateInstruction<0> {
    }

    ASSERT(!handle_.is_null());
-    HandleDereferenceGuard allow_dereference_for_immovable_check(
-        isolate(), HandleDereferenceGuard::ALLOW);
    Heap* heap = isolate()->heap();
    ASSERT(unique_id_ != UniqueValueId(heap->minus_zero_value()));
    ASSERT(unique_id_ != UniqueValueId(heap->nan_value()));
@@ -3275,9 +3296,7 @@ class HConstant: public HTemplateInstruction<0> {
    return has_int32_value_;
  }

-  virtual bool EmitAtUses() {
-    return !representation().IsDouble() || IsSpecialDouble();
-  }
+  virtual bool EmitAtUses() { return !representation().IsDouble(); }

  virtual void PrintDataTo(StringStream* stream);
  virtual HType CalculateInferredType();
  bool IsInteger() { return handle()->IsSmi(); }
@@ -3427,10 +3446,9 @@ class HBinaryOperation: public HTemplateInstruction<3> {
    return right();
  }

-  void set_observed_input_representation(Representation left,
-                                         Representation right) {
-    observed_input_representation_[0] = left;
-    observed_input_representation_[1] = right;
+  void set_observed_input_representation(int index, Representation rep) {
+    ASSERT(index >= 1 && index <= 2);
+    observed_input_representation_[index - 1] = rep;
  }

  virtual void initialize_output_representation(Representation observed) {

@@ -3453,6 +3471,8 @@ class HBinaryOperation: public HTemplateInstruction<3> {
  DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)

 private:
+  bool IgnoreObservedOutputRepresentation(Representation current_rep);
+
  Representation observed_input_representation_[2];
  Representation observed_output_representation_;
};
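Observed input representations are now recorded per operand index instead of both at once. The new call shape, as used by the hydrogen.cc hunks below (index 1 is the left operand, index 2 the right):

    binop->set_observed_input_representation(1, left_rep);
    binop->set_observed_input_representation(2, right_rep);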
@@ -3932,6 +3952,10 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
    return Representation::Tagged();
  }

+  virtual Representation observed_input_representation(int index) {
+    return Representation::Tagged();
+  }
+
  DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
};
@@ -4398,6 +4422,17 @@ class HMul: public HArithmeticBinaryOperation {
                          HValue* left,
                          HValue* right);

+  static HInstruction* NewImul(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right) {
+    HMul* mul = new(zone) HMul(context, left, right);
+    // TODO(mstarzinger): Prevent bailout on minus zero for imul.
+    mul->AssumeRepresentation(Representation::Integer32());
+    mul->ClearFlag(HValue::kCanOverflow);
+    return mul;
+  }
+
  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);

  virtual HValue* Canonicalize();
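NewImul backs the Math.imul inlining added to hydrogen.cc below. Math.imul is specified as C-style 32-bit wrapping multiplication, so overflow cannot happen by definition, which is why kCanOverflow is cleared; the minus-zero bailout is still pending per the TODO. A sketch of the semantics the instruction assumes (illustrative only, not code from this patch):

    // 32-bit wrapping multiply: unsigned arithmetic sidesteps signed-overflow
    // UB; the final cast reinterprets the low 32 bits as a signed result.
    int32_t imul(int32_t a, int32_t b) {
      return static_cast<int32_t>(static_cast<uint32_t>(a) *
                                  static_cast<uint32_t>(b));
    }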
@@ -4878,6 +4913,12 @@ class HAllocateObject: public HTemplateInstruction<1> {
    SetOperandAt(0, context);
    set_representation(Representation::Tagged());
    SetGVNFlag(kChangesNewSpacePromotion);
+    constructor_initial_map_ = constructor->has_initial_map()
+        ? Handle<Map>(constructor->initial_map())
+        : Handle<Map>::null();
+    // If slack tracking finished, the instance size and property counts
+    // remain unchanged so that we can allocate memory for the object.
+    ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
  }

  // Maximum instance size for which allocations will be inlined.
@@ -4885,13 +4926,14 @@ class HAllocateObject: public HTemplateInstruction<1> {
  HValue* context() { return OperandAt(0); }
  Handle<JSFunction> constructor() { return constructor_; }
+  Handle<Map> constructor_initial_map() { return constructor_initial_map_; }

  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::Tagged();
  }

  virtual Handle<Map> GetMonomorphicJSObjectMap() {
-    ASSERT(constructor()->has_initial_map());
-    return Handle<Map>(constructor()->initial_map());
+    ASSERT(!constructor_initial_map_.is_null());
+    return constructor_initial_map_;
  }

  virtual HType CalculateInferredType();

@@ -4902,6 +4944,7 @@ class HAllocateObject: public HTemplateInstruction<1> {
  // virtual bool IsDeletable() const { return true; }

  Handle<JSFunction> constructor_;
+  Handle<Map> constructor_initial_map_;
};
@@ -4923,6 +4966,19 @@ class HAllocate: public HTemplateInstruction<2> {
    SetGVNFlag(kChangesNewSpacePromotion);
  }

+  static Flags DefaultFlags() {
+    return CAN_ALLOCATE_IN_NEW_SPACE;
+  }
+
+  static Flags DefaultFlags(ElementsKind kind) {
+    Flags flags = CAN_ALLOCATE_IN_NEW_SPACE;
+    if (IsFastDoubleElementsKind(kind)) {
+      flags = static_cast<HAllocate::Flags>(
+          flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
+    }
+    return flags;
+  }
+
  HValue* context() { return OperandAt(0); }
  HValue* size() { return OperandAt(1); }
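The two overloads centralize allocation-flag selection that call sites previously spelled out by hand; the BuildAllocateElements and BuildCloneShallowArray hunks below each shrink to a single line of this form:

    // The kind-aware overload adds ALLOCATE_DOUBLE_ALIGNED for double arrays,
    // which need 8-byte-aligned backing stores.
    HAllocate::Flags flags = HAllocate::DefaultFlags(kind);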
@@ -6049,27 +6105,35 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
 public:
  HArrayLiteral(HValue* context,
                Handle<HeapObject> boilerplate_object,
+                Handle<FixedArray> literals,
                int length,
                int literal_index,
                int depth,
                AllocationSiteMode mode)
      : HMaterializedLiteral<1>(literal_index, depth, mode),
        length_(length),
-        boilerplate_object_(boilerplate_object) {
+        boilerplate_object_(boilerplate_object),
+        literals_(literals) {
    SetOperandAt(0, context);
    SetGVNFlag(kChangesNewSpacePromotion);
+    boilerplate_elements_kind_ = boilerplate_object_->IsJSObject()
+        ? Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind()
+        : TERMINAL_FAST_ELEMENTS_KIND;
+    is_copy_on_write_ = boilerplate_object_->IsJSObject() &&
+        (Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
+         HEAP->fixed_cow_array_map());
  }

  HValue* context() { return OperandAt(0); }
  ElementsKind boilerplate_elements_kind() const {
-    if (!boilerplate_object_->IsJSObject()) {
-      return TERMINAL_FAST_ELEMENTS_KIND;
-    }
-    return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
+    return boilerplate_elements_kind_;
  }
  Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
+  Handle<FixedArray> literals() const { return literals_; }
  int length() const { return length_; }
-  bool IsCopyOnWrite() const;
+  bool IsCopyOnWrite() const { return is_copy_on_write_; }

  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::Tagged();

@@ -6081,6 +6145,9 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
 private:
  int length_;
  Handle<HeapObject> boilerplate_object_;
+  Handle<FixedArray> literals_;
+  ElementsKind boilerplate_elements_kind_;
+  bool is_copy_on_write_;
};
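The elements kind and the copy-on-write bit are now snapshotted in the constructor rather than recomputed from the boilerplate handle on each query, so later compile phases can ask without dereferencing handles. A sketch, with literal standing for any HArrayLiteral*:

    // Neither call touches the heap; both values were captured at construction.
    ElementsKind kind = literal->boilerplate_elements_kind();
    bool cow = literal->IsCopyOnWrite();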
@@ -6088,12 +6155,15 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
 public:
  HObjectLiteral(HValue* context,
                 Handle<FixedArray> constant_properties,
+                 Handle<FixedArray> literals,
                 bool fast_elements,
                 int literal_index,
                 int depth,
                 bool has_function)
      : HMaterializedLiteral<1>(literal_index, depth),
        constant_properties_(constant_properties),
+        constant_properties_length_(constant_properties->length()),
+        literals_(literals),
        fast_elements_(fast_elements),
        has_function_(has_function) {
    SetOperandAt(0, context);

@@ -6104,6 +6174,10 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
  Handle<FixedArray> constant_properties() const {
    return constant_properties_;
  }
+  int constant_properties_length() const {
+    return constant_properties_length_;
+  }
+  Handle<FixedArray> literals() const { return literals_; }
  bool fast_elements() const { return fast_elements_; }
  bool has_function() const { return has_function_; }

@@ -6116,8 +6190,10 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
 private:
  Handle<FixedArray> constant_properties_;
-  bool fast_elements_;
-  bool has_function_;
+  int constant_properties_length_;
+  Handle<FixedArray> literals_;
+  bool fast_elements_ : 1;
+  bool has_function_ : 1;
};
@@ -6160,7 +6236,11 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
  HFunctionLiteral(HValue* context,
                   Handle<SharedFunctionInfo> shared,
                   bool pretenure)
-      : shared_info_(shared), pretenure_(pretenure) {
+      : shared_info_(shared),
+        pretenure_(pretenure),
+        has_no_literals_(shared->num_literals() == 0),
+        is_generator_(shared->is_generator()),
+        language_mode_(shared->language_mode()) {
    SetOperandAt(0, context);
    set_representation(Representation::Tagged());
    SetGVNFlag(kChangesNewSpacePromotion);

@@ -6177,12 +6257,18 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
  Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
  bool pretenure() const { return pretenure_; }
+  bool has_no_literals() const { return has_no_literals_; }
+  bool is_generator() const { return is_generator_; }
+  LanguageMode language_mode() const { return language_mode_; }

 private:
  virtual bool IsDeletable() const { return true; }

  Handle<SharedFunctionInfo> shared_info_;
-  bool pretenure_;
+  bool pretenure_ : 1;
+  bool has_no_literals_ : 1;
+  bool is_generator_ : 1;
+  LanguageMode language_mode_;
};

587
deps/v8/src/hydrogen.cc

@@ -25,9 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-#include "v8.h"
#include "hydrogen.h"

+#include <algorithm>
+
+#include "v8.h"
#include "codegen.h"
#include "full-codegen.h"
#include "hashmap.h"
@@ -509,9 +511,8 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
-  // Allow dereferencing for debug mode verification.
-  HandleDereferenceGuard allow_handle_deref(isolate(),
-                                            HandleDereferenceGuard::ALLOW);
+  Heap::RelocationLock(isolate()->heap());
+  ALLOW_HANDLE_DEREF(isolate(), "debug mode verification");

  for (int i = 0; i < blocks_.length(); i++) {
    HBasicBlock* block = blocks_.at(i);
@@ -603,6 +604,19 @@ HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
}

+HConstant* HGraph::GetConstantSmi(SetOncePointer<HConstant>* pointer,
+                                  int32_t value) {
+  if (!pointer->is_set()) {
+    HConstant* constant =
+        new(zone()) HConstant(Handle<Object>(Smi::FromInt(value), isolate()),
+                              Representation::Tagged());
+    constant->InsertAfter(GetConstantUndefined());
+    pointer->set(constant);
+  }
+  return pointer->get();
+}
+
+
HConstant* HGraph::GetConstant0() {
  return GetConstantInt32(&constant_0_, 0);
}
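GetConstant0() and the new GetConstantSmi0() both cache a zero, but in different representations, which starts to matter as Smi-typed values flow through Hydrogen. A sketch, assuming an HGraph* in scope via graph():

    HConstant* int32_zero = graph()->GetConstant0();     // Integer32
    HConstant* smi_zero   = graph()->GetConstantSmi0();  // Tagged Smi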
@@ -638,69 +652,20 @@ HConstant* HGraph::GetConstant##Name() { \
DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true)
DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false)
DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false)
+DEFINE_GET_CONSTANT(Null, null, HType::Tagged(), false)
+
+
+HConstant* HGraph::GetConstantSmi0() {
+  return GetConstantSmi(&constant_smi_0_, 0);
+}
+
+
+HConstant* HGraph::GetConstantSmi1() {
+  return GetConstantSmi(&constant_smi_1_, 1);
+}
+

#undef DEFINE_GET_CONSTANT

-HGraphBuilder::CheckBuilder::CheckBuilder(HGraphBuilder* builder)
-    : builder_(builder),
-      finished_(false) {
-  HEnvironment* env = builder->environment();
-  failure_block_ = builder->CreateBasicBlock(env->Copy());
-  merge_block_ = builder->CreateBasicBlock(env->Copy());
-}
-
-HValue* HGraphBuilder::CheckBuilder::CheckNotUndefined(HValue* value) {
-  HEnvironment* env = builder_->environment();
-  HCompareObjectEqAndBranch* compare =
-      new(zone()) HCompareObjectEqAndBranch(
-          value,
-          builder_->graph()->GetConstantUndefined());
-  HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
-  HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
-  compare->SetSuccessorAt(0, failure_block);
-  compare->SetSuccessorAt(1, success_block);
-  failure_block->GotoNoSimulate(failure_block_);
-  builder_->current_block()->Finish(compare);
-  builder_->set_current_block(success_block);
-  return compare;
-}
-
-HValue* HGraphBuilder::CheckBuilder::CheckIntegerCompare(HValue* left,
-                                                         HValue* right,
-                                                         Token::Value op) {
-  HEnvironment* env = builder_->environment();
-  HCompareIDAndBranch* compare =
-      new(zone()) HCompareIDAndBranch(left, right, op);
-  compare->AssumeRepresentation(Representation::Integer32());
-  HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
-  HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
-  compare->SetSuccessorAt(0, success_block);
-  compare->SetSuccessorAt(1, failure_block);
-  failure_block->GotoNoSimulate(failure_block_);
-  builder_->current_block()->Finish(compare);
-  builder_->set_current_block(success_block);
-  return compare;
-}
-
-HValue* HGraphBuilder::CheckBuilder::CheckIntegerEq(HValue* left,
-                                                    HValue* right) {
-  return CheckIntegerCompare(left, right, Token::EQ);
-}
-
-void HGraphBuilder::CheckBuilder::End() {
-  ASSERT(!finished_);
-  builder_->current_block()->GotoNoSimulate(merge_block_);
-  if (failure_block_->HasPredecessor()) {
-    failure_block_->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-  }
-  builder_->set_current_block(merge_block_);
-  finished_ = true;
-}
-

HConstant* HGraph::GetInvalidContext() {
@@ -714,8 +679,6 @@ HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position)
      finished_(false),
      did_then_(false),
      did_else_(false),
-      deopt_then_(false),
-      deopt_else_(false),
      did_and_(false),
      did_or_(false),
      captured_(false),

@@ -736,8 +699,6 @@ HGraphBuilder::IfBuilder::IfBuilder(
      finished_(false),
      did_then_(false),
      did_else_(false),
-      deopt_then_(false),
-      deopt_else_(false),
      did_and_(false),
      did_or_(false),
      captured_(false),
@@ -835,8 +796,9 @@ void HGraphBuilder::IfBuilder::CaptureContinuation(
  HBasicBlock* true_block = last_true_block_ == NULL
      ? first_true_block_
      : last_true_block_;
-  HBasicBlock* false_block =
-      did_else_ ? builder_->current_block() : first_false_block_;
+  HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
+      ? builder_->current_block()
+      : first_false_block_;
  continuation->Capture(true_block, false_block, position_);
  captured_ = true;
  End();
@@ -869,12 +831,23 @@ void HGraphBuilder::IfBuilder::Else() {
void HGraphBuilder::IfBuilder::Deopt() {
-  ASSERT(!(did_then_ ^ did_else_));
  HBasicBlock* block = builder_->current_block();
  block->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
  if (did_else_) {
    first_false_block_ = NULL;
-    did_else_ = false;
+  } else {
+    first_true_block_ = NULL;
+  }
+}
+
+
+void HGraphBuilder::IfBuilder::Return(HValue* value) {
+  HBasicBlock* block = builder_->current_block();
+  block->Finish(new(zone()) HReturn(value,
+                                    builder_->environment()->LookupContext(),
+                                    builder_->graph()->GetConstantMinus1()));
+  if (did_else_) {
+    first_false_block_ = NULL;
  } else {
    first_true_block_ = NULL;
  }
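Return() lets one arm of an IfBuilder end in an actual HReturn instead of flowing to the merge block. A hypothetical use, assuming cached_key and cached_value are HValue*s already in scope (only the builder calls are API from this diff):

    IfBuilder cache_checker(this);
    cache_checker.IfCompare(key, cached_key, Token::EQ);
    cache_checker.Then();
    cache_checker.Return(cached_value);  // this arm leaves the function
    cache_checker.Else();
    // ... slow path continues in the current block ...
    cache_checker.End();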
@@ -888,8 +861,9 @@ void HGraphBuilder::IfBuilder::End() {
    last_true_block_ = builder_->current_block();
  }
  if (first_true_block_ == NULL) {
-    // Deopt on true. Nothing to do, just continue the else block.
+    // Deopt on true. Nothing to do, just continue the false block.
  } else if (first_false_block_ == NULL) {
+    // Deopt on false. Nothing to do except switching to the true block.
    builder_->set_current_block(last_true_block_);
  } else {
    HEnvironment* merge_env = last_true_block_->last_environment()->Copy();
@@ -1081,7 +1055,7 @@ HValue* HGraphBuilder::BuildCheckNonSmi(HValue* obj) {
HValue* HGraphBuilder::BuildCheckMap(HValue* obj,
                                     Handle<Map> map) {
-  HCheckMaps* check = new(zone()) HCheckMaps(obj, map, zone());
+  HCheckMaps* check = HCheckMaps::New(obj, map, zone());
  AddInstruction(check);
  return check;
}
@@ -1297,7 +1271,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
      AddInstruction(new(zone) HLoadElements(object, mapcheck));
  if (is_store && (fast_elements || fast_smi_only_elements) &&
      store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
-    HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+    HCheckMaps* check_cow_map = HCheckMaps::New(
        elements, isolate()->factory()->fixed_array_map(), zone);
    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
    AddInstruction(check_cow_map);
@@ -1319,14 +1293,15 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
      IfBuilder length_checker(this);
      length_checker.IfCompare(key, length, Token::LT);
      length_checker.Then();
-      CheckBuilder negative_checker(this);
-      HValue* bounds_check = negative_checker.CheckIntegerCompare(
+      IfBuilder negative_checker(this);
+      HValue* bounds_check = negative_checker.IfCompare(
          key, graph()->GetConstant0(), Token::GTE);
-      negative_checker.End();
+      negative_checker.Then();
      HInstruction* result = BuildExternalArrayElementAccess(
          external_elements, key, val, bounds_check,
          elements_kind, is_store);
      AddInstruction(result);
+      negative_checker.ElseDeopt();
      length_checker.End();
      return result;
    } else {
@@ -1371,7 +1346,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
      elements = BuildCopyElementsOnWrite(object, elements, elements_kind,
                                          length);
    } else {
-      HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+      HCheckMaps* check_cow_map = HCheckMaps::New(
          elements, isolate()->factory()->fixed_array_map(), zone);
      check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
      AddInstruction(check_cow_map);
@@ -1407,8 +1382,10 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
  total_size->ChangeRepresentation(Representation::Integer32());
  total_size->ClearFlag(HValue::kCanOverflow);

-  HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
+  HAllocate::Flags flags = HAllocate::DefaultFlags(kind);
  if (FLAG_pretenure_literals) {
+    // TODO(hpayer): When pretenuring can be internalized, flags can become
+    // private to HAllocate.
    if (IsFastDoubleElementsKind(kind)) {
      flags = static_cast<HAllocate::Flags>(
          flags | HAllocate::CAN_ALLOCATE_IN_OLD_DATA_SPACE);

@@ -1417,10 +1394,6 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
          flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
    }
  }
-  if (IsFastDoubleElementsKind(kind)) {
-    flags = static_cast<HAllocate::Flags>(
-        flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
-  }

  HValue* elements =
      AddInstruction(new(zone) HAllocate(context, total_size,
@@ -1456,6 +1429,63 @@ HValue* HGraphBuilder::BuildAllocateAndInitializeElements(HValue* context,
}
HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
HValue* array_map,
AllocationSiteMode mode,
HValue* allocation_site_payload,
HValue* length_field) {
BuildStoreMap(array, array_map);
HConstant* empty_fixed_array =
new(zone()) HConstant(
Handle<FixedArray>(isolate()->heap()->empty_fixed_array()),
Representation::Tagged());
AddInstruction(empty_fixed_array);
AddInstruction(new(zone()) HStoreNamedField(array,
isolate()->factory()->properties_field_symbol(),
empty_fixed_array,
true,
JSArray::kPropertiesOffset));
HInstruction* length_store = AddInstruction(
new(zone()) HStoreNamedField(array,
isolate()->factory()->length_field_string(),
length_field,
true,
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
if (mode == TRACK_ALLOCATION_SITE) {
BuildCreateAllocationSiteInfo(array,
JSArray::kSize,
allocation_site_payload);
}
int elements_location = JSArray::kSize;
if (mode == TRACK_ALLOCATION_SITE) {
elements_location += AllocationSiteInfo::kSize;
}
HInnerAllocatedObject* elements = new(zone()) HInnerAllocatedObject(
array,
elements_location);
AddInstruction(elements);
HInstruction* elements_store = AddInstruction(
new(zone()) HStoreNamedField(
array,
isolate()->factory()->elements_field_string(),
elements,
true,
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
return elements;
}
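A note on the layout BuildJSArrayHeader assumes: the callers allocate everything contiguously, and the returned HInnerAllocatedObject points past the header (and past the site info, when tracked). Sketched from the offsets used above:

    // [JSArray header: map | properties | elements | length]  JSArray::kSize
    // [AllocationSiteInfo]      only when mode == TRACK_ALLOCATION_SITE
    // [elements backing store]  <- returned HInnerAllocatedObject starts here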
HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
                                           HValue* map) {
  Zone* zone = this->zone();
@@ -1569,6 +1599,30 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
      : AddInstruction(new(zone) HConstant(nan_double,
                                           Representation::Double()));

+  // Special loop unfolding case
+  static const int kLoopUnfoldLimit = 4;
+  bool unfold_loop = false;
+  int initial_capacity = JSArray::kPreallocatedArrayElements;
+  if (from->IsConstant() && to->IsConstant() &&
+      initial_capacity <= kLoopUnfoldLimit) {
+    HConstant* constant_from = HConstant::cast(from);
+    HConstant* constant_to = HConstant::cast(to);
+
+    if (constant_from->HasInteger32Value() &&
+        constant_from->Integer32Value() == 0 &&
+        constant_to->HasInteger32Value() &&
+        constant_to->Integer32Value() == initial_capacity) {
+      unfold_loop = true;
+    }
+  }
+
+  if (unfold_loop) {
+    for (int i = 0; i < initial_capacity; i++) {
+      HInstruction* key = AddInstruction(new(zone)
+          HConstant(i, Representation::Integer32()));
+      AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
+    }
+  } else {
    LoopBuilder builder(this, context, LoopBuilder::kPostIncrement);

    HValue* key = builder.BeginBody(from, to, Token::LT);

@@ -1577,6 +1631,7 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
    builder.EndBody();
  }
+}


void HGraphBuilder::BuildCopyElements(HValue* context,
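The unfold only fires for the common fill of a freshly allocated default-capacity backing store: constant bounds from 0 to JSArray::kPreallocatedArrayElements, with that capacity no larger than the unfold limit of 4. Conceptually it trades the LoopBuilder loop for straight-line stores:

    // What the unfolded case emits for initial_capacity == 4 (sketch):
    //   elements[0] = hole;  elements[1] = hole;
    //   elements[2] = hole;  elements[3] = hole;
    // No loop phi, no compare, no back edge.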
@@ -1642,12 +1697,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
        : FixedArray::SizeFor(length);
  }

-  HAllocate::Flags allocate_flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
-  if (IsFastDoubleElementsKind(kind)) {
-    allocate_flags = static_cast<HAllocate::Flags>(
-        allocate_flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
-  }
+  HAllocate::Flags allocate_flags = HAllocate::DefaultFlags(kind);

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  HValue* size_in_bytes =
@@ -1676,15 +1726,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
  // Create an allocation site info if requested.
  if (mode == TRACK_ALLOCATION_SITE) {
-    HValue* alloc_site =
-        AddInstruction(new(zone) HInnerAllocatedObject(object, JSArray::kSize));
-    Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
-    BuildStoreMap(alloc_site, alloc_site_map);
-    int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset;
-    AddInstruction(new(zone) HStoreNamedField(alloc_site,
-                                              factory->empty_string(),
-                                              boilerplate,
-                                              true, alloc_payload_offset));
+    BuildCreateAllocationSiteInfo(object, JSArray::kSize, boilerplate);
  }

  if (length > 0) {

@@ -1733,6 +1775,205 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
}
void HGraphBuilder::BuildCompareNil(
HValue* value,
EqualityKind kind,
CompareNilICStub::Types types,
Handle<Map> map,
int position,
HIfContinuation* continuation) {
IfBuilder if_nil(this, position);
bool needs_or = false;
if ((types & CompareNilICStub::kCompareAgainstNull) != 0) {
if (needs_or) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
needs_or = true;
}
if ((types & CompareNilICStub::kCompareAgainstUndefined) != 0) {
if (needs_or) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value,
graph()->GetConstantUndefined());
needs_or = true;
}
// Handle either undetectable or monomorphic, not both.
ASSERT(((types & CompareNilICStub::kCompareAgainstUndetectable) == 0) ||
((types & CompareNilICStub::kCompareAgainstMonomorphicMap) == 0));
if ((types & CompareNilICStub::kCompareAgainstUndetectable) != 0) {
if (needs_or) if_nil.Or();
if_nil.If<HIsUndetectableAndBranch>(value);
} else {
if_nil.Then();
if_nil.Else();
if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
BuildCheckNonSmi(value);
// For ICs, the map checked below is a sentinel map that gets replaced by
// the monomorphic map when the code is used as a template to generate a
// new IC. For optimized functions, there is no sentinel map, the map
// emitted below is the actual monomorphic map.
BuildCheckMap(value, map);
} else {
if (kind == kNonStrictEquality) {
if_nil.Deopt();
}
}
}
if_nil.CaptureContinuation(continuation);
}
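HandleLiteralCompareNil (rewritten further down in this file) is the consumer; the captured continuation replaces the old single HIsNilAndBranch instruction. The call shape, taken from that hunk:

    HIfContinuation continuation;
    BuildCompareNil(value, kind, types, map_handle,
                    expr->position(), &continuation);
    return ast_context()->ReturnContinuation(&continuation, expr->id());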
HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
int previous_object_size,
HValue* payload) {
HInnerAllocatedObject* alloc_site = new(zone())
HInnerAllocatedObject(previous_object, previous_object_size);
AddInstruction(alloc_site);
Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
BuildStoreMap(alloc_site, alloc_site_map);
AddInstruction(new(zone()) HStoreNamedField(alloc_site,
isolate()->factory()->payload_string(),
payload,
true,
AllocationSiteInfo::kPayloadOffset));
return alloc_site;
}
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
AllocationSiteMode mode) :
builder_(builder),
kind_(kind),
allocation_site_payload_(allocation_site_payload) {
if (mode == DONT_TRACK_ALLOCATION_SITE) {
mode_ = mode;
} else {
mode_ = AllocationSiteInfo::GetMode(kind);
}
}
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
// Get the global context, the native context, the map array
HInstruction* global_object = AddInstruction(new(zone())
HGlobalObject(context));
HInstruction* native_context = AddInstruction(new(zone())
HLoadNamedField(global_object, true, GlobalObject::kNativeContextOffset));
int offset = Context::kHeaderSize +
kPointerSize * Context::JS_ARRAY_MAPS_INDEX;
HInstruction* map_array = AddInstruction(new(zone())
HLoadNamedField(native_context, true, offset));
offset = kind_ * kPointerSize + FixedArrayBase::kHeaderSize;
return AddInstruction(new(zone()) HLoadNamedField(map_array, true, offset));
}
HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
HValue* length_node) {
HValue* context = builder()->environment()->LookupContext();
ASSERT(length_node != NULL);
int base_size = JSArray::kSize;
if (mode_ == TRACK_ALLOCATION_SITE) {
base_size += AllocationSiteInfo::kSize;
}
if (IsFastDoubleElementsKind(kind_)) {
base_size += FixedDoubleArray::kHeaderSize;
} else {
base_size += FixedArray::kHeaderSize;
}
HInstruction* elements_size_value = new(zone())
HConstant(elements_size(), Representation::Integer32());
AddInstruction(elements_size_value);
HInstruction* mul = HMul::New(zone(), context, length_node,
elements_size_value);
mul->ChangeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
AddInstruction(mul);
HInstruction* base = new(zone()) HConstant(base_size,
Representation::Integer32());
AddInstruction(base);
HInstruction* total_size = HAdd::New(zone(), context, base, mul);
total_size->ChangeRepresentation(Representation::Integer32());
total_size->ClearFlag(HValue::kCanOverflow);
AddInstruction(total_size);
return total_size;
}
HValue* HGraphBuilder::JSArrayBuilder::EstablishEmptyArrayAllocationSize() {
int base_size = JSArray::kSize;
if (mode_ == TRACK_ALLOCATION_SITE) {
base_size += AllocationSiteInfo::kSize;
}
base_size += IsFastDoubleElementsKind(kind_)
? FixedDoubleArray::SizeFor(initial_capacity())
: FixedArray::SizeFor(initial_capacity());
HConstant* array_size =
new(zone()) HConstant(base_size, Representation::Integer32());
AddInstruction(array_size);
return array_size;
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
HValue* size_in_bytes = EstablishEmptyArrayAllocationSize();
HConstant* capacity =
new(zone()) HConstant(initial_capacity(), Representation::Integer32());
AddInstruction(capacity);
return AllocateArray(size_in_bytes,
capacity,
builder()->graph()->GetConstant0(),
true);
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* capacity,
HValue* length_field,
bool fill_with_hole) {
HValue* size_in_bytes = EstablishAllocationSize(capacity);
return AllocateArray(size_in_bytes, capacity, length_field, fill_with_hole);
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
HValue* capacity,
HValue* length_field,
bool fill_with_hole) {
HValue* context = builder()->environment()->LookupContext();
// Allocate (dealing with failure appropriately)
HAllocate::Flags flags = HAllocate::DefaultFlags(kind_);
HAllocate* new_object = new(zone()) HAllocate(context, size_in_bytes,
HType::JSArray(), flags);
AddInstruction(new_object);
// Fill in the fields: map, properties, length
HValue* map = EmitMapCode(context);
elements_location_ = builder()->BuildJSArrayHeader(new_object,
map,
mode_,
allocation_site_payload_,
length_field);
// Initialize the elements
builder()->BuildInitializeElements(elements_location_, kind_, capacity);
if (fill_with_hole) {
builder()->BuildFillElementsWithHole(context, elements_location_, kind_,
graph()->GetConstant0(), capacity);
}
return new_object;
}
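JSArrayBuilder bundles the size computation, allocation, and header/elements initialization above behind one object. A hypothetical client, with payload assumed to be an HValue* carrying allocation-site feedback (the array-constructor stubs are the intended real users):

    JSArrayBuilder array_builder(this, FAST_ELEMENTS, payload,
                                 TRACK_ALLOCATION_SITE);
    // Empty array with the default preallocated backing store...
    HValue* empty = array_builder.AllocateEmptyArray();
    // ...or one sized by a runtime capacity, elements pre-filled with holes.
    HValue* array = array_builder.AllocateArray(capacity, length_field, true);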
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
                                               TypeFeedbackOracle* oracle)
    : HGraphBuilder(info),
@@ -3506,7 +3747,12 @@ void HInferRepresentation::Analyze() {
    }
  }

-  // (3a) Use the phi reachability information from step 2 to
+  // Simplify constant phi inputs where possible.
+  for (int i = 0; i < phi_count; ++i) {
+    phi_list->at(i)->SimplifyConstantInputs();
+  }
+
+  // Use the phi reachability information from step 2 to
  // push information about values which can't be converted to integer
  // without deoptimization through the phi use-def chains, avoiding
  // unnecessary deoptimizations later.
@@ -3523,7 +3769,7 @@ void HInferRepresentation::Analyze() {
    }
  }

-  // (3b) Use the phi reachability information from step 2 to
+  // Use the phi reachability information from step 2 to
  // sum up the non-phi use counts of all connected phis.
  for (int i = 0; i < phi_count; ++i) {
    HPhi* phi = phi_list->at(i);
@@ -6412,9 +6658,11 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
                                  pointer_size,
                                  DONT_TRACK_ALLOCATION_SITE);
  } else {
+    Handle<FixedArray> closure_literals(closure->literals(), isolate());
    literal = AddInstruction(
        new(zone()) HObjectLiteral(context,
                                   expr->constant_properties(),
+                                   closure_literals,
                                   expr->fast_elements(),
                                   expr->literal_index(),
                                   expr->depth(),
@@ -6503,7 +6751,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
  HValue* context = environment()->LookupContext();
  HInstruction* literal;

-  Handle<FixedArray> literals(environment()->closure()->literals());
+  Handle<FixedArray> literals(environment()->closure()->literals(), isolate());
  Handle<Object> raw_boilerplate(literals->get(expr->literal_index()),
                                 isolate());
@@ -6555,6 +6803,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
    literal = AddInstruction(
        new(zone()) HArrayLiteral(context,
                                  original_boilerplate_object,
+                                  literals,
                                  length,
                                  expr->literal_index(),
                                  expr->depth(),
@@ -6652,7 +6901,7 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
  AddInstruction(new(zone()) HCheckNonSmi(object));
-  AddInstruction(new(zone()) HCheckMaps(object, map, zone()));
+  AddInstruction(HCheckMaps::New(object, map, zone()));
}
@@ -6781,7 +7030,7 @@ bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad(
  AddInstruction(new(zone()) HCheckNonSmi(object));
  HInstruction* typecheck =
-      AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
+      AddInstruction(HCheckMaps::New(object, types, zone()));
  HInstruction* instr =
      HLoadNamedField::NewArrayLength(zone(), object, typecheck);
  instr->set_position(expr->position());
@@ -6833,7 +7082,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
  AddInstruction(new(zone()) HCheckNonSmi(object));
  HInstruction* instr;
  if (count == types->length() && is_monomorphic_field) {
-    AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
+    AddInstruction(HCheckMaps::New(object, types, zone()));
    instr = BuildLoadNamedField(object, map, &lookup);
  } else {
    HValue* context = environment()->LookupContext();
@@ -7510,8 +7759,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
    Handle<Map> map,
    bool is_store,
    KeyedAccessStoreMode store_mode) {
-  HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
-                                                zone(), dependency);
+  HCheckMaps* mapcheck = HCheckMaps::New(object, map, zone(), dependency);
  AddInstruction(mapcheck);
  if (dependency) {
    mapcheck->ClearGVNFlag(kDependsOnElementsKind);
@@ -7568,7 +7816,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
  }
  if (!has_double_maps && !has_smi_or_object_maps) return NULL;

-  HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
+  HCheckMaps* check_maps = HCheckMaps::New(object, maps, zone());
  AddInstruction(check_maps);
  HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
      object, key, val, check_maps,
@@ -7720,7 +7968,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
      HInstruction* access;
      if (IsFastElementsKind(elements_kind)) {
        if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
-          AddInstruction(new(zone()) HCheckMaps(
+          AddInstruction(HCheckMaps::New(
              elements, isolate()->factory()->fixed_array_map(),
              zone(), elements_kind_branch));
        }
@@ -7754,10 +8002,12 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
        }
        *has_side_effects |= access->HasObservableSideEffects();
+        // The caller will use has_side_effects and add correct Simulate.
+        access->SetFlag(HValue::kHasNoObservableSideEffects);
        if (position != -1) {
          access->set_position(position);
        }
-        if_jsarray->Goto(join);
+        if_jsarray->GotoNoSimulate(join);

        set_current_block(if_fastobject);
        length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));

@@ -7777,18 +8027,19 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
              elements_kind_branch, elements_kind, is_store));
        }
        *has_side_effects |= access->HasObservableSideEffects();
+        // The caller will use has_side_effects and add correct Simulate.
+        access->SetFlag(HValue::kHasNoObservableSideEffects);
        if (position != RelocInfo::kNoPosition) access->set_position(position);
        if (!is_store) {
          Push(access);
        }
-        current_block()->Goto(join);
+        current_block()->GotoNoSimulate(join);
        set_current_block(if_false);
      }
    }

  // Deopt if none of the cases matched.
  current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+  join->SetJoinId(ast_id);
  set_current_block(join);
  return is_store ? NULL : Pop();
}
@@ -8067,14 +8318,12 @@ class FunctionSorter {
};


-static int CompareHotness(void const* a, void const* b) {
-  FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
-  FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
-  int diff = function1->ticks() - function2->ticks();
-  if (diff != 0) return -diff;
-  diff = function1->ast_length() - function2->ast_length();
-  if (diff != 0) return diff;
-  return function1->src_length() - function2->src_length();
+inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
+  int diff = lhs.ticks() - rhs.ticks();
+  if (diff != 0) return diff > 0;
+  diff = lhs.ast_length() - rhs.ast_length();
+  if (diff != 0) return diff < 0;
+  return lhs.src_length() < rhs.src_length();
}

@@ -8117,10 +8366,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
    }
  }

-  qsort(reinterpret_cast<void*>(&order[0]),
-        ordered_functions,
-        sizeof(order[0]),
-        &CompareHotness);
+  std::sort(order, order + ordered_functions);

  HBasicBlock* number_block = NULL;
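Both comparators encode the same hottest-first order; operator< just restates it as the strict weak ordering std::sort expects (the `diff > 0` on ticks plays the role of the old negated diff). Spelled out:

    // lhs sorts before rhs iff
    //   lhs.ticks()      > rhs.ticks()       (hotter functions first),
    // then, on ties,
    //   lhs.ast_length() < rhs.ast_length()  (smaller AST first),
    //   lhs.src_length() < rhs.src_length()  (shorter source first).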
@@ -8697,6 +8943,18 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
        return true;
      }
      break;
+    case kMathImul:
+      if (expr->arguments()->length() == 2) {
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Drop(1);  // Receiver.
+        HValue* context = environment()->LookupContext();
+        HInstruction* op = HMul::NewImul(zone(), context, left, right);
+        if (drop_extra) Drop(1);  // Optionally drop the function.
+        ast_context()->ReturnInstruction(op, expr->id());
+        return true;
+      }
+      break;
    default:
      // Not supported for inlining yet.
      break;
@@ -8844,6 +9102,18 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
        return true;
      }
      break;
+    case kMathImul:
+      if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
+        AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Drop(1);  // Receiver.
+        HValue* context = environment()->LookupContext();
+        HInstruction* result = HMul::NewImul(zone(), context, left, right);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
    default:
      // Not yet supported for inlining.
      break;
@@ -9276,19 +9546,31 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
  } else {
    // The constructor function is both an operand to the instruction and an
    // argument to the construct call.
+    bool use_call_new_array = FLAG_optimize_constructed_arrays &&
+        !(expr->target().is_null()) &&
+        *(expr->target()) == isolate()->global_context()->array_function();
+
    CHECK_ALIVE(VisitArgument(expr->expression()));
    HValue* constructor = HPushArgument::cast(Top())->argument();
    CHECK_ALIVE(VisitArgumentList(expr->arguments()));
    HCallNew* call;
-    if (FLAG_optimize_constructed_arrays &&
-        !(expr->target().is_null()) &&
-        *(expr->target()) == isolate()->global_context()->array_function()) {
+    if (use_call_new_array) {
+      AddInstruction(new(zone()) HCheckFunction(constructor,
+          Handle<JSFunction>(isolate()->global_context()->array_function())));
      Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
      ASSERT(feedback->IsSmi());
+
+      // TODO(mvstanton): It would be better to use the already created global
+      // property cell that is shared by full code gen. That way, any transition
+      // information that happened after crankshaft won't be lost. The right
+      // way to do that is to begin passing the cell to the type feedback oracle
+      // instead of just the value in the cell. Do this in a follow-up checkin.
      Handle<JSGlobalPropertyCell> cell =
          isolate()->factory()->NewJSGlobalPropertyCell(feedback);
-      AddInstruction(new(zone()) HCheckFunction(constructor,
-          Handle<JSFunction>(isolate()->global_context()->array_function())));
+
+      // TODO(mvstanton): Here we should probably insert code to check if the
+      // type cell elements kind is different from when we compiled, and deopt
+      // in that case. Do this in a follow-up checin.
      call = new(zone()) HCallNewArray(context, constructor, argument_count,
                                       cell);
    } else {
@@ -9433,7 +9715,8 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
    info = TypeInfo::Unknown();
  }
  if (instr->IsBinaryOperation()) {
-    HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep);
+    HBinaryOperation::cast(instr)->set_observed_input_representation(1, rep);
+    HBinaryOperation::cast(instr)->set_observed_input_representation(2, rep);
  }
  return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -9874,7 +10157,8 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
  if (instr->IsBinaryOperation()) {
    HBinaryOperation* binop = HBinaryOperation::cast(instr);
-    binop->set_observed_input_representation(left_rep, right_rep);
+    binop->set_observed_input_representation(1, left_rep);
+    binop->set_observed_input_representation(2, right_rep);
    binop->initialize_output_representation(result_rep);
  }
  return instr;
@@ -10254,7 +10538,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
  if (combined_rep.IsTagged() || combined_rep.IsNone()) {
    HCompareGeneric* result =
        new(zone()) HCompareGeneric(context, left, right, op);
-    result->set_observed_input_representation(left_rep, right_rep);
+    result->set_observed_input_representation(1, left_rep);
+    result->set_observed_input_representation(2, right_rep);
    result->set_position(expr->position());
    return ast_context()->ReturnInstruction(result, expr->id());
  } else {
@@ -10276,9 +10561,24 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
  ASSERT(current_block()->HasPredecessor());
  EqualityKind kind =
      expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
-  HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
-  instr->set_position(expr->position());
-  return ast_context()->ReturnControl(instr, expr->id());
+  HIfContinuation continuation;
+  TypeFeedbackId id = expr->CompareOperationFeedbackId();
+  CompareNilICStub::Types types;
+  if (kind == kStrictEquality) {
+    if (nil == kNullValue) {
+      types = CompareNilICStub::kCompareAgainstNull;
+    } else {
+      types = CompareNilICStub::kCompareAgainstUndefined;
+    }
+  } else {
+    types = static_cast<CompareNilICStub::Types>(
+        oracle()->CompareNilTypes(id));
+    if (types == 0) types = CompareNilICStub::kFullCompare;
+  }
+  Handle<Map> map_handle(oracle()->CompareNilMonomorphicReceiverType(id));
+  BuildCompareNil(value, kind, types, map_handle,
+                  expr->position(), &continuation);
+  return ast_context()->ReturnContinuation(&continuation, expr->id());
}
@@ -10401,15 +10701,7 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
  // Build Allocation Site Info if desired
  if (create_allocation_site_info) {
-    HValue* alloc_site =
-        AddInstruction(new(zone) HInnerAllocatedObject(target, JSArray::kSize));
-    Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
-    BuildStoreMap(alloc_site, alloc_site_map);
-    int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset;
-    AddInstruction(new(zone) HStoreNamedField(alloc_site,
-                                              factory->payload_string(),
-                                              original_boilerplate,
-                                              true, alloc_payload_offset));
+    BuildCreateAllocationSiteInfo(target, JSArray::kSize, original_boilerplate);
  }
if (object_elements != NULL) { if (object_elements != NULL) {
@ -11201,6 +11493,17 @@ void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
} }
// Support for generators.
void HOptimizedGraphBuilder::GenerateGeneratorSend(CallRuntime* call) {
return Bailout("inlined runtime function: GeneratorSend");
}
void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
return Bailout("inlined runtime function: GeneratorThrow");
}
#undef CHECK_BAILOUT #undef CHECK_BAILOUT
#undef CHECK_ALIVE #undef CHECK_ALIVE
@ -11521,16 +11824,14 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
void HTracer::TraceLithium(const char* name, LChunk* chunk) { void HTracer::TraceLithium(const char* name, LChunk* chunk) {
ASSERT(!FLAG_parallel_recompilation); ASSERT(!FLAG_parallel_recompilation);
HandleDereferenceGuard allow_handle_deref(chunk->isolate(), ALLOW_HANDLE_DEREF(chunk->isolate(), "debug output");
HandleDereferenceGuard::ALLOW);
Trace(name, chunk->graph(), chunk); Trace(name, chunk->graph(), chunk);
} }
void HTracer::TraceHydrogen(const char* name, HGraph* graph) { void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
ASSERT(!FLAG_parallel_recompilation); ASSERT(!FLAG_parallel_recompilation);
HandleDereferenceGuard allow_handle_deref(graph->isolate(), ALLOW_HANDLE_DEREF(graph->isolate(), "debug output");
HandleDereferenceGuard::ALLOW);
Trace(name, graph, NULL); Trace(name, graph, NULL);
} }
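A note on ALLOW_HANDLE_DEREF, which replaces the spelled-out HandleDereferenceGuard here and is used throughout the rest of this commit: its definition is not part of this diff. A plausible expansion, assuming the guard API visible above and debug-only enforcement, would be roughly:

    #ifdef DEBUG
    // The second argument is documentation only: it records why dereferencing
    // a handle is safe at this call site (assumed; the real macro may differ).
    #define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe)                     \
      HandleDereferenceGuard allow_deref(isolate, HandleDereferenceGuard::ALLOW)
    #else
    #define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe)
    #endif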

123  deps/v8/src/hydrogen.h

@@ -36,6 +36,7 @@
 #include "hydrogen-instructions.h"
 #include "type-info.h"
 #include "zone.h"
+#include "scopes.h"

 namespace v8 {
 namespace internal {

@@ -304,10 +305,13 @@ class HGraph: public ZoneObject {
   HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
   HConstant* GetConstant0();
   HConstant* GetConstant1();
+  HConstant* GetConstantSmi0();
+  HConstant* GetConstantSmi1();
   HConstant* GetConstantMinus1();
   HConstant* GetConstantTrue();
   HConstant* GetConstantFalse();
   HConstant* GetConstantHole();
+  HConstant* GetConstantNull();
   HConstant* GetInvalidContext();

   HBasicBlock* CreateBasicBlock();

@@ -395,6 +399,8 @@ class HGraph: public ZoneObject {
  private:
   HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer,
                               int32_t integer_value);
+  HConstant* GetConstantSmi(SetOncePointer<HConstant>* pointer,
+                            int32_t integer_value);
   void MarkAsDeoptimizingRecursively(HBasicBlock* block);
   void NullifyUnreachableInstructions();

@@ -424,10 +430,13 @@ class HGraph: public ZoneObject {
   SetOncePointer<HConstant> undefined_constant_;
   SetOncePointer<HConstant> constant_0_;
   SetOncePointer<HConstant> constant_1_;
+  SetOncePointer<HConstant> constant_smi_0_;
+  SetOncePointer<HConstant> constant_smi_1_;
   SetOncePointer<HConstant> constant_minus1_;
   SetOncePointer<HConstant> constant_true_;
   SetOncePointer<HConstant> constant_false_;
   SetOncePointer<HConstant> constant_the_hole_;
+  SetOncePointer<HConstant> constant_null_;
   SetOncePointer<HConstant> constant_invalid_context_;
   SetOncePointer<HArgumentsObject> arguments_object_;

@@ -890,7 +899,6 @@ class HIfContinuation {
                HBasicBlock* false_branch,
                int position) {
     ASSERT(!continuation_captured_);
-    ASSERT(true_branch != NULL || false_branch != NULL);
     true_branch_ = true_branch;
     false_branch_ = false_branch;
     position_ = position;

@@ -940,6 +948,10 @@ class HGraphBuilder {
   HGraph* CreateGraph();

+  // Bailout environment manipulation.
+  void Push(HValue* value) { environment()->Push(value); }
+  HValue* Pop() { return environment()->Pop(); }
+
   // Adding instructions.
   HInstruction* AddInstruction(HInstruction* instr);
   void AddSimulate(BailoutId id,

@@ -1013,27 +1025,6 @@ class HGraphBuilder {
   HInstruction* BuildStoreMap(HValue* object, HValue* map);
   HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);

-  class CheckBuilder {
-   public:
-    explicit CheckBuilder(HGraphBuilder* builder);
-    ~CheckBuilder() {
-      if (!finished_) End();
-    }
-
-    HValue* CheckNotUndefined(HValue* value);
-    HValue* CheckIntegerCompare(HValue* left, HValue* right, Token::Value op);
-    HValue* CheckIntegerEq(HValue* left, HValue* right);
-    void End();
-
-   private:
-    Zone* zone() { return builder_->zone(); }
-
-    HGraphBuilder* builder_;
-    bool finished_;
-    HBasicBlock* failure_block_;
-    HBasicBlock* merge_block_;
-  };
-
   class IfBuilder {
    public:
     explicit IfBuilder(HGraphBuilder* builder,

@@ -1067,7 +1058,17 @@ class HGraphBuilder {
       return compare;
     }

-    template<class Condition>
+    template<class Condition, class P2>
+    HInstruction* IfNot(HValue* p1, P2 p2) {
+      HControlInstruction* compare = new(zone()) Condition(p1, p2);
+      AddCompare(compare);
+      HBasicBlock* block0 = compare->SuccessorAt(0);
+      HBasicBlock* block1 = compare->SuccessorAt(1);
+      compare->SetSuccessorAt(0, block1);
+      compare->SetSuccessorAt(1, block0);
+      return compare;
+    }
+
+    template<class Condition>
     HInstruction* OrIfCompare(
         HValue* p1,
         HValue* p2,

@@ -1094,7 +1095,6 @@ class HGraphBuilder {
       return If<Condition>(p1, p2);
     }

-    template<class Condition>
     HInstruction* AndIfCompare(
         HValue* p1,
         HValue* p2,

@@ -1131,6 +1131,13 @@ class HGraphBuilder {
     void End();

     void Deopt();
+    void ElseDeopt() {
+      Else();
+      Deopt();
+      End();
+    }
+
+    void Return(HValue* value);

    private:
     void AddCompare(HControlInstruction* compare);

@@ -1142,8 +1149,6 @@ class HGraphBuilder {
     bool finished_ : 1;
     bool did_then_ : 1;
     bool did_else_ : 1;
-    bool deopt_then_ : 1;
-    bool deopt_else_ : 1;
     bool did_and_ : 1;
     bool did_or_ : 1;
     bool captured_ : 1;
@@ -1212,6 +1217,46 @@ class HGraphBuilder {
   void BuildNewSpaceArrayCheck(HValue* length,
                                ElementsKind kind);

+  class JSArrayBuilder {
+   public:
+    JSArrayBuilder(HGraphBuilder* builder,
+                   ElementsKind kind,
+                   HValue* allocation_site_payload,
+                   AllocationSiteMode mode);
+
+    HValue* AllocateEmptyArray();
+    HValue* AllocateArray(HValue* capacity, HValue* length_field,
+                          bool fill_with_hole);
+    HValue* GetElementsLocation() { return elements_location_; }
+
+   private:
+    Zone* zone() const { return builder_->zone(); }
+    int elements_size() const {
+      return IsFastDoubleElementsKind(kind_) ? kDoubleSize : kPointerSize;
+    }
+    HInstruction* AddInstruction(HInstruction* instr) {
+      return builder_->AddInstruction(instr);
+    }
+    HGraphBuilder* builder() { return builder_; }
+    HGraph* graph() { return builder_->graph(); }
+    int initial_capacity() {
+      STATIC_ASSERT(JSArray::kPreallocatedArrayElements > 0);
+      return JSArray::kPreallocatedArrayElements;
+    }
+
+    HValue* EmitMapCode(HValue* context);
+    HValue* EstablishEmptyArrayAllocationSize();
+    HValue* EstablishAllocationSize(HValue* length_node);
+    HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
+                          HValue* length_field, bool fill_with_hole);
+
+    HGraphBuilder* builder_;
+    ElementsKind kind_;
+    AllocationSiteMode mode_;
+    HValue* allocation_site_payload_;
+    HInnerAllocatedObject* elements_location_;
+  };
+
   HValue* BuildAllocateElements(HValue* context,
                                 ElementsKind kind,
                                 HValue* capacity);

@@ -1224,6 +1269,16 @@ class HGraphBuilder {
                                          ElementsKind kind,
                                          HValue* capacity);

+  // array must have been allocated with enough room for
+  // 1) the JSArray, 2) a AllocationSiteInfo if mode requires it,
+  // 3) a FixedArray or FixedDoubleArray.
+  // A pointer to the Fixed(Double)Array is returned.
+  HInnerAllocatedObject* BuildJSArrayHeader(HValue* array,
+                                            HValue* array_map,
+                                            AllocationSiteMode mode,
+                                            HValue* allocation_site_payload,
+                                            HValue* length_field);
+
   HValue* BuildGrowElementsCapacity(HValue* object,
                                     HValue* elements,
                                     ElementsKind kind,

@@ -1250,6 +1305,18 @@ class HGraphBuilder {
                              ElementsKind kind,
                              int length);

+  void BuildCompareNil(
+      HValue* value,
+      EqualityKind kind,
+      CompareNilICStub::Types types,
+      Handle<Map> map,
+      int position,
+      HIfContinuation* continuation);
+
+  HValue* BuildCreateAllocationSiteInfo(HValue* previous_object,
+                                        int previous_object_size,
+                                        HValue* payload);
+
  private:
   HGraphBuilder();
   CompilationInfo* info_;

@@ -1328,10 +1395,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
   void AddSoftDeoptimize();

-  // Bailout environment manipulation.
-  void Push(HValue* value) { environment()->Push(value); }
-  HValue* Pop() { return environment()->Pop(); }
-
   void Bailout(const char* reason);

   HBasicBlock* CreateJoin(HBasicBlock* first,
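The IfBuilder additions above (IfNot, ElseDeopt, Return) read naturally at call sites. A minimal, hypothetical usage sketch (assuming a method on an HGraphBuilder subclass and the builder-only constructor form; HCompareObjectEqAndBranch is one condition class with the two-operand shape these templates expect):

    // Hypothetical call site: deoptimize unless 'value' is the null constant.
    IfBuilder if_null(this);
    if_null.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
    if_null.Then();
    //   ... emit the fast path for null here ...
    if_null.ElseDeopt();  // folds Else(); Deopt(); End(); into one call

IfNot is the same shape with the two successor blocks swapped, which saves callers from inventing inverted condition classes.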

16  deps/v8/src/ia32/assembler-ia32-inl.h

@@ -330,9 +330,14 @@ Immediate::Immediate(Label* internal_offset) {
 Immediate::Immediate(Handle<Object> handle) {
+#ifdef DEBUG
+  Isolate* isolate = Isolate::Current();
+#endif
+  ALLOW_HANDLE_DEREF(isolate,
+                     "using and embedding raw address, heap object check");
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
-  ASSERT(!HEAP->InNewSpace(obj));
+  ASSERT(!isolate->heap()->InNewSpace(obj));
   if (obj->IsHeapObject()) {
     x_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;

@@ -363,6 +368,7 @@ void Assembler::emit(uint32_t x) {
 void Assembler::emit(Handle<Object> handle) {
+  ALLOW_HANDLE_DEREF(isolate(), "heap object check");
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
   ASSERT(!isolate()->heap()->InNewSpace(obj));

@@ -386,6 +392,14 @@ void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
 }

+void Assembler::emit(Handle<Code> code,
+                     RelocInfo::Mode rmode,
+                     TypeFeedbackId id) {
+  ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
+}
+
 void Assembler::emit(const Immediate& x) {
   if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
     Label* label = reinterpret_cast<Label*>(x.x_);

6  deps/v8/src/ia32/assembler-ia32.cc

@@ -1459,7 +1459,7 @@ void Assembler::call(Handle<Code> code,
   EnsureSpace ensure_space(this);
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE8);
-  emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id);
+  emit(code, rmode, ast_id);
 }

@@ -1513,7 +1513,7 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
-  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+  emit(code, rmode);
 }

@@ -1568,7 +1568,7 @@ void Assembler::j(Condition cc, Handle<Code> code) {
   // 0000 1111 1000 tttn #32-bit disp
   EMIT(0x0F);
   EMIT(0x80 | cc);
-  emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET);
+  emit(code, RelocInfo::CODE_TARGET);
 }

4  deps/v8/src/ia32/assembler-ia32.h

@@ -411,6 +411,7 @@ class Operand BASE_EMBEDDED {
   }

   static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
+    ALLOW_HANDLE_DEREF(Isolate::Current(), "embedding raw address");
     return Operand(reinterpret_cast<int32_t>(cell.location()),
                    RelocInfo::GLOBAL_PROPERTY_CELL);
   }

@@ -1149,6 +1150,9 @@ class Assembler : public AssemblerBase {
   inline void emit(uint32_t x,
                    RelocInfo::Mode rmode,
                    TypeFeedbackId id = TypeFeedbackId::None());
+  inline void emit(Handle<Code> code,
+                   RelocInfo::Mode rmode,
+                   TypeFeedbackId id = TypeFeedbackId::None());
   inline void emit(const Immediate& x);
   inline void emit_w(const Immediate& x);

36  deps/v8/src/ia32/builtins-ia32.cc

@@ -1207,7 +1207,7 @@ static void AllocateJSArray(MacroAssembler* masm,
 // that for a construct call the constructor function in edi needs to be
 // preserved for entering the generic code. In both cases argc in eax needs to
 // be preserved.
-static void ArrayNativeCode(MacroAssembler* masm,
+void ArrayNativeCode(MacroAssembler* masm,
                      bool construct_call,
                      Label* call_generic_code) {
   Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,

@@ -1494,7 +1494,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
 }

-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc
   //  -- ebx : type info cell

@@ -1513,39 +1513,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
     __ Assert(not_zero, "Unexpected initial map for Array function");
     __ CmpObjectType(ecx, MAP_TYPE, ecx);
     __ Assert(equal, "Unexpected initial map for Array function");
-
-    if (FLAG_optimize_constructed_arrays) {
-      // We should either have undefined in ebx or a valid jsglobalpropertycell
-      Label okay_here;
-      Handle<Object> undefined_sentinel(
-          masm->isolate()->heap()->undefined_value(), masm->isolate());
-      Handle<Map> global_property_cell_map(
-          masm->isolate()->heap()->global_property_cell_map());
-      __ cmp(ebx, Immediate(undefined_sentinel));
-      __ j(equal, &okay_here);
-      __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
-      __ Assert(equal, "Expected property cell in register ebx");
-      __ bind(&okay_here);
-    }
   }

-  if (FLAG_optimize_constructed_arrays) {
-    Label not_zero_case, not_one_case;
-    __ test(eax, eax);
-    __ j(not_zero, &not_zero_case);
-    ArrayNoArgumentConstructorStub no_argument_stub;
-    __ TailCallStub(&no_argument_stub);
-
-    __ bind(&not_zero_case);
-    __ cmp(eax, 1);
-    __ j(greater, &not_one_case);
-    ArraySingleArgumentConstructorStub single_argument_stub;
-    __ TailCallStub(&single_argument_stub);
-
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub n_argument_stub;
-    __ TailCallStub(&n_argument_stub);
-  } else {
   Label generic_constructor;
   // Run the native code for the Array function called as constructor.
   ArrayNativeCode(masm, true, &generic_constructor);

@@ -1557,7 +1526,6 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
       masm->isolate()->builtins()->JSConstructStubGeneric();
   __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
 }
-}

 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {

282  deps/v8/src/ia32/code-stubs-ia32.cc

@@ -101,16 +101,21 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 }

-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
-    CodeStubInterfaceDescriptor* descriptor) {
+static void InitializeArrayConstructorDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor,
+    int constant_stack_parameter_count) {
   // register state
-  // edi -- constructor function
+  // eax -- number of arguments
   // ebx -- type info cell with elements kind
-  // eax -- number of arguments to the constructor function
-  static Register registers[] = { edi, ebx };
-  descriptor->register_param_count_ = 2;
+  static Register registers[] = { ebx };
+  descriptor->register_param_count_ = 1;
+
+  if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
     descriptor->stack_parameter_count_ = &eax;
+  }
+  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
   descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
   descriptor->deoptimization_handler_ =

@@ -121,26 +126,64 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate,
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor);
+  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
 }

 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor);
+  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
 }

 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor);
+  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { eax };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(CompareNilIC_Miss);
+  descriptor->miss_handler_ =
+      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
 }

 #define __ ACCESS_MASM(masm)

+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+  // Update the static counter each time a new code stub is generated.
+  Isolate* isolate = masm->isolate();
+  isolate->counters()->code_stubs()->Increment();
+
+  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+  int param_count = descriptor->register_param_count_;
+  {
+    // Call the runtime system in a fresh internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    ASSERT(descriptor->register_param_count_ == 0 ||
+           eax.is(descriptor->register_params_[param_count - 1]));
+    // Push arguments
+    for (int i = 0; i < param_count; ++i) {
+      __ push(descriptor->register_params_[i]);
+    }
+    ExternalReference miss = descriptor->miss_handler_;
+    __ CallExternalReference(miss, descriptor->register_param_count_);
+  }
+
+  __ ret(0);
+}
+
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in eax.
   Label check_heap_number, call_builtin;
@@ -531,7 +574,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   AllowExternalCallThatCantCauseGC scope(masm);
   __ PrepareCallCFunction(argument_count, ecx);
   __ mov(Operand(esp, 0 * kPointerSize),
-         Immediate(ExternalReference::isolate_address()));
+         Immediate(ExternalReference::isolate_address(masm->isolate())));
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(masm->isolate()),
       argument_count);

@@ -3851,7 +3894,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Argument 9: Pass current isolate address.
   __ mov(Operand(esp, 8 * kPointerSize),
-         Immediate(ExternalReference::isolate_address()));
+         Immediate(ExternalReference::isolate_address(masm->isolate())));

   // Argument 8: Indicate that this is a direct call from JavaScript.
   __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));

@@ -4927,6 +4970,9 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  if (FLAG_optimize_constructed_arrays) {
+    ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  }
 }

@@ -5005,7 +5051,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
   __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
   __ mov(Operand(esp, 2 * kPointerSize),
-         Immediate(ExternalReference::isolate_address()));
+         Immediate(ExternalReference::isolate_address(masm->isolate())));
   __ call(ebx);
   // Result is in eax or edx:eax - do not destroy these registers!

@@ -5013,12 +5059,17 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
     __ dec(Operand::StaticVariable(scope_depth));
   }

-  // Make sure we're not trying to return 'the hole' from the runtime
-  // call as this may lead to crashes in the IC code later.
+  // Runtime functions should not return 'the hole'. Allowing it to escape may
+  // lead to crashes in the IC code later.
   if (FLAG_debug_code) {
     Label okay;
     __ cmp(eax, masm->isolate()->factory()->the_hole_value());
     __ j(not_equal, &okay, Label::kNear);
+    // TODO(wingo): Currently SuspendJSGeneratorObject returns the hole. Change
+    // to return another sentinel like a harmony symbol.
+    __ cmp(ebx, Immediate(ExternalReference(
+        Runtime::kSuspendJSGeneratorObject, masm->isolate())));
+    __ j(equal, &okay, Label::kNear);
     __ int3();
     __ bind(&okay);
   }

@@ -5777,17 +5828,17 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ASCII characters.
+  // to contain only one byte characters.
   // ecx: first instance type AND second instance type.
   // edi: second instance type.
-  __ test(ecx, Immediate(kAsciiDataHintMask));
+  __ test(ecx, Immediate(kOneByteDataHintMask));
   __ j(not_zero, &ascii_data);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ xor_(edi, ecx);
-  STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
-  __ and_(edi, kOneByteStringTag | kAsciiDataHintTag);
-  __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag);
+  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
+  __ and_(edi, kOneByteStringTag | kOneByteDataHintTag);
+  __ cmp(edi, kOneByteStringTag | kOneByteDataHintTag);
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);

@@ -7446,7 +7497,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
   __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
   __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
   __ mov(Operand(esp, 2 * kPointerSize),
-         Immediate(ExternalReference::isolate_address()));
+         Immediate(ExternalReference::isolate_address(masm->isolate())));

   AllowExternalCallThatCantCauseGC scope(masm);
   if (mode == INCREMENTAL_COMPACTION) {
@@ -7686,6 +7737,197 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   __ ret(0);
 }

+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm) {
+  int last_index = GetSequenceIndexFromFastElementsKind(
+      TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= last_index; ++i) {
+    Label next;
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    __ cmp(edx, kind);
+    __ j(not_equal, &next);
+    T stub(kind);
+    __ TailCallStub(&stub);
+    __ bind(&next);
+  }
+
+  // If we reached this point there is a problem.
+  __ Abort("Unexpected ElementsKind in array constructor");
+}
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
+  // ebx - type info cell
+  // edx - kind
+  // eax - number of arguments
+  // edi - constructor?
+  // esp[0] - return address
+  // esp[4] - last argument
+  ASSERT(FAST_SMI_ELEMENTS == 0);
+  ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  ASSERT(FAST_ELEMENTS == 2);
+  ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+  ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+
+  // is the low bit set? If so, we are holey and that is good.
+  __ test_b(edx, 1);
+  Label normal_sequence;
+  __ j(not_zero, &normal_sequence);
+
+  // look at the first argument
+  __ mov(ecx, Operand(esp, kPointerSize));
+  __ test(ecx, ecx);
+  __ j(zero, &normal_sequence);
+
+  // We are going to create a holey array, but our kind is non-holey.
+  // Fix kind and retry
+  __ inc(edx);
+  __ cmp(ebx, Immediate(undefined_sentinel));
+  __ j(equal, &normal_sequence);
+
+  // Save the resulting elements kind in type info
+  __ SmiTag(edx);
+  __ mov(FieldOperand(ebx, kPointerSize), edx);
+  __ SmiUntag(edx);
+
+  __ bind(&normal_sequence);
+  int last_index = GetSequenceIndexFromFastElementsKind(
+      TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= last_index; ++i) {
+    Label next;
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    __ cmp(edx, kind);
+    __ j(not_equal, &next);
+    ArraySingleArgumentConstructorStub stub(kind);
+    __ TailCallStub(&stub);
+    __ bind(&next);
+  }
+
+  // If we reached this point there is a problem.
+  __ Abort("Unexpected ElementsKind in array constructor");
+}
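The one-argument dispatch relies on the ElementsKind ordering pinned down by the ASSERTs above: every holey kind directly follows its packed counterpart, so the low bit tested by `__ test_b(edx, 1)` flags holeyness, and the `__ inc(edx)` is the packed-to-holey transition. A self-contained sketch of that invariant (hypothetical helper names, not v8 API):

    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,
      FAST_DOUBLE_ELEMENTS = 4,
      FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };

    // Low bit set <=> holey: exactly what '__ test_b(edx, 1)' checks.
    inline bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }

    // The '__ inc(edx)' above is this transition, applied to a packed kind.
    inline ElementsKind ToHoley(ElementsKind kind) {
      return IsHoley(kind) ? kind : static_cast<ElementsKind>(kind + 1);
    }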
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+  int to_index = GetSequenceIndexFromFastElementsKind(
+      TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= to_index; ++i) {
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    T stub(kind);
+    stub.GetCode(isolate)->set_is_pregenerated(true);
+  }
+}
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+      isolate);
+}
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc (only if argument_count_ == ANY)
+  //  -- ebx : type info cell
+  //  -- edi : constructor
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // Will both indicate a NULL and a Smi.
+    __ test(ecx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, "Unexpected initial map for Array function");
+    __ CmpObjectType(ecx, MAP_TYPE, ecx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+
+    // We should either have undefined in ebx or a valid jsglobalpropertycell
+    Label okay_here;
+    Handle<Map> global_property_cell_map(
+        masm->isolate()->heap()->global_property_cell_map());
+    __ cmp(ebx, Immediate(undefined_sentinel));
+    __ j(equal, &okay_here);
+    __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
+    __ Assert(equal, "Expected property cell in register ebx");
+    __ bind(&okay_here);
+  }
+
+  if (FLAG_optimize_constructed_arrays) {
+    Label no_info, switch_ready;
+    // Get the elements kind and case on that.
+    __ cmp(ebx, Immediate(undefined_sentinel));
+    __ j(equal, &no_info);
+    __ mov(edx, FieldOperand(ebx, kPointerSize));
+
+    // There is no info if the call site went megamorphic either
+    // TODO(mvstanton): Really? I thought if it was the array function that
+    // the cell wouldn't get stamped as megamorphic.
+    __ cmp(edx, Immediate(TypeFeedbackCells::MegamorphicSentinel(
+        masm->isolate())));
+    __ j(equal, &no_info);
+    __ SmiUntag(edx);
+    __ jmp(&switch_ready);
+    __ bind(&no_info);
+    __ mov(edx, Immediate(GetInitialFastElementsKind()));
+    __ bind(&switch_ready);
+
+    if (argument_count_ == ANY) {
+      Label not_zero_case, not_one_case;
+      __ test(eax, eax);
+      __ j(not_zero, &not_zero_case);
+      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+
+      __ bind(&not_zero_case);
+      __ cmp(eax, 1);
+      __ j(greater, &not_one_case);
+      CreateArrayDispatchOneArgument(masm);
+
+      __ bind(&not_one_case);
+      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+    } else if (argument_count_ == NONE) {
+      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+    } else if (argument_count_ == ONE) {
+      CreateArrayDispatchOneArgument(masm);
+    } else if (argument_count_ == MORE_THAN_ONE) {
+      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+    } else {
+      UNREACHABLE();
+    }
+  } else {
+    Label generic_constructor;
+    // Run the native code for the Array function called as constructor.
+    ArrayNativeCode(masm, true, &generic_constructor);
+
+    // Jump to the generic construct code in case the specialized code cannot
+    // handle the construction.
+    __ bind(&generic_constructor);
+    Handle<Code> generic_construct_stub =
+        masm->isolate()->builtins()->JSConstructStubGeneric();
+    __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+  }
+}
+
 #undef __

 } }  // namespace v8::internal

4  deps/v8/src/ia32/code-stubs-ia32.h

@@ -36,6 +36,10 @@ namespace v8 {
 namespace internal {

+void ArrayNativeCode(MacroAssembler* masm,
+                     bool construct_call,
+                     Label* call_generic_code);
+
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public PlatformCodeStub {

2  deps/v8/src/ia32/codegen-ia32.cc

@@ -635,6 +635,8 @@ OS::MemMoveFunction CreateMemMoveFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
   CPU::FlushICache(buffer, actual_size);
   OS::ProtectCode(buffer, actual_size);
+  // TODO(jkummerow): It would be nice to register this code creation event
+  // with the PROFILE / GDBJIT system.
   return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
 }

8  deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -716,8 +716,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();

-  Isolate* isolate = masm()->isolate();
-
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;

@@ -762,10 +760,10 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
   __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
   __ mov(Operand(esp, 5 * kPointerSize),
-         Immediate(ExternalReference::isolate_address()));
+         Immediate(ExternalReference::isolate_address(isolate())));
   {
     AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
   }

   // Preserve deoptimizer object in register eax and get the input

@@ -828,7 +826,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   {
     AllowExternalCallThatCantCauseGC scope(masm());
     __ CallCFunction(
-        ExternalReference::compute_output_frames_function(isolate), 1);
+        ExternalReference::compute_output_frames_function(isolate()), 1);
   }
   __ pop(eax);

177  deps/v8/src/ia32/full-codegen-ia32.cc

@@ -1883,6 +1883,156 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
 }

+void FullCodeGenerator::VisitYield(Yield* expr) {
+  Comment cmnt(masm_, "[ Yield");
+  // Evaluate yielded value first; the initial iterator definition depends on
+  // this. It stays on the stack while we update the iterator.
+  VisitForStackValue(expr->expression());
+
+  switch (expr->yield_kind()) {
+    case Yield::INITIAL:
+    case Yield::SUSPEND: {
+      VisitForStackValue(expr->generator_object());
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ mov(context_register(),
+             Operand(ebp, StandardFrameConstants::kContextOffset));
+
+      Label resume;
+      __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
+      __ j(not_equal, &resume);
+      __ pop(result_register());
+      if (expr->yield_kind() == Yield::SUSPEND) {
+        // TODO(wingo): Box into { value: VALUE, done: false }.
+      }
+      EmitReturnSequence();
+
+      __ bind(&resume);
+      context()->Plug(result_register());
+      break;
+    }
+
+    case Yield::FINAL: {
+      VisitForAccumulatorValue(expr->generator_object());
+      __ mov(FieldOperand(result_register(),
+                          JSGeneratorObject::kContinuationOffset),
+             Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+      __ pop(result_register());
+      // TODO(wingo): Box into { value: VALUE, done: true }.
+
+      // Exit all nested statements.
+      NestedStatement* current = nesting_stack_;
+      int stack_depth = 0;
+      int context_length = 0;
+      while (current != NULL) {
+        current = current->Exit(&stack_depth, &context_length);
+      }
+      __ Drop(stack_depth);
+      EmitReturnSequence();
+      break;
+    }
+
+    case Yield::DELEGATING:
+      UNIMPLEMENTED();
+  }
+}
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+    Expression *value,
+    JSGeneratorObject::ResumeMode resume_mode) {
+  // The value stays in eax, and is ultimately read by the resumed generator,
+  // as if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it.
+  // ebx will hold the generator object until the activation has been resumed.
+  VisitForStackValue(generator);
+  VisitForAccumulatorValue(value);
+  __ pop(ebx);
+
+  // Check generator state.
+  Label wrong_state, done;
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+  __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+         Immediate(Smi::FromInt(0)));
+  __ j(less_equal, &wrong_state);
+
+  // Load suspended function and context.
+  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+  // Push receiver.
+  __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+  // Push holes for arguments to generator function.
+  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(edx,
+         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mov(ecx, isolate()->factory()->the_hole_value());
+  Label push_argument_holes, push_frame;
+  __ bind(&push_argument_holes);
+  __ sub(edx, Immediate(1));
+  __ j(carry, &push_frame);
+  __ push(ecx);
+  __ jmp(&push_argument_holes);
+
+  // Enter a new JavaScript frame, and initialize its slots as they were when
+  // the generator was suspended.
+  Label resume_frame;
+  __ bind(&push_frame);
+  __ call(&resume_frame);
+  __ jmp(&done);
+  __ bind(&resume_frame);
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS Function.
+
+  // Load the operand stack size.
+  __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+  __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
+  __ SmiUntag(edx);
+
+  // If we are sending a value and there is no operand stack, we can jump back
+  // in directly.
+  if (resume_mode == JSGeneratorObject::SEND) {
+    Label slow_resume;
+    __ cmp(edx, Immediate(0));
+    __ j(not_zero, &slow_resume);
+    __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(ecx);
+    __ add(edx, ecx);
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ jmp(edx);
+    __ bind(&slow_resume);
+  }
+
+  // Otherwise, we push holes for the operand stack and call the runtime to fix
+  // up the stack and the handlers.
+  Label push_operand_holes, call_resume;
+  __ bind(&push_operand_holes);
+  __ sub(edx, Immediate(1));
+  __ j(carry, &call_resume);
+  __ push(ecx);
+  __ jmp(&push_operand_holes);
+  __ bind(&call_resume);
+  __ push(ebx);
+  __ push(result_register());
+  __ Push(Smi::FromInt(resume_mode));
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+  // Not reached: the runtime call returns elsewhere.
+  __ Abort("Generator failed to resume.");
+
+  // Throw error if we attempt to operate on a running generator.
+  __ bind(&wrong_state);
+  __ push(ebx);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+  __ bind(&done);
+  context()->Plug(result_register());
+}
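For orientation, the resume sequence emitted above amounts to the following pseudocode (hypothetical helper names; the real logic is the hand-written assembly, and the fast path exists only for SEND):

    // Pseudocode sketch of the emitted resume path (not the runtime's code).
    void ResumeGenerator(JSGeneratorObject* gen, Object* sent_value, int mode) {
      if (gen->continuation() <= 0) ThrowGeneratorStateError(gen);
      PushReceiverAndArgumentHoles(gen);   // receiver + one hole per formal
      PushStackFrame(gen->context(), gen->function());
      if (mode == SEND && gen->operand_stack()->length() == 0) {
        // Fast path: mark the generator executing and jump straight back
        // into the code object at the recorded continuation offset.
        JumpTo(gen->function()->code_entry() + gen->continuation());
      }
      // Slow path: the runtime rebuilds the operand stack and try-handlers.
      CallRuntime(kResumeJSGeneratorObject, gen, sent_value, mode);
    }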
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();

@@ -4384,24 +4534,21 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);

-  Handle<Object> nil_value = nil == kNullValue ?
-      isolate()->factory()->null_value() :
-      isolate()->factory()->undefined_value();
+  EqualityKind kind = expr->op() == Token::EQ_STRICT
+      ? kStrictEquality : kNonStrictEquality;
+  Handle<Object> nil_value = nil == kNullValue
+      ? isolate()->factory()->null_value()
+      : isolate()->factory()->undefined_value();
+  if (kind == kStrictEquality) {
     __ cmp(eax, nil_value);
-  if (expr->op() == Token::EQ_STRICT) {
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Object> other_nil_value = nil == kNullValue ?
-        isolate()->factory()->undefined_value() :
-        isolate()->factory()->null_value();
-    __ j(equal, if_true);
-    __ cmp(eax, other_nil_value);
-    __ j(equal, if_true);
-    __ JumpIfSmi(eax, if_false);
-    // It can be an undetectable object.
-    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-    __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
-    __ test(edx, Immediate(1 << Map::kIsUndetectable));
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
+                                                         kNonStrictEquality,
+                                                         nil);
+    CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+    __ test(eax, eax);
     Split(not_zero, if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);

246  deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -336,52 +336,31 @@ bool LCodeGen::GenerateBody() {
        !is_aborted() && current_instruction_ < instructions_->length();
        current_instruction_++) {
     LInstruction* instr = instructions_->at(current_instruction_);

+    // Don't emit code for basic blocks with a replacement.
     if (instr->IsLabel()) {
-      LLabel* label = LLabel::cast(instr);
-      emit_instructions = !label->HasReplacement();
+      emit_instructions = !LLabel::cast(instr)->HasReplacement();
     }
+    if (!emit_instructions) continue;

-    if (emit_instructions) {
-      if (FLAG_code_comments) {
-        HValue* hydrogen = instr->hydrogen_value();
-        if (hydrogen != NULL) {
-          if (hydrogen->IsChange()) {
-            HValue* changed_value = HChange::cast(hydrogen)->value();
-            int use_id = 0;
-            const char* use_mnemo = "dead";
-            if (hydrogen->UseCount() >= 1) {
-              HValue* use_value = hydrogen->uses().value();
-              use_id = use_value->id();
-              use_mnemo = use_value->Mnemonic();
-            }
-            Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
-                    current_instruction_, instr->Mnemonic(),
-                    changed_value->id(), changed_value->Mnemonic(),
-                    use_id, use_mnemo);
-          } else {
-            Comment(";;; @%d: %s. <#%d>", current_instruction_,
-                    instr->Mnemonic(), hydrogen->id());
-          }
-        } else {
-          Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
-        }
-      }
+    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
+      Comment(";;; <@%d,#%d> %s",
+              current_instruction_,
+              instr->hydrogen_value()->id(),
+              instr->Mnemonic());
     }

-    if (!CpuFeatures::IsSupported(SSE2)) {
-      FlushX87StackIfNecessary(instr);
-    }
+    if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);

     instr->CompileToNative(this);

     if (!CpuFeatures::IsSupported(SSE2)) {
       ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(x87_stack_depth_);
       }
     }
-    }
   }
   EnsureSpaceForLazyDeopt();
   return !is_aborted();
 }
@@ -390,6 +369,9 @@ bool LCodeGen::GenerateBody() {
 bool LCodeGen::GenerateJumpTable() {
   Label needs_frame_not_call;
   Label needs_frame_is_call;
+  if (jump_table_.length() > 0) {
+    Comment(";;; -------------------- Jump table --------------------");
+  }
   for (int i = 0; i < jump_table_.length(); i++) {
     __ bind(&jump_table_[i].label);
     Address entry = jump_table_[i].address;

@@ -465,11 +447,14 @@ bool LCodeGen::GenerateDeferredCode() {
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
-      __ bind(code->entry());
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred build frame @%d: %s.",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
         ASSERT(!frame_is_built_);
         ASSERT(info()->IsStub());
         frame_is_built_ = true;

@@ -478,15 +463,11 @@ bool LCodeGen::GenerateDeferredCode() {
         __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
         __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
         __ lea(ebp, Operand(esp, 2 * kPointerSize));
+        Comment(";;; Deferred code");
       }
-      Comment(";;; Deferred code @%d: %s.",
-              code->instruction_index(),
-              code->instr()->Mnemonic());
       code->Generate();
       if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred destroy frame @%d: %s.",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
+        Comment(";;; Destroy frame");
         ASSERT(frame_is_built_);
         frame_is_built_ = false;
         __ mov(esp, ebp);
@@ -654,7 +635,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                                  pushed_arguments_index,
                                  pushed_arguments_count);
   bool has_closure_id = !info()->closure().is_null() &&
-      *info()->closure() != *environment->closure();
+      !info()->closure().is_identical_to(environment->closure());
   int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;

@@ -1021,10 +1002,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   Handle<FixedArray> literals =
       factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  { ALLOW_HANDLE_DEREF(isolate(),
+                       "copying a ZoneList of handles into a FixedArray");
     for (int i = 0; i < deoptimization_literals_.length(); i++) {
       literals->set(i, *deoptimization_literals_[i]);
     }
     data->SetLiteralArray(*literals);
+  }

   data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
   data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

@@ -1125,10 +1109,19 @@ void LCodeGen::RecordPosition(int position) {
 }

+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
 void LCodeGen::DoLabel(LLabel* label) {
-  Comment(";;; -------------------- B%d%s --------------------",
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
           label->block_id(),
-          label->is_loop_header() ? " (loop header)" : "");
+          LabelType(label));
   __ bind(label->label());
   current_block_ = label->block_id();
   DoGap(label);

@@ -1797,6 +1790,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
 void LCodeGen::DoConstantT(LConstantT* instr) {
   Register reg = ToRegister(instr->result());
   Handle<Object> handle = instr->value();
+  ALLOW_HANDLE_DEREF(isolate(), "smi check");
   if (handle->IsHeapObject()) {
     __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
   } else {
@@ -2056,17 +2050,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
 }

-int LCodeGen::GetNextEmittedBlock(int block) {
-  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
-    LLabel* label = chunk_->GetLabel(i);
-    if (!label->HasReplacement()) return i;
+int LCodeGen::GetNextEmittedBlock() const {
+  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
   }
   return -1;
 }

 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
-  int next_block = GetNextEmittedBlock(current_block_);
+  int next_block = GetNextEmittedBlock();
   right_block = chunk_->LookupDestination(right_block);
   left_block = chunk_->LookupDestination(left_block);

@@ -2204,10 +2197,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
 void LCodeGen::EmitGoto(int block) {
-  block = chunk_->LookupDestination(block);
-  int next_block = GetNextEmittedBlock(current_block_);
-  if (block != next_block) {
-    __ jmp(chunk_->GetAssemblyLabel(block));
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
   }
 }

@@ -2786,6 +2777,8 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
     __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
   } else {
     Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi
+    __ SmiUntag(reg);
     Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
     if (dynamic_frame_alignment && FLAG_debug_code) {
       ASSERT(extra_value_count == 2);

@@ -3019,6 +3012,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   ASSERT(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
     Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+    ALLOW_HANDLE_DEREF(isolate(), "smi check");
     if (object->IsSmi()) {
       __ Push(Handle<Smi>::cast(object));
     } else {

@@ -3198,14 +3192,22 @@ void LCodeGen::DoLoadExternalArrayPointer(
 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    int index = (const_length - const_index) + 1;
+    __ mov(result, Operand(arguments, index * kPointerSize));
+  } else {
     Register length = ToRegister(instr->length());
     Operand index = ToOperand(instr->index());
-    Register result = ToRegister(instr->result());
     // There are two words between the frame pointer and the last argument.
     // Subtracting from length accounts for one of them add one more.
     __ sub(length, index);
     __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+  }
 }

 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
@@ -3595,12 +3597,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {

 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count,
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind,
                                  EDIState edi_state) {
-  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
-      function->shared()->formal_parameter_count() == arity;
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;

   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());

@@ -3615,13 +3620,13 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
     // Set eax to arguments count if adaption is not needed. Assumes that eax
     // is available to write to at this point.
-    if (!function->NeedsArgumentsAdaption()) {
+    if (dont_adapt_arguments) {
       __ mov(eax, arity);
     }

     // Invoke function directly.
     __ SetCallKind(ecx, call_kind);
-    if (*function == *info()->closure()) {
+    if (function.is_identical_to(info()->closure())) {
       __ CallSelf();
     } else {
       __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));

@@ -3632,14 +3637,17 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
     SafepointGenerator generator(
         this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(arity);
-    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(
+        function, expected, count, CALL_FUNCTION, generator, call_kind);
   }
 }


 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  CallKnownFunction(instr->function(),
+  CallKnownFunction(instr->hydrogen()->function(),
+                    instr->hydrogen()->formal_parameter_count(),
                     instr->arity(),
                     instr,
                     CALL_AS_METHOD,
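The guard no longer dereferences the function handle (`function->shared()->formal_parameter_count()`); the caller passes the count in, and a sentinel marks callees that never need the arguments adaptor. A hedged standalone model of the decision (the sentinel's real value is internal to SharedFunctionInfo; -1 here is an illustrative placeholder):

    #include <cassert>

    // Hypothetical stand-in for SharedFunctionInfo::kDontAdaptArgumentsSentinel.
    static const int kDontAdaptArgumentsSentinel = -1;

    // Mirrors the new guard in CallKnownFunction: a direct invoke is safe when
    // the callee opted out of adaptation or the call-site arity matches.
    bool CanInvokeDirectly(int formal_parameter_count, int arity) {
      bool dont_adapt_arguments =
          formal_parameter_count == kDontAdaptArgumentsSentinel;
      return dont_adapt_arguments || formal_parameter_count == arity;
    }

    int main() {
      assert(CanInvokeDirectly(2, 2));                           // arity matches
      assert(CanInvokeDirectly(kDontAdaptArgumentsSentinel, 5)); // opted out
      assert(!CanInvokeDirectly(2, 3));  // needs the arguments adaptor
      return 0;
    }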
@@ -4101,7 +4109,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(edi));
   ASSERT(instr->HasPointerMap());

-  if (instr->known_function().is_null()) {
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     RecordPosition(pointers->position());
     SafepointGenerator generator(

@@ -4109,7 +4118,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
     ParameterCount count(instr->arity());
     __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
   } else {
-    CallKnownFunction(instr->known_function(),
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
                       instr->arity(),
                       instr,
                       CALL_AS_METHOD,

@@ -4169,7 +4179,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {

 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  CallKnownFunction(instr->target(),
+  CallKnownFunction(instr->hydrogen()->target(),
+                    instr->hydrogen()->formal_parameter_count(),
                     instr->arity(),
                     instr,
                     CALL_AS_FUNCTION,
@@ -4200,11 +4211,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
   ASSERT(FLAG_optimize_constructed_arrays);

-  __ mov(ebx, instr->hydrogen()->property_cell());
-  Handle<Code> array_construct_code =
-      isolate()->builtins()->ArrayConstructCode();
   __ Set(eax, Immediate(instr->arity()));
-  CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
+  __ mov(ebx, instr->hydrogen()->property_cell());
+  Object* cell_value = instr->hydrogen()->property_cell()->value();
+  ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  } else if (instr->arity() == 1) {
+    ArraySingleArgumentConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  } else {
+    ArrayNArgumentsConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  }
 }
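Instead of funneling every constructed array through the generic ArrayConstructCode builtin, the code now reads the transitioning ElementsKind out of the property cell and dispatches on arity to a kind-specialized stub. The selection logic, reduced to a runnable sketch (stub names quoted from the hunk; everything else is scaffolding):

    #include <cstdio>

    // Mirrors the arity dispatch in DoCallNewArray: zero arguments, a single
    // argument (which may be an explicit length), and the general case each
    // get a stub specialized on the cell's ElementsKind.
    const char* SelectArrayConstructorStub(int arity) {
      if (arity == 0) return "ArrayNoArgumentConstructorStub";
      if (arity == 1) return "ArraySingleArgumentConstructorStub";
      return "ArrayNArgumentsConstructorStub";
    }

    int main() {
      int arities[] = {0, 1, 4};
      for (int i = 0; i < 3; i++) {
        std::printf("arity %d -> %s\n", arities[i],
                    SelectArrayConstructorStub(arities[i]));
      }
      return 0;
    }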
@@ -4222,7 +4242,6 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {

 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   Register object = ToRegister(instr->object());
-  Register value = ToRegister(instr->value());
   int offset = instr->offset();

   if (!instr->transition().is_null()) {

@@ -4248,36 +4267,44 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   HType type = instr->hydrogen()->value()->type();
   SmiCheck check_needed =
       type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-  if (instr->is_in_object()) {
-    __ mov(FieldOperand(object, offset), value);
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
-      Register temp = ToRegister(instr->temp());
-      // Update the write barrier for the object for in-object properties.
-      __ RecordWriteField(object,
-                          offset,
-                          value,
-                          temp,
-                          GetSaveFPRegsMode(),
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
-    }
-  } else {
-    Register temp = ToRegister(instr->temp());
-    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ mov(FieldOperand(temp, offset), value);
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
-      // Update the write barrier for the properties array.
-      // object is used as a scratch register.
-      __ RecordWriteField(temp,
-                          offset,
-                          value,
-                          object,
-                          GetSaveFPRegsMode(),
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
-    }
+
+  Register write_register = object;
+  if (!instr->is_in_object()) {
+    write_register = ToRegister(instr->temp());
+    __ mov(write_register,
+           FieldOperand(object, JSObject::kPropertiesOffset));
+  }
+
+  if (instr->value()->IsConstantOperand()) {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsInteger32(operand_value)) {
+      // In lithium register preparation, we made sure that the constant integer
+      // operand fits into smi range.
+      Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
+      __ mov(FieldOperand(write_register, offset), Immediate(smi_value));
+    } else if (operand_value->IsRegister()) {
+      __ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      __ mov(FieldOperand(write_register, offset), handle_value);
+    }
+  } else {
+    __ mov(FieldOperand(write_register, offset), ToRegister(instr->value()));
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    Register value = ToRegister(instr->value());
+    Register temp = instr->is_in_object() ? ToRegister(instr->temp()) : object;
+    // Update the write barrier for the object for in-object properties.
+    __ RecordWriteField(write_register,
+                        offset,
+                        value,
+                        temp,
+                        GetSaveFPRegsMode(),
+                        EMIT_REMEMBERED_SET,
+                        check_needed);
   }
 }


 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
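The rewritten store first normalizes its destination: write_register is the object itself for in-object properties, or the loaded properties array otherwise. It then emits the cheapest legal source operand, and the write-barrier path still insists on a register value. A compact model of the source selection (the struct is an invented stand-in; only the branch order follows the hunk above):

    #include <cstdio>

    // Hypothetical flattening of the operand cases in DoStoreNamedField.
    enum StoreSource { kSmiImmediate, kRegister, kHandle };

    struct ValueOperand {        // stand-in for LOperand / LConstantOperand
      bool is_constant;
      bool is_int32;             // integer constant known to fit in Smi range
      bool constant_in_register;
    };

    StoreSource SelectStoreSource(const ValueOperand& value) {
      if (value.is_constant) {
        if (value.is_int32) return kSmiImmediate;   // mov [field], imm(Smi)
        if (value.constant_in_register) return kRegister;
        return kHandle;                             // mov [field], handle
      }
      return kRegister;                             // mov [field], reg
    }

    int main() {
      ValueOperand smi_const = {true, true, false};
      ValueOperand heap_const = {true, false, false};
      ValueOperand plain_reg = {false, false, false};
      std::printf("%d %d %d\n", SelectStoreSource(smi_const),
                  SelectStoreSource(heap_const), SelectStoreSource(plain_reg));
      return 0;
    }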
@@ -4453,7 +4480,6 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {

 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
-  Register value = ToRegister(instr->value());
   Register elements = ToRegister(instr->elements());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

@@ -4464,9 +4490,22 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
                               FAST_ELEMENTS,
                               FixedArray::kHeaderSize - kHeapObjectTag,
                               instr->additional_index());
-  __ mov(operand, value);
+  if (instr->value()->IsRegister()) {
+    __ mov(operand, ToRegister(instr->value()));
+  } else {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsInteger32(operand_value)) {
+      Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
+      __ mov(operand, Immediate(smi_value));
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      __ mov(operand, handle_value);
+    }
+  }

   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    ASSERT(instr->value()->IsRegister());
+    Register value = ToRegister(instr->value());
     ASSERT(!instr->key()->IsConstantOperand());
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
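The Immediate(smi_value) stores above work because, on 32-bit V8, a Smi is a 31-bit integer shifted left one bit with a zero tag bit, so the tagged value is an ordinary machine word. A minimal model of the encoding (not the real Smi class):

    #include <cassert>
    #include <cstdint>

    // On ia32, heap pointers carry a one in the LSB and Smis a zero, which is
    // why an in-range integer constant can be stored with a single
    // `mov ..., Immediate(smi)` and no write barrier.
    static const int kSmiTagSize = 1;

    int32_t SmiFromInt(int32_t value) {
      // The cast avoids undefined behavior when shifting negative values.
      return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
    }

    int main() {
      assert(SmiFromInt(42) == 84);
      assert((SmiFromInt(42) & 1) == 0);  // Smi tag bit is clear
      assert(SmiFromInt(-7) == -14);      // two's-complement round-trip
      return 0;
    }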
@@ -5876,16 +5915,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
   Register result = ToRegister(instr->result());
   Register scratch = ToRegister(instr->temp());
   Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
   int instance_size = initial_map->instance_size();
   ASSERT(initial_map->pre_allocated_property_fields() +
          initial_map->unused_property_fields() -
          initial_map->inobject_properties() == 0);

-  // Allocate memory for the object. The initial map might change when
-  // the constructor's prototype changes, but instance size and property
-  // counts remain unchanged (if slack tracking finished).
-  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
   __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
               TAG_OBJECT);

@@ -5936,8 +5971,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {

 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
   Register result = ToRegister(instr->result());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
   int instance_size = initial_map->instance_size();

   // TODO(3095996): Get rid of this. For now, we need to make the

@@ -6016,7 +6050,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {

 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> literals = instr->hydrogen()->literals();
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate_elements_kind();
   AllocationSiteMode allocation_site_mode =

@@ -6077,7 +6111,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {

 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> literals = instr->hydrogen()->literals();
   Handle<FixedArray> constant_properties =
       instr->hydrogen()->constant_properties();

@@ -6090,7 +6124,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   // Set up the parameters to the stub/runtime call and pick the right
   // runtime function or stub to call.
-  int properties_count = constant_properties->length() / 2;
+  int properties_count = instr->hydrogen()->constant_properties_length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     __ PushHeapObject(literals);
     __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));

@@ -6178,18 +6212,16 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
-  if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(shared_info->language_mode(),
-                            shared_info->is_generator());
-    __ push(Immediate(shared_info));
+  if (!pretenure && instr->hydrogen()->has_no_literals()) {
+    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+                            instr->hydrogen()->is_generator());
+    __ push(Immediate(instr->hydrogen()->shared_info()));
     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   } else {
     __ push(esi);
-    __ push(Immediate(shared_info));
-    __ push(Immediate(pretenure
-                      ? factory()->true_value()
-                      : factory()->false_value()));
+    __ push(Immediate(instr->hydrogen()->shared_info()));
+    __ push(Immediate(pretenure ? factory()->true_value()
                                : factory()->false_value()));
     CallRuntime(Runtime::kNewClosure, 3, instr);
   }

17
deps/v8/src/ia32/lithium-codegen-ia32.h

@@ -84,10 +84,20 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }

+  // TODO(svenpanne) Use this consistently.
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
   bool NeedsEagerFrame() const {
     return GetStackSlotCount() > 0 ||
         info()->is_non_deferred_calling() ||
-        !info()->IsStub();
+        !info()->IsStub() ||
+        info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();

@@ -188,9 +198,9 @@ class LCodeGen BASE_EMBEDDED {
   LPlatformChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
-  HGraph* graph() const { return chunk_->graph(); }
+  HGraph* graph() const { return chunk()->graph(); }

-  int GetNextEmittedBlock(int block);
+  int GetNextEmittedBlock() const;

   void EmitClassOfTest(Label* if_true,
                        Label* if_false,

@@ -254,6 +264,7 @@ class LCodeGen BASE_EMBEDDED {
   // Generate a direct call to a known function. Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count,
                          int arity,
                          LInstruction* instr,
                          CallKind call_kind,

74
deps/v8/src/ia32/lithium-ia32.cc

@@ -99,7 +99,7 @@ bool LInstruction::HasDoubleRegisterResult() {
 bool LInstruction::HasDoubleRegisterInput() {
   for (int i = 0; i < InputCount(); i++) {
     LOperand* op = InputAt(i);
-    if (op->IsDoubleRegister()) {
+    if (op != NULL && op->IsDoubleRegister()) {
       return true;
     }
   }

@@ -210,6 +210,11 @@ const char* LArithmeticT::Mnemonic() const {
 }

+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
 void LGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", block_id());
 }
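LGoto's override is the interesting case: a goto whose target is the next emitted block compiles to nothing, so it is not worth a comment in --code-comments listings. A simplified model of the dispatch (classes are stand-ins for LCodeGen / LInstruction / LGoto):

    #include <cstdio>

    // Model of the HasInterestingComment() hook wired up in this diff:
    // instructions default to "interesting", and trivially-elided ones
    // (fallthrough gotos, labels, parameters) opt out.
    struct CodeGen {
      int next_emitted_block;
      bool IsNextEmittedBlock(int block_id) const {
        return block_id == next_emitted_block;
      }
    };

    struct Instruction {
      virtual ~Instruction() {}
      virtual bool HasInterestingComment(const CodeGen&) const { return true; }
    };

    struct Goto : public Instruction {
      int block_id;
      explicit Goto(int id) : block_id(id) {}
      virtual bool HasInterestingComment(const CodeGen& gen) const {
        return !gen.IsNextEmittedBlock(block_id);  // fallthrough emits no jmp
      }
    };

    int main() {
      CodeGen gen = {7};
      Goto fallthrough(7);
      Goto real_jump(3);
      std::printf("fallthrough interesting: %d\n",
                  fallthrough.HasInterestingComment(gen));
      std::printf("real jump interesting:   %d\n",
                  real_jump.HasInterestingComment(gen));
      return 0;
    }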
@@ -1056,11 +1061,13 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {

 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  info()->MarkAsRequiresFrame();
   return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
 }


 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
   return DefineAsRegister(new(zone()) LArgumentsElements);
 }

@@ -2280,6 +2287,19 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
 }


+// DoStoreKeyed and DoStoreNamedField have special considerations for allowing
+// use of a constant instead of a register.
+static bool StoreConstantValueAllowed(HValue* value) {
+  if (value->IsConstant()) {
+    HConstant* constant_value = HConstant::cast(value);
+    return constant_value->HasSmiValue()
+        || constant_value->HasDoubleValue()
+        || constant_value->ImmortalImmovable();
+  }
+  return false;
+}
+
+
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_external()) {
     ASSERT(instr->elements()->representation().IsTagged());
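A plausible reading of the new predicate: a constant may replace a register operand only when GC can never invalidate the embedded bits. A Smi carries no pointer, a double value is unboxed, and an "immortal immovable" object (a root the collector never relocates) keeps a stable address. A toy mirror of the check (the struct is an assumed stand-in for HConstant, not the V8 class):

    #include <cassert>

    struct ConstantInfo {
      bool has_smi_value;        // pure integer payload, no heap pointer
      bool has_double_value;     // unboxed number
      bool immortal_immovable;   // never moved or reclaimed by GC
    };

    // Same disjunction as StoreConstantValueAllowed in the hunk above.
    bool StoreConstantValueAllowed(const ConstantInfo& c) {
      return c.has_smi_value || c.has_double_value || c.immortal_immovable;
    }

    int main() {
      ConstantInfo smi = {true, false, false};
      ConstantInfo ordinary_heap_object = {false, false, false};
      assert(StoreConstantValueAllowed(smi));
      assert(!StoreConstantValueAllowed(ordinary_heap_object));
      return 0;
    }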
@@ -2295,19 +2315,30 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
       val = UseX87TopOfStack(instr->value());
     }
     LOperand* key = UseRegisterOrConstantAtStart(instr->key());
     return new(zone()) LStoreKeyed(object, key, val);
   } else {
     ASSERT(instr->value()->representation().IsTagged());
     bool needs_write_barrier = instr->NeedsWriteBarrier();

     LOperand* obj = UseRegister(instr->elements());
-    LOperand* val = needs_write_barrier
-        ? UseTempRegister(instr->value())
-        : UseRegisterAtStart(instr->value());
-    LOperand* key = needs_write_barrier
-        ? UseTempRegister(instr->key())
-        : UseRegisterOrConstantAtStart(instr->key());
+    LOperand* val;
+    LOperand* key;
+    if (needs_write_barrier) {
+      val = UseTempRegister(instr->value());
+      key = UseTempRegister(instr->key());
+    } else {
+      if (StoreConstantValueAllowed(instr->value())) {
+        val = UseRegisterOrConstantAtStart(instr->value());
+      } else {
+        val = UseRegisterAtStart(instr->value());
+      }
+      if (StoreConstantValueAllowed(instr->key())) {
+        key = UseRegisterOrConstantAtStart(instr->key());
+      } else {
+        key = UseRegisterAtStart(instr->key());
+      }
+    }
     return new(zone()) LStoreKeyed(obj, key, val);
   }
 }

@@ -2407,9 +2438,14 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
       : UseRegisterAtStart(instr->object());
   }

-  LOperand* val = needs_write_barrier
-      ? UseTempRegister(instr->value())
-      : UseRegister(instr->value());
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (StoreConstantValueAllowed(instr->value())) {
+    val = UseRegisterOrConstant(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }

   // We only need a scratch register if we have a write barrier or we
   // have a store into the properties array (not in-object-property).

@@ -2480,6 +2516,7 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* context = UseAny(instr->context());
+  // TODO(mvstanton): why can't size be a constant if possible?
   LOperand* size = UseTempRegister(instr->size());
   LOperand* temp = TempRegister();
   LAllocate* result = new(zone()) LAllocate(context, size, temp);

@@ -2541,7 +2578,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
     ASSERT(info()->IsStub());
     CodeStubInterfaceDescriptor* descriptor =
         info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
-    Register reg = descriptor->register_params_[instr->index()];
+    int index = static_cast<int>(instr->index());
+    Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
     return DefineFixed(result, reg);
   }
 }

@@ -2575,9 +2613,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {

 LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
   LOperand* args = UseRegister(instr->arguments());
-  LOperand* length = UseTempRegister(instr->length());
-  LOperand* index = Use(instr->index());
+  LOperand* length;
+  LOperand* index;
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    length = UseRegisterOrConstant(instr->length());
+    index = UseOrConstant(instr->index());
+  } else {
+    length = UseTempRegister(instr->length());
+    index = Use(instr->index());
+  }
   return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
 }

17
deps/v8/src/ia32/lithium-ia32.h

@@ -278,6 +278,8 @@ class LInstruction: public ZoneObject {
   LOperand* FirstInput() { return InputAt(0); }
   LOperand* Output() { return HasResult() ? result() : NULL; }

+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
 #ifdef DEBUG
   void VerifyCall();
 #endif

@@ -378,6 +380,10 @@ class LInstructionGap: public LGap {
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }

   virtual bool ClobbersDoubleRegisters() const { return false; }
+  virtual bool HasInterestingComment(LCodeGen* gen) const {
+    return !IsRedundant();
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
 };

@@ -386,6 +392,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(int block_id) : block_id_(block_id) { }

+  virtual bool HasInterestingComment(LCodeGen* gen) const;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
   virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }

@@ -423,12 +430,14 @@ class LLabel: public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }

+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")

   virtual void PrintDataTo(StringStream* stream);

   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
   Label* label() { return &label_; }
   LLabel* replacement() const { return replacement_; }
   void set_replacement(LLabel* label) { replacement_ = label; }

@@ -442,6 +451,7 @@ class LLabel: public LGap {
 class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };

@@ -465,6 +475,7 @@ class LCallStub: public LTemplateInstruction<1, 1, 0> {
 class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
  public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };

@@ -1475,6 +1486,7 @@ class LReturn: public LTemplateInstruction<0, 3, 0> {
   LOperand* parameter_count() { return inputs_[2]; }

   DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+  DECLARE_HYDROGEN_ACCESSOR(Return)
 };

@@ -1853,7 +1865,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
   virtual void PrintDataTo(StringStream* stream);

   int arity() const { return hydrogen()->argument_count() - 1; }
-  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };

@@ -1936,7 +1947,6 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
   virtual void PrintDataTo(StringStream* stream);

-  Handle<JSFunction> target() const { return hydrogen()->target(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
 };

@@ -2604,8 +2614,6 @@ class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
   DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
-  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
 };

@@ -2673,6 +2681,7 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry();

+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")

   LOperand** SpilledRegisterArray() { return register_spills_; }

22
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -1984,8 +1984,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
   if (FLAG_log_timer_events) {
     FrameScope frame(this, StackFrame::MANUAL);
     PushSafepointRegisters();
-    PrepareCallCFunction(0, eax);
-    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+    PrepareCallCFunction(1, eax);
+    mov(Operand(esp, 0),
+        Immediate(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
     PopSafepointRegisters();
   }

@@ -1995,8 +1997,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
   if (FLAG_log_timer_events) {
     FrameScope frame(this, StackFrame::MANUAL);
     PushSafepointRegisters();
-    PrepareCallCFunction(0, eax);
-    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+    PrepareCallCFunction(1, eax);
+    mov(Operand(esp, 0),
+        Immediate(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
     PopSafepointRegisters();
   }
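Both timer-event hooks now receive the isolate, which is why the argument count moves from 0 to 1: with ia32 cdecl, each C argument occupies one pointer-size slot starting at esp, so after PrepareCallCFunction(1, eax) the isolate pointer is written to Operand(esp, 0) before CallCFunction issues the call. A scalar model of just the slot layout (no real ABI or MacroAssembler code; kPointerSize as on ia32):

    #include <cassert>

    static const int kPointerSize = 4;

    // Byte offset of C argument i relative to esp at the call instruction.
    int CArgumentSlotOffset(int i) { return i * kPointerSize; }

    int main() {
      // One argument: the Isolate*, written at esp + 0.
      assert(CArgumentSlotOffset(0) == 0);
      // A hypothetical second argument would sit one word higher.
      assert(CArgumentSlotOffset(1) == 4);
      return 0;
    }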
@@ -2086,7 +2090,8 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
   bind(&delete_allocated_handles);
   mov(Operand::StaticVariable(limit_address), edi);
   mov(edi, eax);
-  mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
+  mov(Operand(esp, 0),
+      Immediate(ExternalReference::isolate_address(isolate())));
   mov(eax, Immediate(delete_extensions));
   call(eax);
   mov(eax, edi);

@@ -2278,6 +2283,7 @@ void MacroAssembler::InvokeFunction(Register fun,

 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+                                    const ParameterCount& expected,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,

@@ -2289,7 +2295,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
   LoadHeapObject(edi, function);
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

-  ParameterCount expected(function->shared()->formal_parameter_count());
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.

@@ -2480,6 +2485,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {

 void MacroAssembler::LoadHeapObject(Register result,
                                     Handle<HeapObject> object) {
+  ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
   if (isolate()->heap()->InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
         isolate()->factory()->NewJSGlobalPropertyCell(object);

@@ -2491,6 +2497,7 @@ void MacroAssembler::LoadHeapObject(Register result,

 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+  ALLOW_HANDLE_DEREF(isolate(), "using raw address");
   if (isolate()->heap()->InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
         isolate()->factory()->NewJSGlobalPropertyCell(object);

@@ -2531,10 +2538,7 @@ void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
   and_(eax, kTopMask);
   shr(eax, 11);
   cmp(eax, Immediate(tos));
-  Label all_ok;
-  j(equal, &all_ok);
   Check(equal, "Unexpected FPU stack depth after instruction");
-  bind(&all_ok);
   fnclex();
   pop(eax);
 }
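The deleted Label/j(equal)/bind trio in VerifyX87StackDepth was redundant rather than wrong: Check(cc, msg) already emits a branch over its abort when the condition holds, so the extra jump merely duplicated that branch. A scalar model of the helper's contract (an invented C++ stand-in for the emitted code, not the MacroAssembler API):

    #include <cstdio>
    #include <cstdlib>

    // Models Check(cc, msg): the emitted code is effectively
    // j(cc, &ok); Abort(msg); bind(&ok); -- it only aborts on failure.
    void Check(bool condition, const char* msg) {
      if (!condition) {
        std::fprintf(stderr, "Check failed: %s\n", msg);
        std::abort();
      }
    }

    int main() {
      bool equal = true;
      // Old shape: if (equal) skip the Check entirely, then Check(equal, ...).
      // New shape: just call Check; it skips the abort itself when equal.
      Check(equal, "Unexpected FPU stack depth after instruction");
      std::puts("ok");
      return 0;
    }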

2
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -271,6 +271,7 @@ class MacroAssembler: public Assembler {
   void PushHeapObject(Handle<HeapObject> object);

   void LoadObject(Register result, Handle<Object> object) {
+    ALLOW_HANDLE_DEREF(isolate(), "heap object check");
     if (object->IsHeapObject()) {
       LoadHeapObject(result, Handle<HeapObject>::cast(object));
     } else {

@@ -320,6 +321,7 @@ class MacroAssembler: public Assembler {
                       CallKind call_kind);

   void InvokeFunction(Handle<JSFunction> function,
+                      const ParameterCount& expected,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,

20
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -401,7 +401,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
     // Set isolate.
     __ mov(Operand(esp, 3 * kPointerSize),
-           Immediate(ExternalReference::isolate_address()));
+           Immediate(ExternalReference::isolate_address(isolate())));
     // Set byte_length.
     __ mov(Operand(esp, 2 * kPointerSize), ebx);
     // Set byte_offset2.

@@ -417,7 +417,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
     {
       AllowExternalCallThatCantCauseGC scope(masm_);
       ExternalReference compare =
-          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+          ExternalReference::re_case_insensitive_compare_uc16(isolate());
       __ CallCFunction(compare, argument_count);
     }
     // Pop original values before reacting on result value.

@@ -745,7 +745,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
     Label stack_ok;

     ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(masm_->isolate());
+        ExternalReference::address_of_stack_limit(isolate());
     __ mov(ecx, esp);
     __ sub(ecx, Operand::StaticVariable(stack_limit));
     // Handle it if the stack pointer is already below the stack limit.

@@ -972,12 +972,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
     static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments, ebx);
     __ mov(Operand(esp, 2 * kPointerSize),
-           Immediate(ExternalReference::isolate_address()));
+           Immediate(ExternalReference::isolate_address(isolate())));
     __ lea(eax, Operand(ebp, kStackHighEnd));
     __ mov(Operand(esp, 1 * kPointerSize), eax);
     __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
     ExternalReference grow_stack =
-        ExternalReference::re_grow_stack(masm_->isolate());
+        ExternalReference::re_grow_stack(isolate());
     __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.

@@ -1002,10 +1002,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
   CodeDesc code_desc;
   masm_->GetCode(&code_desc);
   Handle<Code> code =
-      masm_->isolate()->factory()->NewCode(code_desc,
+      isolate()->factory()->NewCode(code_desc,
                                     Code::ComputeFlags(Code::REGEXP),
                                     masm_->CodeObject());
-  PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
   return Handle<HeapObject>::cast(code);
 }

@@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
   __ lea(eax, Operand(esp, -kPointerSize));
   __ mov(Operand(esp, 0 * kPointerSize), eax);
   ExternalReference check_stack_guard =
-      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+      ExternalReference::re_check_stack_guard_state(isolate());
   __ CallCFunction(check_stack_guard, num_arguments);
 }

@@ -1353,7 +1353,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
   // Check for preemption.
   Label no_preempt;
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm_->isolate());
+      ExternalReference::address_of_stack_limit(isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
   __ j(above, &no_preempt);

@@ -1366,7 +1366,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
 void RegExpMacroAssemblerIA32::CheckStackLimit() {
   Label no_stack_overflow;
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+      ExternalReference::address_of_regexp_stack_limit(isolate());
   __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
   __ j(above, &no_stack_overflow);

3
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@@ -30,6 +30,7 @@

 #include "ia32/assembler-ia32.h"
 #include "ia32/assembler-ia32-inl.h"
+#include "macro-assembler.h"

 namespace v8 {
 namespace internal {

@@ -196,6 +197,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
   // (ecx) and increments it by a word size.
   inline void Pop(Register target);

+  Isolate* isolate() const { return masm_->isolate(); }
+
   MacroAssembler* masm_;

   // Which mode to generate code for (ASCII or UC16).
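This one-line accessor is what lets every call site earlier in the file shorten masm_->isolate() to isolate(). The shape of the delegation, with stand-in types (not the V8 declarations):

    struct Isolate {};

    struct MacroAssembler {
      Isolate* isolate_;
      Isolate* isolate() const { return isolate_; }
    };

    class RegExpMacroAssemblerIA32 {
     public:
      explicit RegExpMacroAssemblerIA32(MacroAssembler* masm) : masm_(masm) {}
      // The new accessor: forward to the owning assembler's isolate.
      Isolate* isolate() const { return masm_->isolate(); }
     private:
      MacroAssembler* masm_;
    };

    int main() {
      MacroAssembler masm = {0};
      RegExpMacroAssemblerIA32 re(&masm);
      return re.isolate() == masm.isolate() ? 0 : 1;
    }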

