diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 10121a1132..a04811d3c1 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -19,6 +19,7 @@
 *~
 .cpplint-cache
 .d8_history
+bsuite
 d8
 d8_g
 shell
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index d25fc5af5c..1a927c4573 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -9,6 +9,7 @@ ARM Ltd.
 Hewlett-Packard Development Company, LP
 Igalia, S.L.
 Joyent, Inc.
+Bloomberg Finance L.P.
 
 Akinori MUSHA
 Alexander Botero-Lowry
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 5f64a3e923..e34464395d 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,35 @@
+2013-04-26: Version 3.18.4
+
+        Added a preliminary API for ES6 ArrayBuffers
+
+        Replaced qsort with std::sort. (Chromium issue 2639)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-04-24: Version 3.18.3
+
+        Exposed the GC under a name that is less collision prone than
+        window.gc. (issue 2641)
+
+        Do not emit double values at their use sites. (Chromium issue 234101)
+
+        Added methods to allow resuming execution after calling
+        TerminateExecution(). (issue 2361)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-04-22: Version 3.18.2
+
+        OS::MemMove/OS::MemCopy: Don't call through to generated code when
+        size == 0 to avoid prefetching invalid memory (Chromium issue 233500)
+
+        Removed heap snapshot size limit. (Chromium issue 232305)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-04-18: Version 3.18.1
 
         Removed SCons related files and deprecated test suite configurations.
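A minimal sketch of the "Replaced qsort with std::sort" ChangeLog item above; illustrative only, not code from this patch, and Entry/EntryLess are hypothetical names:

#include <algorithm>
#include <cstdlib>

struct Entry { int key; };

// Before: C-style comparator for qsort(), called through a function pointer.
static int CompareEntries(const void* a, const void* b) {
  return static_cast<const Entry*>(a)->key - static_cast<const Entry*>(b)->key;
}

// After: std::sort with a strict-weak-ordering predicate; type-safe and
// inlinable, which is the motivation for the switch.
static bool EntryLess(const Entry& a, const Entry& b) { return a.key < b.key; }

static void SortEntries(Entry* entries, size_t count) {
  std::sort(entries, entries + count, EntryLess);
}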
diff --git a/deps/v8/build/README.txt b/deps/v8/build/README.txt
index ea6287f7b7..5f242ada34 100644
--- a/deps/v8/build/README.txt
+++ b/deps/v8/build/README.txt
@@ -1,66 +1,9 @@
-This directory contains the V8 GYP files used to generate actual project files
-for different build systems.
+For build instructions, please refer to:
 
-This is currently work in progress but this is expected to replace the SCons
-based build system.
+https://code.google.com/p/v8/wiki/BuildingWithGYP
 
-To use this a checkout of GYP is needed inside this directory. From the root of
-the V8 project do the following:
+TL;DR version on *nix:
+$ make dependencies        # Only needed once.
+$ make ia32.release -j8
+$ make ia32.release.check  # Optionally: run tests.
-
-$ svn co http://gyp.googlecode.com/svn/trunk build/gyp
-
-Note for the command lines below that Debug is the default configuration,
-so specifying that on the command lines is not required.
-
-
-To generate Makefiles on Linux:
--------------------------------
-
-$ build/gyp_v8
-
-This will build makefiles for ia32, x64 and the ARM simulator with names
-Makefile-ia32, Makefile-x64 and Makefile-armu respectively.
-
-To build and run for ia32 in debug and release version do:
-
-$ make -f Makefile-ia32
-$ out/Debug/shell
-$ make -f Makefile-ia32 BUILDTYPE=Release
-$ out/Release/shell
-
-Change the makefile to build and run for the other architectures.
-
-
-To generate Xcode project files on Mac OS:
-------------------------------------------
-
-$ build/gyp_v8
-
-This will make an Xcode project for the ia32 architecture. To build and run do:
-
-$ xcodebuild -project build/all.xcodeproj
-$ samples/build/Debug/shell
-$ xcodebuild -project build/all.xcodeproj -configuration Release
-$ samples/build/Release/shell
-
-
-To generate Visual Studio solution and project files on Windows:
-----------------------------------------------------------------
-
-On Windows an additional third party component is required. This is cygwin in
-the same version as is used by the Chromium project. This can be checked out
-from the Chromium repository. From the root of the V8 project do the following:
-
-> svn co http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844 third_party/cygwin
-
-To run GYP Python is required and it is recommended to use the same version as
-is used by the Chromium project. This can also be checked out from the Chromium
-repository. From the root of the V8 project do the following:
-
-> svn co http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111 third_party/python_26
-
-Now generate Visual Studio solution and project files for the ia32 architecture:
-
-> third_party\python_26\python build/gyp_v8
-
-Now open build\All.sln in Visual Studio.
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 8028b3eecb..0b1f397268 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -454,6 +454,15 @@
       }],
       ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
          or OS=="android"', {
+        'cflags!': [
+          '-O2',
+          '-Os',
+        ],
+        'cflags': [
+          '-fdata-sections',
+          '-ffunction-sections',
+          '-O3',
+        ],
         'conditions': [
           [ 'gcc_version==44 and clang==0', {
             'cflags': [
diff --git a/deps/v8/build/gyp_v8 b/deps/v8/build/gyp_v8
index bf81ad34dc..73a66a72fd 100755
--- a/deps/v8/build/gyp_v8
+++ b/deps/v8/build/gyp_v8
@@ -32,6 +32,7 @@
 
 import glob
 import os
+import platform
 import shlex
 import sys
 
@@ -43,9 +44,6 @@ if __name__ == '__main__':
   script_dir = os.path.dirname(__file__)
   v8_root = '.'
 
-sys.path.insert(0, os.path.join(v8_root, 'tools'))
-import utils
-
 sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
 import gyp
 
@@ -164,6 +162,6 @@ if __name__ == '__main__':
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
-  if utils.GuessOS() == 'linux':
+  if platform.system() == 'Linux':
     gyp_args.append('--generator-output=out')
   run_gyp(gyp_args)
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 5c5c7a946c..bc50b6f42b 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -554,6 +554,11 @@ class V8EXPORT HeapProfiler {
   /** Returns memory used for profiler internal data and snapshots. */
   size_t GetProfilerMemorySize();
 
+  /**
+   * Sets a RetainedObjectInfo for an object group (see V8::SetObjectGroupId).
+   */
+  void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+
  private:
   HeapProfiler();
   ~HeapProfiler();
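A minimal sketch (not part of the patch) of how an embedder might use the new SetRetainedObjectInfo() hook, paired with the object-group ids added to v8.h further down. EmbedderNodeInfo, the hash, and the id value are hypothetical:

class EmbedderNodeInfo : public v8::RetainedObjectInfo {
 public:
  virtual void Dispose() { delete this; }
  virtual bool IsEquivalent(RetainedObjectInfo* other) {
    return GetHash() == other->GetHash();
  }
  virtual intptr_t GetHash() { return 1; }  // One group, one hash.
  virtual const char* GetLabel() { return "EmbedderNode"; }
};

void AnnotateGroup(v8::Isolate* isolate) {
  v8::UniqueId group_id(1);  // Must match the id passed to SetObjectGroupId.
  isolate->GetHeapProfiler()->SetRetainedObjectInfo(group_id,
                                                    new EmbedderNodeInfo());
}

The profiler takes ownership of the info object and uses its label when the new kGCCallbackFlagConstructRetainedObjectInfos path asks for snapshot annotations.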
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e1c020310b..f300884965 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -92,6 +92,14 @@
 #define V8_DEPRECATED(declarator) declarator
 #endif
 
+#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+  #define V8_UNLIKELY(condition) __builtin_expect((condition), 0)
+  #define V8_LIKELY(condition) __builtin_expect((condition), 1)
+#else
+  #define V8_UNLIKELY(condition) (condition)
+  #define V8_LIKELY(condition) (condition)
+#endif
+
 /**
  * The v8 JavaScript engine.
  */
@@ -145,6 +153,31 @@ class Object;
 }
 
 
+/**
+ * General purpose unique identifier.
+ */
+class UniqueId {
+ public:
+  explicit UniqueId(intptr_t data)
+      : data_(data) {}
+
+  bool operator==(const UniqueId& other) const {
+    return data_ == other.data_;
+  }
+
+  bool operator!=(const UniqueId& other) const {
+    return data_ != other.data_;
+  }
+
+  bool operator<(const UniqueId& other) const {
+    return data_ < other.data_;
+  }
+
+ private:
+  intptr_t data_;
+};
+
+
 // --- Weak Handles ---
 
 
@@ -376,6 +409,14 @@ template <class T> class Persistent : public Handle<T> {
   template <class S> V8_INLINE(Persistent(S* that)) : Handle<T>(that) { }
 
+  /**
+   * A constructor that creates a new global cell pointing to that. In contrast
+   * to the copy constructor, this creates a new persistent handle which needs
+   * to be separately disposed.
+   */
+  template <class S> V8_INLINE(Persistent(Isolate* isolate, Handle<S> that))
+      : Handle<T>(New(isolate, that)) { }
+
   /**
    * "Casts" a plain handle which is known to be a persistent handle
    * to a persistent handle.
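A minimal sketch (not part of the patch) of the new two-argument Persistent constructor; the handle names are hypothetical:

void KeepAlive(v8::Isolate* isolate, v8::Handle<v8::Object> local) {
  // Creates a fresh global cell, unlike copying, which would alias the same
  // cell. The handle survives the current HandleScope and must be disposed
  // explicitly when no longer needed.
  v8::Persistent<v8::Object> strong(isolate, local);

  // ... use *strong across scopes/callbacks ...

  strong.Dispose(isolate);  // Release the global cell.
}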
@@ -1142,12 +1183,10 @@ class V8EXPORT String : public Primitive {
   int Utf8Length() const;
 
   /**
-   * A fast conservative check for non-ASCII characters.  May
-   * return true even for ASCII strings, but if it returns
-   * false you can be sure that all characters are in the range
-   * 0-127.
+   * This function is no longer useful.
    */
-  bool MayContainNonAscii() const;
+  // TODO(dcarney): deprecate
+  V8_INLINE(bool MayContainNonAscii()) const { return true; }
 
   /**
    * Returns whether this string contains only one byte data.
@@ -1326,22 +1365,48 @@ class V8EXPORT String : public Primitive {
 
   V8_INLINE(static String* Cast(v8::Value* obj));
 
+  // TODO(dcarney): deprecate
   /**
    * Allocates a new string from either UTF-8 encoded or ASCII data.
    * The second parameter 'length' gives the buffer length. If omitted,
    * the function calls 'strlen' to determine the buffer length.
    */
-  static Local<String> New(const char* data, int length = -1);
+  V8_INLINE(static Local<String> New(const char* data, int length = -1));
 
+  // TODO(dcarney): deprecate
   /** Allocates a new string from 16-bit character codes.*/
-  static Local<String> New(const uint16_t* data, int length = -1);
+  V8_INLINE(static Local<String> New(const uint16_t* data, int length = -1));
 
+  // TODO(dcarney): deprecate
   /**
    * Creates an internalized string (historically called a "symbol",
    * not to be confused with ES6 symbols). Returns one if it exists already.
-   * TODO(rossberg): Deprecate me when the new string API is here.
    */
-  static Local<String> NewSymbol(const char* data, int length = -1);
+  V8_INLINE(static Local<String> NewSymbol(const char* data, int length = -1));
+
+  enum NewStringType {
+    kNormalString, kInternalizedString, kUndetectableString
+  };
+
+  /** Allocates a new string from UTF-8 data.*/
+  static Local<String> NewFromUtf8(Isolate* isolate,
+                                   const char* data,
+                                   NewStringType type = kNormalString,
+                                   int length = -1);
+
+  /** Allocates a new string from Latin-1 data.*/
+  static Local<String> NewFromOneByte(
+      Isolate* isolate,
+      const uint8_t* data,
+      NewStringType type = kNormalString,
+      int length = -1);
+
+  /** Allocates a new string from UTF-16 data.*/
+  static Local<String> NewFromTwoByte(
+      Isolate* isolate,
+      const uint16_t* data,
+      NewStringType type = kNormalString,
+      int length = -1);
 
   /**
    * Creates a new string by concatenating the left and the right strings
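A minimal sketch (not part of the patch) of the new isolate-aware string factories declared above; the literals are arbitrary:

v8::Local<v8::String> MakeStrings(v8::Isolate* isolate) {
  // UTF-8, length derived via strlen:
  v8::Local<v8::String> a = v8::String::NewFromUtf8(isolate, "hello");
  // Internalized string (the old NewSymbol behavior):
  v8::Local<v8::String> key =
      v8::String::NewFromUtf8(isolate, "id", v8::String::kInternalizedString);
  // UTF-16 with an explicit length:
  const uint16_t wide[] = { 0x48, 0x69 };  // "Hi"
  v8::Local<v8::String> b = v8::String::NewFromTwoByte(
      isolate, wide, v8::String::kNormalString, 2);
  v8::Local<v8::String> parts = v8::String::Concat(a, b);
  return v8::String::Concat(key, parts);
}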
@@ -1396,11 +1461,15 @@ class V8EXPORT String : public Primitive {
    */
   bool CanMakeExternal();
 
+  // TODO(dcarney): deprecate
   /** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
-  static Local<String> NewUndetectable(const char* data, int length = -1);
+  V8_INLINE(
+      static Local<String> NewUndetectable(const char* data, int length = -1));
 
+  // TODO(dcarney): deprecate
   /** Creates an undetectable string from the supplied 16-bit character codes.*/
-  static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
+  V8_INLINE(static Local<String> NewUndetectable(
+      const uint16_t* data, int length = -1));
 
   /**
    * Converts an object to a UTF-8-encoded character array. Useful if
@@ -1940,6 +2009,43 @@ class V8EXPORT Function : public Object {
 };
 
 
+/**
+ * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
+ * This API is experimental and may change significantly.
+ */
+class V8EXPORT ArrayBuffer : public Object {
+ public:
+  /**
+   * Data length in bytes.
+   */
+  size_t ByteLength() const;
+  /**
+   * Raw pointer to the array buffer data
+   */
+  void* Data() const;
+
+  /**
+   * Create a new ArrayBuffer. Allocate |byte_length| bytes.
+   * Allocated memory will be owned by a created ArrayBuffer and
+   * will be deallocated when it is garbage-collected.
+   */
+  static Local<ArrayBuffer> New(size_t byte_length);
+
+  /**
+   * Create a new ArrayBuffer over an existing memory block.
+   * The memory block will not be reclaimed when a created ArrayBuffer
+   * is garbage-collected.
+   */
+  static Local<ArrayBuffer> New(void* data, size_t byte_length);
+
+  V8_INLINE(static ArrayBuffer* Cast(Value* obj));
+
+ private:
+  ArrayBuffer();
+  static void CheckCast(Value* obj);
+};
+
+
 /**
  * An instance of the built-in Date constructor (ECMA-262, 15.9).
  */
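A minimal sketch (not part of the patch) of the two ArrayBuffer factories; note the class comment marks the API as experimental at this point:

#include <string.h>  // For memset.

void UseArrayBuffers() {
  // V8-owned backing store, freed when the buffer is collected:
  v8::Local<v8::ArrayBuffer> owned = v8::ArrayBuffer::New(1024);

  // Externally owned block; V8 will *not* free it, so it must outlive the
  // buffer and be released by the embedder:
  static char backing[256];
  v8::Local<v8::ArrayBuffer> external =
      v8::ArrayBuffer::New(backing, sizeof(backing));

  memset(owned->Data(), 0, owned->ByteLength());
}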
@@ -2953,7 +3059,8 @@ enum GCType {
 
 enum GCCallbackFlags {
   kNoGCCallbackFlags = 0,
-  kGCCallbackFlagCompacted = 1 << 0
+  kGCCallbackFlagCompacted = 1 << 0,
+  kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1
 };
 
 typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
@@ -3110,6 +3217,39 @@ class V8EXPORT Isolate {
   /** Returns the context that is on the top of the stack. */
   Local<Context> GetCurrentContext();
 
+  /**
+   * Allows the host application to group objects together. If one
+   * object in the group is alive, all objects in the group are alive.
+   * After each garbage collection, object groups are removed. It is
+   * intended to be used in the before-garbage-collection callback
+   * function, for instance to simulate DOM tree connections among JS
+   * wrapper objects. Object groups for all dependent handles need to
+   * be provided for kGCTypeMarkSweepCompact collections, for all other
+   * garbage collection types it is sufficient to provide object groups
+   * for partially dependent handles only.
+   */
+  void SetObjectGroupId(const Persistent<Value>& object,
+                        UniqueId id);
+
+  /**
+   * Allows the host application to declare implicit references from an object
+   * group to an object. If the objects of the object group are alive, the
+   * child object is alive too. After each garbage collection, all implicit
+   * references are removed. It is intended to be used in the
+   * before-garbage-collection callback function.
+   */
+  void SetReferenceFromGroup(UniqueId id,
+                             const Persistent<Value>& child);
+
+  /**
+   * Allows the host application to declare implicit references from an object
+   * to another object. If the parent object is alive, the child object is
+   * alive too. After each garbage collection, all implicit references are
+   * removed. It is intended to be used in the before-garbage-collection
+   * callback function.
+   */
+  void SetReference(const Persistent<Object>& parent,
+                    const Persistent<Value>& child);
+
  private:
   Isolate();
   Isolate(const Isolate&);
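A minimal sketch (not part of the patch) of the new per-isolate grouping API, meant to run from a before-GC callback; the wrapper handles and the id derivation are hypothetical:

void GroupWrappers(v8::Isolate* isolate,
                   const v8::Persistent<v8::Value>& wrapper_a,
                   const v8::Persistent<v8::Value>& wrapper_b,
                   const v8::Persistent<v8::Value>& shared_child) {
  v8::UniqueId group(reinterpret_cast<intptr_t>(&wrapper_a));
  // Either wrapper being reachable keeps the whole group alive...
  isolate->SetObjectGroupId(wrapper_a, group);
  isolate->SetObjectGroupId(wrapper_b, group);
  // ...and the group keeps the shared child alive.
  isolate->SetReferenceFromGroup(group, shared_child);
}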
@@ -3514,6 +3654,8 @@ class V8EXPORT V8 {
    * for partially dependent handles only.
    * See v8-profiler.h for RetainedObjectInfo interface description.
    */
+  // TODO(marja): deprecate AddObjectGroup. Use Isolate::SetObjectGroupId and
+  // HeapProfiler::SetRetainedObjectInfo instead.
   static void AddObjectGroup(Persistent<Value>* objects,
                              size_t length,
                              RetainedObjectInfo* info = NULL);
@@ -3529,6 +3671,8 @@ class V8EXPORT V8 {
    * are removed. It is intended to be used in the before-garbage-collection
    * callback function.
    */
+  // TODO(marja): Deprecate AddImplicitReferences. Use
+  // Isolate::SetReferenceFromGroup instead.
   static void AddImplicitReferences(Persistent<Object> parent,
                                     Persistent<Value>* children,
                                     size_t length);
@@ -3675,6 +3819,24 @@ class V8EXPORT V8 {
    */
   static bool IsExecutionTerminating(Isolate* isolate = NULL);
 
+  /**
+   * Resume execution capability in the given isolate, whose execution
+   * was previously forcefully terminated using TerminateExecution().
+   *
+   * When execution is forcefully terminated using TerminateExecution(),
+   * the isolate can not resume execution until all JavaScript frames
+   * have propagated the uncatchable exception which is generated. This
+   * method allows the program embedding the engine to handle the
+   * termination event and resume execution capability, even if
+   * JavaScript frames remain on the stack.
+   *
+   * This method can be used by any thread even if that thread has not
+   * acquired the V8 lock with a Locker object.
+   *
+   * \param isolate The isolate in which to resume execution capability.
+   */
+  static void CancelTerminateExecution(Isolate* isolate);
+
   /**
    * Releases any resources used by v8 and stops any utility threads
    * that may be running. Note that disposing v8 is permanent, it
@@ -3785,20 +3947,29 @@ class V8EXPORT TryCatch {
   bool HasCaught() const;
 
   /**
-   * For certain types of exceptions, it makes no sense to continue
-   * execution.
+   * For certain types of exceptions, it makes no sense to continue execution.
    *
-   * Currently, the only type of exception that can be caught by a
-   * TryCatch handler and for which it does not make sense to continue
-   * is termination exception.  Such exceptions are thrown when the
-   * TerminateExecution methods are called to terminate a long-running
-   * script.
-   *
-   * If CanContinue returns false, the correct action is to perform
-   * any C++ cleanup needed and then return.
+   * If CanContinue returns false, the correct action is to perform any C++
+   * cleanup needed and then return. If CanContinue returns false and
+   * HasTerminated returns true, it is possible to call
+   * CancelTerminateExecution in order to continue calling into the engine.
    */
   bool CanContinue() const;
 
+  /**
+   * Returns true if an exception has been caught due to script execution
+   * being terminated.
+   *
+   * There is no JavaScript representation of an execution termination
+   * exception. Such exceptions are thrown when the TerminateExecution
+   * methods are called to terminate a long-running script.
+   *
+   * If such an exception has been thrown, HasTerminated will return true,
+   * indicating that it is possible to call CancelTerminateExecution in order
+   * to continue calling into the engine.
+   */
+  bool HasTerminated() const;
+
   /**
    * Throws the exception caught by this TryCatch in a way that avoids
    * it being caught again by this same TryCatch. As with ThrowException
@@ -3874,6 +4045,7 @@ class V8EXPORT TryCatch {
   bool can_continue_ : 1;
   bool capture_message_ : 1;
   bool rethrow_ : 1;
+  bool has_terminated_ : 1;
 
   friend class v8::internal::Isolate;
 };
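A minimal sketch (not part of the patch) of the termination workflow these declarations enable; the script handle is assumed to come from the embedder:

bool RunWithRecovery(v8::Isolate* isolate, v8::Handle<v8::Script> script) {
  v8::TryCatch try_catch;
  script->Run();
  if (try_catch.HasCaught() && try_catch.HasTerminated()) {
    // Execution was killed via V8::TerminateExecution(); without this call
    // the isolate would keep throwing until all JS frames had unwound.
    v8::V8::CancelTerminateExecution(isolate);
    return false;  // The caller may retry or schedule other work.
  }
  return true;
}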
#include "../include/v8-debug.h" #include "../include/v8-profiler.h" #include "../include/v8-testing.h" @@ -52,6 +52,7 @@ #include "profile-generator-inl.h" #include "property-details.h" #include "property.h" +#include "runtime.h" #include "runtime-profiler.h" #include "scanner-character-streams.h" #include "snapshot.h" @@ -63,11 +64,9 @@ #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr)) -#define ENTER_V8(isolate) \ - ASSERT((isolate)->IsInitialized()); \ - i::VMState __state__((isolate), i::OTHER) -#define LEAVE_V8(isolate) \ - i::VMState __state__((isolate), i::EXTERNAL) +#define ENTER_V8(isolate) \ + ASSERT((isolate)->IsInitialized()); \ + i::VMState __state__((isolate)) namespace v8 { @@ -131,7 +130,7 @@ static void DefaultFatalErrorHandler(const char* location, const char* message) { i::Isolate* isolate = i::Isolate::Current(); if (isolate->IsInitialized()) { - i::VMState __state__(isolate, i::OTHER); + i::VMState state(isolate); API_Fatal(location, message); } else { API_Fatal(location, message); @@ -216,14 +215,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { i::V8::SetFatalError(); FatalErrorCallback callback = GetFatalErrorHandler(); const char* message = "Allocation failed - process out of memory"; - { - if (isolate->IsInitialized()) { - LEAVE_V8(isolate); - callback(location, message); - } else { - callback(location, message); - } - } + callback(location, message); // If the callback returns, we stop execution. UNREACHABLE(); } @@ -1909,7 +1901,8 @@ v8::TryCatch::TryCatch() is_verbose_(false), can_continue_(true), capture_message_(true), - rethrow_(false) { + rethrow_(false), + has_terminated_(false) { isolate_->RegisterTryCatchHandler(this); } @@ -1937,6 +1930,11 @@ bool v8::TryCatch::CanContinue() const { } +bool v8::TryCatch::HasTerminated() const { + return has_terminated_; +} + + v8::Handle v8::TryCatch::ReThrow() { if (!HasCaught()) return v8::Local(); rethrow_ = true; @@ -2748,6 +2746,15 @@ void v8::Array::CheckCast(Value* that) { } +void v8::ArrayBuffer::CheckCast(Value* that) { + if (IsDeadCheck(i::Isolate::Current(), "v8::ArrayBuffer::Cast()")) return; + i::Handle obj = Utils::OpenHandle(that); + ApiCheck(obj->IsJSArrayBuffer(), + "v8::ArrayBuffer::Cast()", + "Could not convert to ArrayBuffer"); +} + + void v8::Date::CheckCast(v8::Value* that) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::Date::Cast()")) return; @@ -2984,7 +2991,7 @@ bool Value::StrictEquals(Handle that) const { double x = obj->Number(); double y = other->Number(); // Must check explicitly for NaN:s on Windows, but -0 works fine. - return x == y && !isnan(x) && !isnan(y); + return x == y && !std::isnan(x) && !std::isnan(y); } else if (*obj == *other) { // Also covers Booleans. return true; } else if (obj->IsSmi()) { @@ -4048,14 +4055,6 @@ int String::Length() const { return str->length(); } -bool String::MayContainNonAscii() const { - i::Handle str = Utils::OpenHandle(this); - if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) { - return false; - } - return !str->HasOnlyAsciiChars(); -} - bool String::IsOneByte() const { i::Handle str = Utils::OpenHandle(this); @@ -4509,25 +4508,6 @@ int String::WriteAscii(char* buffer, FlattenString(str); // Flatten the string for efficiency. } - if (str->HasOnlyAsciiChars()) { - // WriteToFlat is faster than using the StringCharacterStream. 
- if (length == -1) length = str->length() + 1; - int len = i::Min(length, str->length() - start); - i::String::WriteToFlat(*str, - reinterpret_cast(buffer), - start, - start + len); - if (!(options & PRESERVE_ASCII_NULL)) { - for (int i = 0; i < len; i++) { - if (buffer[i] == '\0') buffer[i] = ' '; - } - } - if (!(options & NO_NULL_TERMINATION) && length > len) { - buffer[len] = '\0'; - } - return len; - } - int end = length; if ((length == -1) || (length > str->length() - start)) { end = str->length() - start; @@ -5283,78 +5263,131 @@ Local v8::String::Empty() { } -Local v8::String::New(const char* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::New()"); - LOG_API(isolate, "String::New(char)"); - if (length == 0) return Empty(); - ENTER_V8(isolate); - if (length == -1) length = i::StrLength(data); - i::Handle result = - isolate->factory()->NewStringFromUtf8( - i::Vector(data, length)); - return Utils::ToLocal(result); -} - +// anonymous namespace for string creation helper functions +namespace { -Local v8::String::Concat(Handle left, Handle right) { - i::Handle left_string = Utils::OpenHandle(*left); - i::Isolate* isolate = left_string->GetIsolate(); - EnsureInitializedForIsolate(isolate, "v8::String::New()"); - LOG_API(isolate, "String::New(char)"); - ENTER_V8(isolate); - i::Handle right_string = Utils::OpenHandle(*right); - i::Handle result = isolate->factory()->NewConsString(left_string, - right_string); - return Utils::ToLocal(result); +inline int StringLength(const char* string) { + return i::StrLength(string); } -Local v8::String::NewUndetectable(const char* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()"); - LOG_API(isolate, "String::NewUndetectable(char)"); - ENTER_V8(isolate); - if (length == -1) length = i::StrLength(data); - i::Handle result = - isolate->factory()->NewStringFromUtf8( - i::Vector(data, length)); - result->MarkAsUndetectable(); - return Utils::ToLocal(result); +inline int StringLength(const uint8_t* string) { + return i::StrLength(reinterpret_cast(string)); } -static int TwoByteStringLength(const uint16_t* data) { +inline int StringLength(const uint16_t* string) { int length = 0; - while (data[length] != '\0') length++; + while (string[length] != '\0') + length++; return length; } -Local v8::String::New(const uint16_t* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::New()"); - LOG_API(isolate, "String::New(uint16_)"); - if (length == 0) return Empty(); +inline i::Handle NewString(i::Factory* factory, + String::NewStringType type, + i::Vector string) { + if (type ==String::kInternalizedString) { + return factory->InternalizeUtf8String(string); + } + return factory->NewStringFromUtf8(string); +} + + +inline i::Handle NewString(i::Factory* factory, + String::NewStringType type, + i::Vector string) { + if (type == String::kInternalizedString) { + return factory->InternalizeOneByteString(string); + } + return factory->NewStringFromOneByte(string); +} + + +inline i::Handle NewString(i::Factory* factory, + String::NewStringType type, + i::Vector string) { + if (type == String::kInternalizedString) { + return factory->InternalizeTwoByteString(string); + } + return factory->NewStringFromTwoByte(string); +} + + +template +inline Local NewString(Isolate* v8_isolate, + const char* location, + const char* env, + const Char* data, + 
String::NewStringType type, + int length) { + i::Isolate* isolate = reinterpret_cast(v8_isolate); + EnsureInitializedForIsolate(isolate, location); + LOG_API(isolate, env); + if (length == 0 && type != String::kUndetectableString) { + return String::Empty(); + } ENTER_V8(isolate); - if (length == -1) length = TwoByteStringLength(data); - i::Handle result = - isolate->factory()->NewStringFromTwoByte( - i::Vector(data, length)); + if (length == -1) length = StringLength(data); + i::Handle result = NewString( + isolate->factory(), type, i::Vector(data, length)); + if (type == String::kUndetectableString) { + result->MarkAsUndetectable(); + } return Utils::ToLocal(result); } +} // anonymous namespace -Local v8::String::NewUndetectable(const uint16_t* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()"); - LOG_API(isolate, "String::NewUndetectable(uint16_)"); + +Local String::NewFromUtf8(Isolate* isolate, + const char* data, + NewStringType type, + int length) { + return NewString(isolate, + "v8::String::NewFromUtf8()", + "String::NewFromUtf8", + data, + type, + length); +} + + +Local String::NewFromOneByte(Isolate* isolate, + const uint8_t* data, + NewStringType type, + int length) { + return NewString(isolate, + "v8::String::NewFromOneByte()", + "String::NewFromOneByte", + data, + type, + length); +} + + +Local String::NewFromTwoByte(Isolate* isolate, + const uint16_t* data, + NewStringType type, + int length) { + return NewString(isolate, + "v8::String::NewFromTwoByte()", + "String::NewFromTwoByte", + data, + type, + length); +} + + +Local v8::String::Concat(Handle left, Handle right) { + i::Handle left_string = Utils::OpenHandle(*left); + i::Isolate* isolate = left_string->GetIsolate(); + EnsureInitializedForIsolate(isolate, "v8::String::New()"); + LOG_API(isolate, "String::New(char)"); ENTER_V8(isolate); - if (length == -1) length = TwoByteStringLength(data); - i::Handle result = - isolate->factory()->NewStringFromTwoByte( - i::Vector(data, length)); - result->MarkAsUndetectable(); + i::Handle right_string = Utils::OpenHandle(*right); + i::Handle result = isolate->factory()->NewConsString(left_string, + right_string); return Utils::ToLocal(result); } @@ -5568,7 +5601,7 @@ Local v8::Date::New(double time) { i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::Date::New()"); LOG_API(isolate, "Date::New"); - if (isnan(time)) { + if (std::isnan(time)) { // Introduce only canonical NaN value into the VM, to avoid signaling NaNs. 
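A minimal sketch (not part of the patch) of why the three factories implemented above are distinct; the byte values are illustrative:

void EncodingMatters(v8::Isolate* isolate) {
  const uint8_t latin1[] = { 0xE9, 0x00 };  // 'e' with acute accent, Latin-1.
  // One-byte path: 0xE9 is taken verbatim as U+00E9.
  v8::Local<v8::String> a = v8::String::NewFromOneByte(isolate, latin1);
  // UTF-8 path: the same character needs the two-byte sequence 0xC3 0xA9.
  v8::Local<v8::String> b = v8::String::NewFromUtf8(isolate, "\xC3\xA9");
  // a->Equals(b) is true and both have Length() == 1; feeding the raw Latin-1
  // byte to the UTF-8 factory instead would produce a malformed string.
}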
@@ -5568,7 +5601,7 @@ Local<v8::Value> v8::Date::New(double time) {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::Date::New()");
   LOG_API(isolate, "Date::New");
-  if (isnan(time)) {
+  if (std::isnan(time)) {
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
     time = i::OS::nan_value();
   }
@@ -5733,15 +5766,43 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
 }
 
 
-Local<String> v8::String::NewSymbol(const char* data, int length) {
+size_t v8::ArrayBuffer::ByteLength() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
+  i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+  return static_cast<size_t>(obj->byte_length()->Number());
+}
+
+
+void* v8::ArrayBuffer::Data() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  if (IsDeadCheck(isolate, "v8::ArrayBuffer::Data()")) return 0;
+  i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+  return obj->backing_store();
+}
+
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()");
-  LOG_API(isolate, "String::NewSymbol(char)");
+  EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(size_t)");
+  LOG_API(isolate, "v8::ArrayBuffer::New(size_t)");
   ENTER_V8(isolate);
-  if (length == -1) length = i::StrLength(data);
-  i::Handle<i::String> result = isolate->factory()->InternalizeUtf8String(
-      i::Vector<const char>(data, length));
-  return Utils::ToLocal(result);
+  i::Handle<i::JSArrayBuffer> obj =
+      isolate->factory()->NewJSArrayBuffer();
+  i::Runtime::SetupArrayBufferAllocatingData(isolate, obj, byte_length);
+  return Utils::ToLocal(obj);
+}
+
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(void*, size_t)");
+  LOG_API(isolate, "v8::ArrayBuffer::New(void*, size_t)");
+  ENTER_V8(isolate);
+  i::Handle<i::JSArrayBuffer> obj =
+      isolate->factory()->NewJSArrayBuffer();
+  i::Runtime::SetupArrayBuffer(isolate, obj, data, byte_length);
+  return Utils::ToLocal(obj);
 }
 
 
@@ -5772,7 +5833,7 @@ Local<v8::Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
 Local<Number> v8::Number::New(double value) {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::Number::New()");
-  if (isnan(value)) {
+  if (std::isnan(value)) {
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
     value = i::OS::nan_value();
   }
@@ -5981,6 +6042,31 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() {
 }
 
 
+void Isolate::SetObjectGroupId(const Persistent<Value>& object,
+                               UniqueId id) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+  internal_isolate->global_handles()->SetObjectGroupId(
+      reinterpret_cast<i::Object**>(*object), id);
+}
+
+
+void Isolate::SetReferenceFromGroup(UniqueId id,
+                                    const Persistent<Value>& object) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+  internal_isolate->global_handles()
+      ->SetReferenceFromGroup(id, reinterpret_cast<i::Object**>(*object));
+}
+
+
+void Isolate::SetReference(const Persistent<Object>& parent,
+                           const Persistent<Value>& child) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+  internal_isolate->global_handles()->SetReference(
+      i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
+      reinterpret_cast<i::Object**>(*child));
+}
+
+
 void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
@@ -6116,6 +6202,12 @@ bool V8::IsExecutionTerminating(Isolate* isolate) {
 }
 
 
+void V8::CancelTerminateExecution(Isolate* isolate) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i_isolate->stack_guard()->CancelTerminateExecution();
+}
+
+
 Isolate* Isolate::GetCurrent() {
   i::Isolate* isolate = i::Isolate::UncheckedCurrent();
   return reinterpret_cast<Isolate*>(isolate);
@@ -7174,6 +7266,12 @@ size_t HeapProfiler::GetProfilerMemorySize() {
 }
 
 
+void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
+                                         RetainedObjectInfo* info) {
+  reinterpret_cast<i::HeapProfiler*>(this)->SetRetainedObjectInfo(id, info);
+}
+
+
 v8::Testing::StressType internal::Testing::stress_type_ =
     v8::Testing::kStressTypeOpt;
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 0cd16f1f01..f62541dc03 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -170,6 +170,7 @@ class RegisteredExtension {
   V(RegExp, JSRegExp)                            \
   V(Object, JSObject)                            \
   V(Array, JSArray)                              \
+  V(ArrayBuffer, JSArrayBuffer)                  \
   V(String, String)                              \
   V(Symbol, Symbol)                              \
   V(Script, Object)                              \
@@ -205,6 +206,8 @@ class Utils {
       v8::internal::Handle<v8::internal::JSObject> obj);
   static inline Local<Array> ToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
+  static inline Local<ArrayBuffer> ToLocal(
+      v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
   static inline Local<Message> MessageToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<StackTrace> StackTraceToLocal(
@@ -275,6 +278,7 @@ MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
 MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
 MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
 MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
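A minimal sketch (not part of the patch) tying the pieces together: a GC prologue callback that re-declares object groups each cycle, honoring the new kGCCallbackFlagConstructRetainedObjectInfos flag. EmbedderWrapper, FirstWrapper() and AttachRetainedObjectInfos() are hypothetical embedder helpers:

static void BuildObjectGroups(v8::GCType type, v8::GCCallbackFlags flags) {
  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  // Groups are cleared after every collection, so they must be re-declared
  // in each prologue; see the v8.h comments above.
  for (EmbedderWrapper* w = FirstWrapper(); w != NULL; w = w->next) {
    isolate->SetObjectGroupId(w->persistent_handle, v8::UniqueId(w->group));
  }
  if (flags & v8::kGCCallbackFlagConstructRetainedObjectInfos) {
    // Attach snapshot labels only when the profiler asks for them.
    AttachRetainedObjectInfos(isolate);
  }
}

// Registered once at startup, e.g. only for full collections:
//   v8::V8::AddGCPrologueCallback(BuildObjectGroups,
//                                 v8::kGCTypeMarkSweepCompact);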
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index b473c6b52b..b39d9ee122 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -305,16 +305,20 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
 // See assembler-arm-inl.h for inlined constructors
 
 Operand::Operand(Handle<Object> handle) {
+#ifdef DEBUG
+  Isolate* isolate = Isolate::Current();
+#endif
+  ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address");
   rm_ = no_reg;
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
-  ASSERT(!HEAP->InNewSpace(obj));
+  ASSERT(!isolate->heap()->InNewSpace(obj));
   if (obj->IsHeapObject()) {
     imm32_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
   } else {
     // no relocation needed
-    imm32_ = reinterpret_cast(obj);
+    imm32_ = reinterpret_cast<intptr_t>(obj);
     rmode_ = RelocInfo::NONE32;
   }
 }
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index ebb9e1235f..3cc2797e94 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -306,8 +306,7 @@ static void AllocateJSArray(MacroAssembler* masm,
 // entering the generic code. In both cases argc in r0 needs to be preserved.
 // Both registers are preserved by this code so no need to differentiate between
 // construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
-                            Label* call_generic_code) {
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
   Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
       has_non_smi_element, finish, cant_transition_map, not_double;
@@ -532,7 +531,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
@@ -550,51 +549,17 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
     __ Assert(ne, "Unexpected initial map for Array function");
     __ CompareObjectType(r3, r3, r4, MAP_TYPE);
     __ Assert(eq, "Unexpected initial map for Array function");
-
-    if (FLAG_optimize_constructed_arrays) {
-      // We should either have undefined in r2 or a valid jsglobalpropertycell
-      Label okay_here;
-      Handle<Object> undefined_sentinel(
-          masm->isolate()->heap()->undefined_value(), masm->isolate());
-      Handle<Map> global_property_cell_map(
-          masm->isolate()->heap()->global_property_cell_map());
-      __ cmp(r2, Operand(undefined_sentinel));
-      __ b(eq, &okay_here);
-      __ ldr(r3, FieldMemOperand(r2, 0));
-      __ cmp(r3, Operand(global_property_cell_map));
-      __ Assert(eq, "Expected property cell in register ebx");
-      __ bind(&okay_here);
-    }
-  }
-
-  if (FLAG_optimize_constructed_arrays) {
-    Label not_zero_case, not_one_case;
-    __ tst(r0, r0);
-    __ b(ne, &not_zero_case);
-    ArrayNoArgumentConstructorStub no_argument_stub;
-    __ TailCallStub(&no_argument_stub);
-
-    __ bind(&not_zero_case);
-    __ cmp(r0, Operand(1));
-    __ b(gt, &not_one_case);
-    ArraySingleArgumentConstructorStub single_argument_stub;
-    __ TailCallStub(&single_argument_stub);
-
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub n_argument_stub;
-    __ TailCallStub(&n_argument_stub);
-  } else {
-    Label generic_constructor;
-    // Run the native code for the Array function called as a constructor.
-    ArrayNativeCode(masm, &generic_constructor);
-
-    // Jump to the generic construct code in case the specialized code cannot
-    // handle the construction.
-    __ bind(&generic_constructor);
-    Handle<Code> generic_construct_stub =
-        masm->isolate()->builtins()->JSConstructStubGeneric();
-    __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
   }
+
+  Label generic_constructor;
+  // Run the native code for the Array function called as a constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+  Handle<Code> generic_construct_stub =
+      masm->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
 }
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index ef2dbb3892..cc6caca3d8 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -96,16 +96,33 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 }
 
 
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+void CompareNilICStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(CompareNilIC_Miss);
+  descriptor->miss_handler_ =
+      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor,
+    int constant_stack_parameter_count) {
   // register state
-  // r1 -- constructor function
+  // r0 -- number of arguments
   // r2 -- type info cell with elements kind
-  // r0 -- number of arguments to the constructor function
-  static Register registers[] = { r1, r2 };
-  descriptor->register_param_count_ = 2;
-  // stack param count needs (constructor pointer, and single argument)
-  descriptor->stack_parameter_count_ = &r0;
+  static Register registers[] = { r2 };
+  descriptor->register_param_count_ = 1;
+  if (constant_stack_parameter_count != 0) {
+    // stack param count needs (constructor pointer, and single argument)
+    descriptor->stack_parameter_count_ = &r0;
+  }
+  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
   descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
   descriptor->deoptimization_handler_ =
@@ -116,21 +133,21 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate,
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor);
+  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor);
+  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor);
+  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
 }
 
 
@@ -161,6 +178,30 @@ static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
 }
 
 
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+  // Update the static counter each time a new code stub is generated.
+  Isolate* isolate = masm->isolate();
+  isolate->counters()->code_stubs()->Increment();
+
+  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+  int param_count = descriptor->register_param_count_;
+  {
+    // Call the runtime system in a fresh internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    ASSERT(descriptor->register_param_count_ == 0 ||
+           r0.is(descriptor->register_params_[param_count - 1]));
+    // Push arguments
+    for (int i = 0; i < param_count; ++i) {
+      __ push(descriptor->register_params_[i]);
+    }
+    ExternalReference miss = descriptor->miss_handler_;
+    __ CallExternalReference(miss, descriptor->register_param_count_);
+  }
+
+  __ Ret();
+}
+
+
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in eax.
   Label check_heap_number, call_builtin;
@@ -506,318 +547,6 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
 }
 
 
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
-                                   FloatingPointHelper::Destination destination,
-                                   Register scratch1,
-                                   Register scratch2) {
-  __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
-  __ vmov(d7.high(), scratch1);
-  __ vcvt_f64_s32(d7, d7.high());
-  __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
-  __ vmov(d6.high(), scratch1);
-  __ vcvt_f64_s32(d6, d6.high());
-  if (destination == kCoreRegisters) {
-    __ vmov(r2, r3, d7);
-    __ vmov(r0, r1, d6);
-  }
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
-                                     Destination destination,
-                                     Register object,
-                                     DwVfpRegister dst,
-                                     Register dst1,
-                                     Register dst2,
-                                     Register heap_number_map,
-                                     Register scratch1,
-                                     Register scratch2,
-                                     Label* not_number) {
-  __ AssertRootValue(heap_number_map,
-                     Heap::kHeapNumberMapRootIndex,
-                     "HeapNumberMap register clobbered.");
-
-  Label is_smi, done;
-
-  // Smi-check
-  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
-  // Heap number check
-  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
-  // Handle loading a double from a heap number.
-  if (destination == kVFPRegisters) {
-    // Load the double from tagged HeapNumber to double register.
-    __ sub(scratch1, object, Operand(kHeapObjectTag));
-    __ vldr(dst, scratch1, HeapNumber::kValueOffset);
-  } else {
-    ASSERT(destination == kCoreRegisters);
-    // Load the double from heap number to dst1 and dst2 in double format.
-    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
-  }
-  __ jmp(&done);
-
-  // Handle loading a double from a smi.
-  __ bind(&is_smi);
-  // Convert smi to double using VFP instructions.
-  __ vmov(dst.high(), scratch1);
-  __ vcvt_f64_s32(dst, dst.high());
-  if (destination == kCoreRegisters) {
-    // Load the converted smi to dst1 and dst2 in double format.
-    __ vmov(dst1, dst2, dst);
-  }
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
-                                               Register object,
-                                               Register dst,
-                                               Register heap_number_map,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Register scratch3,
-                                               DwVfpRegister double_scratch1,
-                                               DwVfpRegister double_scratch2,
-                                               Label* not_number) {
-  Label done;
-  __ AssertRootValue(heap_number_map,
-                     Heap::kHeapNumberMapRootIndex,
-                     "HeapNumberMap register clobbered.");
-
-  __ UntagAndJumpIfSmi(dst, object, &done);
-  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
-  __ cmp(scratch1, heap_number_map);
-  __ b(ne, not_number);
-  __ ECMAConvertNumberToInt32(object, dst,
-                              scratch1, scratch2, scratch3,
-                              double_scratch1, double_scratch2);
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
-                                             Register int_scratch,
-                                             Destination destination,
-                                             DwVfpRegister double_dst,
-                                             Register dst_mantissa,
-                                             Register dst_exponent,
-                                             Register scratch2,
-                                             SwVfpRegister single_scratch) {
-  ASSERT(!int_scratch.is(scratch2));
-  ASSERT(!int_scratch.is(dst_mantissa));
-  ASSERT(!int_scratch.is(dst_exponent));
-
-  Label done;
-
-  __ vmov(single_scratch, int_scratch);
-  __ vcvt_f64_s32(double_dst, single_scratch);
-  if (destination == kCoreRegisters) {
-    __ vmov(dst_mantissa, dst_exponent, double_dst);
-  }
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
-                                                  Register object,
-                                                  Destination destination,
-                                                  DwVfpRegister double_dst,
-                                                  DwVfpRegister double_scratch,
-                                                  Register dst_mantissa,
-                                                  Register dst_exponent,
-                                                  Register heap_number_map,
-                                                  Register scratch1,
-                                                  Register scratch2,
-                                                  SwVfpRegister single_scratch,
-                                                  Label* not_int32) {
-  ASSERT(!scratch1.is(object) && !scratch2.is(object));
-  ASSERT(!scratch1.is(scratch2));
-  ASSERT(!heap_number_map.is(object) &&
-         !heap_number_map.is(scratch1) &&
-         !heap_number_map.is(scratch2));
-
-  Label done, obj_is_not_smi;
-
-  __ JumpIfNotSmi(object, &obj_is_not_smi);
-  __ SmiUntag(scratch1, object);
-  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
-                     dst_exponent, scratch2, single_scratch);
-  __ b(&done);
-
-  __ bind(&obj_is_not_smi);
-  __ AssertRootValue(heap_number_map,
-                     Heap::kHeapNumberMapRootIndex,
-                     "HeapNumberMap register clobbered.");
-  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
-  // Load the number.
-  // Load the double value.
-  __ sub(scratch1, object, Operand(kHeapObjectTag));
-  __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
-  __ TestDoubleIsInt32(double_dst, double_scratch);
-  // Jump to not_int32 if the operation did not succeed.
-  __ b(ne, not_int32);
-
-  if (destination == kCoreRegisters) {
-    __ vmov(dst_mantissa, dst_exponent, double_dst);
-  }
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
-                                            Register object,
-                                            Register dst,
-                                            Register heap_number_map,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3,
-                                            DwVfpRegister double_scratch0,
-                                            DwVfpRegister double_scratch1,
-                                            Label* not_int32) {
-  ASSERT(!dst.is(object));
-  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
-  ASSERT(!scratch1.is(scratch2) &&
-         !scratch1.is(scratch3) &&
-         !scratch2.is(scratch3));
-
-  Label done, maybe_undefined;
-
-  __ UntagAndJumpIfSmi(dst, object, &done);
-
-  __ AssertRootValue(heap_number_map,
-                     Heap::kHeapNumberMapRootIndex,
-                     "HeapNumberMap register clobbered.");
-
-  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
-  // Object is a heap number.
-  // Convert the floating point value to a 32-bit integer.
-  // Load the double value.
-  __ sub(scratch1, object, Operand(kHeapObjectTag));
-  __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
-
-  __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
-  // Jump to not_int32 if the operation did not succeed.
-  __ b(ne, not_int32);
-  __ b(&done);
-
-  __ bind(&maybe_undefined);
-  __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
-  __ b(ne, not_int32);
-  // |undefined| is truncated to 0.
-  __ mov(dst, Operand(Smi::FromInt(0)));
-  // Fall through.
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
-                                               Register src_exponent,
-                                               Register src_mantissa,
-                                               Register dst,
-                                               Register scratch,
-                                               Label* not_int32) {
-  // Get exponent alone in scratch.
-  __ Ubfx(scratch,
-          src_exponent,
-          HeapNumber::kExponentShift,
-          HeapNumber::kExponentBits);
-
-  // Substract the bias from the exponent.
-  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
-
-  // src1: higher (exponent) part of the double value.
-  // src2: lower (mantissa) part of the double value.
-  // scratch: unbiased exponent.
-
-  // Fast cases. Check for obvious non 32-bit integer values.
-  // Negative exponent cannot yield 32-bit integers.
-  __ b(mi, not_int32);
-  // Exponent greater than 31 cannot yield 32-bit integers.
-  // Also, a positive value with an exponent equal to 31 is outside of the
-  // signed 32-bit integer range.
-  // Another way to put it is that if (exponent - signbit) > 30 then the
-  // number cannot be represented as an int32.
-  Register tmp = dst;
-  __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
-  __ cmp(tmp, Operand(30));
-  __ b(gt, not_int32);
-  // - Bits [21:0] in the mantissa are not null.
-  __ tst(src_mantissa, Operand(0x3fffff));
-  __ b(ne, not_int32);
-
-  // Otherwise the exponent needs to be big enough to shift left all the
-  // non zero bits left. So we need the (30 - exponent) last bits of the
-  // 31 higher bits of the mantissa to be null.
-  // Because bits [21:0] are null, we can check instead that the
-  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
-  // Get the 32 higher bits of the mantissa in dst.
-  __ Ubfx(dst,
-          src_mantissa,
-          HeapNumber::kMantissaBitsInTopWord,
-          32 - HeapNumber::kMantissaBitsInTopWord);
-  __ orr(dst,
-         dst,
-         Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));
-
-  // Create the mask and test the lower bits (of the higher bits).
-  __ rsb(scratch, scratch, Operand(32));
-  __ mov(src_mantissa, Operand(1));
-  __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
-  __ sub(src_exponent, src_exponent, Operand(1));
-  __ tst(dst, src_exponent);
-  __ b(ne, not_int32);
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
-    MacroAssembler* masm,
-    Token::Value op,
-    Register heap_number_result,
-    Register scratch) {
-  // Using core registers:
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-
-  // Assert that heap_number_result is callee-saved.
-  // We currently always use r5 to pass it.
-  ASSERT(heap_number_result.is(r5));
-
-  // Push the current return address before the C call. Return will be
-  // through pop(pc) below.
-  __ push(lr);
-  __ PrepareCallCFunction(0, 2, scratch);
-  if (masm->use_eabi_hardfloat()) {
-    __ vmov(d0, r0, r1);
-    __ vmov(d1, r2, r3);
-  }
-  {
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(
-        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
-  }
-  // Store answer in the overwritable heap number. Double returned in
-  // registers r0 and r1 or in d0.
-  if (masm->use_eabi_hardfloat()) {
-    __ vstr(d0,
-            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
-  } else {
-    __ Strd(r0, r1, FieldMemOperand(heap_number_result,
-                                    HeapNumber::kValueOffset));
-  }
-  // Place heap_number_result in r0 and return to the pushed return address.
-  __ mov(r0, Operand(heap_number_result));
-  __ pop(pc);
-}
-
-
 bool WriteInt32ToHeapNumberStub::IsPregenerated() {
   // These variants are compiled ahead of time. See next method.
   if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
@@ -1055,57 +784,6 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
 }
 
 
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
-  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
-  Register rhs_exponent = exp_first ? r0 : r1;
-  Register lhs_exponent = exp_first ? r2 : r3;
-  Register rhs_mantissa = exp_first ? r1 : r0;
-  Register lhs_mantissa = exp_first ? r3 : r2;
-  Label one_is_nan, neither_is_nan;
-
-  __ Sbfx(r4,
-          lhs_exponent,
-          HeapNumber::kExponentShift,
-          HeapNumber::kExponentBits);
-  // NaNs have all-one exponents so they sign extend to -1.
-  __ cmp(r4, Operand(-1));
-  __ b(ne, lhs_not_nan);
-  __ mov(r4,
-         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
-         SetCC);
-  __ b(ne, &one_is_nan);
-  __ cmp(lhs_mantissa, Operand::Zero());
-  __ b(ne, &one_is_nan);
-
-  __ bind(lhs_not_nan);
-  __ Sbfx(r4,
-          rhs_exponent,
-          HeapNumber::kExponentShift,
-          HeapNumber::kExponentBits);
-  // NaNs have all-one exponents so they sign extend to -1.
-  __ cmp(r4, Operand(-1));
-  __ b(ne, &neither_is_nan);
-  __ mov(r4,
-         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
-         SetCC);
-  __ b(ne, &one_is_nan);
-  __ cmp(rhs_mantissa, Operand::Zero());
-  __ b(eq, &neither_is_nan);
-
-  __ bind(&one_is_nan);
-  // NaN comparisons always fail.
-  // Load whatever we need in r0 to make the comparison fail.
-  if (cond == lt || cond == le) {
-    __ mov(r0, Operand(GREATER));
-  } else {
-    __ mov(r0, Operand(LESS));
-  }
-  __ Ret();
-
-  __ bind(&neither_is_nan);
-}
-
-
 // See comment at call site.
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
@@ -1627,33 +1305,19 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   const Register scratch = r1;
 
   if (save_doubles_ == kSaveFPRegs) {
-    // Check CPU flags for number of registers, setting the Z condition flag.
-    __ CheckFor32DRegs(scratch);
-
-    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
-    for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
-      DwVfpRegister reg = DwVfpRegister::from_code(i);
-      __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
-    }
+    __ SaveFPRegs(sp, scratch);
   }
   const int argument_count = 1;
   const int fp_argument_count = 0;
 
   AllowExternalCallThatCantCauseGC scope(masm);
   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
-  __ mov(r0, Operand(ExternalReference::isolate_address()));
+  __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
   __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
   if (save_doubles_ == kSaveFPRegs) {
-    // Check CPU flags for number of registers, setting the Z condition flag.
-    __ CheckFor32DRegs(scratch);
-
-    for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
-      DwVfpRegister reg = DwVfpRegister::from_code(i);
-      __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
-    }
-    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
+    __ RestoreFPRegs(sp, scratch);
   }
   __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
 }
@@ -1835,8 +1499,10 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
                                                Label* slow) {
   EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+
   // Convert the heap number in r0 to an untagged integer in r1.
-  __ ECMAConvertNumberToInt32(r0, r1, r2, r3, r4, d0, d1);
+  __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  __ ECMAToInt32(r1, d0, r2, r3, r4, d1);
 
   // Do the bitwise operation and check if the result fits in a smi.
   Label try_float;
@@ -1928,6 +1594,50 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
 }
 
 
+// Generates code to call a C function to do a double operation.
+// This code never falls through, but returns with a heap number containing
+// the result in r0.
+// Register heapnumber_result must be a heap number in which the
+// result of the operation will be stored.
+// Requires the following layout on entry:
+// d0: Left value.
+// d1: Right value.
+// If soft float ABI, use also r0, r1, r2, r3.
+static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                        Token::Value op,
+                                        Register heap_number_result,
+                                        Register scratch) {
+  // Assert that heap_number_result is callee-saved.
+  // We currently always use r5 to pass it.
+  ASSERT(heap_number_result.is(r5));
+
+  // Push the current return address before the C call. Return will be
+  // through pop(pc) below.
+  __ push(lr);
+  __ PrepareCallCFunction(0, 2, scratch);
+  if (!masm->use_eabi_hardfloat()) {
+    __ vmov(r0, r1, d0);
+    __ vmov(r2, r3, d1);
+  }
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
+  // Store answer in the overwritable heap number. Double returned in
+  // registers r0 and r1 or in d0.
+  if (masm->use_eabi_hardfloat()) {
+    __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  } else {
+    __ Strd(r0, r1,
+            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  }
+  // Place heap_number_result in r0 and return to the pushed return address.
+  __ mov(r0, Operand(heap_number_result));
+  __ pop(pc);
+}
+
+
 void BinaryOpStub::Initialize() {
   platform_specific_bit_ = true;  // VFP2 is a base requirement for V8
 }
@@ -2205,64 +1915,56 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
     case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
-      // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
-      // depending on whether VFP3 is available or not.
-      FloatingPointHelper::Destination destination =
-          op != Token::MOD ?
-          FloatingPointHelper::kVFPRegisters :
-          FloatingPointHelper::kCoreRegisters;
-
       // Allocate new heap number for result.
       Register result = r5;
       BinaryOpStub_GenerateHeapResultAllocation(
          masm, result, heap_number_map, scratch1, scratch2, gc_required,
          mode);
 
-      // Load the operands.
+      // Load left and right operands into d0 and d1.
       if (smi_operands) {
-        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+        __ SmiUntag(scratch1, right);
+        __ vmov(d1.high(), scratch1);
+        __ vcvt_f64_s32(d1, d1.high());
+        __ SmiUntag(scratch1, left);
+        __ vmov(d0.high(), scratch1);
+        __ vcvt_f64_s32(d0, d0.high());
      } else {
-        // Load right operand to d7 or r2/r3.
+        // Load right operand into d1.
        if (right_type == BinaryOpIC::INT32) {
-          FloatingPointHelper::LoadNumberAsInt32Double(
-              masm, right, destination, d7, d8, r2, r3, heap_number_map,
-              scratch1, scratch2, s0, miss);
+          __ LoadNumberAsInt32Double(
+              right, d1, heap_number_map, scratch1, d8, miss);
        } else {
          Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
-          FloatingPointHelper::LoadNumber(
-              masm, destination, right, d7, r2, r3, heap_number_map,
-              scratch1, scratch2, fail);
+          __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
        }
-        // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
-        // jumps to |miss|.
+        // Load left operand into d0.
        if (left_type == BinaryOpIC::INT32) {
-          FloatingPointHelper::LoadNumberAsInt32Double(
-              masm, left, destination, d6, d8, r0, r1, heap_number_map,
-              scratch1, scratch2, s0, miss);
+          __ LoadNumberAsInt32Double(
+              left, d0, heap_number_map, scratch1, d8, miss);
        } else {
          Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
-          FloatingPointHelper::LoadNumber(
-              masm, destination, left, d6, r0, r1, heap_number_map,
-              scratch1, scratch2, fail);
+          __ LoadNumber(
+              left, d0, heap_number_map, scratch1, fail);
        }
      }
 
       // Calculate the result.
-      if (destination == FloatingPointHelper::kVFPRegisters) {
+      if (op != Token::MOD) {
         // Using VFP registers:
-        // d6: Left value
-        // d7: Right value
+        // d0: Left value
+        // d1: Right value
        switch (op) {
          case Token::ADD:
-            __ vadd(d5, d6, d7);
+            __ vadd(d5, d0, d1);
            break;
          case Token::SUB:
-            __ vsub(d5, d6, d7);
+            __ vsub(d5, d0, d1);
            break;
          case Token::MUL:
-            __ vmul(d5, d6, d7);
+            __ vmul(d5, d0, d1);
            break;
          case Token::DIV:
-            __ vdiv(d5, d6, d7);
+            __ vdiv(d5, d0, d1);
            break;
          default:
            UNREACHABLE();
@@ -2274,10 +1976,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
         __ Ret();
      } else {
         // Call the C function to handle the double operation.
-        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
-                                                         op,
-                                                         result,
-                                                         scratch1);
+        CallCCodeForDoubleOperation(masm, op, result, scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }
@@ -2295,26 +1994,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
         __ SmiUntag(r2, right);
      } else {
        // Convert operands to 32-bit integers. Right in r2 and left in r3.
-        FloatingPointHelper::ConvertNumberToInt32(masm,
-                                                  left,
-                                                  r3,
-                                                  heap_number_map,
-                                                  scratch1,
-                                                  scratch2,
-                                                  scratch3,
-                                                  d0,
-                                                  d1,
-                                                  not_numbers);
-        FloatingPointHelper::ConvertNumberToInt32(masm,
-                                                  right,
-                                                  r2,
-                                                  heap_number_map,
-                                                  scratch1,
-                                                  scratch2,
-                                                  scratch3,
-                                                  d0,
-                                                  d1,
-                                                  not_numbers);
+        __ ConvertNumberToInt32(
+            left, r3, heap_number_map,
+            scratch1, scratch2, scratch3, d0, d1, not_numbers);
+        __ ConvertNumberToInt32(
+            right, r2, heap_number_map,
+            scratch1, scratch2, scratch3, d0, d1, not_numbers);
      }
 
       Label result_not_a_smi;
@@ -2533,49 +2218,25 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   // Load both operands and check that they are 32-bit integer.
   // Jump to type transition if they are not. The registers r0 and r1 (right
   // and left) are preserved for the runtime call.
-  FloatingPointHelper::Destination destination = (op_ != Token::MOD)
-      ? FloatingPointHelper::kVFPRegisters
-      : FloatingPointHelper::kCoreRegisters;
-
-  FloatingPointHelper::LoadNumberAsInt32Double(masm,
-                                               right,
-                                               destination,
-                                               d7,
-                                               d8,
-                                               r2,
-                                               r3,
-                                               heap_number_map,
-                                               scratch1,
-                                               scratch2,
-                                               s0,
-                                               &transition);
-  FloatingPointHelper::LoadNumberAsInt32Double(masm,
-                                               left,
-                                               destination,
-                                               d6,
-                                               d8,
-                                               r4,
-                                               r5,
-                                               heap_number_map,
-                                               scratch1,
-                                               scratch2,
-                                               s0,
-                                               &transition);
-
-  if (destination == FloatingPointHelper::kVFPRegisters) {
+  __ LoadNumberAsInt32Double(
+      right, d1, heap_number_map, scratch1, d8, &transition);
+  __ LoadNumberAsInt32Double(
+      left, d0, heap_number_map, scratch1, d8, &transition);
+
+  if (op_ != Token::MOD) {
     Label return_heap_number;
    switch (op_) {
      case Token::ADD:
-        __ vadd(d5, d6, d7);
+        __ vadd(d5, d0, d1);
        break;
      case Token::SUB:
-        __ vsub(d5, d6, d7);
+        __ vsub(d5, d0, d1);
        break;
      case Token::MUL:
-        __ vmul(d5, d6, d7);
+        __ vmul(d5, d0, d1);
        break;
      case Token::DIV:
-        __ vdiv(d5, d6, d7);
+        __ vdiv(d5, d0, d1);
        break;
      default:
        UNREACHABLE();
@@ -2601,13 +2262,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
       // If not try to return a heap number.
       __ b(mi, &return_heap_number);
-      // Check for minus zero. Return heap number for minus zero.
+      // Check for minus zero. Return heap number for minus zero if
+      // double results are allowed; otherwise transition.
       Label not_zero;
       __ cmp(scratch1, Operand::Zero());
       __ b(ne, &not_zero);
       __ vmov(scratch2, d5.high());
       __ tst(scratch2, Operand(HeapNumber::kSignMask));
-      __ b(ne, &return_heap_number);
+      __ b(ne, result_type_ <= BinaryOpIC::INT32 ? &transition
+                                                 : &return_heap_number);
       __ bind(&not_zero);
 
       // Tag the result and return.
@@ -2620,22 +2283,19 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       __ bind(&return_heap_number);
       // Return a heap number, or fall through to type transition or runtime
       // call if we can't.
-      if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER
-                                               : BinaryOpIC::INT32)) {
-        // We are using vfp registers so r5 is available.
- heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); - __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ mov(r0, heap_number_result); - __ Ret(); - } + // We are using vfp registers so r5 is available. + heap_number_result = r5; + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); // A DIV operation expecting an integer result falls through // to type transition. @@ -2661,8 +2321,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ Pop(r1, r0); // Call the C function to handle the double operation. - FloatingPointHelper::CallCCodeForDoubleOperation( - masm, op_, heap_number_result, scratch1); + CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); if (FLAG_debug_code) { __ stop("Unreachable code."); } @@ -2682,30 +2341,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::SHR: case Token::SHL: { Label return_heap_number; - Register scratch3 = r5; // Convert operands to 32-bit integers. Right in r2 and left in r3. The // registers r0 and r1 (right and left) are preserved for the runtime // call. - FloatingPointHelper::LoadNumberAsInt32(masm, - left, - r3, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - d1, - &transition); - FloatingPointHelper::LoadNumberAsInt32(masm, - right, - r2, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - d1, - &transition); + __ LoadNumberAsInt32(left, r3, heap_number_map, + scratch1, d0, d1, &transition); + __ LoadNumberAsInt32(right, r2, heap_number_map, + scratch1, d0, d1, &transition); // The ECMA-262 standard specifies that, for shift operations, only the // 5 least significant bits of the shift value should be used. @@ -3385,6 +3027,9 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); StubFailureTrampolineStub::GenerateAheadOfTime(isolate); RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); + if (FLAG_optimize_constructed_arrays) { + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + } } @@ -3477,7 +3122,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } #endif - __ mov(r2, Operand(ExternalReference::isolate_address())); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); // To let the GC traverse the return address of the exit frames, we need to // know where the return address is. The CEntryStub is unmovable, so @@ -4694,7 +4339,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Arguments are before that on the stack or in registers. // Argument 9 (sp[20]): Pass current isolate address. - __ mov(r0, Operand(ExternalReference::isolate_address())); + __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); __ str(r0, MemOperand(sp, 5 * kPointerSize)); // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. 
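Nearly every hunk in this stub rewrite swaps ExternalReference::isolate_address() for ExternalReference::isolate_address(isolate): the isolate is now named explicitly at each call site instead of being looked up implicitly. A minimal standalone C++ sketch of the pattern (illustrative names only, not V8 API):

#include <cstdio>

// The isolate travels as an ordinary C argument, so a process hosting
// several isolates never depends on a hidden "current isolate" global.
struct Isolate { int id; };

extern "C" void StoreBufferOverflowFunction(Isolate* isolate) {
  std::printf("store buffer overflow in isolate %d\n", isolate->id);
}

int main() {
  Isolate a{1};
  Isolate b{2};
  // Each call names its isolate, mirroring how the generated code loads
  // isolate_address(isolate) into r0 before calling into C.
  StoreBufferOverflowFunction(&a);
  StoreBufferOverflowFunction(&b);
  return 0;
}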
@@ -5089,7 +4734,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
       TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
           LAST_FAST_ELEMENTS_KIND);
   __ cmp(r3, Operand(terminal_kind_sentinel));
-  __ b(ne, &miss);
+  __ b(gt, &miss);
   // Make sure the function is the Array() function
   __ LoadArrayFunction(r3);
   __ cmp(r1, r3);
@@ -6305,16 +5950,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ASCII characters.
+  // to contain only one byte characters.
   // r4: first instance type.
   // r5: second instance type.
-  __ tst(r4, Operand(kAsciiDataHintMask));
-  __ tst(r5, Operand(kAsciiDataHintMask), ne);
+  __ tst(r4, Operand(kOneByteDataHintMask));
+  __ tst(r5, Operand(kOneByteDataHintMask), ne);
   __ b(ne, &ascii_data);
   __ eor(r4, r4, Operand(r5));
-  STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
-  __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
-  __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
+  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
+  __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
+  __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
   __ b(eq, &ascii_data);
 
   // Allocate a two byte cons string.
@@ -7170,6 +6815,9 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
     Isolate* isolate) {
   StoreBufferOverflowStub stub1(kDontSaveFPRegs);
   stub1.GetCode(isolate)->set_is_pregenerated(true);
+  // Hydrogen code stubs need stub2 at snapshot time.
+  StoreBufferOverflowStub stub2(kSaveFPRegs);
+  stub2.GetCode(isolate)->set_is_pregenerated(true);
 }
@@ -7288,7 +6936,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
   __ Move(address, regs_.address());
   __ Move(r0, regs_.object());
   __ Move(r1, address);
-  __ mov(r2, Operand(ExternalReference::isolate_address()));
+  __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
 
   AllowExternalCallThatCantCauseGC scope(masm);
   if (mode == INCREMENTAL_COMPACTION) {
@@ -7445,10 +7093,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
   __ bind(&double_elements);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ StoreNumberToDoubleElements(r0, r3,
-                                 // Overwrites all regs after this.
-                                 r5, r9, r6, r7, r2,
-                                 &slow_elements);
+  __ StoreNumberToDoubleElements(r0, r3, r5, r6, &slow_elements);
   __ Ret();
 }
@@ -7528,6 +7173,196 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   __ Ret();
 }
 
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm) {
+  int last_index = GetSequenceIndexFromFastElementsKind(
+      TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= last_index; ++i) {
+    Label next;
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    __ cmp(r3, Operand(kind));
+    __ b(ne, &next);
+    T stub(kind);
+    __ TailCallStub(&stub);
+    __ bind(&next);
+  }
+
+  // If we reached this point there is a problem.
+  __ Abort("Unexpected ElementsKind in array constructor");
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
+  // r2 - type info cell
+  // r3 - kind
+  // r0 - number of arguments
+  // r1 - constructor?
+  // sp[0] - last argument
+  ASSERT(FAST_SMI_ELEMENTS == 0);
+  ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  ASSERT(FAST_ELEMENTS == 2);
+  ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+  ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+
+  // is the low bit set? If so, we are holey and that is good.
+  __ tst(r3, Operand(1));
+  Label normal_sequence;
+  __ b(ne, &normal_sequence);
+
+  // look at the first argument
+  __ ldr(r5, MemOperand(sp, 0));
+  __ cmp(r5, Operand::Zero());
+  __ b(eq, &normal_sequence);
+
+  // We are going to create a holey array, but our kind is non-holey.
+  // Fix kind and retry
+  __ add(r3, r3, Operand(1));
+  __ cmp(r2, Operand(undefined_sentinel));
+  __ b(eq, &normal_sequence);
+
+  // Save the resulting elements kind in type info
+  __ SmiTag(r3);
+  __ str(r3, FieldMemOperand(r2, kPointerSize));
+  __ SmiUntag(r3);
+
+  __ bind(&normal_sequence);
+  int last_index = GetSequenceIndexFromFastElementsKind(
+      TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= last_index; ++i) {
+    Label next;
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    __ cmp(r3, Operand(kind));
+    __ b(ne, &next);
+    ArraySingleArgumentConstructorStub stub(kind);
+    __ TailCallStub(&stub);
+    __ bind(&next);
+  }
+
+  // If we reached this point there is a problem.
+  __ Abort("Unexpected ElementsKind in array constructor");
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+  int to_index = GetSequenceIndexFromFastElementsKind(
+      TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= to_index; ++i) {
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    T stub(kind);
+    stub.GetCode(isolate)->set_is_pregenerated(true);
+  }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+      isolate);
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : argc (only if argument_count_ == ANY)
+  //  -- r1 : constructor
+  //  -- r2 : type info cell
+  //  -- sp[0] : return address
+  //  -- sp[4] : last argument
+  // -----------------------------------
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    // Will both indicate a NULL and a Smi.
+    __ tst(r3, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function");
+    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for Array function");
+
+    // We should either have undefined in ebx or a valid jsglobalpropertycell
+    Label okay_here;
+    Handle<Map> global_property_cell_map(
+        masm->isolate()->heap()->global_property_cell_map());
+    __ cmp(r2, Operand(undefined_sentinel));
+    __ b(eq, &okay_here);
+    __ ldr(r3, FieldMemOperand(r2, 0));
+    __ cmp(r3, Operand(global_property_cell_map));
+    __ Assert(eq, "Expected property cell in register ebx");
+    __ bind(&okay_here);
+  }
+
+  if (FLAG_optimize_constructed_arrays) {
+    Label no_info, switch_ready;
+    // Get the elements kind and case on that.
+    __ cmp(r2, Operand(undefined_sentinel));
+    __ b(eq, &no_info);
+    __ ldr(r3, FieldMemOperand(r2, kPointerSize));
+
+    // There is no info if the call site went megamorphic either
+    // TODO(mvstanton): Really? I thought if it was the array function that
+    // the cell wouldn't get stamped as megamorphic.
+    __ cmp(r3,
+           Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
+    __ b(eq, &no_info);
+    __ SmiUntag(r3);
+    __ jmp(&switch_ready);
+    __ bind(&no_info);
+    __ mov(r3, Operand(GetInitialFastElementsKind()));
+    __ bind(&switch_ready);
+
+    if (argument_count_ == ANY) {
+      Label not_zero_case, not_one_case;
+      __ tst(r0, r0);
+      __ b(ne, &not_zero_case);
+      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+
+      __ bind(&not_zero_case);
+      __ cmp(r0, Operand(1));
+      __ b(gt, &not_one_case);
+      CreateArrayDispatchOneArgument(masm);
+
+      __ bind(&not_one_case);
+      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+    } else if (argument_count_ == NONE) {
+      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+    } else if (argument_count_ == ONE) {
+      CreateArrayDispatchOneArgument(masm);
+    } else if (argument_count_ == MORE_THAN_ONE) {
+      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+    } else {
+      UNREACHABLE();
+    }
+  } else {
+    Label generic_constructor;
+    // Run the native code for the Array function called as a constructor.
+    ArrayNativeCode(masm, &generic_constructor);
+
+    // Jump to the generic construct code in case the specialized code cannot
+    // handle the construction.
+    __ bind(&generic_constructor);
+    Handle<Code> generic_construct_stub =
+        masm->isolate()->builtins()->JSConstructStubGeneric();
+    __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  }
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 75cbf6582c..0b1a8b8472 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -34,6 +34,9 @@ namespace v8 {
 namespace internal {
 
 
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public PlatformCodeStub {
@@ -469,34 +472,14 @@ class RecordWriteStub: public PlatformCodeStub {
   void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
     masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
     if (mode == kSaveFPRegs) {
-      // Number of d-regs not known at snapshot time.
-      ASSERT(!Serializer::enabled());
-      masm->sub(sp,
-                sp,
-                Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
-      // Save all VFP registers except d0.
-      // TODO(hans): We should probably save d0 too. And maybe use vstm.
-      for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
-        DwVfpRegister reg = DwVfpRegister::from_code(i);
-        masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
-      }
+      masm->SaveFPRegs(sp, scratch0_);
     }
   }
 
   inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
                                          SaveFPRegsMode mode) {
     if (mode == kSaveFPRegs) {
-      // Number of d-regs not known at snapshot time.
-      ASSERT(!Serializer::enabled());
-      // Restore all VFP registers except d0.
-      // TODO(hans): We should probably restore d0 too. And maybe use vldm.
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); - } - masm->add(sp, - sp, - Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); + masm->RestoreFPRegs(sp, scratch0_); } masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); } @@ -608,142 +591,6 @@ class DirectCEntryStub: public PlatformCodeStub { }; -class FloatingPointHelper : public AllStatic { - public: - enum Destination { - kVFPRegisters, - kCoreRegisters - }; - - - // Loads smis from r0 and r1 (right and left in binary operations) into - // floating point registers. Depending on the destination the values ends up - // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is - // floating point registers VFP3 must be supported. If core registers are - // requested when VFP3 is supported d6 and d7 will be scratched. - static void LoadSmis(MacroAssembler* masm, - Destination destination, - Register scratch1, - Register scratch2); - - // Convert the smi or heap number in object to an int32 using the rules - // for ToInt32 as described in ECMAScript 9.5.: the value is truncated - // and brought into the range -2^31 .. +2^31 - 1. - static void ConvertNumberToInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch1, - DwVfpRegister double_scratch2, - Label* not_int32); - - // Converts the integer (untagged smi) in |int_scratch| to a double, storing - // the result either in |double_dst| or |dst2:dst1|, depending on - // |destination|. - // Warning: The value in |int_scratch| will be changed in the process! - static void ConvertIntToDouble(MacroAssembler* masm, - Register int_scratch, - Destination destination, - DwVfpRegister double_dst, - Register dst1, - Register dst2, - Register scratch2, - SwVfpRegister single_scratch); - - // Load the number from object into double_dst in the double format. - // Control will jump to not_int32 if the value cannot be exactly represented - // by a 32-bit integer. - // Floating point value in the 32-bit integer range that are not exact integer - // won't be loaded. - static void LoadNumberAsInt32Double(MacroAssembler* masm, - Register object, - Destination destination, - DwVfpRegister double_dst, - DwVfpRegister double_scratch, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - SwVfpRegister single_scratch, - Label* not_int32); - - // Loads the number from object into dst as a 32-bit integer. - // Control will jump to not_int32 if the object cannot be exactly represented - // by a 32-bit integer. - // Floating point value in the 32-bit integer range that are not exact integer - // won't be converted. - // scratch3 is not used when VFP3 is supported. - static void LoadNumberAsInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch0, - DwVfpRegister double_scratch1, - Label* not_int32); - - // Generate non VFP3 code to check if a double can be exactly represented by a - // 32-bit integer. This does not check for 0 or -0, which need - // to be checked for separately. - // Control jumps to not_int32 if the value is not a 32-bit integer, and falls - // through otherwise. - // src1 and src2 will be cloberred. 
- // - // Expected input: - // - src1: higher (exponent) part of the double value. - // - src2: lower (mantissa) part of the double value. - // Output status: - // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) - // - src2: contains 1. - // - other registers are clobbered. - static void DoubleIs32BitInteger(MacroAssembler* masm, - Register src1, - Register src2, - Register dst, - Register scratch, - Label* not_int32); - - // Generates code to call a C function to do a double operation using core - // registers. (Used when VFP3 is not supported.) - // This code never falls through, but returns with a heap number containing - // the result in r0. - // Register heapnumber_result must be a heap number in which the - // result of the operation will be stored. - // Requires the following layout on entry: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - static void CallCCodeForDoubleOperation(MacroAssembler* masm, - Token::Value op, - Register heap_number_result, - Register scratch); - - // Loads the objects from |object| into floating point registers. - // Depending on |destination| the value ends up either in |dst| or - // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3 - // must be supported. If kCoreRegisters are requested and VFP3 is - // supported, |dst| will be scratched. If |object| is neither smi nor - // heap number, |not_number| is jumped to with |object| still intact. - static void LoadNumber(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register object, - DwVfpRegister dst, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number); -}; - - class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 25ad85c4bc..001d3c830d 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -604,8 +604,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { void Deoptimizer::EntryGenerator::Generate() { GeneratePrologue(); - Isolate* isolate = masm()->isolate(); - // Save all general purpose registers before messing with them. const int kNumberOfRegisters = Register::kNumRegisters; @@ -665,12 +663,12 @@ void Deoptimizer::EntryGenerator::Generate() { // r2: bailout id already loaded. // r3: code address or 0 already loaded. __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta. - __ mov(r5, Operand(ExternalReference::isolate_address())); + __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. // Call Deoptimizer::New(). { AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); } // Preserve "deoptimizer" object in register r0 and get the input @@ -731,7 +729,7 @@ void Deoptimizer::EntryGenerator::Generate() { { AllowExternalCallThatCantCauseGC scope(masm()); __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + ExternalReference::compute_output_frames_function(isolate()), 1); } __ pop(r0); // Restore deoptimizer object (class Deoptimizer). 
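The hunks above retire FloatingPointHelper's ConvertNumberToInt32/LoadNumberAsInt32 in favor of the new MacroAssembler::ECMAToInt32 helper. As a reference for the semantics that helper implements in ARM code, here is a portable C++ sketch of ECMA-262 ToInt32 (an illustration under that reading of the spec, not the V8 implementation):

#include <cmath>
#include <cstdint>

// ToInt32: truncate toward zero, reduce modulo 2^32, then interpret the
// low 32 bits as a signed two's-complement value.
int32_t ECMAToInt32Reference(double value) {
  if (!std::isfinite(value) || value == 0.0) return 0;  // NaN, +/-Inf, +/-0
  const double two_to_32 = 4294967296.0;  // 2^32
  double truncated = std::trunc(value);
  double modulo = std::fmod(truncated, two_to_32);
  if (modulo < 0.0) modulo += two_to_32;  // fmod keeps the dividend's sign
  // modulo is now in [0, 2^32); the uint32 -> int32 cast wraps values
  // >= 2^31 into the negative range, as the spec requires.
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}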
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 6a33234031..0ef4be064d 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -1922,6 +1922,158 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } +void FullCodeGenerator::VisitYield(Yield* expr) { + Comment cmnt(masm_, "[ Yield"); + // Evaluate yielded value first; the initial iterator definition depends on + // this. It stays on the stack while we update the iterator. + VisitForStackValue(expr->expression()); + + switch (expr->yield_kind()) { + case Yield::INITIAL: + case Yield::SUSPEND: { + VisitForStackValue(expr->generator_object()); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ ldr(context_register(), + MemOperand(fp, StandardFrameConstants::kContextOffset)); + + Label resume; + __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex); + __ b(ne, &resume); + __ pop(result_register()); + if (expr->yield_kind() == Yield::SUSPEND) { + // TODO(wingo): Box into { value: VALUE, done: false }. + } + EmitReturnSequence(); + + __ bind(&resume); + context()->Plug(result_register()); + break; + } + + case Yield::FINAL: { + VisitForAccumulatorValue(expr->generator_object()); + __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); + __ str(r1, FieldMemOperand(result_register(), + JSGeneratorObject::kContinuationOffset)); + __ pop(result_register()); + // TODO(wingo): Box into { value: VALUE, done: true }. + + // Exit all nested statements. + NestedStatement* current = nesting_stack_; + int stack_depth = 0; + int context_length = 0; + while (current != NULL) { + current = current->Exit(&stack_depth, &context_length); + } + __ Drop(stack_depth); + EmitReturnSequence(); + break; + } + + case Yield::DELEGATING: + UNIMPLEMENTED(); + } +} + + +void FullCodeGenerator::EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode) { + // The value stays in r0, and is ultimately read by the resumed generator, as + // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. r1 + // will hold the generator object until the activation has been resumed. + VisitForStackValue(generator); + VisitForAccumulatorValue(value); + __ pop(r1); + + // Check generator state. + Label wrong_state, done; + __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0); + __ cmp(r3, Operand(Smi::FromInt(0))); + __ b(le, &wrong_state); + + // Load suspended function and context. + __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset)); + __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset)); + + // Load receiver and store as the first argument. + __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset)); + __ push(r2); + + // Push holes for the rest of the arguments to the generator function. + __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r3, + FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); + __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); + Label push_argument_holes, push_frame; + __ bind(&push_argument_holes); + __ sub(r3, r3, Operand(1), SetCC); + __ b(mi, &push_frame); + __ push(r2); + __ jmp(&push_argument_holes); + + // Enter a new JavaScript frame, and initialize its slots as they were when + // the generator was suspended. 
+ Label resume_frame; + __ bind(&push_frame); + __ bl(&resume_frame); + __ jmp(&done); + __ bind(&resume_frame); + __ push(lr); // Return address. + __ push(fp); // Caller's frame pointer. + __ mov(fp, sp); + __ push(cp); // Callee's context. + __ push(r4); // Callee's JS Function. + + // Load the operand stack size. + __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset)); + __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset)); + __ SmiUntag(r3); + + // If we are sending a value and there is no operand stack, we can jump back + // in directly. + if (resume_mode == JSGeneratorObject::SEND) { + Label slow_resume; + __ cmp(r3, Operand(0)); + __ b(ne, &slow_resume); + __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); + __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); + __ SmiUntag(r2); + __ add(r3, r3, r2); + __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); + __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); + __ Jump(r3); + __ bind(&slow_resume); + } + + // Otherwise, we push holes for the operand stack and call the runtime to fix + // up the stack and the handlers. + Label push_operand_holes, call_resume; + __ bind(&push_operand_holes); + __ sub(r3, r3, Operand(1), SetCC); + __ b(mi, &call_resume); + __ push(r2); + __ b(&push_operand_holes); + __ bind(&call_resume); + __ push(r1); + __ push(result_register()); + __ Push(Smi::FromInt(resume_mode)); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); + // Not reached: the runtime call returns elsewhere. + __ stop("not-reached"); + + // Throw error if we attempt to operate on a running generator. + __ bind(&wrong_state); + __ push(r1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); + + __ bind(&done); + context()->Plug(result_register()); +} + + void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); @@ -4383,28 +4535,22 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - Heap::RootListIndex nil_value = nil == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ LoadRoot(r1, nil_value); - __ cmp(r0, r1); - if (expr->op() == Token::EQ_STRICT) { - Split(eq, if_true, if_false, fall_through); - } else { - Heap::RootListIndex other_nil_value = nil == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; - __ b(eq, if_true); - __ LoadRoot(r1, other_nil_value); + EqualityKind kind = expr->op() == Token::EQ_STRICT + ? kStrictEquality : kNonStrictEquality; + if (kind == kStrictEquality) { + Heap::RootListIndex nil_value = nil == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(r1, nil_value); __ cmp(r0, r1); - __ b(eq, if_true); - __ JumpIfSmi(r0, if_false); - // It can be an undetectable object. 
-    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
-    __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
-    __ cmp(r1, Operand(1 << Map::kIsUndetectable));
     Split(eq, if_true, if_false, fall_through);
+  } else {
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
+                                                         kNonStrictEquality,
+                                                         nil);
+    CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+    __ cmp(r0, Operand(0));
+    Split(ne, if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 84a11b6144..893ac4e116 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1340,13 +1340,7 @@ static void KeyedStoreGenerateGenericHelper(
     __ b(ne, slow);
   }
   __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 elements,  // Overwritten.
-                                 r3,        // Scratch regs...
-                                 r4,
-                                 r5,
-                                 r6,
+  __ StoreNumberToDoubleElements(value, key, elements, r3,
                                  &transition_double_elements);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 64083e8597..66c108d4fe 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -192,6 +192,11 @@ const char* LArithmeticT::Mnemonic() const {
 }
 
 
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
 void LGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", block_id());
 }
@@ -989,12 +994,14 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
 
 
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
   LOperand* value = UseRegister(instr->value());
   return DefineAsRegister(new(zone()) LArgumentsLength(value));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
   return DefineAsRegister(new(zone()) LArgumentsElements);
 }
 
@@ -2424,7 +2431,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
     ASSERT(info()->IsStub());
     CodeStubInterfaceDescriptor* descriptor =
         info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
-    Register reg = descriptor->register_params_[instr->index()];
+    int index = static_cast<int>(instr->index());
+    Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
     return DefineFixed(result, reg);
   }
 }
@@ -2456,9 +2464,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
 
 
 LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
   LOperand* args = UseRegister(instr->arguments());
-  LOperand* length = UseTempRegister(instr->length());
-  LOperand* index = UseRegister(instr->index());
+  LOperand* length;
+  LOperand* index;
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    length = UseRegisterOrConstant(instr->length());
+    index = UseOrConstant(instr->index());
+  } else {
+    length = UseTempRegister(instr->length());
+    index = Use(instr->index());
+  }
   return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
 }
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 6486cad2bb..d81881e6fb 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -282,6 +282,8 @@ class LInstruction: public ZoneObject {
   LOperand* FirstInput() { return InputAt(0); }
   LOperand* Output() { return HasResult() ? result() : NULL; }
 
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
 #ifdef DEBUG
   void VerifyCall();
 #endif
@@ -381,6 +383,10 @@ class LInstructionGap: public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
+  virtual bool HasInterestingComment(LCodeGen* gen) const {
+    return !IsRedundant();
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
 };
 
@@ -389,6 +395,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(int block_id) : block_id_(block_id) { }
 
+  virtual bool HasInterestingComment(LCodeGen* gen) const;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
   virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
@@ -436,12 +443,14 @@ class LLabel: public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
   virtual void PrintDataTo(StringStream* stream);
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
   Label* label() { return &label_; }
   LLabel* replacement() const { return replacement_; }
   void set_replacement(LLabel* label) { replacement_ = label; }
@@ -455,6 +464,7 @@ class LLabel: public LGap {
 
 class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -472,6 +482,7 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
 
 class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
  public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -1843,7 +1854,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
   virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
-  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };
 
@@ -1911,7 +1921,6 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
   virtual void PrintDataTo(StringStream* stream);
 
-  Handle<JSFunction> target() const { return hydrogen()->target(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
@@ -2488,8 +2497,6 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
   DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
-  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
 };
 
@@ -2566,6 +2573,7 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry();
 
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 
   LOperand** SpilledRegisterArray() { return register_spills_; }
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index d2f44b05c0..29e01b9182 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -238,7 +238,12 @@ bool LCodeGen::GeneratePrologue() {
         __ str(r0, target);
         // Update the write barrier. This clobbers r3 and r0.
         __ RecordWriteContextSlot(
-            cp, target.offset(), r0, r3, GetLinkRegisterState(), kSaveFPRegs);
+            cp,
+            target.offset(),
+            r0,
+            r3,
+            GetLinkRegisterState(),
+            kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -259,38 +264,21 @@ bool LCodeGen::GenerateBody() {
        !is_aborted() && current_instruction_ < instructions_->length();
        current_instruction_++) {
     LInstruction* instr = instructions_->at(current_instruction_);
+
+    // Don't emit code for basic blocks with a replacement.
     if (instr->IsLabel()) {
-      LLabel* label = LLabel::cast(instr);
-      emit_instructions = !label->HasReplacement();
+      emit_instructions = !LLabel::cast(instr)->HasReplacement();
     }
+    if (!emit_instructions) continue;
 
-    if (emit_instructions) {
-      if (FLAG_code_comments) {
-        HValue* hydrogen = instr->hydrogen_value();
-        if (hydrogen != NULL) {
-          if (hydrogen->IsChange()) {
-            HValue* changed_value = HChange::cast(hydrogen)->value();
-            int use_id = 0;
-            const char* use_mnemo = "dead";
-            if (hydrogen->UseCount() >= 1) {
-              HValue* use_value = hydrogen->uses().value();
-              use_id = use_value->id();
-              use_mnemo = use_value->Mnemonic();
-            }
-            Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
-                    current_instruction_, instr->Mnemonic(),
-                    changed_value->id(), changed_value->Mnemonic(),
-                    use_id, use_mnemo);
-          } else {
-            Comment(";;; @%d: %s. <#%d>", current_instruction_,
-                    instr->Mnemonic(), hydrogen->id());
-          }
-        } else {
-          Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
-        }
-      }
-      instr->CompileToNative(this);
+    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
+      Comment(";;; <@%d,#%d> %s",
+              current_instruction_,
+              instr->hydrogen_value()->id(),
+              instr->Mnemonic());
     }
+
+    instr->CompileToNative(this);
   }
   EnsureSpaceForLazyDeopt();
   return !is_aborted();
@@ -302,11 +290,14 @@ bool LCodeGen::GenerateDeferredCode() {
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred build frame @%d: %s.",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
         ASSERT(!frame_is_built_);
         ASSERT(info()->IsStub());
         frame_is_built_ = true;
@@ -314,15 +305,11 @@ bool LCodeGen::GenerateDeferredCode() {
         __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
         __ push(scratch0());
         __ add(fp, sp, Operand(2 * kPointerSize));
+        Comment(";;; Deferred code");
       }
-      Comment(";;; Deferred code @%d: %s.",
-              code->instruction_index(),
-              code->instr()->Mnemonic());
       code->Generate();
       if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred destroy frame @%d: %s.",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
+        Comment(";;; Destroy frame");
         ASSERT(frame_is_built_);
         __ pop(ip);
         __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
@@ -353,7 +340,9 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     Abort("Generated code is too large");
   }
 
-  __ RecordComment("[ Deoptimisation jump table");
+  if (deopt_jump_table_.length() > 0) {
+    Comment(";;; -------------------- Jump table --------------------");
+  }
   Label table_start;
   __ bind(&table_start);
   Label needs_frame_not_call;
@@ -414,7 +403,6 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     }
     masm()->CheckConstPool(false, false);
   }
-  __ RecordComment("]");
 
   // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
@@ -607,7 +595,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                pushed_arguments_index,
                pushed_arguments_count);
   bool has_closure_id = !info()->closure().is_null() &&
-      *info()->closure() != *environment->closure();
+      !info()->closure().is_identical_to(environment->closure());
   int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
@@ -923,10 +911,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   Handle<FixedArray> literals =
       factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
-  for (int i = 0; i < deoptimization_literals_.length(); i++) {
-    literals->set(i, *deoptimization_literals_[i]);
+  { ALLOW_HANDLE_DEREF(isolate(),
+                       "copying a ZoneList of handles into a FixedArray");
+    for (int i = 0; i < deoptimization_literals_.length(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
   }
-  data->SetLiteralArray(*literals);
 
   data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
   data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
@@ -1042,10 +1033,19 @@ void LCodeGen::RecordPosition(int position) {
 }
 
 
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
 void LCodeGen::DoLabel(LLabel* label) {
-  Comment(";;; -------------------- B%d%s --------------------",
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
           label->block_id(),
-          label->is_loop_header() ? " (loop header)" : "");
+          LabelType(label));
   __ bind(label->label());
   current_block_ = label->block_id();
   DoGap(label);
@@ -1904,6 +1904,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
   Handle<Object> value = instr->value();
+  ALLOW_HANDLE_DEREF(isolate(), "smi check");
   if (value->IsSmi()) {
     __ mov(ToRegister(instr->result()), Operand(value));
   } else {
@@ -2170,17 +2171,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
 }
 
 
-int LCodeGen::GetNextEmittedBlock(int block) {
-  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
-    LLabel* label = chunk_->GetLabel(i);
-    if (!label->HasReplacement()) return i;
+int LCodeGen::GetNextEmittedBlock() const {
+  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
   }
   return -1;
 }
 
 
 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
-  int next_block = GetNextEmittedBlock(current_block_);
+  int next_block = GetNextEmittedBlock();
   right_block = chunk_->LookupDestination(right_block);
   left_block = chunk_->LookupDestination(left_block);
 
@@ -2317,10 +2317,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
 
 
 void LCodeGen::EmitGoto(int block) {
-  block = chunk_->LookupDestination(block);
-  int next_block = GetNextEmittedBlock(current_block_);
-  if (block != next_block) {
-    __ jmp(chunk_->GetAssemblyLabel(block));
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
   }
 }
 
@@ -2944,19 +2942,20 @@ void LCodeGen::DoReturn(LReturn* instr) {
   if (NeedsEagerFrame()) {
     __ mov(sp, fp);
     __ ldm(ia_w, sp, fp.bit() | lr.bit());
-
-    if (instr->has_constant_parameter_count()) {
-      int parameter_count = ToInteger32(instr->constant_parameter_count());
-      int32_t sp_delta = (parameter_count + 1) * kPointerSize;
-      if (sp_delta != 0) {
-        __ add(sp, sp, Operand(sp_delta));
-      }
-    } else {
-      Register reg = ToRegister(instr->parameter_count());
-      __ add(reg, reg, Operand(1));
-      __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
+  }
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+    if (sp_delta != 0) {
+      __ add(sp, sp, Operand(sp_delta));
     }
+  } else {
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi
+    __ SmiUntag(reg);
+    __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
   }
+
   __ Jump(lr);
 }
@@ -3274,14 +3273,22 @@ void LCodeGen::DoLoadExternalArrayPointer(
 
 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   Register arguments = ToRegister(instr->arguments());
-  Register length = ToRegister(instr->length());
-  Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
-  // There are two words between the frame pointer and the last argument.
-  // Subtracting from length accounts for one of them add one more.
-  __ sub(length, length, index);
-  __ add(length, length, Operand(1));
-  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    int index = (const_length - const_index) + 1;
+    __ ldr(result, MemOperand(arguments, index * kPointerSize));
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    // There are two words between the frame pointer and the last argument.
+    // Subtracting from length accounts for one of them add one more.
+    __ sub(length, length, index);
+    __ add(length, length, Operand(1));
+    __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+  }
 }
 
@@ -3703,12 +3710,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count,
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind,
                                  R1State r1_state) {
-  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
-      function->shared()->formal_parameter_count() == arity;
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -3723,7 +3733,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
 
     // Set r0 to arguments count if adaption is not needed. Assumes that r0
     // is available to write to at this point.
-    if (!function->NeedsArgumentsAdaption()) {
+    if (dont_adapt_arguments) {
      __ mov(r0, Operand(arity));
     }
 
@@ -3737,7 +3747,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   } else {
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(arity);
-    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(
+        function, expected, count, CALL_FUNCTION, generator, call_kind);
   }
 
   // Restore context.
@@ -3747,7 +3759,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  CallKnownFunction(instr->function(),
+  CallKnownFunction(instr->hydrogen()->function(),
+                    instr->hydrogen()->formal_parameter_count(),
                     instr->arity(),
                     instr,
                     CALL_AS_METHOD,
@@ -4119,7 +4132,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(instr->HasPointerMap());
 
-  if (instr->known_function().is_null()) {
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     RecordPosition(pointers->position());
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -4127,7 +4141,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
     __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
-    CallKnownFunction(instr->known_function(),
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
                       instr->arity(),
                       instr,
                       CALL_AS_METHOD,
@@ -4187,7 +4202,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  CallKnownFunction(instr->target(),
+  CallKnownFunction(instr->hydrogen()->target(),
+                    instr->hydrogen()->formal_parameter_count(),
                     instr->arity(),
                     instr,
                     CALL_AS_FUNCTION,
@@ -4218,10 +4234,18 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   __ mov(r0, Operand(instr->arity()));
   __ mov(r2, Operand(instr->hydrogen()->property_cell()));
-  Handle<Code> array_construct_code =
-      isolate()->builtins()->ArrayConstructCode();
-
-  CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
+  Object* cell_value = instr->hydrogen()->property_cell()->value();
+  ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  } else if (instr->arity() == 1) {
+    ArraySingleArgumentConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  } else {
+    ArrayNArgumentsConstructorStub stub(kind);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  }
 }
 
@@ -5038,8 +5062,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
 
     __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
     __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
-    __ ECMAToInt32(input_reg, double_scratch2, double_scratch,
-                   scratch1, scratch2, scratch3);
+    __ ECMAToInt32(input_reg, double_scratch2,
+                   scratch1, scratch2, scratch3, double_scratch);
 
   } else {
     // Deoptimize if we don't have a heap number.
@@ -5136,8 +5160,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
 
   if (instr->truncating()) {
     Register scratch3 = ToRegister(instr->temp2());
-    __ ECMAToInt32(result_reg, double_input, double_scratch,
-                   scratch1, scratch2, scratch3);
+    __ ECMAToInt32(result_reg, double_input,
+                   scratch1, scratch2, scratch3, double_scratch);
   } else {
     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
     // Deoptimize if the input wasn't a int32 (inside a double).
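CallKnownFunction now receives the formal parameter count from hydrogen rather than dereferencing the JSFunction handle. The direct-call decision it encodes is small enough to state as a sketch (the sentinel's value below is an assumption, a stand-in for SharedFunctionInfo::kDontAdaptArgumentsSentinel):

// A call can bypass the arguments-adaptor frame when the callee opts out
// of argument adaption entirely, or when the actual argument count already
// matches the callee's formal parameter count.
static const int kDontAdaptArgumentsSentinel = -1;  // assumed value

bool CanInvokeDirectly(int formal_parameter_count, int arity) {
  bool dont_adapt_arguments =
      formal_parameter_count == kDontAdaptArgumentsSentinel;
  return dont_adapt_arguments || formal_parameter_count == arity;
}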
@@ -5207,6 +5231,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
   Register reg = ToRegister(instr->value());
   Handle<JSFunction> target = instr->hydrogen()->target();
+  ALLOW_HANDLE_DEREF(isolate(), "smi check");
   if (isolate()->heap()->InNewSpace(*target)) {
     Register reg = ToRegister(instr->value());
     Handle<JSGlobalPropertyCell> cell =
@@ -5348,16 +5373,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
   Register scratch = ToRegister(instr->temp());
   Register scratch2 = ToRegister(instr->temp2());
   Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
   int instance_size = initial_map->instance_size();
   ASSERT(initial_map->pre_allocated_property_fields() +
          initial_map->unused_property_fields() -
          initial_map->inobject_properties() == 0);
 
-  // Allocate memory for the object. The initial map might change when
-  // the constructor's prototype changes, but instance size and property
-  // counts remain unchanged (if slack tracking finished).
-  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
   __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
               TAG_OBJECT);
 
@@ -5392,8 +5413,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
 
 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
   Register result = ToRegister(instr->result());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
   int instance_size = initial_map->instance_size();
 
   // TODO(3095996): Get rid of this. For now, we need to make the
@@ -5476,7 +5496,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> literals = instr->hydrogen()->literals();
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate_elements_kind();
   AllocationSiteMode allocation_site_mode =
@@ -5531,7 +5551,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
 
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> literals = instr->hydrogen()->literals();
   Handle<FixedArray> constant_properties =
       instr->hydrogen()->constant_properties();
 
@@ -5545,7 +5565,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   __ mov(r0, Operand(Smi::FromInt(flags)));
 
   // Pick the right runtime function or stub to call.
-  int properties_count = constant_properties->length() / 2;
+  int properties_count = instr->hydrogen()->constant_properties_length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     __ Push(r3, r2, r1, r0);
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
@@ -5614,19 +5634,17 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
- Handle shared_info = instr->shared_info(); bool pretenure = instr->hydrogen()->pretenure(); - if (!pretenure && shared_info->num_literals() == 0) { - FastNewClosureStub stub(shared_info->language_mode(), - shared_info->is_generator()); - __ mov(r1, Operand(shared_info)); + if (!pretenure && instr->hydrogen()->has_no_literals()) { + FastNewClosureStub stub(instr->hydrogen()->language_mode(), + instr->hydrogen()->is_generator()); + __ mov(r1, Operand(instr->hydrogen()->shared_info())); __ push(r1); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { - __ mov(r2, Operand(shared_info)); - __ mov(r1, Operand(pretenure - ? factory()->true_value() - : factory()->false_value())); + __ mov(r2, Operand(instr->hydrogen()->shared_info())); + __ mov(r1, Operand(pretenure ? factory()->true_value() + : factory()->false_value())); __ Push(cp, r2, r1); CallRuntime(Runtime::kNewClosure, 3, instr); } diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index c55558cff5..ae175e52d3 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -80,10 +80,20 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + // TODO(svenpanne) Use this consistently. + int LookupDestination(int block_id) const { + return chunk()->LookupDestination(block_id); + } + + bool IsNextEmittedBlock(int block_id) const { + return LookupDestination(block_id) == GetNextEmittedBlock(); + } + bool NeedsEagerFrame() const { return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() || - !info()->IsStub(); + !info()->IsStub() || + info()->requires_frame(); } bool NeedsDeferredFrame() const { return !NeedsEagerFrame() && info()->is_deferred_calling(); @@ -195,12 +205,12 @@ class LCodeGen BASE_EMBEDDED { LPlatformChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk_->graph(); } + HGraph* graph() const { return chunk()->graph(); } Register scratch0() { return r9; } DwVfpRegister double_scratch0() { return kScratchDoubleReg; } - int GetNextEmittedBlock(int block); + int GetNextEmittedBlock() const; LInstruction* GetNextInstruction(); void EmitClassOfTest(Label* if_true, @@ -266,6 +276,7 @@ class LCodeGen BASE_EMBEDDED { // Generate a direct call to a known function. Expects the function // to be in r1. void CallKnownFunction(Handle function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 616d02d867..b7cd3db046 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -74,6 +74,7 @@ void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { ASSERT(RelocInfo::IsCodeTarget(rmode)); // 'code' is always generated ARM code, never THUMB code + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); Jump(reinterpret_cast(code.location()), rmode, cond); } @@ -162,6 +163,7 @@ int MacroAssembler::CallSize(Handle code, RelocInfo::Mode rmode, TypeFeedbackId ast_id, Condition cond) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); return CallSize(reinterpret_cast
<Address>(code.location()), rmode, cond); } @@ -179,6 +181,7 @@ void MacroAssembler::Call(Handle<Code> code, rmode = RelocInfo::CODE_TARGET_WITH_ID; } // 'code' is always generated ARM code, never THUMB code + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); Call(reinterpret_cast<Address>
(code.location()), rmode, cond, mode); } @@ -395,6 +398,7 @@ void MacroAssembler::StoreRoot(Register source, void MacroAssembler::LoadHeapObject(Register result, Handle object) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); if (isolate()->heap()->InNewSpace(*object)) { Handle cell = isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -790,6 +794,116 @@ void MacroAssembler::Vmov(const DwVfpRegister dst, } +void MacroAssembler::ConvertNumberToInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, + Label* not_number) { + Label done; + UntagAndJumpIfSmi(dst, object, &done); + JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); + vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset)); + ECMAToInt32(dst, double_scratch1, + scratch1, scratch2, scratch3, double_scratch2); + + bind(&done); +} + + +void MacroAssembler::LoadNumber(Register object, + DwVfpRegister dst, + Register heap_number_map, + Register scratch, + Label* not_number) { + Label is_smi, done; + + UntagAndJumpIfSmi(scratch, object, &is_smi); + JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); + + vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); + b(&done); + + // Handle loading a double from a smi. + bind(&is_smi); + vmov(dst.high(), scratch); + vcvt_f64_s32(dst, dst.high()); + + bind(&done); +} + + +void MacroAssembler::LoadNumberAsInt32Double(Register object, + DwVfpRegister double_dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch, + Label* not_int32) { + ASSERT(!scratch.is(object)); + ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch)); + + Label done, obj_is_not_smi; + + UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi); + vmov(double_scratch.low(), scratch); + vcvt_f64_s32(double_dst, double_scratch.low()); + b(&done); + + bind(&obj_is_not_smi); + JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32); + + // Load the number. + // Load the double value. + vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); + + TestDoubleIsInt32(double_dst, double_scratch); + // Jump to not_int32 if the operation did not succeed. + b(ne, not_int32); + + bind(&done); +} + + +void MacroAssembler::LoadNumberAsInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, + Label* not_int32) { + ASSERT(!dst.is(object)); + ASSERT(!scratch.is(object)); + + Label done, maybe_undefined; + + UntagAndJumpIfSmi(dst, object, &done); + + JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined); + + // Object is a heap number. + // Convert the floating point value to a 32-bit integer. + // Load the double value. + vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); + + TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); + // Jump to not_int32 if the operation did not succeed. + b(ne, not_int32); + b(&done); + + bind(&maybe_undefined); + CompareRoot(object, Heap::kUndefinedValueRootIndex); + b(ne, not_int32); + // |undefined| is truncated to 0. + mov(dst, Operand(Smi::FromInt(0))); + // Fall through. 
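// (Illustrative note, not from this patch: the mov above encodes
// ECMA-262 ToInt32(undefined) == 0 — ToNumber(undefined) is NaN, and the
// truncating conversion maps NaN to 0; in C++ terms
//   int32_t r = ToInt32(std::numeric_limits<double>::quiet_NaN());  // r == 0
// with ToInt32 as sketched after the lithium-codegen hunks above.)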
+ + bind(&done); +} + + void MacroAssembler::EnterFrame(StackFrame::Type type) { // r0-r3: preserved stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); @@ -837,14 +951,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Optionally save all double registers. if (save_doubles) { - // Check CPU flags for number of registers, setting the Z condition flag. - CheckFor32DRegs(ip); - - // Push registers d0-d15, and possibly d16-d31, on the stack. - // If d16-d31 are not pushed, decrease the stack pointer instead. - vstm(db_w, sp, d16, d31, ne); - sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); - vstm(db_w, sp, d0, d15); + SaveFPRegs(sp, ip); // Note that d0 will be accessible at // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize, // since the sp slot and code slot were pushed after the fp. @@ -905,15 +1012,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, const int offset = 2 * kPointerSize; sub(r3, fp, Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); - - // Check CPU flags for number of registers, setting the Z condition flag. - CheckFor32DRegs(ip); - - // Pop registers d0-d15, and possibly d16-d31, from r3. - // If d16-d31 are not popped, increase r3 instead. - vldm(ia_w, r3, d0, d15); - vldm(ia_w, r3, d16, d31, ne); - add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq); + RestoreFPRegs(r3, ip); } // Clear top frame. @@ -1132,6 +1231,7 @@ void MacroAssembler::InvokeFunction(Register fun, void MacroAssembler::InvokeFunction(Handle function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -1143,7 +1243,6 @@ void MacroAssembler::InvokeFunction(Handle function, LoadHeapObject(r1, function); ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - ParameterCount expected(function->shared()->formal_parameter_count()); // We call indirectly through the code field in the function to // allow recompilation to take effect without changing any of the // call sites. @@ -1945,14 +2044,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, Label* fail, int elements_offset) { Label smi_value, store; - Register mantissa_reg = scratch2; - Register exponent_reg = scratch3; // Handle smi values specially. 
JumpIfSmi(value_reg, &smi_value); @@ -1977,9 +2071,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&smi_value); Register untagged_value = scratch1; SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble( - this, untagged_value, FloatingPointHelper::kVFPRegisters, d0, - mantissa_reg, exponent_reg, scratch4, s2); + vmov(s2, untagged_value); + vcvt_f64_s32(d0, s2); bind(&store); add(scratch1, elements_reg, @@ -2171,8 +2264,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, r0); - CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PrepareCallCFunction(1, r0); + mov(r0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -2185,8 +2279,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, r0); - CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PrepareCallCFunction(1, r0); + mov(r0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -2238,7 +2333,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, str(r5, MemOperand(r7, kLimitOffset)); mov(r4, r0); PrepareCallCFunction(1, r5); - mov(r0, Operand(ExternalReference::isolate_address())); + mov(r0, Operand(ExternalReference::isolate_address(isolate()))); CallCFunction( ExternalReference::delete_handle_scope_extensions(isolate()), 1); mov(r0, r4); @@ -2401,34 +2496,21 @@ void MacroAssembler::TryInt32Floor(Register result, } -void MacroAssembler::ECMAConvertNumberToInt32(Register source, - Register result, - Register input_low, - Register input_high, - Register scratch, - DwVfpRegister double_scratch1, - DwVfpRegister double_scratch2) { - vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); - ECMAToInt32(result, double_scratch1, double_scratch2, - scratch, input_high, input_low); -} - - void MacroAssembler::ECMAToInt32(Register result, DwVfpRegister double_input, - DwVfpRegister double_scratch, Register scratch, - Register input_high, - Register input_low) { - ASSERT(!input_high.is(result)); - ASSERT(!input_low.is(result)); - ASSERT(!input_low.is(input_high)); + Register scratch_high, + Register scratch_low, + DwVfpRegister double_scratch) { + ASSERT(!scratch_high.is(result)); + ASSERT(!scratch_low.is(result)); + ASSERT(!scratch_low.is(scratch_high)); ASSERT(!scratch.is(result) && - !scratch.is(input_high) && - !scratch.is(input_low)); + !scratch.is(scratch_high) && + !scratch.is(scratch_low)); ASSERT(!double_input.is(double_scratch)); - Label out_of_range, negate, done; + Label out_of_range, only_low, negate, done; vcvt_s32_f64(double_scratch.low(), double_input); vmov(result, double_scratch.low()); @@ -2438,8 +2520,8 @@ void MacroAssembler::ECMAToInt32(Register result, cmp(scratch, Operand(0x7ffffffe)); b(lt, &done); - vmov(input_low, input_high, double_input); - Ubfx(scratch, input_high, + vmov(scratch_low, scratch_high, double_input); + Ubfx(scratch, scratch_high, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Load scratch with exponent - 1. 
This is faster than loading // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. @@ -2454,59 +2536,45 @@ void MacroAssembler::ECMAToInt32(Register result, // If we reach this code, 31 <= exponent <= 83. // So, we don't have to handle cases where 0 <= exponent <= 20 for // which we would need to shift right the high part of the mantissa. - ECMAToInt32Tail(result, scratch, input_high, input_low, - &out_of_range, &negate, &done); -} - - -void MacroAssembler::ECMAToInt32Tail(Register result, - Register scratch, - Register input_high, - Register input_low, - Label* out_of_range, - Label* negate, - Label* done) { - Label only_low; - - // On entry, scratch contains exponent - 1. + // Scratch contains exponent - 1. // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). rsb(scratch, scratch, Operand(51), SetCC); b(ls, &only_low); - // 21 <= exponent <= 51, shift input_low and input_high + // 21 <= exponent <= 51, shift scratch_low and scratch_high // to generate the result. - mov(input_low, Operand(input_low, LSR, scratch)); + mov(scratch_low, Operand(scratch_low, LSR, scratch)); // Scratch contains: 52 - exponent. // We needs: exponent - 20. // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. rsb(scratch, scratch, Operand(32)); - Ubfx(result, input_high, + Ubfx(result, scratch_high, 0, HeapNumber::kMantissaBitsInTopWord); - // Set the implicit 1 before the mantissa part in input_high. + // Set the implicit 1 before the mantissa part in scratch_high. orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - orr(result, input_low, Operand(result, LSL, scratch)); - b(negate); + orr(result, scratch_low, Operand(result, LSL, scratch)); + b(&negate); - bind(out_of_range); + bind(&out_of_range); mov(result, Operand::Zero()); - b(done); + b(&done); bind(&only_low); - // 52 <= exponent <= 83, shift only input_low. + // 52 <= exponent <= 83, shift only scratch_low. // On entry, scratch contains: 52 - exponent. rsb(scratch, scratch, Operand::Zero()); - mov(result, Operand(input_low, LSL, scratch)); + mov(result, Operand(scratch_low, LSL, scratch)); - bind(negate); - // If input was positive, input_high ASR 31 equals 0 and - // input_high LSR 31 equals zero. + bind(&negate); + // If input was positive, scratch_high ASR 31 equals 0 and + // scratch_high LSR 31 equals zero. // New result = (result eor 0) + 0 = result. // If the input was negative, we have to negate the result. - // Input_high ASR 31 equals 0xffffffff and input_high LSR 31 equals 1. + // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1. // New result = (result eor 0xffffffff) + 1 = 0 - result. 
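// (Illustrative note, not from this patch: the eor/add pair below is the
// branch-free conditional-negation idiom; in C++ terms
//   int32_t sign = scratch_high >> 31;                           // 0 or -1
//   uint32_t carry = static_cast<uint32_t>(scratch_high) >> 31;  // 0 or 1
//   result = (result ^ sign) + carry;  // identity, or ~result + 1 == -result
// so the negation happens only when the input double was negative.)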
- eor(result, result, Operand(input_high, ASR, 31)); - add(result, result, Operand(input_high, LSR, 31)); + eor(result, result, Operand(scratch_high, ASR, 31)); + add(result, result, Operand(scratch_high, LSR, 31)); - bind(done); + bind(&done); } @@ -2688,16 +2756,6 @@ void MacroAssembler::Assert(Condition cond, const char* msg) { } -void MacroAssembler::AssertRegisterIsRoot(Register reg, - Heap::RootListIndex index) { - if (emit_debug_code()) { - LoadRoot(ip, index); - cmp(reg, ip); - Check(eq, "Register did not match expected root"); - } -} - - void MacroAssembler::AssertFastElements(Register elements) { if (emit_debug_code()) { ASSERT(!elements.is(ip)); @@ -2991,12 +3049,10 @@ void MacroAssembler::AssertName(Register object) { -void MacroAssembler::AssertRootValue(Register src, - Heap::RootListIndex root_value_index, - const char* message) { +void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { if (emit_debug_code()) { - CompareRoot(src, root_value_index); - Check(eq, message); + CompareRoot(reg, index); + Check(eq, "HeapNumberMap register clobbered."); } } @@ -3006,7 +3062,7 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object, Register scratch, Label* on_not_heap_number) { ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); - AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); cmp(scratch, heap_number_map); b(ne, on_not_heap_number); } @@ -3063,7 +3119,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); // Store heap number map in the allocated object. - AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); if (tagging_mode == TAG_RESULT) { str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); } else { @@ -3183,6 +3239,22 @@ void MacroAssembler::CheckFor32DRegs(Register scratch) { } +void MacroAssembler::SaveFPRegs(Register location, Register scratch) { + CheckFor32DRegs(scratch); + vstm(db_w, location, d16, d31, ne); + sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); + vstm(db_w, location, d0, d15); +} + + +void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { + CheckFor32DRegs(scratch); + vldm(ia_w, location, d0, d15); + vldm(ia_w, location, d16, d31, ne); + add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); +} + + void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( Register first, Register second, diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index f9f672bac6..b736c8f3af 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -178,6 +178,7 @@ class MacroAssembler: public Assembler { void LoadHeapObject(Register dst, Handle object); void LoadObject(Register result, Handle object) { + ALLOW_HANDLE_DEREF(isolate(), "heap object check"); if (object->IsHeapObject()) { LoadHeapObject(result, Handle::cast(object)); } else { @@ -495,6 +496,54 @@ class MacroAssembler: public Assembler { const double imm, const Register scratch = no_reg); + // Converts the smi or heap number in object to an int32 using the rules + // for ToInt32 as described in ECMAScript 9.5.: the value is truncated + // and brought into the range -2^31 .. +2^31 - 1. 
+ void ConvertNumberToInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, + Label* not_int32); + + // Loads the number from object into dst register. + // If |object| is neither smi nor heap number, |not_number| is jumped to + // with |object| still intact. + void LoadNumber(Register object, + DwVfpRegister dst, + Register heap_number_map, + Register scratch, + Label* not_number); + + // Loads the number from object into double_dst in the double format. + // Control will jump to not_int32 if the value cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be loaded. + void LoadNumberAsInt32Double(Register object, + DwVfpRegister double_dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch, + Label* not_int32); + + // Loads the number from object into dst as a 32-bit integer. + // Control will jump to not_int32 if the object cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be converted. + void LoadNumberAsInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, + Label* not_int32); + + // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. void EnterExitFrame(bool save_doubles, int stack_space = 0); @@ -573,6 +622,7 @@ class MacroAssembler: public Assembler { CallKind call_kind); void InvokeFunction(Handle function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -831,16 +881,11 @@ class MacroAssembler: public Assembler { // Check to see if maybe_number can be stored as a double in // FastDoubleElements. If it can, store it at the index specified by key in - // the FastDoubleElements array elements. Otherwise jump to fail, in which - // case scratch2, scratch3 and scratch4 are unmodified. + // the FastDoubleElements array elements. Otherwise jump to fail. void StoreNumberToDoubleElements(Register value_reg, Register key_reg, - // All regs below here overwritten. Register elements_reg, Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, Label* fail, int elements_offset = 0); @@ -972,31 +1017,28 @@ class MacroAssembler: public Assembler { Label* done, Label* exact); - // Performs a truncating conversion of a heap floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. - // Exits with 'result' holding the answer. - void ECMAConvertNumberToInt32(Register source, - Register result, - Register input_low, - Register input_high, - Register scratch, - DwVfpRegister double_scratch1, - DwVfpRegister double_scratch2); - // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Double_scratch must be between d0 and d15. // Exits with 'result' holding the answer and all other registers clobbered. void ECMAToInt32(Register result, DwVfpRegister double_input, - DwVfpRegister double_scratch, Register scratch, - Register input_high, - Register input_low); + Register scratch_high, + Register scratch_low, + DwVfpRegister double_scratch); // Check whether d16-d31 are available on the CPU. 
The result is given by the // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. void CheckFor32DRegs(Register scratch); + // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double + // values to location, saving [d0..(d15|d31)]. + void SaveFPRegs(Register location, Register scratch); + + // Does a runtime check for 16/32 FP registers. Either way, pops 32 double + // values to location, restoring [d0..(d15|d31)]. + void RestoreFPRegs(Register location, Register scratch); // --------------------------------------------------------------------------- // Runtime calls @@ -1120,7 +1162,6 @@ class MacroAssembler: public Assembler { // Calls Abort(msg) if the condition cond is not satisfied. // Use --debug_code to enable. void Assert(Condition cond, const char* msg); - void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index); void AssertFastElements(Register elements); // Like Assert(), but always enabled. @@ -1230,11 +1271,9 @@ class MacroAssembler: public Assembler { // Abort execution if argument is not a name, enabled via --debug-code. void AssertName(Register object); - // Abort execution if argument is not the root value with the given index, + // Abort execution if reg is not the root value with the given index, // enabled via --debug-code. - void AssertRootValue(Register src, - Heap::RootListIndex root_value_index, - const char* message); + void AssertIsRoot(Register reg, Heap::RootListIndex index); // --------------------------------------------------------------------------- // HeapNumber utilities @@ -1365,16 +1404,6 @@ class MacroAssembler: public Assembler { // it. See the implementation for register usage. void JumpToHandlerEntry(); - // Helper for ECMAToInt32VFP and ECMAToInt32NoVFP. - // It is expected that 31 <= exponent <= 83, and scratch is exponent - 1. - void ECMAToInt32Tail(Register result, - Register scratch, - Register input_high, - Register input_low, - Label* out_of_range, - Label* negate, - Label* done); - // Compute memory operands for safepoint stack slots. static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 0cb80c0ac2..da7afee3fb 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -380,12 +380,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( // Address of current input position. __ add(r1, current_input_offset(), Operand(end_of_input_address())); // Isolate. 
- __ mov(r3, Operand(ExternalReference::isolate_address())); + __ mov(r3, Operand(ExternalReference::isolate_address(isolate()))); { AllowExternalCallThatCantCauseGC scope(masm_); ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + ExternalReference::re_case_insensitive_compare_uc16(isolate()); __ CallCFunction(function, argument_count); } @@ -682,7 +682,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { Label stack_ok; ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ sub(r0, sp, r0, SetCC); @@ -893,9 +893,9 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ PrepareCallCFunction(num_arguments, r0); __ mov(r0, backtrack_stackpointer()); __ add(r1, frame_pointer(), Operand(kStackHighEnd)); - __ mov(r2, Operand(ExternalReference::isolate_address())); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); ExternalReference grow_stack = - ExternalReference::re_grow_stack(masm_->isolate()); + ExternalReference::re_grow_stack(isolate()); __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. @@ -1111,7 +1111,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) { __ mov(r1, Operand(masm_->CodeObject())); // r0 becomes return address pointer. ExternalReference stack_guard_check = - ExternalReference::re_check_stack_guard_state(masm_->isolate()); + ExternalReference::re_check_stack_guard_state(isolate()); CallCFunctionUsingStub(stack_guard_check, num_arguments); } @@ -1292,7 +1292,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) { void RegExpMacroAssemblerARM::CheckPreemption() { // Check for preemption. ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ cmp(sp, r0); @@ -1302,7 +1302,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() { void RegExpMacroAssemblerARM::CheckStackLimit() { ExternalReference stack_limit = - ExternalReference::address_of_regexp_stack_limit(masm_->isolate()); + ExternalReference::address_of_regexp_stack_limit(isolate()); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ cmp(backtrack_stackpointer(), Operand(r0)); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index c45669ae89..921d8f5474 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -30,6 +30,7 @@ #include "arm/assembler-arm.h" #include "arm/assembler-arm-inl.h" +#include "macro-assembler.h" namespace v8 { namespace internal { @@ -223,6 +224,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { inline void CallCFunctionUsingStub(ExternalReference function, int num_arguments); + Isolate* isolate() const { return masm_->isolate(); } MacroAssembler* masm_; diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index ea79310447..036fd7f877 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -26,7 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
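The include churn in this file and in assembler.cc, ast.cc, bignum-dtoa.cc and cached-powers.cc below swaps the C math header for <cmath>, so every unqualified isnan/isinf/isfinite call becomes the std::-qualified form; <cmath> is only guaranteed to declare those functions in namespace std. A minimal illustration of the new spelling (my example, not from the patch):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  assert(std::isnan(nan));                     // was: isnan(nan) via <math.h>
  assert(!std::isfinite(nan));
  assert(std::isinf(std::pow(10.0, 1000.0)));  // overflow yields +infinity
  return 0;
}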
#include -#include +#include #include #include "v8.h" @@ -331,7 +331,7 @@ void ArmDebugger::Debug() { PrintF("\n"); } } - for (int i = 0; i < kNumVFPDoubleRegisters; i++) { + for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) { dvalue = GetVFPDoubleRegisterValue(i); uint64_t as_words = BitCast(dvalue); PrintF("%3s: %f 0x%08x %08x\n", @@ -1297,7 +1297,7 @@ bool Simulator::OverflowFrom(int32_t alu_out, // Support for VFP comparisons. void Simulator::Compute_FPSCR_Flags(double val1, double val2) { - if (isnan(val1) || isnan(val2)) { + if (std::isnan(val1) || std::isnan(val2)) { n_flag_FPSCR_ = false; z_flag_FPSCR_ = false; c_flag_FPSCR_ = true; @@ -1866,7 +1866,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { double Simulator::canonicalizeNaN(double value) { - return (FPSCR_default_NaN_mode_ && isnan(value)) ? + return (FPSCR_default_NaN_mode_ && std::isnan(value)) ? FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value; } @@ -2947,7 +2947,7 @@ void Simulator::DecodeVCMP(Instruction* instr) { // Raise exceptions for quiet NaNs if necessary. if (instr->Bit(7) == 1) { - if (isnan(dd_value)) { + if (std::isnan(dd_value)) { inv_op_vfp_flag_ = true; } } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index f22acb4709..ddcbd623ba 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -726,7 +726,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, __ push(holder); __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); __ push(scratch); - __ mov(scratch, Operand(ExternalReference::isolate_address())); + __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); __ push(scratch); } @@ -798,7 +798,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, } else { __ Move(r6, call_data); } - __ mov(r7, Operand(ExternalReference::isolate_address())); + __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate()))); // Store JS function, call data and isolate. __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); @@ -954,7 +954,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(optimization.constant_function(), arguments_, + Handle function = optimization.constant_function(); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments_, JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -1165,7 +1167,7 @@ Register StubCompiler::CheckPrototypes(Handle object, } // Log the check depth. - LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1)); + LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { // Check the holder map. @@ -1293,11 +1295,11 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ ldr(scratch3(), FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset)); } else { - __ Move(scratch3(), Handle(callback->data(), - callback->GetIsolate())); + __ Move(scratch3(), Handle(callback->data(), isolate())); } __ Push(reg, scratch3()); - __ mov(scratch3(), Operand(ExternalReference::isolate_address())); + __ mov(scratch3(), + Operand(ExternalReference::isolate_address(isolate()))); __ Push(scratch3(), name()); __ mov(r0, sp); // r0 = Handle @@ -1313,10 +1315,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback( const int kStackUnwindSpace = 5; Address getter_address = v8::ToCData
<Address>
(callback->getter()); ApiFunction fun(getter_address); - ExternalReference ref = - ExternalReference(&fun, - ExternalReference::DIRECT_GETTER_CALL, - masm()->isolate()); + ExternalReference ref = ExternalReference( + &fun, ExternalReference::DIRECT_GETTER_CALL, isolate()); __ CallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -1404,7 +1404,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor( ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), - masm()->isolate()); + isolate()); __ TailCallExternalReference(ref, 6, 1); } } @@ -1620,9 +1620,8 @@ Handle CallStubCompiler::CompileArrayPushCall( __ b(gt, &call_builtin); __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ StoreNumberToDoubleElements( - r4, r0, elements, r5, r2, r3, r9, - &call_builtin, argc * kDoubleSize); + __ StoreNumberToDoubleElements(r4, r0, elements, r5, + &call_builtin, argc * kDoubleSize); // Save new length. __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1715,11 +1714,10 @@ Handle CallStubCompiler::CompileArrayPushCall( __ CheckFastObjectElements(r7, r7, &call_builtin); __ bind(&no_fast_elements_check); - Isolate* isolate = masm()->isolate(); ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); + ExternalReference::new_space_allocation_top_address(isolate()); ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); + ExternalReference::new_space_allocation_limit_address(isolate()); const int kAllocationDelta = 4; // Load top and check if it is the end of elements. @@ -1758,10 +1756,8 @@ Handle CallStubCompiler::CompileArrayPushCall( __ Ret(); } __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush, - masm()->isolate()), - argc + 1, - 1); + __ TailCallExternalReference( + ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1); } // Handle call cache miss. @@ -1845,10 +1841,8 @@ Handle CallStubCompiler::CompileArrayPopCall( __ Ret(); __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop, - masm()->isolate()), - argc + 1, - 1); + __ TailCallExternalReference( + ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1); // Handle call cache miss. __ bind(&miss); @@ -2085,8 +2079,9 @@ Handle CallStubCompiler::CompileStringFromCharCodeCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // r2: function name. @@ -2196,8 +2191,9 @@ Handle CallStubCompiler::CompileMathFloorCall( __ bind(&slow); // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // r2: function name. @@ -2295,8 +2291,9 @@ Handle CallStubCompiler::CompileMathAbsCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. 
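// (Illustrative note, not from this patch's text: the pattern below repeats
// throughout stub-cache-arm.cc — InvokeFunction now takes an explicit
// expected ParameterCount constructed from the callee,
//   ParameterCount expected(function);
//   __ InvokeFunction(function, expected, arguments(), JUMP_FUNCTION,
//                     NullCallWrapper(), CALL_AS_METHOD);
// instead of letting the macro assembler read
// function->shared()->formal_parameter_count() itself, which appears to keep
// the handle dereference at the call site, in line with the
// ALLOW_HANDLE_DEREF annotations added elsewhere in this patch.)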
__ bind(&slow); - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // r2: function name. @@ -2384,8 +2381,7 @@ void CallStubCompiler::CompileHandlerFrontend(Handle object, ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK); switch (check) { case RECEIVER_MAP_CHECK: - __ IncrementCounter(masm()->isolate()->counters()->call_const(), - 1, r0, r3); + __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3); // Check that the maps haven't changed. CheckPrototypes(Handle::cast(object), r1, holder, r0, r3, r4, @@ -2470,8 +2466,9 @@ void CallStubCompiler::CompileHandlerBackend(Handle function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -2574,7 +2571,7 @@ Handle CallStubCompiler::CompileCallGlobal( __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->call_global_inline(), 1, r3, r4); ParameterCount expected(function->shared()->formal_parameter_count()); CallKind call_kind = CallICBase::Contextual::decode(extra_state_) @@ -2617,8 +2614,7 @@ Handle StoreStubCompiler::CompileStoreCallback( // Do tail-call to the runtime system. ExternalReference store_callback_property = - ExternalReference(IC_Utility(IC::kStoreCallbackProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); __ TailCallExternalReference(store_callback_property, 4, 1); // Handle store cache miss. @@ -2653,8 +2649,9 @@ void StoreStubCompiler::GenerateStoreViaSetter( // Call the JavaScript setter with receiver and value on the stack. __ Push(r1, r0); ParameterCount actual(1); - __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(setter); + __ InvokeFunction(setter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2700,8 +2697,7 @@ Handle StoreStubCompiler::CompileStoreInterceptor( // Do tail-call to the runtime system. ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. @@ -2740,7 +2736,7 @@ Handle StoreStubCompiler::CompileStoreGlobal( FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); // Cells are always rescanned, so no write barrier here. - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter( counters->named_store_global_inline(), 1, scratch1(), scratch2()); __ Ret(); @@ -2838,8 +2834,9 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, // Call the JavaScript getter with the receiver on the stack. 
__ push(r0); ParameterCount actual(0); - __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(getter); + __ InvokeFunction(getter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2884,7 +2881,7 @@ Handle LoadStubCompiler::CompileLoadGlobal( HandlerFrontendFooter(&success, &miss); __ bind(&success); - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); __ mov(r0, r4); __ Ret(); @@ -3088,7 +3085,7 @@ Handle ConstructStubCompiler::CompileConstructStub( // Remove caller arguments and receiver from the stack and return. __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2)); __ add(sp, sp, Operand(kPointerSize)); - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->constructed_objects(), 1, r1, r2); __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2); __ Jump(lr); @@ -3096,7 +3093,7 @@ Handle ConstructStubCompiler::CompileConstructStub( // Jump to the generic stub in case the specialized code cannot handle the // construction. __ bind(&generic_stub_call); - Handle code = masm()->isolate()->builtins()->JSConstructStubGeneric(); + Handle code = isolate()->builtins()->JSConstructStubGeneric(); __ Jump(code, RelocInfo::CODE_TARGET); // Return the generated code. @@ -3246,14 +3243,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( StoreIntAsFloat(masm, r3, r4, r5, r7); break; case EXTERNAL_DOUBLE_ELEMENTS: + __ vmov(s2, r5); + __ vcvt_f64_s32(d0, s2); __ add(r3, r3, Operand(key, LSL, 2)); // r3: effective address of the double element - FloatingPointHelper::Destination destination; - destination = FloatingPointHelper::kVFPRegisters; - FloatingPointHelper::ConvertIntToDouble( - masm, r5, destination, - d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. - r4, s2); // These are: scratch2, single_scratch. __ vstr(d0, r3, 0); break; case FAST_ELEMENTS: @@ -3303,7 +3296,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // not include -kHeapObjectTag into it. __ sub(r5, value, Operand(kHeapObjectTag)); __ vldr(d0, r5, HeapNumber::kValueOffset); - __ ECMAToInt32(r5, d0, d1, r6, r7, r9); + __ ECMAToInt32(r5, d0, r6, r7, r9, d1); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: @@ -3537,9 +3530,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r3 : scratch (elements backing store) // -- r4 : scratch // -- r5 : scratch - // -- r6 : scratch - // -- r7 : scratch - // -- r9 : scratch // ----------------------------------- Label miss_force_generic, transition_elements_kind, grow, slow; Label finish_store, check_capacity; @@ -3550,9 +3540,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Register elements_reg = r3; Register scratch1 = r4; Register scratch2 = r5; - Register scratch3 = r6; - Register scratch4 = r7; - Register scratch5 = r9; Register length_reg = r7; // This stub is meant to be tail-jumped to, the receiver must already @@ -3581,15 +3568,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( } __ bind(&finish_store); - __ StoreNumberToDoubleElements(value_reg, - key_reg, - // All registers after this are overwritten. 
- elements_reg, - scratch1, - scratch3, - scratch4, - scratch2, - &transition_elements_kind); + __ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg, + scratch1, &transition_elements_kind); __ Ret(); // Handle store cache miss, replacing the ic with the generic stub. @@ -3636,15 +3616,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); __ mov(scratch1, elements_reg); - __ StoreNumberToDoubleElements(value_reg, - key_reg, - // All registers after this are overwritten. - scratch1, - scratch2, - scratch3, - scratch4, - scratch5, - &transition_elements_kind); + __ StoreNumberToDoubleElements(value_reg, key_reg, scratch1, + scratch2, &transition_elements_kind); __ mov(scratch1, Operand(kHoleNanLower32)); __ mov(scratch2, Operand(kHoleNanUpper32)); diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 5bde8c5383..fff588af35 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -34,7 +34,7 @@ #include "assembler.h" -#include // For cos, log, pow, sin, tan, etc. +#include #include "api.h" #include "builtins.h" #include "counters.h" @@ -969,8 +969,8 @@ ExternalReference::ExternalReference(const Runtime::Function* f, : address_(Redirect(isolate, f->entry)) {} -ExternalReference ExternalReference::isolate_address() { - return ExternalReference(Isolate::Current()); +ExternalReference ExternalReference::isolate_address(Isolate* isolate) { + return ExternalReference(isolate); } @@ -1459,10 +1459,11 @@ double power_helper(double x, double y) { return power_double_int(x, y_int); // Returns 1 if exponent is 0. } if (y == 0.5) { - return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0. + return (std::isinf(x)) ? V8_INFINITY + : fast_sqrt(x + 0.0); // Convert -0 to +0. } if (y == -0.5) { - return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0. + return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0. } return power_double_double(x, y); } @@ -1492,7 +1493,7 @@ double power_double_double(double x, double y) { (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1) // MinGW64 has a custom implementation for pow. This handles certain // special cases that are different. - if ((x == 0.0 || isinf(x)) && isfinite(y)) { + if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) { double f; if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0; } @@ -1505,7 +1506,9 @@ double power_double_double(double x, double y) { // The checks for special cases can be dropped in ia32 because it has already // been done in generated code before bailing out here. - if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value(); + if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) { + return OS::nan_value(); + } return pow(x, y); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 381ae0a801..32424cfb6b 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -50,7 +50,7 @@ class ApiFunction; namespace internal { -struct StatsCounter; +class StatsCounter; // ----------------------------------------------------------------------------- // Platform independent assembler base class. @@ -681,7 +681,7 @@ class ExternalReference BASE_EMBEDDED { explicit ExternalReference(const SCTableReference& table_ref); // Isolate::Current() as an external reference. - static ExternalReference isolate_address(); + static ExternalReference isolate_address(Isolate* isolate); // One-of-a-kind references. 
These references are not part of a general // pattern. This means that they have to be added to the diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index e8b065c4ea..d241355fc1 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -27,7 +27,7 @@ #include "ast.h" -#include // For isfinite. +#include // For isfinite. #include "builtins.h" #include "code-stubs.h" #include "conversions.h" @@ -70,6 +70,11 @@ bool Expression::IsNullLiteral() { } +bool Expression::IsUndefinedLiteral() { + return AsLiteral() != NULL && AsLiteral()->handle()->IsUndefined(); +} + + VariableProxy::VariableProxy(Isolate* isolate, Variable* var) : Expression(isolate), name_(var->name()), @@ -241,8 +246,8 @@ bool IsEqualNumber(void* first, void* second) { if (h2->IsSmi()) return false; Handle n1 = Handle::cast(h1); Handle n2 = Handle::cast(h2); - ASSERT(isfinite(n1->value())); - ASSERT(isfinite(n2->value())); + ASSERT(std::isfinite(n1->value())); + ASSERT(std::isfinite(n2->value())); return n1->value() == n2->value(); } @@ -352,7 +357,8 @@ static bool IsVoidOfLiteral(Expression* expr) { } -// Check for the pattern: void equals +// Check for the pattern: void equals or +// undefined equals static bool MatchLiteralCompareUndefined(Expression* left, Token::Value op, Expression* right, @@ -361,6 +367,10 @@ static bool MatchLiteralCompareUndefined(Expression* left, *expr = right; return true; } + if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) { + *expr = right; + return true; + } return false; } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index b7331388fd..10ae7de458 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -339,6 +339,9 @@ class Expression: public AstNode { // True iff the expression is the null literal. bool IsNullLiteral(); + // True iff the expression is the undefined literal. + bool IsUndefinedLiteral(); + // Type feedback information for assignments and properties. virtual bool IsMonomorphic() { UNREACHABLE(); @@ -939,15 +942,18 @@ class WithStatement: public Statement { public: DECLARE_NODE_TYPE(WithStatement) + Scope* scope() { return scope_; } Expression* expression() const { return expression_; } Statement* statement() const { return statement_; } protected: - WithStatement(Expression* expression, Statement* statement) - : expression_(expression), + WithStatement(Scope* scope, Expression* expression, Statement* statement) + : scope_(scope), + expression_(expression), statement_(statement) { } private: + Scope* scope_; Expression* expression_; Statement* statement_; }; @@ -1964,27 +1970,34 @@ class Yield: public Expression { public: DECLARE_NODE_TYPE(Yield) + enum Kind { + INITIAL, // The initial yield that returns the unboxed generator object. + SUSPEND, // A normal yield: { value: EXPRESSION, done: false } + DELEGATING, // A yield*. 
+ FINAL // A return: { value: EXPRESSION, done: true } + }; + Expression* generator_object() const { return generator_object_; } Expression* expression() const { return expression_; } - bool is_delegating_yield() const { return is_delegating_yield_; } + Kind yield_kind() const { return yield_kind_; } virtual int position() const { return pos_; } protected: Yield(Isolate* isolate, Expression* generator_object, Expression* expression, - bool is_delegating_yield, + Kind yield_kind, int pos) : Expression(isolate), generator_object_(generator_object), expression_(expression), - is_delegating_yield_(is_delegating_yield), + yield_kind_(yield_kind), pos_(pos) { } private: Expression* generator_object_; Expression* expression_; - bool is_delegating_yield_; + Kind yield_kind_; int pos_; }; @@ -2777,9 +2790,11 @@ class AstNodeFactory BASE_EMBEDDED { VISIT_AND_RETURN(ReturnStatement, stmt) } - WithStatement* NewWithStatement(Expression* expression, + WithStatement* NewWithStatement(Scope* scope, + Expression* expression, Statement* statement) { - WithStatement* stmt = new(zone_) WithStatement(expression, statement); + WithStatement* stmt = new(zone_) WithStatement( + scope, expression, statement); VISIT_AND_RETURN(WithStatement, stmt) } @@ -2966,10 +2981,10 @@ class AstNodeFactory BASE_EMBEDDED { Yield* NewYield(Expression *generator_object, Expression* expression, - bool is_delegating_yield, + Yield::Kind yield_kind, int pos) { Yield* yield = new(zone_) Yield( - isolate_, generator_object, expression, is_delegating_yield, pos); + isolate_, generator_object, expression, yield_kind, pos); VISIT_AND_RETURN(Yield, yield) } diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc index a9616909d0..c5ad4420c8 100644 --- a/deps/v8/src/bignum-dtoa.cc +++ b/deps/v8/src/bignum-dtoa.cc @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include +#include #include "../include/v8stdint.h" #include "checks.h" diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 12f0cdac6b..85bf96e4d4 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -43,6 +43,7 @@ #include "extensions/externalize-string-extension.h" #include "extensions/gc-extension.h" #include "extensions/statistics-extension.h" +#include "code-stubs.h" namespace v8 { namespace internal { @@ -862,8 +863,6 @@ bool Genesis::InitializeGlobal(Handle inner_global, InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize, isolate->initial_object_prototype(), Builtins::kArrayCode, true); - array_function->shared()->set_construct_stub( - isolate->builtins()->builtin(Builtins::kArrayConstructCode)); array_function->shared()->DontAdaptArguments(); // This seems a bit hackish, but we need to make sure Array.length @@ -890,6 +889,17 @@ bool Genesis::InitializeGlobal(Handle inner_global, // as the constructor. 'Array' property on a global object can be // overwritten by JS code. 
native_context()->set_array_function(*array_function); + + if (FLAG_optimize_constructed_arrays) { + // Cache the array maps, needed by ArrayConstructorStub + CacheInitialJSArrayMaps(native_context(), initial_map); + ArrayConstructorStub array_constructor_stub(isolate); + Handle code = array_constructor_stub.GetCode(isolate); + array_function->shared()->set_construct_stub(*code); + } else { + array_function->shared()->set_construct_stub( + isolate->builtins()->builtin(Builtins::kCommonArrayConstructCode)); + } } { // --- N u m b e r --- @@ -1303,10 +1313,12 @@ void Genesis::InitializeExperimentalGlobal() { if (FLAG_harmony_typed_arrays) { { // -- A r r a y B u f f e r - InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE, - JSArrayBuffer::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Handle array_buffer_fun = + InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE, + JSArrayBuffer::kSize, + isolate()->initial_object_prototype(), + Builtins::kIllegal, true); + native_context()->set_array_buffer_fun(*array_buffer_fun); } { // -- T y p e d A r r a y s @@ -1533,13 +1545,8 @@ Handle Genesis::InstallInternalArray( factory()->NewJSObject(isolate()->object_function(), TENURED); SetPrototype(array_function, prototype); - // TODO(mvstanton): For performance reasons, this code would have to - // be changed to successfully run with FLAG_optimize_constructed_arrays. - // The next checkin to enable FLAG_optimize_constructed_arrays by - // default will address this. - CHECK(!FLAG_optimize_constructed_arrays); array_function->shared()->set_construct_stub( - isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); + isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode)); array_function->shared()->DontAdaptArguments(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 30edf579e6..571818030b 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -192,9 +192,8 @@ BUILTIN(EmptyFunction) { RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) { CONVERT_ARG_STUB_CALLER_ARGS(caller_args); - // ASSERT(args.length() == 3); - Handle function = args.at(1); - Handle type_info = args.at(2); + ASSERT(args.length() == 2); + Handle type_info = args.at(1); JSArray* array = NULL; bool holey = false; @@ -226,8 +225,7 @@ RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) { } } - ASSERT(function->has_initial_map()); - ElementsKind kind = function->initial_map()->elements_kind(); + ElementsKind kind = GetInitialFastElementsKind(); if (holey) { kind = GetHoleyElementsKind(kind); } @@ -934,7 +932,7 @@ BUILTIN(ArraySplice) { if (start < kMinInt || start > kMaxInt) { return CallJsBuiltin(isolate, "ArraySplice", args); } - relative_start = static_cast(start); + relative_start = std::isnan(start) ? 0 : static_cast(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySplice", args); } @@ -1321,7 +1319,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper( v8::Handle value; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState state(isolate); ExternalCallbackScope call_scope(isolate, v8::ToCData
<Address>(callback_obj)); value = callback(new_args); @@ -1398,7 +1396,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor( v8::Handle<Value> value; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, v8::ToCData<Address>
(callback_obj)); value = callback(new_args); diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 12ed56af79..ab7722832f 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -199,7 +199,7 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(ArrayCode, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ - V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \ + V(CommonArrayConstructCode, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ \ V(StringConstructCode, BUILTIN, UNINITIALIZED, \ @@ -388,7 +388,7 @@ class Builtins { static void Generate_InternalArrayCode(MacroAssembler* masm); static void Generate_ArrayCode(MacroAssembler* masm); - static void Generate_ArrayConstructCode(MacroAssembler* masm); + static void Generate_CommonArrayConstructCode(MacroAssembler* masm); static void Generate_StringConstructCode(MacroAssembler* masm); static void Generate_OnStackReplacement(MacroAssembler* masm); diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc index 9241d26582..fbfaf26159 100644 --- a/deps/v8/src/cached-powers.cc +++ b/deps/v8/src/cached-powers.cc @@ -26,8 +26,8 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include -#include #include +#include #include "../include/v8stdint.h" #include "globals.h" diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc index 60ddf9b462..b6720795ba 100644 --- a/deps/v8/src/code-stubs-hydrogen.cc +++ b/deps/v8/src/code-stubs-hydrogen.cc @@ -61,11 +61,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder { arguments_length_(NULL), info_(stub, isolate), context_(NULL) { - int major_key = stub->MajorKey(); - descriptor_ = isolate->code_stub_interface_descriptor(major_key); - if (descriptor_->register_param_count_ < 0) { - stub->InitializeInterfaceDescriptor(isolate, descriptor_); - } + descriptor_ = stub->GetInterfaceDescriptor(isolate); parameters_.Reset(new HParameter*[descriptor_->register_param_count_]); } virtual bool BuildGraph(); @@ -96,6 +92,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder { bool CodeStubGraphBuilderBase::BuildGraph() { + // Update the static counter each time a new code stub is generated. 
+ isolate()->counters()->code_stubs()->Increment(); + if (FLAG_trace_hydrogen) { const char* name = CodeStub::MajorName(stub()->MajorKey(), false); PrintF("-----------------------------------------------------------\n"); @@ -130,9 +129,10 @@ bool CodeStubGraphBuilderBase::BuildGraph() { stack_parameter_count = new(zone) HParameter(param_count, HParameter::REGISTER_PARAMETER, Representation::Integer32()); + stack_parameter_count->set_type(HType::Smi()); // it's essential to bind this value to the environment in case of deopt - start_environment->Bind(param_count, stack_parameter_count); AddInstruction(stack_parameter_count); + start_environment->Bind(param_count, stack_parameter_count); arguments_length_ = stack_parameter_count; } else { ASSERT(descriptor_->environment_length() == param_count); @@ -154,17 +154,26 @@ bool CodeStubGraphBuilderBase::BuildGraph() { // arguments above HInstruction* stack_pop_count = stack_parameter_count; if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) { - HInstruction* amount = graph()->GetConstant1(); - stack_pop_count = AddInstruction( - HAdd::New(zone, context_, stack_parameter_count, amount)); - stack_pop_count->ChangeRepresentation(Representation::Integer32()); - stack_pop_count->ClearFlag(HValue::kCanOverflow); + if (!stack_parameter_count->IsConstant() && + descriptor_->hint_stack_parameter_count_ < 0) { + HInstruction* amount = graph()->GetConstant1(); + stack_pop_count = AddInstruction( + HAdd::New(zone, context_, stack_parameter_count, amount)); + stack_pop_count->ChangeRepresentation(Representation::Integer32()); + stack_pop_count->ClearFlag(HValue::kCanOverflow); + } else { + int count = descriptor_->hint_stack_parameter_count_; + stack_pop_count = AddInstruction(new(zone) + HConstant(count, Representation::Integer32())); + } } - HReturn* hreturn_instruction = new(zone) HReturn(return_value, - context_, - stack_pop_count); - current_block()->Finish(hreturn_instruction); + if (!current_block()->IsFinished()) { + HReturn* hreturn_instruction = new(zone) HReturn(return_value, + context_, + stack_pop_count); + current_block()->Finish(hreturn_instruction); + } return true; } @@ -176,16 +185,88 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase { : CodeStubGraphBuilderBase(Isolate::Current(), stub) {} protected: - virtual HValue* BuildCodeStub(); + virtual HValue* BuildCodeStub() { + if (casted_stub()->IsMiss()) { + return BuildCodeInitializedStub(); + } else { + return BuildCodeUninitializedStub(); + } + } + + virtual HValue* BuildCodeInitializedStub() { + UNIMPLEMENTED(); + return NULL; + } + + virtual HValue* BuildCodeUninitializedStub() { + // Force a deopt that falls back to the runtime. + HValue* undefined = graph()->GetConstantUndefined(); + IfBuilder builder(this); + builder.IfNot(undefined, undefined); + builder.Then(); + builder.ElseDeopt(); + return undefined; + } + Stub* casted_stub() { return static_cast(stub()); } }; +Handle HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) { + Factory* factory = isolate->factory(); + + // Generate the new code. + MacroAssembler masm(isolate, NULL, 256); + + { + // Update the static counter each time a new code stub is generated. + isolate->counters()->code_stubs()->Increment(); + + // Nested stubs are not allowed for leaves. + AllowStubCallsScope allow_scope(&masm, false); + + // Generate the code for the stub. + masm.set_generating_stub(true); + NoCurrentFrameScope scope(&masm); + GenerateLightweightMiss(&masm); + } + + // Create the code object. 
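+ // (masm.GetCode() below fills |desc| with the address and size of the + // assembled buffer; factory->NewCode() then copies that buffer into a + // heap-allocated, executable Code object.)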
+ CodeDesc desc; + masm.GetCode(&desc); + + // Copy the generated code into a heap object. + Code::Flags flags = Code::ComputeFlags( + GetCodeKind(), + GetICState(), + GetExtraICState(), + GetStubType(), -1); + Handle new_object = factory->NewCode( + desc, flags, masm.CodeObject(), NeedsImmovableCode()); + return new_object; +} + + template static Handle DoGenerateCode(Stub* stub) { - CodeStubGraphBuilder builder(stub); - LChunk* chunk = OptimizeGraph(builder.CreateGraph()); - return chunk->Codegen(); + Isolate* isolate = Isolate::Current(); + CodeStub::Major major_key = + static_cast(stub)->MajorKey(); + CodeStubInterfaceDescriptor* descriptor = + isolate->code_stub_interface_descriptor(major_key); + if (descriptor->register_param_count_ < 0) { + stub->InitializeInterfaceDescriptor(isolate, descriptor); + } + // The miss case without stack parameters can use a light-weight stub to enter + // the runtime that is significantly faster than using the standard + // stub-failure deopt mechanism. + if (stub->IsMiss() && descriptor->stack_parameter_count_ == NULL) { + return stub->GenerateLightweightMissCode(isolate); + } else { + CodeStubGraphBuilder builder(stub); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(); + } } @@ -193,6 +274,7 @@ template <> HValue* CodeStubGraphBuilder::BuildCodeStub() { Zone* zone = this->zone(); Factory* factory = isolate()->factory(); + HValue* undefined = graph()->GetConstantUndefined(); AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode(); FastCloneShallowArrayStub::Mode mode = casted_stub()->mode(); int length = casted_stub()->length(); @@ -203,8 +285,9 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { NULL, FAST_ELEMENTS)); - CheckBuilder builder(this); - builder.CheckNotUndefined(boilerplate); + IfBuilder checker(this); + checker.IfNot(boilerplate, undefined); + checker.Then(); if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) { HValue* elements = @@ -243,14 +326,14 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { length)); } - return environment()->Pop(); + HValue* result = environment()->Pop(); + checker.ElseDeopt(); + return result; } Handle FastCloneShallowArrayStub::GenerateCode() { - CodeStubGraphBuilder builder(this); - LChunk* chunk = OptimizeGraph(builder.CreateGraph()); - return chunk->Codegen(); + return DoGenerateCode(this); } @@ -258,6 +341,7 @@ template <> HValue* CodeStubGraphBuilder::BuildCodeStub() { Zone* zone = this->zone(); Factory* factory = isolate()->factory(); + HValue* undefined = graph()->GetConstantUndefined(); HInstruction* boilerplate = AddInstruction(new(zone) HLoadKeyed(GetParameter(0), @@ -265,8 +349,9 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { NULL, FAST_ELEMENTS)); - CheckBuilder builder(this); - builder.CheckNotUndefined(boilerplate); + IfBuilder checker(this); + checker.IfNot(boilerplate, undefined); + checker.And(); int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize; HValue* boilerplate_size = @@ -274,7 +359,8 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { HValue* size_in_words = AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2, Representation::Integer32())); - builder.CheckIntegerEq(boilerplate_size, size_in_words); + checker.IfCompare(boilerplate_size, size_in_words, Token::EQ); + checker.Then(); HValue* size_in_bytes = AddInstruction(new(zone) HConstant(size, Representation::Integer32())); @@ -298,7 +384,7 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { true, i)); } - builder.End(); + checker.ElseDeopt(); 
return object; } @@ -401,10 +487,18 @@ Handle TransitionElementsKindStub::GenerateCode() { template <> HValue* CodeStubGraphBuilder::BuildCodeStub() { - HInstruction* deopt = new(zone()) HSoftDeoptimize(); - AddInstruction(deopt); - current_block()->MarkAsDeoptimizing(); - return GetParameter(0); + // ----------- S t a t e ------------- + // -- Parameter 1 : type info cell + // -- Parameter 0 : constructor + // ----------------------------------- + // Get the right map + // Should be a constant + JSArrayBuilder array_builder( + this, + casted_stub()->elements_kind(), + GetParameter(ArrayConstructorStubBase::kPropertyCell), + casted_stub()->mode()); + return array_builder.AllocateEmptyArray(); } @@ -416,10 +510,49 @@ Handle ArrayNoArgumentConstructorStub::GenerateCode() { template <> HValue* CodeStubGraphBuilder:: BuildCodeStub() { - HInstruction* deopt = new(zone()) HSoftDeoptimize(); - AddInstruction(deopt); - current_block()->MarkAsDeoptimizing(); - return GetParameter(0); + // Smi check and range check on the input arg. + HValue* constant_one = graph()->GetConstant1(); + HValue* constant_zero = graph()->GetConstant0(); + + HInstruction* elements = AddInstruction( + new(zone()) HArgumentsElements(false)); + HInstruction* argument = AddInstruction( + new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero)); + + HConstant* max_alloc_length = + new(zone()) HConstant(JSObject::kInitialMaxFastElementArray, + Representation::Tagged()); + AddInstruction(max_alloc_length); + const int initial_capacity = JSArray::kPreallocatedArrayElements; + HConstant* initial_capacity_node = + new(zone()) HConstant(initial_capacity, Representation::Tagged()); + AddInstruction(initial_capacity_node); + + // Since we're forcing Integer32 representation for this HBoundsCheck, + // there's no need to Smi-check the index. + HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length, + ALLOW_SMI_KEY, + Representation::Tagged()); + IfBuilder if_builder(this); + if_builder.IfCompare(checked_arg, constant_zero, Token::EQ); + if_builder.Then(); + Push(initial_capacity_node); // capacity + Push(constant_zero); // length + if_builder.Else(); + Push(checked_arg); // capacity + Push(checked_arg); // length + if_builder.End(); + + // Figure out total size + HValue* length = Pop(); + HValue* capacity = Pop(); + + JSArrayBuilder array_builder( + this, + casted_stub()->elements_kind(), + GetParameter(ArrayConstructorStubBase::kPropertyCell), + casted_stub()->mode()); + return array_builder.AllocateArray(capacity, length, true); } @@ -430,10 +563,46 @@ Handle ArraySingleArgumentConstructorStub::GenerateCode() { template <> HValue* CodeStubGraphBuilder::BuildCodeStub() { - HInstruction* deopt = new(zone()) HSoftDeoptimize(); - AddInstruction(deopt); - current_block()->MarkAsDeoptimizing(); - return GetParameter(0); + ElementsKind kind = casted_stub()->elements_kind(); + HValue* length = GetArgumentsLength(); + + JSArrayBuilder array_builder( + this, + kind, + GetParameter(ArrayConstructorStubBase::kPropertyCell), + casted_stub()->mode()); + + // We need to fill with the hole if it's a smi array in the multi-argument + // case because we might have to bail out while copying arguments into + // the array because they aren't compatible with a smi array. + // If it's a double array, no problem, and if it's fast then no + // problem either because doubles are boxed. 
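+ // (Illustrative case: Array(1, 2, foo) with a non-smi |foo| begins + // copying into a FAST_SMI_ELEMENTS store; the HCheckSmi below deopts on + // |foo|, and the pre-filled holes keep the partially copied array in a + // consistent state for the runtime fallback.)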
+ bool fill_with_hole = IsFastSmiElementsKind(kind); + HValue* new_object = array_builder.AllocateArray(length, + length, + fill_with_hole); + HValue* elements = array_builder.GetElementsLocation(); + ASSERT(elements != NULL); + + // Now populate the elements correctly. + LoopBuilder builder(this, + context(), + LoopBuilder::kPostIncrement); + HValue* start = graph()->GetConstant0(); + HValue* key = builder.BeginBody(start, length, Token::LT); + HInstruction* argument_elements = AddInstruction( + new(zone()) HArgumentsElements(false)); + HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt( + argument_elements, length, key)); + + // Checks to prevent incompatible stores + if (IsFastSmiElementsKind(kind)) { + AddInstruction(new(zone()) HCheckSmi(argument)); + } + + AddInstruction(new(zone()) HStoreKeyed(elements, key, argument, kind)); + builder.EndBody(); + return new_object; } @@ -441,4 +610,30 @@ Handle ArrayNArgumentsConstructorStub::GenerateCode() { return DoGenerateCode(this); } + +template <> +HValue* CodeStubGraphBuilder::BuildCodeUninitializedStub() { + CompareNilICStub* stub = casted_stub(); + HIfContinuation continuation; + Handle sentinel_map(graph()->isolate()->heap()->meta_map()); + BuildCompareNil(GetParameter(0), stub->GetKind(), + stub->GetTypes(), sentinel_map, + RelocInfo::kNoPosition, &continuation); + IfBuilder if_nil(this, &continuation); + if_nil.Then(); + if (continuation.IsFalseReachable()) { + if_nil.Else(); + if_nil.Return(graph()->GetConstantSmi0()); + } + if_nil.End(); + return continuation.IsTrueReachable() + ? graph()->GetConstantSmi1() + : graph()->GetConstantUndefined(); +} + + +Handle CompareNilICStub::GenerateCode() { + return DoGenerateCode(this); +} + } } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 497dde54e7..df9855d09c 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -37,6 +37,17 @@ namespace v8 { namespace internal { + +CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor() + : register_param_count_(-1), + stack_parameter_count_(NULL), + hint_stack_parameter_count_(-1), + function_mode_(NOT_JS_FUNCTION_STUB_MODE), + register_params_(NULL), + deoptimization_handler_(NULL), + miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()) { } + + bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) { UnseededNumberDictionary* stubs = isolate->heap()->code_stubs(); int index = stubs->FindEntry(GetKey()); @@ -397,6 +408,42 @@ void ICCompareStub::Generate(MacroAssembler* masm) { } +CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags( + Code::ExtraICState extra_ic_state, + Handle object, + bool* already_monomorphic) { + Types types = TypesField::decode(extra_ic_state); + NilValue nil = NilValueField::decode(extra_ic_state); + EqualityKind kind = EqualityKindField::decode(extra_ic_state); + ASSERT(types != CompareNilICStub::kFullCompare); + *already_monomorphic = + (types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0; + if (kind == kStrictEquality) { + if (nil == kNullValue) { + return CompareNilICStub::kCompareAgainstNull; + } else { + return CompareNilICStub::kCompareAgainstUndefined; + } + } else { + if (object->IsNull()) { + types = static_cast( + types | CompareNilICStub::kCompareAgainstNull); + } else if (object->IsUndefined()) { + types = static_cast( + types | CompareNilICStub::kCompareAgainstUndefined); + } else if (object->IsUndetectableObject() || !object->IsHeapObject()) { + types = CompareNilICStub::kFullCompare; + 
} else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) { + types = CompareNilICStub::kFullCompare; + } else { + types = static_cast( + types | CompareNilICStub::kCompareAgainstMonomorphicMap); + } + } + return types; +} + + void InstanceofStub::PrintName(StringStream* stream) { const char* args = ""; if (HasArgsInRegisters()) { @@ -557,7 +604,7 @@ bool ToBooleanStub::Types::Record(Handle object) { ASSERT(!object->IsUndetectableObject()); Add(HEAP_NUMBER); double value = HeapNumber::cast(*object)->value(); - return value != 0 && !isnan(value); + return value != 0 && !std::isnan(value); } else { // We should never see an internal object at runtime here! UNREACHABLE(); @@ -647,4 +694,45 @@ bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) { } +static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) { + int major_key = stub->MajorKey(); + CodeStubInterfaceDescriptor* descriptor = + isolate->code_stub_interface_descriptor(major_key); + if (!descriptor->initialized()) { + stub->InitializeInterfaceDescriptor(isolate, descriptor); + } +} + + +void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) { + ArrayNoArgumentConstructorStub stub1(GetInitialFastElementsKind()); + InstallDescriptor(isolate, &stub1); + ArraySingleArgumentConstructorStub stub2(GetInitialFastElementsKind()); + InstallDescriptor(isolate, &stub2); + ArrayNArgumentsConstructorStub stub3(GetInitialFastElementsKind()); + InstallDescriptor(isolate, &stub3); +} + + +ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate) + : argument_count_(ANY) { + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); +} + + +ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate, + int argument_count) { + if (argument_count == 0) { + argument_count_ = NONE; + } else if (argument_count == 1) { + argument_count_ = ONE; + } else if (argument_count >= 2) { + argument_count_ = MORE_THAN_ONE; + } else { + UNREACHABLE(); + } + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 56b595583d..ea895d669f 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -29,6 +29,7 @@ #define V8_CODE_STUBS_H_ #include "allocation.h" +#include "assembler.h" #include "globals.h" #include "codegen.h" @@ -46,6 +47,7 @@ namespace internal { V(StringCompare) \ V(Compare) \ V(CompareIC) \ + V(CompareNilIC) \ V(MathPow) \ V(StringLength) \ V(FunctionPrototype) \ @@ -82,6 +84,7 @@ namespace internal { V(TransitionElementsKind) \ V(StoreArrayLiteralElement) \ V(StubFailureTrampoline) \ + V(ArrayConstructor) \ V(ProfileEntryHook) \ /* IC Handler stubs */ \ V(LoadField) @@ -260,17 +263,18 @@ class PlatformCodeStub : public CodeStub { enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE }; + struct CodeStubInterfaceDescriptor { - CodeStubInterfaceDescriptor() - : register_param_count_(-1), - stack_parameter_count_(NULL), - function_mode_(NOT_JS_FUNCTION_STUB_MODE), - register_params_(NULL) { } + CodeStubInterfaceDescriptor(); int register_param_count_; const Register* stack_parameter_count_; + // if hint_stack_parameter_count_ > 0, the code stub can optimize the + // return sequence. Default value is -1, which means it is ignored. 
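+ // (See CodeStubGraphBuilderBase::BuildGraph() earlier in this patch: when + // the hint is set, the stub pops a compile-time-constant number of stack + // parameters instead of computing |stack_parameter_count| + 1 dynamically.)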
+ int hint_stack_parameter_count_; StubFunctionMode function_mode_; Register* register_params_; Address deoptimization_handler_; + ExternalReference miss_handler_; int environment_length() const { if (stack_parameter_count_ != NULL) { @@ -278,13 +282,28 @@ struct CodeStubInterfaceDescriptor { } return register_param_count_; } + + bool initialized() const { return register_param_count_ >= 0; } }; +// A helper to make up for the fact that type Register is not fully +// defined outside of the platform directories +#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \ + ((index) == (descriptor)->register_param_count_) \ + ? *((descriptor)->stack_parameter_count_) \ + : (descriptor)->register_params_[(index)] + class HydrogenCodeStub : public CodeStub { public: - // Retrieve the code for the stub. Generate the code if needed. - virtual Handle GenerateCode() = 0; + enum InitializationState { + CODE_STUB_IS_NOT_MISS, + CODE_STUB_IS_MISS + }; + + explicit HydrogenCodeStub(InitializationState state) { + is_miss_ = (state == CODE_STUB_IS_MISS); + } virtual Code::Kind GetCodeKind() const { return Code::STUB; } @@ -292,9 +311,36 @@ class HydrogenCodeStub : public CodeStub { return isolate->code_stub_interface_descriptor(MajorKey()); } + bool IsMiss() { return is_miss_; } + + template + static Handle GetUninitialized(Isolate* isolate) { + SubClass::GenerateAheadOfTime(isolate); + return SubClass().GetCode(isolate); + } + virtual void InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) = 0; + + // Retrieve the code for the stub. Generate the code if needed. + virtual Handle GenerateCode() = 0; + + virtual int NotMissMinorKey() = 0; + + Handle GenerateLightweightMissCode(Isolate* isolate); + + private: + class MinorKeyBits: public BitField {}; + class IsMissBits: public BitField {}; + + void GenerateLightweightMiss(MacroAssembler* masm); + virtual int MinorKey() { + return IsMissBits::encode(is_miss_) | + MinorKeyBits::encode(NotMissMinorKey()); + } + + bool is_miss_; }; @@ -467,7 +513,8 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub { FastCloneShallowArrayStub(Mode mode, AllocationSiteMode allocation_site_mode, int length) - : mode_(mode), + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), + mode_(mode), allocation_site_mode_(allocation_site_mode), length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) { ASSERT_GE(length_, 0); @@ -513,7 +560,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub { STATIC_ASSERT(kFastCloneModeCount < 16); STATIC_ASSERT(kMaximumClonedLength < 16); Major MajorKey() { return FastCloneShallowArray; } - int MinorKey() { + int NotMissMinorKey() { return AllocationSiteModeBits::encode(allocation_site_mode_) | ModeBits::encode(mode_) | LengthBits::encode(length_); @@ -526,7 +573,9 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub { // Maximum number of properties in copied object. 
static const int kMaximumClonedProperties = 6; - explicit FastCloneShallowObjectStub(int length) : length_(length) { + explicit FastCloneShallowObjectStub(int length) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), + length_(length) { ASSERT_GE(length_, 0); ASSERT_LE(length_, kMaximumClonedProperties); } @@ -543,7 +592,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub { int length_; Major MajorKey() { return FastCloneShallowObject; } - int MinorKey() { return length_; } + int NotMissMinorKey() { return length_; } DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub); }; @@ -587,6 +636,22 @@ class InstanceofStub: public PlatformCodeStub { }; +class ArrayConstructorStub: public PlatformCodeStub { + public: + enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE }; + ArrayConstructorStub(Isolate* isolate, int argument_count); + explicit ArrayConstructorStub(Isolate* isolate); + + void Generate(MacroAssembler* masm); + + private: + virtual CodeStub::Major MajorKey() { return ArrayConstructor; } + virtual int MinorKey() { return argument_count_; } + + ArgumentCountKey argument_count_; +}; + + class MathPowStub: public PlatformCodeStub { public: enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK}; @@ -911,6 +976,102 @@ class ICCompareStub: public PlatformCodeStub { }; +class CompareNilICStub : public HydrogenCodeStub { + public: + enum Types { + kCompareAgainstNull = 1 << 0, + kCompareAgainstUndefined = 1 << 1, + kCompareAgainstMonomorphicMap = 1 << 2, + kCompareAgainstUndetectable = 1 << 3, + kFullCompare = kCompareAgainstNull | kCompareAgainstUndefined | + kCompareAgainstUndetectable + }; + + CompareNilICStub(EqualityKind kind, NilValue nil, Types types) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), bit_field_(0) { + bit_field_ = EqualityKindField::encode(kind) | + NilValueField::encode(nil) | + TypesField::encode(types); + } + + virtual InlineCacheState GetICState() { + Types types = GetTypes(); + if (types == kFullCompare) { + return MEGAMORPHIC; + } else if ((types & kCompareAgainstMonomorphicMap) != 0) { + return MONOMORPHIC; + } else { + return PREMONOMORPHIC; + } + } + + virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; } + + Handle GenerateCode(); + + static Handle GetUninitialized(Isolate* isolate, + EqualityKind kind, + NilValue nil) { + return CompareNilICStub(kind, nil).GetCode(isolate); + } + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); + + static void InitializeForIsolate(Isolate* isolate) { + CompareNilICStub compare_stub(kStrictEquality, kNullValue); + compare_stub.InitializeInterfaceDescriptor( + isolate, + isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC)); + } + + virtual Code::ExtraICState GetExtraICState() { + return bit_field_; + } + + EqualityKind GetKind() { return EqualityKindField::decode(bit_field_); } + NilValue GetNilValue() { return NilValueField::decode(bit_field_); } + Types GetTypes() { return TypesField::decode(bit_field_); } + + static Types TypesFromExtraICState( + Code::ExtraICState state) { + return TypesField::decode(state); + } + static EqualityKind EqualityKindFromExtraICState( + Code::ExtraICState state) { + return EqualityKindField::decode(state); + } + static NilValue NilValueFromExtraICState(Code::ExtraICState state) { + return NilValueField::decode(state); + } + + static Types GetPatchedICFlags(Code::ExtraICState extra_ic_state, + Handle object, + bool* already_monomorphic); + + private: + friend class CompareNilIC; + + class 
EqualityKindField : public BitField {}; + class NilValueField : public BitField {}; + class TypesField : public BitField {}; + + CompareNilICStub(EqualityKind kind, NilValue nil) + : HydrogenCodeStub(CODE_STUB_IS_MISS), bit_field_(0) { + bit_field_ = EqualityKindField::encode(kind) | + NilValueField::encode(nil); + } + + virtual CodeStub::Major MajorKey() { return CompareNilIC; } + virtual int NotMissMinorKey() { return bit_field_; } + + int bit_field_; + + DISALLOW_COPY_AND_ASSIGN(CompareNilICStub); +}; + + class CEntryStub : public PlatformCodeStub { public: explicit CEntryStub(int result_size, @@ -1291,19 +1452,20 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub { public: KeyedLoadDictionaryElementStub() {} - Major MajorKey() { return KeyedLoadElement; } - int MinorKey() { return DICTIONARY_ELEMENTS; } - void Generate(MacroAssembler* masm); private: + Major MajorKey() { return KeyedLoadElement; } + int MinorKey() { return DICTIONARY_ELEMENTS; } + DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub); }; class KeyedLoadFastElementStub : public HydrogenCodeStub { public: - KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) { + KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { bit_field_ = ElementsKindBits::encode(elements_kind) | IsJSArrayBits::encode(is_js_array); } @@ -1323,12 +1485,12 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: - class IsJSArrayBits: public BitField {}; class ElementsKindBits: public BitField {}; + class IsJSArrayBits: public BitField {}; uint32_t bit_field_; Major MajorKey() { return KeyedLoadElement; } - int MinorKey() { return bit_field_; } + int NotMissMinorKey() { return bit_field_; } DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub); }; @@ -1338,15 +1500,13 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub { public: KeyedStoreFastElementStub(bool is_js_array, ElementsKind elements_kind, - KeyedAccessStoreMode mode) { + KeyedAccessStoreMode mode) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { bit_field_ = ElementsKindBits::encode(elements_kind) | IsJSArrayBits::encode(is_js_array) | StoreModeBits::encode(mode); } - Major MajorKey() { return KeyedStoreElement; } - int MinorKey() { return bit_field_; } - bool is_js_array() const { return IsJSArrayBits::decode(bit_field_); } @@ -1371,6 +1531,9 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub { class IsJSArrayBits: public BitField {}; uint32_t bit_field_; + Major MajorKey() { return KeyedStoreElement; } + int NotMissMinorKey() { return bit_field_; } + DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub); }; @@ -1378,7 +1541,8 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub { class TransitionElementsKindStub : public HydrogenCodeStub { public: TransitionElementsKindStub(ElementsKind from_kind, - ElementsKind to_kind) { + ElementsKind to_kind) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { bit_field_ = FromKindBits::encode(from_kind) | ToKindBits::encode(to_kind); } @@ -1403,19 +1567,55 @@ class TransitionElementsKindStub : public HydrogenCodeStub { uint32_t bit_field_; Major MajorKey() { return TransitionElementsKind; } - int MinorKey() { return bit_field_; } + int NotMissMinorKey() { return bit_field_; } DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub); }; -class ArrayNoArgumentConstructorStub : public HydrogenCodeStub { +class ArrayConstructorStubBase : public HydrogenCodeStub { public: - 
ArrayNoArgumentConstructorStub() { + ArrayConstructorStubBase(ElementsKind kind, AllocationSiteMode mode) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { + bit_field_ = ElementsKindBits::encode(kind) | + AllocationSiteModeBits::encode(mode == TRACK_ALLOCATION_SITE); } - Major MajorKey() { return ArrayNoArgumentConstructor; } - int MinorKey() { return 0; } + ElementsKind elements_kind() const { + return ElementsKindBits::decode(bit_field_); + } + + AllocationSiteMode mode() const { + return AllocationSiteModeBits::decode(bit_field_) + ? TRACK_ALLOCATION_SITE + : DONT_TRACK_ALLOCATION_SITE; + } + + virtual bool IsPregenerated() { return true; } + static void GenerateStubsAheadOfTime(Isolate* isolate); + static void InstallDescriptors(Isolate* isolate); + + // Parameters accessed via CodeStubGraphBuilder::GetParameter() + static const int kPropertyCell = 0; + + private: + int NotMissMinorKey() { return bit_field_; } + + class ElementsKindBits: public BitField {}; + class AllocationSiteModeBits: public BitField {}; + uint32_t bit_field_; + + DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase); +}; + + +class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase { + public: + ArrayNoArgumentConstructorStub( + ElementsKind kind, + AllocationSiteMode mode = TRACK_ALLOCATION_SITE) + : ArrayConstructorStubBase(kind, mode) { + } virtual Handle GenerateCode(); @@ -1424,18 +1624,20 @@ class ArrayNoArgumentConstructorStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: + Major MajorKey() { return ArrayNoArgumentConstructor; } + DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub); }; -class ArraySingleArgumentConstructorStub : public HydrogenCodeStub { +class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase { public: - ArraySingleArgumentConstructorStub() { + ArraySingleArgumentConstructorStub( + ElementsKind kind, + AllocationSiteMode mode = TRACK_ALLOCATION_SITE) + : ArrayConstructorStubBase(kind, mode) { } - Major MajorKey() { return ArraySingleArgumentConstructor; } - int MinorKey() { return 0; } - virtual Handle GenerateCode(); virtual void InitializeInterfaceDescriptor( @@ -1443,18 +1645,20 @@ class ArraySingleArgumentConstructorStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: + Major MajorKey() { return ArraySingleArgumentConstructor; } + DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub); }; -class ArrayNArgumentsConstructorStub : public HydrogenCodeStub { +class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase { public: - ArrayNArgumentsConstructorStub() { + ArrayNArgumentsConstructorStub( + ElementsKind kind, + AllocationSiteMode mode = TRACK_ALLOCATION_SITE) : + ArrayConstructorStubBase(kind, mode) { } - Major MajorKey() { return ArrayNArgumentsConstructor; } - int MinorKey() { return 0; } - virtual Handle GenerateCode(); virtual void InitializeInterfaceDescriptor( @@ -1462,6 +1666,8 @@ class ArrayNArgumentsConstructorStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: + Major MajorKey() { return ArrayNArgumentsConstructor; } + DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub); }; diff --git a/deps/v8/src/code.h b/deps/v8/src/code.h index 766c932e0f..791420cf39 100644 --- a/deps/v8/src/code.h +++ b/deps/v8/src/code.h @@ -29,6 +29,8 @@ #define V8_CODE_H_ #include "allocation.h" +#include "handles.h" +#include "objects.h" namespace v8 { namespace internal { @@ -44,6 +46,8 @@ class ParameterCount BASE_EMBEDDED { : reg_(reg), 
immediate_(0) { } explicit ParameterCount(int immediate) : reg_(no_reg), immediate_(immediate) { } + explicit ParameterCount(Handle f) + : reg_(no_reg), immediate_(f->shared()->formal_parameter_count()) { } bool is_reg() const { return !reg_.is(no_reg); } bool is_immediate() const { return !is_reg(); } diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 70ce6bc825..dce817129c 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -125,11 +125,8 @@ CompilationInfo::~CompilationInfo() { int CompilationInfo::num_parameters() const { - if (IsStub()) { - return 0; - } else { - return scope()->num_parameters(); - } + ASSERT(!IsStub()); + return scope()->num_parameters(); } @@ -147,8 +144,7 @@ Code::Flags CompilationInfo::flags() const { return Code::ComputeFlags(code_stub()->GetCodeKind(), code_stub()->GetICState(), code_stub()->GetExtraICState(), - Code::NORMAL, - 0); + Code::NORMAL, -1); } else { return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION); } @@ -425,6 +421,12 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() { Timer timer(this, &time_taken_to_codegen_); ASSERT(chunk_ != NULL); ASSERT(graph_ != NULL); + // Deferred handles reference objects that were accessible during + // graph creation. To make sure that we don't encounter inconsistencies + // between graph creation and code generation, we disallow accessing + // objects through deferred handles during the latter, with exceptions. + HandleDereferenceGuard no_deref_deferred( + isolate(), HandleDereferenceGuard::DISALLOW_DEFERRED); Handle optimized_code = chunk_->Codegen(); if (optimized_code.is_null()) { info()->set_bailout_reason("code generation failed"); @@ -622,7 +624,7 @@ Handle Compiler::Compile(Handle source, isolate->counters()->total_compile_size()->Increment(source_length); // The VM is in the COMPILER state until exiting this function. - VMState state(isolate, COMPILER); + VMState state(isolate); CompilationCache* compilation_cache = isolate->compilation_cache(); @@ -696,7 +698,7 @@ Handle Compiler::CompileEval(Handle source, isolate->counters()->total_compile_size()->Increment(source_length); // The VM is in the COMPILER state until exiting this function. - VMState state(isolate, COMPILER); + VMState state(isolate); // Do a lookup in the compilation cache; if the entry is not there, invoke // the compiler and add the result to the cache. @@ -859,7 +861,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) { ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT); // The VM is in the COMPILER state until exiting this function. 
- VMState state(isolate, COMPILER); + VMState state(isolate); PostponeInterruptsScope postpone(isolate); @@ -923,7 +925,7 @@ void Compiler::RecompileParallel(Handle closure) { } SmartPointer info(new CompilationInfoWithZone(closure)); - VMState state(isolate, PARALLEL_COMPILER); + VMState state(isolate); PostponeInterruptsScope postpone(isolate); Handle shared = info->shared_info(); @@ -998,7 +1000,7 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) { } Isolate* isolate = info->isolate(); - VMState state(isolate, PARALLEL_COMPILER); + VMState state(isolate); Logger::TimerEventScope timer( isolate, Logger::TimerEventScope::v8_recompile_synchronous); // If crankshaft succeeded, install the optimized code else install diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index dae39db351..00074c899b 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -143,6 +143,14 @@ class CompilationInfo { return SavesCallerDoubles::decode(flags_); } + void MarkAsRequiresFrame() { + flags_ |= RequiresFrame::encode(true); + } + + bool requires_frame() const { + return RequiresFrame::decode(flags_); + } + void SetParseRestriction(ParseRestriction restriction) { flags_ = ParseRestricitonField::update(flags_, restriction); } @@ -300,6 +308,8 @@ class CompilationInfo { class SavesCallerDoubles: public BitField {}; // If the set of valid statements is restricted. class ParseRestricitonField: public BitField {}; + // If the function requires a frame (for unspecified reasons) + class RequiresFrame: public BitField {}; unsigned flags_; diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index abeb8121cb..0024e13d6b 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -123,6 +123,7 @@ enum BindingFlags { V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \ V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \ V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \ + V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \ V(FUNCTION_MAP_INDEX, Map, function_map) \ V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \ V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \ @@ -276,6 +277,7 @@ class Context: public FixedArray { GLOBAL_EVAL_FUN_INDEX, INSTANTIATE_FUN_INDEX, CONFIGURE_INSTANCE_FUN_INDEX, + ARRAY_BUFFER_FUN_INDEX, MESSAGE_LISTENERS_INDEX, MAKE_MESSAGE_FUN_INDEX, GET_STACK_TRACE_LINE_INDEX, diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index eb718d684d..595ae9ed5b 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -29,9 +29,9 @@ #define V8_CONVERSIONS_INL_H_ #include // Required for INT_MAX etc. -#include #include // Required for DBL_MAX and on Win32 for finite() #include +#include #include "globals.h" // Required for V8_INFINITY // ---------------------------------------------------------------------------- @@ -86,8 +86,8 @@ inline unsigned int FastD2UI(double x) { inline double DoubleToInteger(double x) { - if (isnan(x)) return 0; - if (!isfinite(x) || x == 0) return x; + if (std::isnan(x)) return 0; + if (!std::isfinite(x) || x == 0) return x; return (x >= 0) ? floor(x) : ceil(x); } diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 5bfddd04c0..cdc42e34d9 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -26,14 +26,19 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include -#include #include +#include #include "conversions-inl.h" #include "dtoa.h" #include "strtod.h" #include "utils.h" +#ifndef _STLP_VENDOR_CSTD +// STLPort doesn't import fpclassify into the std namespace. +using std::fpclassify; +#endif + namespace v8 { namespace internal { diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc index 7c8265e981..fa192baed2 100644 --- a/deps/v8/src/counters.cc +++ b/deps/v8/src/counters.cc @@ -45,57 +45,38 @@ int* StatsCounter::FindLocationInStatsTable() const { } -// Start the timer. -void StatsCounterTimer::Start() { - if (!counter_.Enabled()) - return; - stop_time_ = 0; - start_time_ = OS::Ticks(); -} - -// Stop the timer and record the results. -void StatsCounterTimer::Stop() { - if (!counter_.Enabled()) - return; - stop_time_ = OS::Ticks(); - - // Compute the delta between start and stop, in milliseconds. - int milliseconds = static_cast(stop_time_ - start_time_) / 1000; - counter_.Increment(milliseconds); -} - void Histogram::AddSample(int sample) { if (Enabled()) { - Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample); + isolate()->stats_table()->AddHistogramSample(histogram_, sample); } } void* Histogram::CreateHistogram() const { - return Isolate::Current()->stats_table()-> + return isolate()->stats_table()-> CreateHistogram(name_, min_, max_, num_buckets_); } // Start the timer. void HistogramTimer::Start() { - if (histogram_.Enabled()) { + if (Enabled()) { stop_time_ = 0; start_time_ = OS::Ticks(); } if (FLAG_log_internal_timer_events) { - LOG(Isolate::Current(), TimerEvent(Logger::START, histogram_.name_)); + LOG(isolate(), TimerEvent(Logger::START, name())); } } // Stop the timer and record the results. void HistogramTimer::Stop() { - if (histogram_.Enabled()) { + if (Enabled()) { stop_time_ = OS::Ticks(); // Compute the delta between start and stop, in milliseconds. int milliseconds = static_cast(stop_time_ - start_time_) / 1000; - histogram_.AddSample(milliseconds); + AddSample(milliseconds); } if (FLAG_log_internal_timer_events) { - LOG(Isolate::Current(), TimerEvent(Logger::END, histogram_.name_)); + LOG(isolate(), TimerEvent(Logger::END, name())); } } diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h index 577280f444..a633fea779 100644 --- a/deps/v8/src/counters.h +++ b/deps/v8/src/counters.h @@ -113,14 +113,11 @@ class StatsTable { // The row has a 32bit value for each process/thread in the table and also // a name (stored in the table metadata). Since the storage location can be // thread-specific, this class cannot be shared across threads. -// -// This class is designed to be POD initialized. It will be registered with -// the counter system on first use. For example: -// StatsCounter c = { "c:myctr", NULL, false }; -struct StatsCounter { - const char* name_; - int* ptr_; - bool lookup_done_; +class StatsCounter { + public: + StatsCounter() { } + explicit StatsCounter(const char* name) + : name_(name), ptr_(NULL), lookup_done_(false) { } // Sets the counter to a specific value. void Set(int value) { @@ -177,39 +174,29 @@ struct StatsCounter { private: int* FindLocationInStatsTable() const; -}; - -// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 }; -struct StatsCounterTimer { - StatsCounter counter_; - - int64_t start_time_; - int64_t stop_time_; - - // Start the timer. - void Start(); - - // Stop the timer and record the results. - void Stop(); - // Returns true if the timer is running. 
- bool Running() { - return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0; - } + const char* name_; + int* ptr_; + bool lookup_done_; }; // A Histogram represents a dynamically created histogram in the StatsTable. -// -// This class is designed to be POD initialized. It will be registered with -// the histogram system on first use. For example: -// Histogram h = { "myhist", 0, 10000, 50, NULL, false }; -struct Histogram { - const char* name_; - int min_; - int max_; - int num_buckets_; - void* histogram_; - bool lookup_done_; +// It will be registered with the histogram system on first use. +class Histogram { + public: + Histogram() { } + Histogram(const char* name, + int min, + int max, + int num_buckets, + Isolate* isolate) + : name_(name), + min_(min), + max_(max), + num_buckets_(num_buckets), + histogram_(NULL), + lookup_done_(false), + isolate_(isolate) { } // Add a single sample to this histogram. void AddSample(int sample); @@ -234,17 +221,33 @@ struct Histogram { return histogram_; } + const char* name() { return name_; } + Isolate* isolate() const { return isolate_; } + private: void* CreateHistogram() const; -}; -// A HistogramTimer allows distributions of results to be created -// HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 }; -struct HistogramTimer { - Histogram histogram_; + const char* name_; + int min_; + int max_; + int num_buckets_; + void* histogram_; + bool lookup_done_; + Isolate* isolate_; +}; - int64_t start_time_; - int64_t stop_time_; +// A HistogramTimer allows distributions of results to be created. +class HistogramTimer : public Histogram { + public: + HistogramTimer() { } + HistogramTimer(const char* name, + int min, + int max, + int num_buckets, + Isolate* isolate) + : Histogram(name, min, max, num_buckets, isolate), + start_time_(0), + stop_time_(0) { } // Start the timer. void Start(); @@ -254,12 +257,12 @@ struct HistogramTimer { // Returns true if the timer is running. bool Running() { - return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0); + return Enabled() && (start_time_ != 0) && (stop_time_ == 0); } - void Reset() { - histogram_.Reset(); - } + private: + int64_t start_time_; + int64_t stop_time_; }; // Helper class for scoping a HistogramTimer. 
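Note on the counters.h changes above: StatsCounter, Histogram, and HistogramTimer are no longer POD structs initialized with brace lists; they are constructed explicitly and register themselves lazily. A minimal usage sketch against the constructors shown in this hunk (the counter and histogram names and the |isolate| pointer are illustrative placeholders, not part of this patch):

    StatsCounter compiles("c:V8.MyCounter");   // looked up in the StatsTable on first use
    compiles.Increment();
    Histogram h("V8.MyHistogram", 0, 10000, 50, isolate);
    h.AddSample(42);                           // a no-op unless the embedder installed a stats table
    HistogramTimer t("V8.MyTimer", 0, 10000, 50, isolate);
    t.Start();
    // ... timed work ...
    t.Stop();                                  // records the elapsed milliseconds via AddSample()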
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 47c2a94232..51d29423c4 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -44,9 +44,11 @@ static const int kTickSamplesBufferChunksCount = 16; static const int kProfilerStackSize = 64 * KB; -ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) +ProfilerEventsProcessor::ProfilerEventsProcessor( + ProfileGenerator* generator, CpuProfilesCollection* profiles) : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), generator_(generator), + profiles_(profiles), running_(true), ticks_buffer_(sizeof(TickSampleEventRecord), kTickSamplesBufferChunkSize, @@ -65,7 +67,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, prefix, name); + rec->entry = profiles_->NewCodeEntry(tag, prefix, name); rec->size = 1; rec->shared = NULL; events_buffer_.Enqueue(evt_rec); @@ -85,7 +87,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number); + rec->entry = profiles_->NewCodeEntry(tag, name, resource_name, line_number); rec->size = size; rec->shared = shared; events_buffer_.Enqueue(evt_rec); @@ -102,7 +104,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, name); + rec->entry = profiles_->NewCodeEntry(tag, name); rec->size = size; rec->shared = NULL; events_buffer_.Enqueue(evt_rec); @@ -119,7 +121,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, args_count); + rec->entry = profiles_->NewCodeEntry(tag, args_count); rec->size = size; rec->shared = NULL; events_buffer_.Enqueue(evt_rec); @@ -162,7 +164,7 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent( rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, prefix, name); + rec->entry = profiles_->NewCodeEntry(tag, prefix, name); rec->size = size; events_buffer_.Enqueue(evt_rec); } @@ -443,7 +445,7 @@ void CpuProfiler::StartProcessorIfNotStarted() { saved_logging_nesting_ = isolate_->logger()->logging_nesting_; isolate_->logger()->logging_nesting_ = 0; generator_ = new ProfileGenerator(profiles_); - processor_ = new ProfilerEventsProcessor(generator_); + processor_ = new ProfilerEventsProcessor(generator_, profiles_); is_profiling_ = true; processor_->StartSynchronously(); // Enumerate stuff we already have in the heap. @@ -458,7 +460,7 @@ void CpuProfiler::StartProcessorIfNotStarted() { isolate_->logger()->LogAccessorCallbacks(); } // Enable stack sampling. 
- Sampler* sampler = reinterpret_cast(isolate_->logger()->ticker_); + Sampler* sampler = isolate_->logger()->sampler(); sampler->IncreaseProfilingDepth(); if (!sampler->IsActive()) { sampler->Start(); diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index 6e2b0e09cd..da7ea6de24 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -125,7 +125,8 @@ class TickSampleEventRecord { // methods called by event producers: VM and stack sampler threads. class ProfilerEventsProcessor : public Thread { public: - explicit ProfilerEventsProcessor(ProfileGenerator* generator); + ProfilerEventsProcessor(ProfileGenerator* generator, + CpuProfilesCollection* profiles); virtual ~ProfilerEventsProcessor() {} // Thread control. @@ -178,6 +179,7 @@ class ProfilerEventsProcessor : public Thread { INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag)); ProfileGenerator* generator_; + CpuProfilesCollection* profiles_; bool running_; UnboundQueue events_buffer_; SamplingCircularQueue ticks_buffer_; diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 8f6e384c1a..22ace174d2 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -42,6 +42,13 @@ #ifdef V8_SHARED #include +#endif // V8_SHARED + +#ifndef V8_SHARED +#include +#endif // !V8_SHARED + +#ifdef V8_SHARED #include "../include/v8-testing.h" #endif // V8_SHARED @@ -83,7 +90,7 @@ const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_"; const char kArrayMarkerPropName[] = "d8::_is_typed_array_"; -#define FOR_EACH_SYMBOL(V) \ +#define FOR_EACH_STRING(V) \ V(ArrayBuffer, "ArrayBuffer") \ V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \ V(ArrayMarkerPropName, kArrayMarkerPropName) \ @@ -94,36 +101,58 @@ const char kArrayMarkerPropName[] = "d8::_is_typed_array_"; V(length, "length") -class Symbols { +class PerIsolateData { public: - explicit Symbols(Isolate* isolate) : isolate_(isolate) { + explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) { HandleScope scope(isolate); -#define INIT_SYMBOL(name, value) \ - name##_ = Persistent::New(isolate, String::NewSymbol(value)); - FOR_EACH_SYMBOL(INIT_SYMBOL) -#undef INIT_SYMBOL +#define INIT_STRING(name, value) \ + name##_string_ = Persistent::New(isolate, String::NewSymbol(value)); + FOR_EACH_STRING(INIT_STRING) +#undef INIT_STRING isolate->SetData(this); } - ~Symbols() { -#define DISPOSE_SYMBOL(name, value) name##_.Dispose(isolate_); - FOR_EACH_SYMBOL(DISPOSE_SYMBOL) -#undef DISPOSE_SYMBOL + ~PerIsolateData() { +#define DISPOSE_STRING(name, value) name##_string_.Dispose(isolate_); + FOR_EACH_STRING(DISPOSE_STRING) +#undef DISPOSE_STRING isolate_->SetData(NULL); // Not really needed, just to be sure... 
} -#define DEFINE_SYMBOL_GETTER(name, value) \ - static Persistent<String> name(Isolate* isolate) { \ - return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \ + inline static PerIsolateData* Get(Isolate* isolate) { + return reinterpret_cast<PerIsolateData*>(isolate->GetData()); + } + +#define DEFINE_STRING_GETTER(name, value) \ + static Persistent<String> name##_string(Isolate* isolate) { \ + return Get(isolate)->name##_string_; \ } - FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER) -#undef DEFINE_SYMBOL_GETTER + FOR_EACH_STRING(DEFINE_STRING_GETTER) +#undef DEFINE_STRING_GETTER + + class RealmScope { + public: + explicit RealmScope(PerIsolateData* data); + ~RealmScope(); + private: + PerIsolateData* data_; + }; private: + friend class Shell; + friend class RealmScope; Isolate* isolate_; -#define DEFINE_MEMBER(name, value) Persistent<String> name##_; - FOR_EACH_SYMBOL(DEFINE_MEMBER) + int realm_count_; + int realm_current_; + int realm_switch_; + Persistent<Context>* realms_; + Persistent<Value> realm_shared_; + +#define DEFINE_MEMBER(name, value) Persistent<String> name##_string_; + FOR_EACH_STRING(DEFINE_MEMBER) #undef DEFINE_MEMBER + + int RealmFind(Handle<Context> context); }; @@ -207,14 +236,20 @@ bool Shell::ExecuteString(Isolate* isolate, // When debugging make exceptions appear to be uncaught. try_catch.SetVerbose(true); } - Handle