From cbdcc1d5f321cfff3d4d3f416e48733089de7e00 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Fri, 19 Nov 2010 10:49:09 -0800 Subject: [PATCH] Upgrade V8 to 2.5.7 --- deps/v8/AUTHORS | 1 + deps/v8/ChangeLog | 47 + deps/v8/include/v8-debug.h | 4 +- deps/v8/include/v8.h | 64 +- deps/v8/include/v8stdint.h | 53 + deps/v8/src/SConscript | 5 + deps/v8/src/accessors.cc | 6 +- deps/v8/src/allocation.cc | 18 +- deps/v8/src/allocation.h | 4 + deps/v8/src/api.cc | 20 +- deps/v8/src/arm/assembler-arm.cc | 114 +- deps/v8/src/arm/assembler-arm.h | 46 +- deps/v8/src/arm/codegen-arm.cc | 114 +- deps/v8/src/arm/codegen-arm.h | 14 - deps/v8/src/arm/constants-arm.h | 7 + deps/v8/src/arm/disasm-arm.cc | 23 +- deps/v8/src/arm/full-codegen-arm.cc | 165 +- deps/v8/src/arm/ic-arm.cc | 4 +- deps/v8/src/arm/macro-assembler-arm.cc | 26 +- deps/v8/src/arm/macro-assembler-arm.h | 10 + deps/v8/src/arm/simulator-arm.cc | 108 +- deps/v8/src/arm/simulator-arm.h | 3 + deps/v8/src/arm/stub-cache-arm.cc | 139 +- deps/v8/src/arm/virtual-frame-arm.cc | 7 +- deps/v8/src/assembler.cc | 49 + deps/v8/src/assembler.h | 61 + deps/v8/src/bignum-dtoa.cc | 655 +++++++ deps/v8/src/bignum-dtoa.h | 81 + deps/v8/src/bignum.cc | 767 +++++++++ deps/v8/src/bignum.h | 140 ++ deps/v8/src/bootstrapper.cc | 4 + deps/v8/src/checks.cc | 9 + deps/v8/src/checks.h | 19 +- deps/v8/src/code-stubs.cc | 34 +- deps/v8/src/code-stubs.h | 32 - deps/v8/src/codegen.cc | 62 +- deps/v8/src/codegen.h | 2 - deps/v8/src/compiler.cc | 1 - deps/v8/src/conversions.cc | 97 +- deps/v8/src/dateparser.h | 17 +- deps/v8/src/debug-debugger.js | 32 - deps/v8/src/debug.cc | 1 + deps/v8/src/double.h | 32 +- deps/v8/src/dtoa.cc | 41 +- deps/v8/src/dtoa.h | 14 +- deps/v8/src/execution.cc | 131 -- deps/v8/src/execution.h | 26 +- .../externalize-string-extension.cc | 141 ++ .../extensions/externalize-string-extension.h | 50 + deps/v8/src/extensions/gc-extension.cc | 54 + deps/v8/src/extensions/gc-extension.h | 49 + deps/v8/src/flag-definitions.h | 1 + deps/v8/src/full-codegen.cc | 17 +- deps/v8/src/full-codegen.h | 7 +- deps/v8/src/global-handles.cc | 6 +- deps/v8/src/global-handles.h | 3 +- deps/v8/src/globals.h | 432 +---- deps/v8/src/handles.cc | 4 + deps/v8/src/heap-inl.h | 7 +- deps/v8/src/heap-profiler.cc | 8 +- deps/v8/src/heap.cc | 200 ++- deps/v8/src/heap.h | 55 +- deps/v8/src/ia32/assembler-ia32.cc | 54 +- deps/v8/src/ia32/assembler-ia32.h | 17 +- deps/v8/src/ia32/code-stubs-ia32.cc | 9 - deps/v8/src/ia32/codegen-ia32.cc | 123 +- deps/v8/src/ia32/codegen-ia32.h | 17 - deps/v8/src/ia32/full-codegen-ia32.cc | 146 +- deps/v8/src/ia32/ic-ia32.cc | 1 - deps/v8/src/ia32/macro-assembler-ia32.cc | 90 +- deps/v8/src/ia32/macro-assembler-ia32.h | 46 +- deps/v8/src/ia32/stub-cache-ia32.cc | 243 ++- deps/v8/src/ia32/virtual-frame-ia32.cc | 5 +- deps/v8/src/jump-target-heavy.cc | 5 +- deps/v8/src/jump-target-light.cc | 5 +- deps/v8/src/list.h | 8 - deps/v8/src/mark-compact.cc | 16 +- deps/v8/src/mark-compact.h | 3 + deps/v8/src/objects-debug.cc | 2 +- deps/v8/src/objects-inl.h | 42 +- deps/v8/src/objects.cc | 8 +- deps/v8/src/objects.h | 4 +- deps/v8/src/parser.cc | 1146 +++++-------- deps/v8/src/parser.h | 240 ++- deps/v8/src/platform-linux.cc | 71 +- deps/v8/src/platform-win32.cc | 5 +- deps/v8/src/preparser.h | 1419 ++++++++++++++++ deps/v8/src/prescanner.h | 1098 ++++++++++++ deps/v8/src/profile-generator.cc | 30 +- deps/v8/src/profile-generator.h | 2 - deps/v8/src/regexp.js | 168 +- deps/v8/src/runtime.cc | 66 - deps/v8/src/runtime.h | 5 +- deps/v8/src/scanner-base.cc | 195 +++ 
deps/v8/src/scanner-base.h | 206 +++ deps/v8/src/scanner.cc | 323 +--- deps/v8/src/scanner.h | 231 +-- deps/v8/src/serialize.cc | 4 - deps/v8/src/spaces.cc | 66 +- deps/v8/src/spaces.h | 32 +- deps/v8/src/string.js | 82 +- deps/v8/src/strtod.cc | 129 +- deps/v8/src/stub-cache.cc | 28 + deps/v8/src/stub-cache.h | 4 + deps/v8/src/token.h | 2 + deps/v8/src/utils.cc | 36 - deps/v8/src/utils.h | 317 +--- deps/v8/src/v8.cc | 38 +- deps/v8/src/v8.h | 9 +- deps/v8/src/v8globals.h | 464 +++++ deps/v8/src/v8utils.h | 301 ++++ deps/v8/src/version.cc | 2 +- deps/v8/src/virtual-frame.h | 13 + deps/v8/src/x64/assembler-x64.cc | 54 +- deps/v8/src/x64/assembler-x64.h | 15 +- deps/v8/src/x64/code-stubs-x64.cc | 33 +- deps/v8/src/x64/codegen-x64.cc | 111 +- deps/v8/src/x64/codegen-x64.h | 14 - deps/v8/src/x64/full-codegen-x64.cc | 196 ++- deps/v8/src/x64/ic-x64.cc | 1 - deps/v8/src/x64/macro-assembler-x64.cc | 127 +- deps/v8/src/x64/macro-assembler-x64.h | 75 +- deps/v8/src/x64/stub-cache-x64.cc | 257 +-- deps/v8/src/x64/virtual-frame-x64.cc | 5 +- deps/v8/test/cctest/SConscript | 3 + deps/v8/test/cctest/test-api.cc | 3 +- deps/v8/test/cctest/test-assembler-arm.cc | 68 + deps/v8/test/cctest/test-bignum-dtoa.cc | 315 ++++ deps/v8/test/cctest/test-bignum.cc | 1502 +++++++++++++++++ deps/v8/test/cctest/test-debug.cc | 45 +- deps/v8/test/cctest/test-disasm-arm.cc | 17 +- deps/v8/test/cctest/test-double.cc | 16 + deps/v8/test/cctest/test-dtoa.cc | 331 ++++ deps/v8/test/cctest/test-heap-profiler.cc | 35 +- deps/v8/test/cctest/test-heap.cc | 29 + deps/v8/test/cctest/test-mark-compact.cc | 4 +- deps/v8/test/cctest/test-parsing.cc | 91 +- deps/v8/test/cctest/test-spaces.cc | 6 +- deps/v8/test/cctest/test-strtod.cc | 181 +- deps/v8/test/mjsunit/regress/regress-918.js | 33 + deps/v8/test/mjsunit/regress/regress-927.js | 33 + deps/v8/test/mjsunit/regress/regress-931.js | 48 + .../regress/regress-conditional-position.js | 95 ++ deps/v8/test/mjsunit/string-split.js | 91 +- deps/v8/tools/gyp/v8.gyp | 10 + deps/v8/tools/presubmit.py | 2 +- deps/v8/tools/visual_studio/v8_base.vcproj | 56 + 147 files changed, 11695 insertions(+), 4126 deletions(-) create mode 100644 deps/v8/include/v8stdint.h create mode 100644 deps/v8/src/bignum-dtoa.cc create mode 100644 deps/v8/src/bignum-dtoa.h create mode 100644 deps/v8/src/bignum.cc create mode 100644 deps/v8/src/bignum.h create mode 100644 deps/v8/src/extensions/externalize-string-extension.cc create mode 100644 deps/v8/src/extensions/externalize-string-extension.h create mode 100644 deps/v8/src/extensions/gc-extension.cc create mode 100644 deps/v8/src/extensions/gc-extension.h create mode 100644 deps/v8/src/preparser.h create mode 100644 deps/v8/src/prescanner.h create mode 100644 deps/v8/src/scanner-base.cc create mode 100644 deps/v8/src/scanner-base.h create mode 100644 deps/v8/src/v8globals.h create mode 100644 deps/v8/src/v8utils.h create mode 100644 deps/v8/test/cctest/test-bignum-dtoa.cc create mode 100644 deps/v8/test/cctest/test-bignum.cc create mode 100644 deps/v8/test/cctest/test-dtoa.cc create mode 100644 deps/v8/test/mjsunit/regress/regress-918.js create mode 100644 deps/v8/test/mjsunit/regress/regress-927.js create mode 100644 deps/v8/test/mjsunit/regress/regress-931.js create mode 100644 deps/v8/test/mjsunit/regress/regress-conditional-position.js diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 3749cebcd1..68f9b63bab 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -9,6 +9,7 @@ ARM Ltd. 
Hewlett-Packard Development Company, LP Alexander Botero-Lowry +Alexandre Rames Alexandre Vassalotti Andreas Anyuru Burcu Dogan diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 69aff33d18..c618f202f9 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,50 @@ +2010-11-18: Version 2.5.7 + + Fixed obscure evaluation order bug (issue 931). + + Split the random number state between JavaScript and the private API. + + Fixed performance bug causing GCs when generating stack traces on + code from very large scripts. + + Fixed bug in parser that allowed (foo):42 as a labelled statement + (issue 918). + + Provide more accurate results about used heap size via + GetHeapStatistics. + + Allow build-time customization of the max semispace size. + + Made String.prototype.split honor limit when separator is empty + (issue 929). + + Added missing failure check after expecting an identifier in + preparser (Chromium issue 62639). + + +2010-11-10: Version 2.5.6 + + Added support for VFP rounding modes to the ARM simulator. + + Fixed multiplication overflow bug (issue 927). + + Added a limit for the amount of executable memory (issue 925). + + +2010-11-08: Version 2.5.5 + + Added more aggressive GC of external objects in near out-of-memory + situations. + + Fixed a bug that gave the incorrect result for String.split called + on the empty string (issue 924). + + +2010-11-03: Version 2.5.4 + + Improved V8 VFPv3 runtime detection to address issue 914. + + 2010-11-01: Version 2.5.3 Fixed a bug that prevents constants from overwriting function values diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h index 4314727adc..f17b848550 100755 --- a/deps/v8/include/v8-debug.h +++ b/deps/v8/include/v8-debug.h @@ -142,7 +142,7 @@ class EXPORT Debug { virtual ~Message() {} }; - + /** * An event details object passed to the debug event listener. @@ -300,7 +300,7 @@ class EXPORT Debug { * get access to information otherwise not available during normal JavaScript * execution e.g. details on stack frames. Receiver of the function call will * be the debugger context global object, however this is a subject to change. - * The following example show a JavaScript function which when passed to + * The following example show a JavaScript function which when passed to * v8::Debug::Call will return the current line of JavaScript execution. * * \code diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index c7e4552b4d..9baa17db1d 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -38,23 +38,9 @@ #ifndef V8_H_ #define V8_H_ -#include +#include "v8stdint.h" #ifdef _WIN32 -// When compiling on MinGW stdint.h is available. -#ifdef __MINGW32__ -#include -#else // __MINGW32__ -typedef signed char int8_t; -typedef unsigned char uint8_t; -typedef short int16_t; // NOLINT -typedef unsigned short uint16_t; // NOLINT -typedef int int32_t; -typedef unsigned int uint32_t; -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -// intptr_t and friends are defined in crtdefs.h through stdio.h. -#endif // __MINGW32__ // Setup for Windows DLL export/import. When building the V8 DLL the // BUILDING_V8_SHARED needs to be defined. When building a program which uses @@ -76,8 +62,6 @@ typedef unsigned __int64 uint64_t; #else // _WIN32 -#include - // Setup for Linux shared library export. There is no need to distinguish // between building or using the V8 shared library, but we should not // export symbols when we are building a static library. 
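As a purely illustrative check of the String.prototype.split change noted in the 2.5.7 ChangeLog above (issue 929), the following sketch uses the classic pre-isolate embedding API of this V8 line; it is not part of the patch, and the helper name is hypothetical:

    // Illustration only: with this patch an empty separator honors the limit
    // argument, so 'abc'.split('', 2) has length 2 rather than 3.
    #include <v8.h>

    int SplitLimitDemo() {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      v8::Handle<v8::Script> script =
          v8::Script::Compile(v8::String::New("'abc'.split('', 2).length"));
      int length = script->Run()->Int32Value();
      context.Dispose();
      return length;  // 2 with this patch applied
    }
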
@@ -127,7 +111,6 @@ class Arguments; class Object; class Heap; class Top; - } @@ -476,10 +459,10 @@ class V8EXPORT HandleScope { level = 0; } }; - + void Leave(); - + internal::Object** prev_next_; internal::Object** prev_limit_; @@ -1055,7 +1038,7 @@ class String : public Primitive { */ V8EXPORT bool IsExternalAscii() const; - class V8EXPORT ExternalStringResourceBase { + class V8EXPORT ExternalStringResourceBase { // NOLINT public: virtual ~ExternalStringResourceBase() {} @@ -2365,12 +2348,15 @@ class V8EXPORT ResourceConstraints { void set_max_young_space_size(int value) { max_young_space_size_ = value; } int max_old_space_size() const { return max_old_space_size_; } void set_max_old_space_size(int value) { max_old_space_size_ = value; } + int max_executable_size() { return max_executable_size_; } + void set_max_executable_size(int value) { max_executable_size_ = value; } uint32_t* stack_limit() const { return stack_limit_; } // Sets an address beyond which the VM's stack may not grow. void set_stack_limit(uint32_t* value) { stack_limit_ = value; } private: int max_young_space_size_; int max_old_space_size_; + int max_executable_size_; uint32_t* stack_limit_; }; @@ -2502,13 +2488,18 @@ class V8EXPORT HeapStatistics { public: HeapStatistics(); size_t total_heap_size() { return total_heap_size_; } + size_t total_heap_size_executable() { return total_heap_size_executable_; } size_t used_heap_size() { return used_heap_size_; } private: void set_total_heap_size(size_t size) { total_heap_size_ = size; } + void set_total_heap_size_executable(size_t size) { + total_heap_size_executable_ = size; + } void set_used_heap_size(size_t size) { used_heap_size_ = size; } size_t total_heap_size_; + size_t total_heap_size_executable_; size_t used_heap_size_; friend class V8; @@ -3260,8 +3251,8 @@ class V8EXPORT Locker { /** * An interface for exporting data from V8, using "push" model. */ -class V8EXPORT OutputStream { -public: +class V8EXPORT OutputStream { // NOLINT + public: enum OutputEncoding { kAscii = 0 // 7-bit ASCII. }; @@ -3291,6 +3282,8 @@ public: namespace internal { +static const int kApiPointerSize = sizeof(void*); // NOLINT +static const int kApiIntSize = sizeof(int); // NOLINT // Tag information for HeapObject. const int kHeapObjectTag = 1; @@ -3326,19 +3319,19 @@ template <> struct SmiConstants<8> { } }; -const int kSmiShiftSize = SmiConstants::kSmiShiftSize; -const int kSmiValueSize = SmiConstants::kSmiValueSize; +const int kSmiShiftSize = SmiConstants::kSmiShiftSize; +const int kSmiValueSize = SmiConstants::kSmiValueSize; template struct InternalConstants; // Internal constants for 32-bit systems. template <> struct InternalConstants<4> { - static const int kStringResourceOffset = 3 * sizeof(void*); + static const int kStringResourceOffset = 3 * kApiPointerSize; }; // Internal constants for 64-bit systems. template <> struct InternalConstants<8> { - static const int kStringResourceOffset = 3 * sizeof(void*); + static const int kStringResourceOffset = 3 * kApiPointerSize; }; /** @@ -3352,12 +3345,12 @@ class Internals { // These values match non-compiler-dependent values defined within // the implementation of v8. 
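The v8.h hunks above add two embedder-visible pieces: ResourceConstraints::set_max_executable_size() and HeapStatistics::total_heap_size_executable(). A minimal usage sketch follows; it is not from the patch, the size value is a placeholder, and its interpretation is whatever Heap::ConfigureHeap does with it:

    // Illustration only: cap executable memory and read back the new statistic.
    #include <cstdio>
    #include <v8.h>

    void ConfigureAndReport() {
      v8::ResourceConstraints constraints;
      constraints.set_max_executable_size(16 * 1024 * 1024);  // placeholder value
      v8::SetResourceConstraints(&constraints);  // intended to run early, before V8 builds its heap

      v8::HeapStatistics stats;
      v8::V8::GetHeapStatistics(&stats);
      std::printf("committed: %zu executable: %zu used: %zu\n",
                  stats.total_heap_size(),
                  stats.total_heap_size_executable(),  // new in 2.5.7
                  stats.used_heap_size());
    }
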
static const int kHeapObjectMapOffset = 0; - static const int kMapInstanceTypeOffset = sizeof(void*) + sizeof(int); + static const int kMapInstanceTypeOffset = kApiPointerSize + kApiIntSize; static const int kStringResourceOffset = - InternalConstants::kStringResourceOffset; + InternalConstants::kStringResourceOffset; - static const int kProxyProxyOffset = sizeof(void*); - static const int kJSObjectHeaderSize = 3 * sizeof(void*); + static const int kProxyProxyOffset = kApiPointerSize; + static const int kJSObjectHeaderSize = 3 * kApiPointerSize; static const int kFullStringRepresentationMask = 0x07; static const int kExternalTwoByteRepresentationTag = 0x02; @@ -3375,7 +3368,7 @@ class Internals { } static inline int SmiValue(internal::Object* value) { - return SmiConstants::SmiToInt(value); + return SmiConstants::SmiToInt(value); } static inline int GetInstanceType(internal::Object* obj) { @@ -3404,10 +3397,9 @@ class Internals { uint8_t* addr = reinterpret_cast(ptr) + offset - kHeapObjectTag; return *reinterpret_cast(addr); } - }; -} +} // namespace internal template @@ -3567,7 +3559,7 @@ Local Object::UncheckedGetInternalField(int index) { // If the object is a plain JSObject, which is the common case, // we know where to find the internal fields and can return the // value directly. - int offset = I::kJSObjectHeaderSize + (sizeof(void*) * index); + int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index); O* value = I::ReadField(obj, offset); O** result = HandleScope::CreateHandle(value); return Local(reinterpret_cast(result)); @@ -3603,7 +3595,7 @@ void* Object::GetPointerFromInternalField(int index) { // If the object is a plain JSObject, which is the common case, // we know where to find the internal fields and can return the // value directly. - int offset = I::kJSObjectHeaderSize + (sizeof(void*) * index); + int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index); O* value = I::ReadField(obj, offset); return I::GetExternalPointer(value); } diff --git a/deps/v8/include/v8stdint.h b/deps/v8/include/v8stdint.h new file mode 100644 index 0000000000..50b4f29a64 --- /dev/null +++ b/deps/v8/include/v8stdint.h @@ -0,0 +1,53 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Load definitions of standard types. + +#ifndef V8STDINT_H_ +#define V8STDINT_H_ + +#include + +#if defined(_WIN32) && !defined(__MINGW32__) + +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; // NOLINT +typedef unsigned short uint16_t; // NOLINT +typedef int int32_t; +typedef unsigned int uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +// intptr_t and friends are defined in crtdefs.h through stdio.h. + +#else + +#include + +#endif + +#endif // V8STDINT_H_ diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index ef5485d854..316387fa23 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -40,6 +40,8 @@ SOURCES = { api.cc assembler.cc ast.cc + bignum.cc + bignum-dtoa.cc bootstrapper.cc builtins.cc cached-powers.cc @@ -95,6 +97,7 @@ SOURCES = { register-allocator.cc rewriter.cc runtime.cc + scanner-base.cc scanner.cc scopeinfo.cc scopes.cc @@ -117,6 +120,8 @@ SOURCES = { version.cc virtual-frame.cc zone.cc + extensions/gc-extension.cc + extensions/externalize-string-extension.cc """), 'arch:arm': Split(""" jump-target-light.cc diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 7c21659ebc..08ef41b9f7 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -316,8 +316,10 @@ MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) { InitScriptLineEnds(script); ASSERT(script->line_ends()->IsFixedArray()); Handle line_ends(FixedArray::cast(script->line_ends())); - Handle copy = Factory::CopyFixedArray(line_ends); - Handle js_array = Factory::NewJSArrayWithElements(copy); + // We do not want anyone to modify this array from JS. 
+ ASSERT(*line_ends == Heap::empty_fixed_array() || + line_ends->map() == Heap::fixed_cow_array_map()); + Handle js_array = Factory::NewJSArrayWithElements(line_ends); return *js_array; } diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc index 678f4fd7d2..d74c37cd79 100644 --- a/deps/v8/src/allocation.cc +++ b/deps/v8/src/allocation.cc @@ -27,16 +27,21 @@ #include -#include "v8.h" +#include "../include/v8stdint.h" +#include "globals.h" +#include "checks.h" +#include "allocation.h" +#include "utils.h" namespace v8 { namespace internal { - void* Malloced::New(size_t size) { ASSERT(NativeAllocationChecker::allocation_allowed()); void* result = malloc(size); - if (result == NULL) V8::FatalProcessOutOfMemory("Malloced operator new"); + if (result == NULL) { + v8::internal::FatalProcessOutOfMemory("Malloced operator new"); + } return result; } @@ -47,7 +52,7 @@ void Malloced::Delete(void* p) { void Malloced::FatalProcessOutOfMemory() { - V8::FatalProcessOutOfMemory("Out of memory"); + v8::internal::FatalProcessOutOfMemory("Out of memory"); } @@ -82,7 +87,7 @@ void AllStatic::operator delete(void* p) { char* StrDup(const char* str) { int length = StrLength(str); char* result = NewArray(length + 1); - memcpy(result, str, length * kCharSize); + memcpy(result, str, length); result[length] = '\0'; return result; } @@ -92,7 +97,7 @@ char* StrNDup(const char* str, int n) { int length = StrLength(str); if (n < length) length = n; char* result = NewArray(length + 1); - memcpy(result, str, length * kCharSize); + memcpy(result, str, length); result[length] = '\0'; return result; } @@ -124,6 +129,7 @@ void* PreallocatedStorage::New(size_t size) { } ASSERT(free_list_.next_ != &free_list_); ASSERT(free_list_.previous_ != &free_list_); + size = (size + kPointerSize - 1) & ~(kPointerSize - 1); // Search for exact fit. for (PreallocatedStorage* storage = free_list_.next_; diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h index 70a3a03889..6f4bd2fb54 100644 --- a/deps/v8/src/allocation.h +++ b/deps/v8/src/allocation.h @@ -31,6 +31,10 @@ namespace v8 { namespace internal { +// Called when allocation routines fail to allocate. +// This function should not return, but should terminate the current +// processing. +void FatalProcessOutOfMemory(const char* message); // A class that controls whether allocation is allowed. This is for // the C++ heap only! diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 617922dd5a..5912449169 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -43,7 +43,6 @@ #include "serialize.h" #include "snapshot.h" #include "top.h" -#include "utils.h" #include "v8threads.h" #include "version.h" @@ -116,7 +115,6 @@ static void DefaultFatalErrorHandler(const char* location, } - static FatalErrorCallback& GetFatalErrorHandler() { if (exception_behavior == NULL) { exception_behavior = DefaultFatalErrorHandler; @@ -125,6 +123,10 @@ static FatalErrorCallback& GetFatalErrorHandler() { } +void i::FatalProcessOutOfMemory(const char* location) { + i::V8::FatalProcessOutOfMemory(location, false); +} + // When V8 cannot allocated memory FatalProcessOutOfMemory is called. // The default fatal error handler is called and execution is stopped. 
@@ -394,14 +396,18 @@ v8::Handle False() { ResourceConstraints::ResourceConstraints() : max_young_space_size_(0), max_old_space_size_(0), + max_executable_size_(0), stack_limit_(NULL) { } bool SetResourceConstraints(ResourceConstraints* constraints) { int young_space_size = constraints->max_young_space_size(); int old_gen_size = constraints->max_old_space_size(); - if (young_space_size != 0 || old_gen_size != 0) { - bool result = i::Heap::ConfigureHeap(young_space_size / 2, old_gen_size); + int max_executable_size = constraints->max_executable_size(); + if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) { + bool result = i::Heap::ConfigureHeap(young_space_size / 2, + old_gen_size, + max_executable_size); if (!result) return false; } if (constraints->stack_limit() != NULL) { @@ -3260,11 +3266,15 @@ bool v8::V8::Dispose() { } -HeapStatistics::HeapStatistics(): total_heap_size_(0), used_heap_size_(0) { } +HeapStatistics::HeapStatistics(): total_heap_size_(0), + total_heap_size_executable_(0), + used_heap_size_(0) { } void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) { heap_statistics->set_total_heap_size(i::Heap::CommittedMemory()); + heap_statistics->set_total_heap_size_executable( + i::Heap::CommittedMemoryExecutable()); heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects()); } diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index ebbd9b1138..4cb421c577 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -317,7 +317,8 @@ static const Instr kLdrStrOffsetMask = 0x00000fff; static const int kMinimalBufferSize = 4*KB; static byte* spare_buffer_ = NULL; -Assembler::Assembler(void* buffer, int buffer_size) { +Assembler::Assembler(void* buffer, int buffer_size) + : positions_recorder_(this) { if (buffer == NULL) { // Do our own buffer management. if (buffer_size <= kMinimalBufferSize) { @@ -354,10 +355,6 @@ Assembler::Assembler(void* buffer, int buffer_size) { no_const_pool_before_ = 0; last_const_pool_end_ = 0; last_bound_pos_ = 0; - current_statement_position_ = RelocInfo::kNoPosition; - current_position_ = RelocInfo::kNoPosition; - written_statement_position_ = current_statement_position_; - written_position_ = current_position_; } @@ -752,15 +749,15 @@ static bool fits_shifter(uint32_t imm32, // if they can be encoded in the ARM's 12 bits of immediate-offset instruction // space. There is no guarantee that the relocated location can be similarly // encoded. -static bool MustUseConstantPool(RelocInfo::Mode rmode) { - if (rmode == RelocInfo::EXTERNAL_REFERENCE) { +bool Operand::must_use_constant_pool() const { + if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { #ifdef DEBUG if (!Serializer::enabled()) { Serializer::TooLateToEnableNow(); } #endif // def DEBUG return Serializer::enabled(); - } else if (rmode == RelocInfo::NONE) { + } else if (rmode_ == RelocInfo::NONE) { return false; } return true; @@ -769,7 +766,7 @@ static bool MustUseConstantPool(RelocInfo::Mode rmode) { bool Operand::is_single_instruction() const { if (rm_.is_valid()) return true; - if (MustUseConstantPool(rmode_)) return false; + if (must_use_constant_pool()) return false; uint32_t dummy1, dummy2; return fits_shifter(imm32_, &dummy1, &dummy2, NULL); } @@ -785,7 +782,7 @@ void Assembler::addrmod1(Instr instr, // Immediate. 
uint32_t rotate_imm; uint32_t immed_8; - if (MustUseConstantPool(x.rmode_) || + if (x.must_use_constant_pool() || !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { // The immediate operand cannot be encoded as a shifter operand, so load // it first to register ip and change the original instruction to use ip. @@ -794,8 +791,7 @@ void Assembler::addrmod1(Instr instr, CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed Condition cond = static_cast(instr & CondMask); if ((instr & ~CondMask) == 13*B21) { // mov, S not set - if (MustUseConstantPool(x.rmode_) || - !CpuFeatures::IsSupported(ARMv7)) { + if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) { RecordRelocInfo(x.rmode_, x.imm32_); ldr(rd, MemOperand(pc, 0), cond); } else { @@ -806,7 +802,7 @@ void Assembler::addrmod1(Instr instr, } else { // If this is not a mov or mvn instruction we may still be able to avoid // a constant pool entry by using mvn or movw. - if (!MustUseConstantPool(x.rmode_) && + if (!x.must_use_constant_pool() && (instr & kMovMvnMask) != kMovMvnPattern) { mov(ip, x, LeaveCC, cond); } else { @@ -999,7 +995,7 @@ void Assembler::bl(int branch_offset, Condition cond) { void Assembler::blx(int branch_offset) { // v5 and above - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); ASSERT((branch_offset & 1) == 0); int h = ((branch_offset & 2) >> 1)*B24; int imm24 = branch_offset >> 2; @@ -1009,14 +1005,14 @@ void Assembler::blx(int branch_offset) { // v5 and above void Assembler::blx(Register target, Condition cond) { // v5 and above - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); ASSERT(!target.is(pc)); emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code()); } void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code()); } @@ -1114,7 +1110,7 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2, void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { if (dst.is(pc)) { - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); } // Don't allow nop instructions in the form mov rn, rn to be generated using // the mov instruction. They must be generated using nop(int) @@ -1339,7 +1335,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, // Immediate. uint32_t rotate_imm; uint32_t immed_8; - if (MustUseConstantPool(src.rmode_) || + if (src.must_use_constant_pool() || !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { // Immediate operand cannot be encoded, load it first to register ip. RecordRelocInfo(src.rmode_, src.imm32_); @@ -1359,7 +1355,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, // Load/Store instructions. 
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { if (dst.is(pc)) { - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); } addrmod2(cond | B26 | L, dst, src); @@ -2148,6 +2144,7 @@ static Instr EncodeVCVT(const VFPType dst_type, const int dst_code, const VFPType src_type, const int src_code, + Assembler::ConversionMode mode, const Condition cond) { ASSERT(src_type != dst_type); int D, Vd, M, Vm; @@ -2166,7 +2163,7 @@ static Instr EncodeVCVT(const VFPType dst_type, if (IsIntegerVFPType(dst_type)) { opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4; sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; - op = 1; // round towards zero + op = mode; } else { ASSERT(IsIntegerVFPType(src_type)); opc2 = 0x0; @@ -2190,57 +2187,64 @@ static Instr EncodeVCVT(const VFPType dst_type, void Assembler::vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond)); + emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond)); } void Assembler::vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond)); + emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond)); } void Assembler::vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond)); + emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond)); } void Assembler::vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, + ConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond)); + emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond)); } void Assembler::vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, + ConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond)); + emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond)); } void Assembler::vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond)); + emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond)); } void Assembler::vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, + ConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond)); + emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); } @@ -2333,6 +2337,16 @@ void Assembler::vcmp(const DwVfpRegister src1, } +void Assembler::vmsr(Register dst, Condition cond) { + // Instruction details available in ARM DDI 0406A, A8-652. + // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | + // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | 0xE*B20 | B16 | + dst.code()*B12 | 0xA*B8 | B4); +} + + void Assembler::vmrs(Register dst, Condition cond) { // Instruction details available in ARM DDI 0406A, A8-652. 
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | @@ -2343,7 +2357,6 @@ void Assembler::vmrs(Register dst, Condition cond) { } - void Assembler::vsqrt(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { @@ -2377,14 +2390,14 @@ void Assembler::BlockConstPoolFor(int instructions) { // Debugging. void Assembler::RecordJSReturn() { - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); CheckBuffer(); RecordRelocInfo(RelocInfo::JS_RETURN); } void Assembler::RecordDebugBreakSlot() { - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); CheckBuffer(); RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); } @@ -2398,47 +2411,6 @@ void Assembler::RecordComment(const char* msg) { } -void Assembler::RecordPosition(int pos) { - if (pos == RelocInfo::kNoPosition) return; - ASSERT(pos >= 0); - current_position_ = pos; -} - - -void Assembler::RecordStatementPosition(int pos) { - if (pos == RelocInfo::kNoPosition) return; - ASSERT(pos >= 0); - current_statement_position_ = pos; -} - - -bool Assembler::WriteRecordedPositions() { - bool written = false; - - // Write the statement position if it is different from what was written last - // time. - if (current_statement_position_ != written_statement_position_) { - CheckBuffer(); - RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_); - written_statement_position_ = current_statement_position_; - written = true; - } - - // Write the position if it is different from what was written last time and - // also different from the written statement position. - if (current_position_ != written_position_ && - current_position_ != written_statement_position_) { - CheckBuffer(); - RecordRelocInfo(RelocInfo::POSITION, current_position_); - written_position_ = current_position_; - written = true; - } - - // Return whether something was written. - return written; -} - - void Assembler::GrowBuffer() { if (!own_buffer_) FATAL("external code buffer is too small"); diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 5b647a7537..606ff86340 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -219,6 +219,11 @@ const DwVfpRegister d13 = { 13 }; const DwVfpRegister d14 = { 14 }; const DwVfpRegister d15 = { 15 }; +// VFP FPSCR constants. +static const uint32_t kVFPExceptionMask = 0xf; +static const uint32_t kVFPRoundingModeMask = 3 << 22; +static const uint32_t kVFPFlushToZeroMask = 1 << 24; +static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22; // Coprocessor register struct CRegister { @@ -448,6 +453,7 @@ class Operand BASE_EMBEDDED { // Return true of this operand fits in one instruction so that no // 2-instruction solution with a load into the ip register is necessary. 
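Source-position bookkeeping moves off the Assembler (the RecordPosition, RecordStatementPosition, and WriteRecordedPositions methods deleted in the assembler-arm.cc hunk above) onto the shared PositionsRecorder reached via positions_recorder(). A hedged sketch of the corresponding call-site change, assuming the PositionsRecorder added in assembler.h mirrors the removed methods:

    // Hypothetical call site, for illustration only.
    static void RecordCallPosition(v8::internal::Assembler* assm, int pos) {
      // Previously: assm->RecordPosition(pos); assm->WriteRecordedPositions();
      assm->positions_recorder()->RecordPosition(pos);
      assm->positions_recorder()->WriteRecordedPositions();
    }
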
bool is_single_instruction() const; + bool must_use_constant_pool() const; inline int32_t immediate() const { ASSERT(!rm_.is_valid()); @@ -1007,26 +1013,37 @@ class Assembler : public Malloced { void vmov(const Register dst, const SwVfpRegister src, const Condition cond = al); + enum ConversionMode { + FPSCRRounding = 0, + RoundToZero = 1 + }; void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode = RoundToZero, const Condition cond = al); void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode = RoundToZero, const Condition cond = al); void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode = RoundToZero, const Condition cond = al); void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, + ConversionMode mode = RoundToZero, const Condition cond = al); void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, + ConversionMode mode = RoundToZero, const Condition cond = al); void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, + ConversionMode mode = RoundToZero, const Condition cond = al); void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, + ConversionMode mode = RoundToZero, const Condition cond = al); void vadd(const DwVfpRegister dst, @@ -1055,6 +1072,8 @@ class Assembler : public Malloced { const Condition cond = al); void vmrs(const Register dst, const Condition cond = al); + void vmsr(const Register dst, + const Condition cond = al); void vsqrt(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond = al); @@ -1117,13 +1136,9 @@ class Assembler : public Malloced { // Use --debug_code to enable. void RecordComment(const char* msg); - void RecordPosition(int pos); - void RecordStatementPosition(int pos); - bool WriteRecordedPositions(); - int pc_offset() const { return pc_ - buffer_; } - int current_position() const { return current_position_; } - int current_statement_position() const { return current_statement_position_; } + + PositionsRecorder* positions_recorder() { return &positions_recorder_; } bool can_peephole_optimize(int instructions) { if (!FLAG_peephole_optimization) return false; @@ -1259,12 +1274,6 @@ class Assembler : public Malloced { // The bound position, before this we cannot do instruction elimination. 
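Taken together, the header additions above (the kVFP* FPSCR masks, the vmsr() emitter, and the optional ConversionMode argument on the vcvt family) let generated code select a VFP rounding mode explicitly. Below is a hedged sketch, not part of the patch, of a double-to-int conversion that rounds towards minus infinity; it assumes a VFP3 CpuFeatures scope is active, and the register choices are arbitrary:

    // Illustration only: convert d0 to a signed int in s0 using the RM mode.
    #include "v8.h"
    #include "macro-assembler.h"

    namespace v8 {
    namespace internal {

    static void EmitConvertRoundingDown(MacroAssembler* masm, Register scratch) {
      masm->vmrs(scratch);                                   // read current FPSCR
      masm->push(scratch);                                   // save it for restore
      masm->bic(scratch, scratch, Operand(kVFPRoundingModeMask));
      masm->orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
      masm->vmsr(scratch);                                   // install RM rounding
      masm->vcvt_s32_f64(s0, d0, Assembler::FPSCRRounding);  // honor FPSCR mode
      masm->pop(scratch);
      masm->vmsr(scratch);                                   // restore caller FPSCR
    }

    } }  // namespace v8::internal
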
int last_bound_pos_; - // source position information - int current_position_; - int current_statement_position_; - int written_position_; - int written_statement_position_; - // Code emission inline void CheckBuffer(); void GrowBuffer(); @@ -1290,8 +1299,21 @@ class Assembler : public Malloced { friend class RelocInfo; friend class CodePatcher; friend class BlockConstPoolScope; + + PositionsRecorder positions_recorder_; + friend class PositionsRecorder; + friend class EnsureSpace; }; + +class EnsureSpace BASE_EMBEDDED { + public: + explicit EnsureSpace(Assembler* assembler) { + assembler->CheckBuffer(); + } +}; + + } } // namespace v8::internal #endif // V8_ARM_ASSEMBLER_ARM_H_ diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 70ff244649..dd0520feed 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -43,6 +43,7 @@ #include "register-allocator-inl.h" #include "runtime.h" #include "scopes.h" +#include "stub-cache.h" #include "virtual-frame-inl.h" #include "virtual-frame-arm-inl.h" @@ -557,7 +558,7 @@ void CodeGenerator::Load(Expression* expr) { void CodeGenerator::LoadGlobal() { Register reg = frame_->GetTOSRegister(); - __ ldr(reg, GlobalObject()); + __ ldr(reg, GlobalObjectOperand()); frame_->EmitPush(reg); } @@ -1891,18 +1892,15 @@ void CodeGenerator::CheckStack() { frame_->SpillAll(); Comment cmnt(masm_, "[ check stack"); __ LoadRoot(ip, Heap::kStackLimitRootIndex); - // Put the lr setup instruction in the delay slot. kInstrSize is added to - // the implicit 8 byte offset that always applies to operations with pc and - // gives a return address 12 bytes down. - masm_->add(lr, pc, Operand(Assembler::kInstrSize)); masm_->cmp(sp, Operand(ip)); StackCheckStub stub; // Call the stub if lower. - masm_->mov(pc, + masm_->mov(ip, Operand(reinterpret_cast(stub.GetCode().location()), RelocInfo::CODE_TARGET), LeaveCC, lo); + masm_->Call(ip, lo); } @@ -4232,7 +4230,7 @@ void CodeGenerator::VisitCall(Call* node) { // Setup the name register and call the IC initialization code. __ mov(r2, Operand(var->name())); InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; - Handle stub = ComputeCallInitialize(arg_count, in_loop); + Handle stub = StubCache::ComputeCallInitialize(arg_count, in_loop); CodeForSourcePosition(node->position()); frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT, arg_count + 1); @@ -4326,7 +4324,8 @@ void CodeGenerator::VisitCall(Call* node) { // Set the name register and call the IC initialization code. __ mov(r2, Operand(name)); InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; - Handle stub = ComputeCallInitialize(arg_count, in_loop); + Handle stub = + StubCache::ComputeCallInitialize(arg_count, in_loop); CodeForSourcePosition(node->position()); frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); __ ldr(cp, frame_->Context()); @@ -4337,9 +4336,12 @@ void CodeGenerator::VisitCall(Call* node) { // ------------------------------------------- // JavaScript example: 'array[index](1, 2, 3)' // ------------------------------------------- + + // Load the receiver and name of the function. Load(property->obj()); + Load(property->key()); + if (property->is_synthetic()) { - Load(property->key()); EmitKeyedLoad(); // Put the function below the receiver. // Use the global receiver. 
@@ -4349,21 +4351,28 @@ void CodeGenerator::VisitCall(Call* node) { CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position()); frame_->EmitPush(r0); } else { + // Swap the name of the function and the receiver on the stack to follow + // the calling convention for call ICs. + Register key = frame_->PopToRegister(); + Register receiver = frame_->PopToRegister(key); + frame_->EmitPush(key); + frame_->EmitPush(receiver); + // Load the arguments. int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Load(args->at(i)); } - // Set the name register and call the IC initialization code. - Load(property->key()); - frame_->SpillAll(); - frame_->EmitPop(r2); // Function name. - + // Load the key into r2 and call the IC initialization code. InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; - Handle stub = ComputeKeyedCallInitialize(arg_count, in_loop); + Handle stub = + StubCache::ComputeKeyedCallInitialize(arg_count, in_loop); CodeForSourcePosition(node->position()); + frame_->SpillAll(); + __ ldr(r2, frame_->ElementAt(arg_count + 1)); frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); + frame_->Drop(); // Drop the key still on the stack. __ ldr(cp, frame_->Context()); frame_->EmitPush(r0); } @@ -5135,11 +5144,11 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode { __ b(eq, &false_result); __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset)); __ ldr(scratch2_, - CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX)); + ContextOperand(cp, Context::GLOBAL_INDEX)); __ ldr(scratch2_, FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset)); __ ldr(scratch2_, - CodeGenerator::ContextOperand( + ContextOperand( scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); __ cmp(scratch1_, scratch2_); __ b(ne, &false_result); @@ -5496,73 +5505,6 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList* args) { } -void CodeGenerator::GenerateRegExpCloneResult(ZoneList* args) { - ASSERT_EQ(1, args->length()); - - Load(args->at(0)); - frame_->PopToR0(); - { - VirtualFrame::SpilledScope spilled_scope(frame_); - - Label done; - Label call_runtime; - __ BranchOnSmi(r0, &done); - - // Load JSRegExp map into r1. Check that argument object has this map. - // Arguments to this function should be results of calling RegExp exec, - // which is either an unmodified JSRegExpResult or null. Anything not having - // the unmodified JSRegExpResult map is returned unmodified. - // This also ensures that elements are fast. - - __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX)); - __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset)); - __ ldr(r1, ContextOperand(r1, Context::REGEXP_RESULT_MAP_INDEX)); - __ ldr(ip, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ cmp(r1, Operand(ip)); - __ b(ne, &done); - - if (FLAG_debug_code) { - __ LoadRoot(r2, Heap::kEmptyFixedArrayRootIndex); - __ ldr(ip, FieldMemOperand(r0, JSObject::kPropertiesOffset)); - __ cmp(ip, r2); - __ Check(eq, "JSRegExpResult: default map but non-empty properties."); - } - - // All set, copy the contents to a new object. - __ AllocateInNewSpace(JSRegExpResult::kSize, - r2, - r3, - r4, - &call_runtime, - NO_ALLOCATION_FLAGS); - // Store RegExpResult map as map of allocated object. - ASSERT(JSRegExpResult::kSize == 6 * kPointerSize); - // Copy all fields (map is already in r1) from (untagged) r0 to r2. - // Change map of elements array (ends up in r4) to be a FixedCOWArray. 
- __ bic(r0, r0, Operand(kHeapObjectTagMask)); - __ ldm(ib, r0, r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit()); - __ stm(ia, r2, - r1.bit() | r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit()); - ASSERT(JSRegExp::kElementsOffset == 2 * kPointerSize); - // Check whether elements array is empty fixed array, and otherwise make - // it copy-on-write (it never should be empty unless someone is messing - // with the arguments to the runtime function). - __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex); - __ add(r0, r2, Operand(kHeapObjectTag)); // Tag result and move it to r0. - __ cmp(r4, ip); - __ b(eq, &done); - __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); - __ str(ip, FieldMemOperand(r4, HeapObject::kMapOffset)); - __ b(&done); - __ bind(&call_runtime); - __ push(r0); - __ CallRuntime(Runtime::kRegExpCloneResult, 1); - __ bind(&done); - } - frame_->EmitPush(r0); -} - - class DeferredSearchCache: public DeferredCode { public: DeferredSearchCache(Register dst, Register cache, Register key) @@ -5892,7 +5834,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) { // Prepare stack for calling JS runtime function. // Push the builtins object found in the current global object. Register scratch = VirtualFrame::scratch0(); - __ ldr(scratch, GlobalObject()); + __ ldr(scratch, GlobalObjectOperand()); Register builtins = frame_->GetTOSRegister(); __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset)); frame_->EmitPush(builtins); @@ -5910,7 +5852,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) { // Call the JS runtime function. __ mov(r2, Operand(node->name())); InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; - Handle stub = ComputeCallInitialize(arg_count, in_loop); + Handle stub = StubCache::ComputeCallInitialize(arg_count, in_loop); frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); __ ldr(cp, frame_->Context()); frame_->EmitPush(r0); diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index e6fd6071e1..2e8f46668c 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -279,10 +279,6 @@ class CodeGenerator: public AstVisitor { return inlined_write_barrier_size_ + 4; } - static MemOperand ContextOperand(Register context, int index) { - return MemOperand(context, Context::SlotOffset(index)); - } - private: // Type of a member function that generates inline code for a native function. typedef void (CodeGenerator::*InlineFunctionGenerator) @@ -349,10 +345,6 @@ class CodeGenerator: public AstVisitor { JumpTarget* slow); // Expressions - static MemOperand GlobalObject() { - return ContextOperand(cp, Context::GLOBAL_INDEX); - } - void LoadCondition(Expression* x, JumpTarget* true_target, JumpTarget* false_target, @@ -452,10 +444,6 @@ class CodeGenerator: public AstVisitor { static Handle ComputeLazyCompile(int argc); void ProcessDeclarations(ZoneList* declarations); - static Handle ComputeCallInitialize(int argc, InLoopFlag in_loop); - - static Handle ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop); - // Declare global variables and functions in the given array of // name/value pairs. void DeclareGlobals(Handle pairs); @@ -518,8 +506,6 @@ class CodeGenerator: public AstVisitor { void GenerateRegExpConstructResult(ZoneList* args); - void GenerateRegExpCloneResult(ZoneList* args); - // Support for fast native caches. 
void GenerateGetFromCache(ZoneList* args); diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 123c5e7972..36f6283c96 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -206,6 +206,13 @@ enum VFPRegPrecision { kDoublePrecision = 1 }; +// VFP rounding modes. See ARM DDI 0406B Page A2-29. +enum FPSCRRoundingModes { + RN, // Round to Nearest. + RP, // Round towards Plus Infinity. + RM, // Round towards Minus Infinity. + RZ // Round towards zero. +}; typedef int32_t instr_t; diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 4e7580f868..297a2db5b2 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -1046,6 +1046,7 @@ int Decoder::DecodeType7(Instr* instr) { // Dd = vdiv(Dn, Dm) // vcmp(Dd, Dm) // vmrs +// vmsr // Dd = vsqrt(Dm) void Decoder::DecodeTypeVFP(Instr* instr) { ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) ); @@ -1111,16 +1112,22 @@ void Decoder::DecodeTypeVFP(Instr* instr) { if ((instr->VCField() == 0x0) && (instr->VAField() == 0x0)) { DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr); - } else if ((instr->VLField() == 0x1) && - (instr->VCField() == 0x0) && + } else if ((instr->VCField() == 0x0) && (instr->VAField() == 0x7) && (instr->Bits(19, 16) == 0x1)) { - if (instr->Bits(15, 12) == 0xF) - Format(instr, "vmrs'cond APSR, FPSCR"); - else - Unknown(instr); // Not used by V8. - } else { - Unknown(instr); // Not used by V8. + if (instr->VLField() == 0) { + if (instr->Bits(15, 12) == 0xF) { + Format(instr, "vmsr'cond FPSCR, APSR"); + } else { + Format(instr, "vmsr'cond FPSCR, 'rt"); + } + } else { + if (instr->Bits(15, 12) == 0xF) { + Format(instr, "vmrs'cond APSR, FPSCR"); + } else { + Format(instr, "vmrs'cond 'rt, FPSCR"); + } + } } } } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 9935e038f5..c50f84ad5e 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -36,6 +36,7 @@ #include "full-codegen.h" #include "parser.h" #include "scopes.h" +#include "stub-cache.h" namespace v8 { namespace internal { @@ -171,19 +172,16 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { } // Check the stack for overflow or break request. - // Put the lr setup instruction in the delay slot. The kInstrSize is - // added to the implicit 8 byte offset that always applies to operations - // with pc and gives a return address 12 bytes down. { Comment cmnt(masm_, "[ Stack check"); __ LoadRoot(r2, Heap::kStackLimitRootIndex); - __ add(lr, pc, Operand(Assembler::kInstrSize)); __ cmp(sp, Operand(r2)); StackCheckStub stub; - __ mov(pc, + __ mov(ip, Operand(reinterpret_cast(stub.GetCode().location()), RelocInfo::CODE_TARGET), LeaveCC, lo); + __ Call(ip, lo); } if (FLAG_trace) { @@ -1019,7 +1017,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions( __ bind(&fast); } - __ ldr(r0, CodeGenerator::GlobalObject()); + __ ldr(r0, GlobalObjectOperand()); __ mov(r2, Operand(slot->var()->name())); RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) ? RelocInfo::CODE_TARGET @@ -1040,7 +1038,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) { Comment cmnt(masm_, "Global variable"); // Use inline caching. Variable name is passed in r2 and the global // object (receiver) in r0. 
- __ ldr(r0, CodeGenerator::GlobalObject()); + __ ldr(r0, GlobalObjectOperand()); __ mov(r2, Operand(var->name())); Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); @@ -1514,7 +1512,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // assignment. Right-hand-side value is passed in r0, variable name in // r2, and the global object in r1. __ mov(r2, Operand(var->name())); - __ ldr(r1, CodeGenerator::GlobalObject()); + __ ldr(r1, GlobalObjectOperand()); Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); @@ -1688,15 +1686,17 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr, // Code common for calls using the IC. ZoneList* args = expr->arguments(); int arg_count = args->length(); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + __ mov(r2, Operand(name)); } - __ mov(r2, Operand(name)); // Record source position for debugger. - SetSourcePosition(expr->position()); + SetSourcePosition(expr->position(), FORCED_POSITION); // Call the IC initialization code. InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; - Handle ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop); + Handle ic = StubCache::ComputeCallInitialize(arg_count, in_loop); EmitCallIC(ic, mode); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -1707,24 +1707,33 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr, void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode) { + // Load the key. + VisitForAccumulatorValue(key); + + // Swap the name of the function and the receiver on the stack to follow + // the calling convention for call ICs. + __ pop(r1); + __ push(r0); + __ push(r1); + // Code common for calls using the IC. ZoneList* args = expr->arguments(); int arg_count = args->length(); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } } - VisitForAccumulatorValue(key); - __ mov(r2, r0); // Record source position for debugger. - SetSourcePosition(expr->position()); + SetSourcePosition(expr->position(), FORCED_POSITION); // Call the IC initialization code. InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; - Handle ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count, - in_loop); + Handle ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop); + __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key. EmitCallIC(ic, mode); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - context()->Plug(r0); + context()->DropAndPlug(1, r0); // Drop the key still on the stack. } @@ -1732,11 +1741,13 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { // Code common for calls using the call stub. ZoneList* args = expr->arguments(); int arg_count = args->length(); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } } // Record source position for debugger. 
- SetSourcePosition(expr->position()); + SetSourcePosition(expr->position(), FORCED_POSITION); InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); __ CallStub(&stub); @@ -1756,41 +1767,45 @@ void FullCodeGenerator::VisitCall(Call* expr) { // resolve the function we need to call and the receiver of the // call. Then we call the resolved function using the given // arguments. - VisitForStackValue(fun); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ push(r2); // Reserved receiver slot. - - // Push the arguments. ZoneList* args = expr->arguments(); int arg_count = args->length(); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - // Push copy of the function - found below the arguments. - __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ push(r1); + { PreserveStatementPositionScope pos_scope(masm()->positions_recorder()); + VisitForStackValue(fun); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ push(r2); // Reserved receiver slot. + + // Push the arguments. + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } - // Push copy of the first argument or undefined if it doesn't exist. - if (arg_count > 0) { - __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); + // Push copy of the function - found below the arguments. + __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ push(r1); - } else { - __ push(r2); - } - // Push the receiver of the enclosing function and do runtime call. - __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); - __ push(r1); - __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3); + // Push copy of the first argument or undefined if it doesn't exist. + if (arg_count > 0) { + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); + __ push(r1); + } else { + __ push(r2); + } - // The runtime call returns a pair of values in r0 (function) and - // r1 (receiver). Touch up the stack with the right values. - __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ str(r1, MemOperand(sp, arg_count * kPointerSize)); + // Push the receiver of the enclosing function and do runtime call. + __ ldr(r1, + MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); + __ push(r1); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3); + + // The runtime call returns a pair of values in r0 (function) and + // r1 (receiver). Touch up the stack with the right values. + __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ str(r1, MemOperand(sp, arg_count * kPointerSize)); + } // Record source position for debugger. - SetSourcePosition(expr->position()); + SetSourcePosition(expr->position(), FORCED_POSITION); InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); __ CallStub(&stub); @@ -1799,7 +1814,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { context()->DropAndPlug(1, r0); } else if (var != NULL && !var->is_this() && var->is_global()) { // Push global object as receiver for the call IC. - __ ldr(r0, CodeGenerator::GlobalObject()); + __ ldr(r0, GlobalObjectOperand()); __ push(r0); EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT); } else if (var != NULL && var->AsSlot() != NULL && @@ -1807,12 +1822,14 @@ void FullCodeGenerator::VisitCall(Call* expr) { // Call to a lookup slot (dynamically introduced variable). 
Label slow, done; - // Generate code for loading from variables potentially shadowed - // by eval-introduced variables. - EmitDynamicLoadFromSlotFastCase(var->AsSlot(), - NOT_INSIDE_TYPEOF, - &slow, - &done); + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLoadFromSlotFastCase(var->AsSlot(), + NOT_INSIDE_TYPEOF, + &slow, + &done); + } __ bind(&slow); // Call the runtime to find the function to call (returned in r0) @@ -1833,7 +1850,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // Push function. __ push(r0); // Push global receiver. - __ ldr(r1, CodeGenerator::GlobalObject()); + __ ldr(r1, GlobalObjectOperand()); __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ push(r1); __ bind(&call); @@ -1846,22 +1863,28 @@ void FullCodeGenerator::VisitCall(Call* expr) { Literal* key = prop->key()->AsLiteral(); if (key != NULL && key->handle()->IsSymbol()) { // Call to a named property, use call IC. - VisitForStackValue(prop->obj()); + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + VisitForStackValue(prop->obj()); + } EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET); } else { // Call to a keyed property. // For a synthetic property use keyed load IC followed by function call, // for a regular property use keyed CallIC. - VisitForStackValue(prop->obj()); + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + VisitForStackValue(prop->obj()); + } if (prop->is_synthetic()) { - VisitForAccumulatorValue(prop->key()); + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + VisitForAccumulatorValue(prop->key()); + } // Record source code position for IC call. - SetSourcePosition(prop->position()); + SetSourcePosition(prop->position(), FORCED_POSITION); __ pop(r1); // We do not need to keep the receiver. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); - __ ldr(r1, CodeGenerator::GlobalObject()); + __ ldr(r1, GlobalObjectOperand()); __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ Push(r0, r1); // Function, receiver. EmitCallWithStub(expr); @@ -1879,9 +1902,12 @@ void FullCodeGenerator::VisitCall(Call* expr) { loop_depth() == 0) { lit->set_try_full_codegen(true); } - VisitForStackValue(fun); + + { PreserveStatementPositionScope scope(masm()->positions_recorder()); + VisitForStackValue(fun); + } // Load global receiver object. - __ ldr(r1, CodeGenerator::GlobalObject()); + __ ldr(r1, GlobalObjectOperand()); __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ push(r1); // Emit function call. @@ -2759,7 +2785,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->is_jsruntime()) { // Prepare for calling JS runtime function. - __ ldr(r0, CodeGenerator::GlobalObject()); + __ ldr(r0, GlobalObjectOperand()); __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset)); __ push(r0); } @@ -2773,8 +2799,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->is_jsruntime()) { // Call the JS runtime function. __ mov(r2, Operand(expr->name())); - Handle ic = CodeGenerator::ComputeCallInitialize(arg_count, - NOT_IN_LOOP); + Handle ic = StubCache::ComputeCallInitialize(arg_count, NOT_IN_LOOP); EmitCallIC(ic, RelocInfo::CODE_TARGET); // Restore context register. 
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2811,7 +2836,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { VisitForStackValue(prop->obj()); VisitForStackValue(prop->key()); } else if (var->is_global()) { - __ ldr(r1, CodeGenerator::GlobalObject()); + __ ldr(r1, GlobalObjectOperand()); __ mov(r0, Operand(var->name())); __ Push(r1, r0); } else { @@ -3077,7 +3102,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { VariableProxy* proxy = expr->AsVariableProxy(); if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) { Comment cmnt(masm_, "Global variable"); - __ ldr(r0, CodeGenerator::GlobalObject()); + __ ldr(r0, GlobalObjectOperand()); __ mov(r2, Operand(proxy->name())); Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); // Use a regular load, not a contextual load, to avoid a reference diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index a09afdf754..4c1f9835f4 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1988,9 +1988,9 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, // Not infinity or NaN simply convert to int. if (IsElementTypeSigned(array_type)) { - __ vcvt_s32_f64(s0, d0, ne); + __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); } else { - __ vcvt_u32_f64(s0, d0, ne); + __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); } __ vmov(r5, s0, ne); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 7f6090bc50..d2c22af53d 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -129,7 +129,7 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, // address is loaded. The mov method will automatically record // positions when pc is the target, since this is not the case here // we have to do it explicitly. - WriteRecordedPositions(); + positions_recorder()->WriteRecordedPositions(); mov(ip, Operand(target, rmode), LeaveCC, cond); blx(ip, cond); @@ -220,20 +220,20 @@ void MacroAssembler::Move(Register dst, Register src) { void MacroAssembler::And(Register dst, Register src1, const Operand& src2, Condition cond) { - if (!CpuFeatures::IsSupported(ARMv7) || src2.is_single_instruction()) { - and_(dst, src1, src2, LeaveCC, cond); - return; - } - int32_t immediate = src2.immediate(); - if (immediate == 0) { + if (!src2.is_reg() && + !src2.must_use_constant_pool() && + src2.immediate() == 0) { mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond); - return; - } - if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) { - ubfx(dst, src1, 0, WhichPowerOf2(immediate + 1), cond); - return; + + } else if (!src2.is_single_instruction() && + !src2.must_use_constant_pool() && + CpuFeatures::IsSupported(ARMv7) && + IsPowerOf2(src2.immediate() + 1)) { + ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond); + + } else { + and_(dst, src1, src2, LeaveCC, cond); } - and_(dst, src1, src2, LeaveCC, cond); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 48a8059966..8d89d6984c 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -727,6 +727,16 @@ class CodePatcher { // ----------------------------------------------------------------------------- // Static helper functions. 
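The rewritten MacroAssembler::And above hinges on a small identity: and-ing with a constant of the form 2^k - 1 keeps exactly the low k bits, which a single ubfx (unsigned bitfield extract) can do on ARMv7, hence the IsPowerOf2(immediate + 1) test. A quick host-side check of that identity (plain C++, not ARM code):

#include <cassert>
#include <stdint.h>

// True if x is a power of two; the patch tests IsPowerOf2(imm + 1) to
// recognise masks of the form 2^k - 1.
static bool IsPowerOf2(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

// Keep the low 'width' bits of x, which is what ubfx(dst, src, 0, width) does.
static uint32_t ExtractLowBits(uint32_t x, int width) {
  return width == 32 ? x : (x & ((1u << width) - 1));
}

int main() {
  const uint32_t value = 0xDEADBEEF;
  for (int k = 1; k < 32; ++k) {
    uint32_t mask = (1u << k) - 1;
    assert(IsPowerOf2(mask + 1));
    // AND with 2^k - 1 and a k-bit field extract at offset 0 agree.
    assert((value & mask) == ExtractLowBits(value, k));
  }
  return 0;
}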
+static MemOperand ContextOperand(Register context, int index) { + return MemOperand(context, Context::SlotOffset(index)); +} + + +static inline MemOperand GlobalObjectOperand() { + return ContextOperand(cp, Context::GLOBAL_INDEX); +} + + #ifdef GENERATED_CODE_COVERAGE #define CODE_COVERAGE_STRINGIFY(x) #x #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index cb91520f3a..3ec5f449d8 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -705,6 +705,7 @@ Simulator::Simulator() { z_flag_FPSCR_ = false; c_flag_FPSCR_ = false; v_flag_FPSCR_ = false; + FPSCR_rounding_mode_ = RZ; inv_op_vfp_flag_ = false; div_zero_vfp_flag_ = false; @@ -2501,10 +2502,45 @@ void Simulator::DecodeTypeVFP(Instr* instr) { (instr->VAField() == 0x7) && (instr->Bits(19, 16) == 0x1)) { // vmrs - if (instr->RtField() == 0xF) + uint32_t rt = instr->RtField(); + if (rt == 0xF) { Copy_FPSCR_to_APSR(); - else - UNIMPLEMENTED(); // Not used by V8. + } else { + // Emulate FPSCR from the Simulator flags. + uint32_t fpscr = (n_flag_FPSCR_ << 31) | + (z_flag_FPSCR_ << 30) | + (c_flag_FPSCR_ << 29) | + (v_flag_FPSCR_ << 28) | + (inexact_vfp_flag_ << 4) | + (underflow_vfp_flag_ << 3) | + (overflow_vfp_flag_ << 2) | + (div_zero_vfp_flag_ << 1) | + (inv_op_vfp_flag_ << 0) | + (FPSCR_rounding_mode_ << 22); + set_register(rt, fpscr); + } + } else if ((instr->VLField() == 0x0) && + (instr->VCField() == 0x0) && + (instr->VAField() == 0x7) && + (instr->Bits(19, 16) == 0x1)) { + // vmsr + uint32_t rt = instr->RtField(); + if (rt == pc) { + UNREACHABLE(); + } else { + uint32_t rt_value = get_register(rt); + n_flag_FPSCR_ = (rt_value >> 31) & 1; + z_flag_FPSCR_ = (rt_value >> 30) & 1; + c_flag_FPSCR_ = (rt_value >> 29) & 1; + v_flag_FPSCR_ = (rt_value >> 28) & 1; + inexact_vfp_flag_ = (rt_value >> 4) & 1; + underflow_vfp_flag_ = (rt_value >> 3) & 1; + overflow_vfp_flag_ = (rt_value >> 2) & 1; + div_zero_vfp_flag_ = (rt_value >> 1) & 1; + inv_op_vfp_flag_ = (rt_value >> 0) & 1; + FPSCR_rounding_mode_ = + static_cast((rt_value >> 22) & 3); + } } else { UNIMPLEMENTED(); // Not used by V8. } @@ -2605,29 +2641,71 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) { if (to_integer) { bool unsigned_integer = (instr->Bit(16) == 0); + FPSCRRoundingModes mode; if (instr->Bit(7) != 1) { - // Only rounding towards zero supported. - UNIMPLEMENTED(); // Not used by V8. + // Use FPSCR defined rounding mode. + mode = FPSCR_rounding_mode_; + // Only RZ and RM modes are supported. + ASSERT((mode == RM) || (mode == RZ)); + } else { + // VFP uses round towards zero by default. + mode = RZ; } int dst = instr->VFPDRegCode(kSinglePrecision); int src = instr->VFPMRegCode(src_precision); + int32_t kMaxInt = v8::internal::kMaxInt; + int32_t kMinInt = v8::internal::kMinInt; + switch (mode) { + case RM: + if (src_precision == kDoublePrecision) { + double val = get_double_from_d_register(src); - if (src_precision == kDoublePrecision) { - double val = get_double_from_d_register(src); + inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); - int sint = unsigned_integer ? static_cast(val) : - static_cast(val); + int sint = unsigned_integer ? static_cast(val) : + static_cast(val); + sint = sint > val ? 
sint - 1 : sint; - set_s_register_from_sinteger(dst, sint); - } else { - float val = get_float_from_s_register(src); + set_s_register_from_sinteger(dst, sint); + } else { + float val = get_float_from_s_register(src); + + inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); - int sint = unsigned_integer ? static_cast(val) : - static_cast(val); + int sint = unsigned_integer ? static_cast(val) : + static_cast(val); + sint = sint > val ? sint - 1 : sint; - set_s_register_from_sinteger(dst, sint); + set_s_register_from_sinteger(dst, sint); + } + break; + case RZ: + if (src_precision == kDoublePrecision) { + double val = get_double_from_d_register(src); + + inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); + + int sint = unsigned_integer ? static_cast(val) : + static_cast(val); + + set_s_register_from_sinteger(dst, sint); + } else { + float val = get_float_from_s_register(src); + + inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); + + int sint = unsigned_integer ? static_cast(val) : + static_cast(val); + + set_s_register_from_sinteger(dst, sint); + } + break; + + default: + UNREACHABLE(); } + } else { bool unsigned_integer = (instr->Bit(7) == 0); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 3e023489ee..c37b3f7156 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -306,6 +306,9 @@ class Simulator { bool c_flag_FPSCR_; bool v_flag_FPSCR_; + // VFP rounding mode. See ARM DDI 0406B Page A2-29. + FPSCRRoundingModes FPSCR_rounding_mode_; + // VFP FP exception flags architecture state. bool inv_op_vfp_flag_; bool div_zero_vfp_flag_; diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 5e29c2e485..a0ef80a0fd 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1676,8 +1676,143 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object, JSGlobalPropertyCell* cell, JSFunction* function, String* name) { - // TODO(872): implement this. - return Heap::undefined_value(); + // ----------- S t a t e ------------- + // -- r2 : function name + // -- lr : return address + // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) + // -- ... + // -- sp[argc * 4] : receiver + // ----------------------------------- + + if (!CpuFeatures::IsSupported(VFP3)) return Heap::undefined_value(); + CpuFeatures::Scope scope_vfp3(VFP3); + + const int argc = arguments().immediate(); + + // If the object is not a JSObject or we got an unexpected number of + // arguments, bail out to the regular call. + if (!object->IsJSObject() || argc != 1) return Heap::undefined_value(); + + Label miss, slow; + GenerateNameCheck(name, &miss); + + if (cell == NULL) { + __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); + + STATIC_ASSERT(kSmiTag == 0); + __ BranchOnSmi(r1, &miss); + + CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, + &miss); + } else { + ASSERT(cell->value() == function); + GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss); + GenerateLoadFunctionFromCell(cell, function, &miss); + } + + // Load the (only) argument into r0. + __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); + + // If the argument is a smi, just return. 
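Both the simulator's new RM handling above and the Math.floor stub being generated here rest on the same fact: converting to an integer while the FPU rounding mode is round-toward-minus-infinity computes floor. A portable sketch of that idea using the C floating-point environment (an analogy for readers, not the ARM code path; some compilers additionally need the FENV_ACCESS pragma and no constant folding for this to hold):

#include <cassert>
#include <cfenv>
#include <cmath>

int main() {
  const int old_mode = std::fegetround();
  std::fesetround(FE_DOWNWARD);  // analogous to writing RM into FPSCR[23:22]

  // rint() now rounds toward minus infinity, i.e. it behaves like floor().
  assert(std::rint(2.7) == 2.0);
  assert(std::rint(-2.1) == -3.0);
  assert(std::rint(2.7) == std::floor(2.7));

  std::fesetround(old_mode);     // restore, like the stub restores FPSCR from r3
  return 0;
}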
+ STATIC_ASSERT(kSmiTag == 0); + __ tst(r0, Operand(kSmiTagMask)); + __ Drop(argc + 1, eq); + __ Ret(eq); + + __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true); + + Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return; + + // If vfp3 is enabled, we use the fpu rounding with the RM (round towards + // minus infinity) mode. + + // Load the HeapNumber value. + // We will need access to the value in the core registers, so we load it + // with ldrd and move it to the fpu. It also spares a sub instruction for + // updating the HeapNumber value address, as vldr expects a multiple + // of 4 offset. + __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ vmov(d1, r4, r5); + + // Backup FPSCR. + __ vmrs(r3); + // Set custom FPCSR: + // - Set rounding mode to "Round towards Minus Infinity" + // (ie bits [23:22] = 0b10). + // - Clear vfp cumulative exception flags (bits [3:0]). + // - Make sure Flush-to-zero mode control bit is unset (bit 22). + __ bic(r9, r3, + Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); + __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits)); + __ vmsr(r9); + + // Convert the argument to an integer. + __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al); + + // Use vcvt latency to start checking for special cases. + // Get the argument exponent and clear the sign bit. + __ bic(r6, r5, Operand(HeapNumber::kSignMask)); + __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord)); + + // Retrieve FPSCR and check for vfp exceptions. + __ vmrs(r9); + __ tst(r9, Operand(kVFPExceptionMask)); + __ b(&no_vfp_exception, eq); + + // Check for NaN, Infinity, and -Infinity. + // They are invariant through a Math.Floor call, so just + // return the original argument. + __ sub(r7, r6, Operand(HeapNumber::kExponentMask + >> HeapNumber::kMantissaBitsInTopWord), SetCC); + __ b(&restore_fpscr_and_return, eq); + // We had an overflow or underflow in the conversion. Check if we + // have a big exponent. + __ cmp(r7, Operand(HeapNumber::kMantissaBits)); + // If greater or equal, the argument is already round and in r0. + __ b(&restore_fpscr_and_return, ge); + __ b(&slow); + + __ bind(&no_vfp_exception); + // Move the result back to general purpose register r0. + __ vmov(r0, s0); + // Check if the result fits into a smi. + __ add(r1, r0, Operand(0x40000000), SetCC); + __ b(&wont_fit_smi, mi); + // Tag the result. + STATIC_ASSERT(kSmiTag == 0); + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + + // Check for -0. + __ cmp(r0, Operand(0)); + __ b(&restore_fpscr_and_return, ne); + // r5 already holds the HeapNumber exponent. + __ tst(r5, Operand(HeapNumber::kSignMask)); + // If our HeapNumber is negative it was -0, so load its address and return. + // Else r0 is loaded with 0, so we can also just return. + __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne); + + __ bind(&restore_fpscr_and_return); + // Restore FPSCR and return. + __ vmsr(r3); + __ Drop(argc + 1); + __ Ret(); + + __ bind(&wont_fit_smi); + __ bind(&slow); + // Restore FPCSR and fall to slow case. + __ vmsr(r3); + + // Tail call the full function. We do not have to patch the receiver + // because the function makes no use of it. + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); + + __ bind(&miss); + // r2: function name. + MaybeObject* obj = GenerateMissBranch(); + if (obj->IsFailure()) return obj; + + // Return the generated code. + return (cell == NULL) ? 
GetCode(function) : GetCode(NORMAL, name); } diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc index da76edf391..45f48767c4 100644 --- a/deps/v8/src/arm/virtual-frame-arm.cc +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -245,18 +245,15 @@ void VirtualFrame::AllocateStackSlots() { __ LoadRoot(r2, Heap::kStackLimitRootIndex); } // Check the stack for overflow or a break request. - // Put the lr setup instruction in the delay slot. The kInstrSize is added - // to the implicit 8 byte offset that always applies to operations with pc - // and gives a return address 12 bytes down. - masm()->add(lr, pc, Operand(Assembler::kInstrSize)); masm()->cmp(sp, Operand(r2)); StackCheckStub stub; // Call the stub if lower. - masm()->mov(pc, + masm()->mov(ip, Operand(reinterpret_cast(stub.GetCode().location()), RelocInfo::CODE_TARGET), LeaveCC, lo); + masm()->Call(ip, lo); } diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index ce90dceacb..7493673e81 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -804,4 +804,53 @@ ExternalReference ExternalReference::debug_step_in_fp_address() { } #endif + +void PositionsRecorder::RecordPosition(int pos, + PositionRecordingType recording_type) { + ASSERT(pos != RelocInfo::kNoPosition); + ASSERT(pos >= 0); + current_position_ = pos; + current_position_recording_type_ = recording_type; +} + + +void PositionsRecorder::RecordStatementPosition(int pos) { + ASSERT(pos != RelocInfo::kNoPosition); + ASSERT(pos >= 0); + current_statement_position_ = pos; +} + + +bool PositionsRecorder::WriteRecordedPositions() { + bool written = false; + + // Write the statement position if it is different from what was written last + // time. + if (current_statement_position_ != written_statement_position_) { + EnsureSpace ensure_space(assembler_); + assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION, + current_statement_position_); + written_statement_position_ = current_statement_position_; + written = true; + } + + // Write the position if it is different from what was written last time and + // also different from the written statement position or was forced. + if (current_position_ != written_position_ && + (current_position_ != current_statement_position_ || !written) && + (current_position_ != written_statement_position_ + || current_position_recording_type_ == FORCED_POSITION)) { + EnsureSpace ensure_space(assembler_); + assembler_->RecordRelocInfo(RelocInfo::POSITION, current_position_); + written_position_ = current_position_; + written = true; + } + + current_position_recording_type_ = NORMAL_POSITION; + + // Return whether something was written. 
+ return written; +} + + } } // namespace v8::internal diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 66811777fa..09159fed08 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -584,6 +584,67 @@ class ExternalReference BASE_EMBEDDED { }; +// ----------------------------------------------------------------------------- +// Position recording support + +enum PositionRecordingType { FORCED_POSITION, NORMAL_POSITION }; + +class PositionsRecorder BASE_EMBEDDED { + public: + explicit PositionsRecorder(Assembler* assembler) + : assembler_(assembler), + current_position_(RelocInfo::kNoPosition), + current_position_recording_type_(NORMAL_POSITION), + written_position_(RelocInfo::kNoPosition), + current_statement_position_(RelocInfo::kNoPosition), + written_statement_position_(RelocInfo::kNoPosition) { } + + // Set current position to pos. If recording_type is FORCED_POSITION then + // WriteRecordedPositions will write this position even if it is equal to + // statement position previously written for another pc. + void RecordPosition(int pos, + PositionRecordingType recording_type = NORMAL_POSITION); + + // Set current statement position to pos. + void RecordStatementPosition(int pos); + + // Write recorded positions to relocation information. + bool WriteRecordedPositions(); + + int current_position() const { return current_position_; } + + int current_statement_position() const { return current_statement_position_; } + + private: + Assembler* assembler_; + + int current_position_; + PositionRecordingType current_position_recording_type_; + int written_position_; + + int current_statement_position_; + int written_statement_position_; +}; + + +class PreserveStatementPositionScope BASE_EMBEDDED { + public: + explicit PreserveStatementPositionScope(PositionsRecorder* positions_recorder) + : positions_recorder_(positions_recorder), + statement_position_(positions_recorder->current_statement_position()) {} + + ~PreserveStatementPositionScope() { + if (statement_position_ != RelocInfo::kNoPosition) { + positions_recorder_->RecordStatementPosition(statement_position_); + } + } + + private: + PositionsRecorder* positions_recorder_; + int statement_position_; +}; + + // ----------------------------------------------------------------------------- // Utility functions diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc new file mode 100644 index 0000000000..088dd79f55 --- /dev/null +++ b/deps/v8/src/bignum-dtoa.cc @@ -0,0 +1,655 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "v8.h" +#include "bignum-dtoa.h" + +#include "bignum.h" +#include "double.h" + +namespace v8 { +namespace internal { + +static int NormalizedExponent(uint64_t significand, int exponent) { + ASSERT(significand != 0); + while ((significand & Double::kHiddenBit) == 0) { + significand = significand << 1; + exponent = exponent - 1; + } + return exponent; +} + + +// Forward declarations: +// Returns an estimation of k such that 10^(k-1) <= v < 10^k. +static int EstimatePower(int exponent); +// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator +// and denominator. +static void InitialScaledStartValues(double v, + int estimated_power, + bool need_boundary_deltas, + Bignum* numerator, + Bignum* denominator, + Bignum* delta_minus, + Bignum* delta_plus); +// Multiplies numerator/denominator so that its values lies in the range 1-10. +// Returns decimal_point s.t. +// v = numerator'/denominator' * 10^(decimal_point-1) +// where numerator' and denominator' are the values of numerator and +// denominator after the call to this function. +static void FixupMultiply10(int estimated_power, bool is_even, + int* decimal_point, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus); +// Generates digits from the left to the right and stops when the generated +// digits yield the shortest decimal representation of v. +static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus, + bool is_even, + Vector buffer, int* length); +// Generates 'requested_digits' after the decimal point. +static void BignumToFixed(int requested_digits, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector(buffer), int* length); +// Generates 'count' digits of numerator/denominator. +// Once 'count' digits have been produced rounds the result depending on the +// remainder (remainders of exactly .5 round upwards). Might update the +// decimal_point when rounding up (for example for 0.9999). +static void GenerateCountedDigits(int count, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector(buffer), int* length); + + +void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, + Vector buffer, int* length, int* decimal_point) { + ASSERT(v > 0); + ASSERT(!Double(v).IsSpecial()); + uint64_t significand = Double(v).Significand(); + bool is_even = (significand & 1) == 0; + int exponent = Double(v).Exponent(); + int normalized_exponent = NormalizedExponent(significand, exponent); + // estimated_power might be too low by 1. + int estimated_power = EstimatePower(normalized_exponent); + + // Shortcut for Fixed. + // The requested digits correspond to the digits after the point. If the + // number is much too small, then there is no need in trying to get any + // digits. + if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) { + buffer[0] = '\0'; + *length = 0; + // Set decimal-point to -requested_digits. 
This is what Gay does. + // Note that it should not have any effect anyways since the string is + // empty. + *decimal_point = -requested_digits; + return; + } + + Bignum numerator; + Bignum denominator; + Bignum delta_minus; + Bignum delta_plus; + // Make sure the bignum can grow large enough. The smallest double equals + // 4e-324. In this case the denominator needs fewer than 324*4 binary digits. + // The maximum double is 1.7976931348623157e308 which needs fewer than + // 308*4 binary digits. + ASSERT(Bignum::kMaxSignificantBits >= 324*4); + bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST); + InitialScaledStartValues(v, estimated_power, need_boundary_deltas, + &numerator, &denominator, + &delta_minus, &delta_plus); + // We now have v = (numerator / denominator) * 10^estimated_power. + FixupMultiply10(estimated_power, is_even, decimal_point, + &numerator, &denominator, + &delta_minus, &delta_plus); + // We now have v = (numerator / denominator) * 10^(decimal_point-1), and + // 1 <= (numerator + delta_plus) / denominator < 10 + switch (mode) { + case BIGNUM_DTOA_SHORTEST: + GenerateShortestDigits(&numerator, &denominator, + &delta_minus, &delta_plus, + is_even, buffer, length); + break; + case BIGNUM_DTOA_FIXED: + BignumToFixed(requested_digits, decimal_point, + &numerator, &denominator, + buffer, length); + break; + case BIGNUM_DTOA_PRECISION: + GenerateCountedDigits(requested_digits, decimal_point, + &numerator, &denominator, + buffer, length); + break; + default: + UNREACHABLE(); + } + buffer[*length] = '\0'; +} + + +// The procedure starts generating digits from the left to the right and stops +// when the generated digits yield the shortest decimal representation of v. A +// decimal representation of v is a number lying closer to v than to any other +// double, so it converts to v when read. +// +// This is true if d, the decimal representation, is between m- and m+, the +// upper and lower boundaries. d must be strictly between them if !is_even. +// m- := (numerator - delta_minus) / denominator +// m+ := (numerator + delta_plus) / denominator +// +// Precondition: 0 <= (numerator+delta_plus) / denominator < 10. +// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit +// will be produced. This should be the standard precondition. +static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus, + bool is_even, + Vector buffer, int* length) { + // Small optimization: if delta_minus and delta_plus are the same just reuse + // one of the two bignums. + if (Bignum::Equal(*delta_minus, *delta_plus)) { + delta_plus = delta_minus; + } + *length = 0; + while (true) { + uint16_t digit; + digit = numerator->DivideModuloIntBignum(*denominator); + ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive. + // digit = numerator / denominator (integer division). + // numerator = numerator % denominator. + buffer[(*length)++] = digit + '0'; + + // Can we stop already? + // If the remainder of the division is less than the distance to the lower + // boundary we can stop. In this case we simply round down (discarding the + // remainder). + // Similarly we test if we can round up (using the upper boundary). 
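The digit loop described above (DivideModuloIntBignum followed by Times10) is ordinary long division. The same loop with machine integers, for a fraction already scaled so that 1 <= numerator/denominator < 10 (illustration only; the real code works on Bignums and adds the boundary tests that follow):

#include <cassert>
#include <string>

int main() {
  // 27/8 = 3.375, scaled so the first generated digit is the one before
  // the decimal point.
  unsigned numerator = 27;
  const unsigned denominator = 8;

  std::string buffer;
  for (int i = 0; i < 4; ++i) {
    unsigned digit = numerator / denominator;  // DivideModuloIntBignum
    numerator %= denominator;
    buffer.push_back(static_cast<char>('0' + digit));
    numerator *= 10;                           // Times10 before the next digit
  }
  // Read as buffer * 10^(decimal_point - length): 3375 * 10^(1 - 4) = 3.375.
  assert(buffer == "3375");
  return 0;
}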
+ bool in_delta_room_minus; + bool in_delta_room_plus; + if (is_even) { + in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus); + } else { + in_delta_room_minus = Bignum::Less(*numerator, *delta_minus); + } + if (is_even) { + in_delta_room_plus = + Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0; + } else { + in_delta_room_plus = + Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0; + } + if (!in_delta_room_minus && !in_delta_room_plus) { + // Prepare for next iteration. + numerator->Times10(); + delta_minus->Times10(); + // We optimized delta_plus to be equal to delta_minus (if they share the + // same value). So don't multiply delta_plus if they point to the same + // object. + if (delta_minus != delta_plus) { + delta_plus->Times10(); + } + } else if (in_delta_room_minus && in_delta_room_plus) { + // Let's see if 2*numerator < denominator. + // If yes, then the next digit would be < 5 and we can round down. + int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator); + if (compare < 0) { + // Remaining digits are less than .5. -> Round down (== do nothing). + } else if (compare > 0) { + // Remaining digits are more than .5 of denominator. -> Round up. + // Note that the last digit could not be a '9' as otherwise the whole + // loop would have stopped earlier. + // We still have an assert here in case the preconditions were not + // satisfied. + ASSERT(buffer[(*length) - 1] != '9'); + buffer[(*length) - 1]++; + } else { + // Halfway case. + // TODO(floitsch): need a way to solve half-way cases. + // For now let's round towards even (since this is what Gay seems to + // do). + + if ((buffer[(*length) - 1] - '0') % 2 == 0) { + // Round down => Do nothing. + } else { + ASSERT(buffer[(*length) - 1] != '9'); + buffer[(*length) - 1]++; + } + } + return; + } else if (in_delta_room_minus) { + // Round down (== do nothing). + return; + } else { // in_delta_room_plus + // Round up. + // Note again that the last digit could not be '9' since this would have + // stopped the loop earlier. + // We still have an ASSERT here, in case the preconditions were not + // satisfied. + ASSERT(buffer[(*length) -1] != '9'); + buffer[(*length) - 1]++; + return; + } + } +} + + +// Let v = numerator / denominator < 10. +// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point) +// from left to right. Once 'count' digits have been produced we decide wether +// to round up or down. Remainders of exactly .5 round upwards. Numbers such +// as 9.999999 propagate a carry all the way, and change the +// exponent (decimal_point), when rounding upwards. +static void GenerateCountedDigits(int count, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector(buffer), int* length) { + ASSERT(count >= 0); + for (int i = 0; i < count - 1; ++i) { + uint16_t digit; + digit = numerator->DivideModuloIntBignum(*denominator); + ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive. + // digit = numerator / denominator (integer division). + // numerator = numerator % denominator. + buffer[i] = digit + '0'; + // Prepare for next iteration. + numerator->Times10(); + } + // Generate the last digit. + uint16_t digit; + digit = numerator->DivideModuloIntBignum(*denominator); + if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) { + digit++; + } + buffer[count - 1] = digit + '0'; + // Correct bad digits (in case we had a sequence of '9's). Propagate the + // carry until we hat a non-'9' or til we reach the first digit. 
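The correction loop that follows is plain decimal carry propagation: rounding the last digit up can produce the pseudo-digit '9' + 1, which has to ripple to the left and may even push the decimal point (9.999... rounds to 10.0...). A standalone version of that step:

#include <cassert>
#include <string>

// Propagate a carry after the last digit was incremented past '9'.
// Returns true if the carry ran past the first digit (decimal point moves).
static bool PropagateCarry(std::string* buffer) {
  for (int i = static_cast<int>(buffer->size()) - 1; i > 0; --i) {
    if ((*buffer)[i] != '0' + 10) return false;
    (*buffer)[i] = '0';
    (*buffer)[i - 1]++;
  }
  if ((*buffer)[0] == '0' + 10) {
    (*buffer)[0] = '1';  // e.g. 0.999 rounded up becomes 1.00...
    return true;         // caller increments *decimal_point
  }
  return false;
}

int main() {
  std::string digits = "129";
  digits[2]++;                        // round the last digit up: '9' becomes ':'
  assert(!PropagateCarry(&digits));
  assert(digits == "130");

  digits = "999";
  digits[2]++;
  assert(PropagateCarry(&digits));    // carry escapes, exponent grows by one
  assert(digits == "100");
  return 0;
}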
+ for (int i = count - 1; i > 0; --i) { + if (buffer[i] != '0' + 10) break; + buffer[i] = '0'; + buffer[i - 1]++; + } + if (buffer[0] == '0' + 10) { + // Propagate a carry past the top place. + buffer[0] = '1'; + (*decimal_point)++; + } + *length = count; +} + + +// Generates 'requested_digits' after the decimal point. It might omit +// trailing '0's. If the input number is too small then no digits at all are +// generated (ex.: 2 fixed digits for 0.00001). +// +// Input verifies: 1 <= (numerator + delta) / denominator < 10. +static void BignumToFixed(int requested_digits, int* decimal_point, + Bignum* numerator, Bignum* denominator, + Vector(buffer), int* length) { + // Note that we have to look at more than just the requested_digits, since + // a number could be rounded up. Example: v=0.5 with requested_digits=0. + // Even though the power of v equals 0 we can't just stop here. + if (-(*decimal_point) > requested_digits) { + // The number is definitively too small. + // Ex: 0.001 with requested_digits == 1. + // Set decimal-point to -requested_digits. This is what Gay does. + // Note that it should not have any effect anyways since the string is + // empty. + *decimal_point = -requested_digits; + *length = 0; + return; + } else if (-(*decimal_point) == requested_digits) { + // We only need to verify if the number rounds down or up. + // Ex: 0.04 and 0.06 with requested_digits == 1. + ASSERT(*decimal_point == -requested_digits); + // Initially the fraction lies in range (1, 10]. Multiply the denominator + // by 10 so that we can compare more easily. + denominator->Times10(); + if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) { + // If the fraction is >= 0.5 then we have to include the rounded + // digit. + buffer[0] = '1'; + *length = 1; + (*decimal_point)++; + } else { + // Note that we caught most of similar cases earlier. + *length = 0; + } + return; + } else { + // The requested digits correspond to the digits after the point. + // The variable 'needed_digits' includes the digits before the point. + int needed_digits = (*decimal_point) + requested_digits; + GenerateCountedDigits(needed_digits, decimal_point, + numerator, denominator, + buffer, length); + } +} + + +// Returns an estimation of k such that 10^(k-1) <= v < 10^k where +// v = f * 2^exponent and 2^52 <= f < 2^53. +// v is hence a normalized double with the given exponent. The output is an +// approximation for the exponent of the decimal approimation .digits * 10^k. +// +// The result might undershoot by 1 in which case 10^k <= v < 10^k+1. +// Note: this property holds for v's upper boundary m+ too. +// 10^k <= m+ < 10^k+1. +// (see explanation below). +// +// Examples: +// EstimatePower(0) => 16 +// EstimatePower(-52) => 0 +// +// Note: e >= 0 => EstimatedPower(e) > 0. No similar claim can be made for e<0. +static int EstimatePower(int exponent) { + // This function estimates log10 of v where v = f*2^e (with e == exponent). + // Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)). + // Note that f is bounded by its container size. Let p = 53 (the double's + // significand size). Then 2^(p-1) <= f < 2^p. + // + // Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close + // to log2(v) the function is simplified to (e+(len(f)-1)/log2(10)). + // The computed number undershoots by less than 0.631 (when we compute log3 + // and not log10). + // + // Optimization: since we only need an approximated result this computation + // can be performed on 64 bit integers. 
On x86/x64 architecture the speedup is + // not really measurable, though. + // + // Since we want to avoid overshooting we decrement by 1e10 so that + // floating-point imprecisions don't affect us. + // + // Explanation for v's boundary m+: the computation takes advantage of + // the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement + // (even for denormals where the delta can be much more important). + + const double k1Log10 = 0.30102999566398114; // 1/lg(10) + + // For doubles len(f) == 53 (don't forget the hidden bit). + const int kSignificandSize = 53; + double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10); + return static_cast(estimate); +} + + +// See comments for InitialScaledStartValues. +static void InitialScaledStartValuesPositiveExponent( + double v, int estimated_power, bool need_boundary_deltas, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + // A positive exponent implies a positive power. + ASSERT(estimated_power >= 0); + // Since the estimated_power is positive we simply multiply the denominator + // by 10^estimated_power. + + // numerator = v. + numerator->AssignUInt64(Double(v).Significand()); + numerator->ShiftLeft(Double(v).Exponent()); + // denominator = 10^estimated_power. + denominator->AssignPowerUInt16(10, estimated_power); + + if (need_boundary_deltas) { + // Introduce a common denominator so that the deltas to the boundaries are + // integers. + denominator->ShiftLeft(1); + numerator->ShiftLeft(1); + // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common + // denominator (of 2) delta_plus equals 2^e. + delta_plus->AssignUInt16(1); + delta_plus->ShiftLeft(Double(v).Exponent()); + // Same for delta_minus (with adjustments below if f == 2^p-1). + delta_minus->AssignUInt16(1); + delta_minus->ShiftLeft(Double(v).Exponent()); + + // If the significand (without the hidden bit) is 0, then the lower + // boundary is closer than just half a ulp (unit in the last place). + // There is only one exception: if the next lower number is a denormal then + // the distance is 1 ulp. This cannot be the case for exponent >= 0 (but we + // have to test it in the other function where exponent < 0). + uint64_t v_bits = Double(v).AsUint64(); + if ((v_bits & Double::kSignificandMask) == 0) { + // The lower boundary is closer at half the distance of "normal" numbers. + // Increase the common denominator and adapt all but the delta_minus. + denominator->ShiftLeft(1); // *2 + numerator->ShiftLeft(1); // *2 + delta_plus->ShiftLeft(1); // *2 + } + } +} + + +// See comments for InitialScaledStartValues +static void InitialScaledStartValuesNegativeExponentPositivePower( + double v, int estimated_power, bool need_boundary_deltas, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + uint64_t significand = Double(v).Significand(); + int exponent = Double(v).Exponent(); + // v = f * 2^e with e < 0, and with estimated_power >= 0. + // This means that e is close to 0 (have a look at how estimated_power is + // computed). + + // numerator = significand + // since v = significand * 2^exponent this is equivalent to + // numerator = v * / 2^-exponent + numerator->AssignUInt64(significand); + // denominator = 10^estimated_power * 2^-exponent (with exponent < 0) + denominator->AssignPowerUInt16(10, estimated_power); + denominator->ShiftLeft(-exponent); + + if (need_boundary_deltas) { + // Introduce a common denominator so that the deltas to the boundaries are + // integers. 
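The EstimatePower formula defined just above is easy to sanity-check on the host; the two examples quoted in its comment fall out directly (standalone copy for illustration, not part of the patch):

#include <cassert>
#include <cmath>

// Host-side copy of the estimate: k such that 10^(k-1) <= v < 10^k (possibly
// one too low), for v = f * 2^exponent with 2^52 <= f < 2^53.
static int EstimatePower(int exponent) {
  const double k1Log10 = 0.30102999566398114;  // 1/lg(10)
  const int kSignificandSize = 53;
  return static_cast<int>(
      std::ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10));
}

int main() {
  assert(EstimatePower(0) == 16);
  assert(EstimatePower(-52) == 0);
  // The estimate may undershoot by one; FixupMultiply10 corrects that later.
  return 0;
}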
+ denominator->ShiftLeft(1); + numerator->ShiftLeft(1); + // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common + // denominator (of 2) delta_plus equals 2^e. + // Given that the denominator already includes v's exponent the distance + // to the boundaries is simply 1. + delta_plus->AssignUInt16(1); + // Same for delta_minus (with adjustments below if f == 2^p-1). + delta_minus->AssignUInt16(1); + + // If the significand (without the hidden bit) is 0, then the lower + // boundary is closer than just one ulp (unit in the last place). + // There is only one exception: if the next lower number is a denormal + // then the distance is 1 ulp. Since the exponent is close to zero + // (otherwise estimated_power would have been negative) this cannot happen + // here either. + uint64_t v_bits = Double(v).AsUint64(); + if ((v_bits & Double::kSignificandMask) == 0) { + // The lower boundary is closer at half the distance of "normal" numbers. + // Increase the denominator and adapt all but the delta_minus. + denominator->ShiftLeft(1); // *2 + numerator->ShiftLeft(1); // *2 + delta_plus->ShiftLeft(1); // *2 + } + } +} + + +// See comments for InitialScaledStartValues +static void InitialScaledStartValuesNegativeExponentNegativePower( + double v, int estimated_power, bool need_boundary_deltas, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + const uint64_t kMinimalNormalizedExponent = + V8_2PART_UINT64_C(0x00100000, 00000000); + uint64_t significand = Double(v).Significand(); + int exponent = Double(v).Exponent(); + // Instead of multiplying the denominator with 10^estimated_power we + // multiply all values (numerator and deltas) by 10^-estimated_power. + + // Use numerator as temporary container for power_ten. + Bignum* power_ten = numerator; + power_ten->AssignPowerUInt16(10, -estimated_power); + + if (need_boundary_deltas) { + // Since power_ten == numerator we must make a copy of 10^estimated_power + // before we complete the computation of the numerator. + // delta_plus = delta_minus = 10^estimated_power + delta_plus->AssignBignum(*power_ten); + delta_minus->AssignBignum(*power_ten); + } + + // numerator = significand * 2 * 10^-estimated_power + // since v = significand * 2^exponent this is equivalent to + // numerator = v * 10^-estimated_power * 2 * 2^-exponent. + // Remember: numerator has been abused as power_ten. So no need to assign it + // to itself. + ASSERT(numerator == power_ten); + numerator->MultiplyByUInt64(significand); + + // denominator = 2 * 2^-exponent with exponent < 0. + denominator->AssignUInt16(1); + denominator->ShiftLeft(-exponent); + + if (need_boundary_deltas) { + // Introduce a common denominator so that the deltas to the boundaries are + // integers. + numerator->ShiftLeft(1); + denominator->ShiftLeft(1); + // With this shift the boundaries have their correct value, since + // delta_plus = 10^-estimated_power, and + // delta_minus = 10^-estimated_power. + // These assignments have been done earlier. + + // The special case where the lower boundary is twice as close. + // This time we have to look out for the exception too. + uint64_t v_bits = Double(v).AsUint64(); + if ((v_bits & Double::kSignificandMask) == 0 && + // The only exception where a significand == 0 has its boundaries at + // "normal" distances: + (v_bits & Double::kExponentMask) != kMinimalNormalizedExponent) { + numerator->ShiftLeft(1); // *2 + denominator->ShiftLeft(1); // *2 + delta_plus->ShiftLeft(1); // *2 + } + } +} + + +// Let v = significand * 2^exponent. 
+// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator +// and denominator. The functions GenerateShortestDigits and +// GenerateCountedDigits will then convert this ratio to its decimal +// representation d, with the required accuracy. +// Then d * 10^estimated_power is the representation of v. +// (Note: the fraction and the estimated_power might get adjusted before +// generating the decimal representation.) +// +// The initial start values consist of: +// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power. +// - a scaled (common) denominator. +// optionally (used by GenerateShortestDigits to decide if it has the shortest +// decimal converting back to v): +// - v - m-: the distance to the lower boundary. +// - m+ - v: the distance to the upper boundary. +// +// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator. +// +// Let ep == estimated_power, then the returned values will satisfy: +// v / 10^ep = numerator / denominator. +// v's boundarys m- and m+: +// m- / 10^ep == v / 10^ep - delta_minus / denominator +// m+ / 10^ep == v / 10^ep + delta_plus / denominator +// Or in other words: +// m- == v - delta_minus * 10^ep / denominator; +// m+ == v + delta_plus * 10^ep / denominator; +// +// Since 10^(k-1) <= v < 10^k (with k == estimated_power) +// or 10^k <= v < 10^(k+1) +// we then have 0.1 <= numerator/denominator < 1 +// or 1 <= numerator/denominator < 10 +// +// It is then easy to kickstart the digit-generation routine. +// +// The boundary-deltas are only filled if need_boundary_deltas is set. +static void InitialScaledStartValues(double v, + int estimated_power, + bool need_boundary_deltas, + Bignum* numerator, + Bignum* denominator, + Bignum* delta_minus, + Bignum* delta_plus) { + if (Double(v).Exponent() >= 0) { + InitialScaledStartValuesPositiveExponent( + v, estimated_power, need_boundary_deltas, + numerator, denominator, delta_minus, delta_plus); + } else if (estimated_power >= 0) { + InitialScaledStartValuesNegativeExponentPositivePower( + v, estimated_power, need_boundary_deltas, + numerator, denominator, delta_minus, delta_plus); + } else { + InitialScaledStartValuesNegativeExponentNegativePower( + v, estimated_power, need_boundary_deltas, + numerator, denominator, delta_minus, delta_plus); + } +} + + +// This routine multiplies numerator/denominator so that its values lies in the +// range 1-10. That is after a call to this function we have: +// 1 <= (numerator + delta_plus) /denominator < 10. +// Let numerator the input before modification and numerator' the argument +// after modification, then the output-parameter decimal_point is such that +// numerator / denominator * 10^estimated_power == +// numerator' / denominator' * 10^(decimal_point - 1) +// In some cases estimated_power was too low, and this is already the case. We +// then simply adjust the power so that 10^(k-1) <= v < 10^k (with k == +// estimated_power) but do not touch the numerator or denominator. +// Otherwise the routine multiplies the numerator and the deltas by 10. +static void FixupMultiply10(int estimated_power, bool is_even, + int* decimal_point, + Bignum* numerator, Bignum* denominator, + Bignum* delta_minus, Bignum* delta_plus) { + bool in_range; + if (is_even) { + // For IEEE doubles half-way cases (in decimal system numbers ending with 5) + // are rounded to the closest floating-point number with even significand. 
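The boundary asymmetry that the InitialScaledStartValues helpers above special-case (the "lower boundary is closer" branches when the stored significand is zero) can be observed directly with std::nextafter. A host-side check, independent of Bignum:

#include <cassert>
#include <cmath>

int main() {
  // At 1.0 the significand field is zero: the gap down to the previous double
  // is half the gap up to the next one, so m- lies closer to v than m+ does.
  double v = 1.0;
  double up = std::nextafter(v, 2.0) - v;
  double down = v - std::nextafter(v, 0.0);
  assert(down * 2 == up);

  // Away from powers of two the two gaps are equal, and m-/m+ sit exactly
  // half a unit in the last place away on each side.
  v = 1.5;
  up = std::nextafter(v, 2.0) - v;
  down = v - std::nextafter(v, 0.0);
  assert(down == up);
  return 0;
}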
+ in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0; + } else { + in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0; + } + if (in_range) { + // Since numerator + delta_plus >= denominator we already have + // 1 <= numerator/denominator < 10. Simply update the estimated_power. + *decimal_point = estimated_power + 1; + } else { + *decimal_point = estimated_power; + numerator->Times10(); + if (Bignum::Equal(*delta_minus, *delta_plus)) { + delta_minus->Times10(); + delta_plus->AssignBignum(*delta_minus); + } else { + delta_minus->Times10(); + delta_plus->Times10(); + } + } +} + +} } // namespace v8::internal diff --git a/deps/v8/src/bignum-dtoa.h b/deps/v8/src/bignum-dtoa.h new file mode 100644 index 0000000000..ea1acbbfc8 --- /dev/null +++ b/deps/v8/src/bignum-dtoa.h @@ -0,0 +1,81 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_BIGNUM_DTOA_H_ +#define V8_BIGNUM_DTOA_H_ + +namespace v8 { +namespace internal { + +enum BignumDtoaMode { + // Return the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate but + // correct) 0.3. + BIGNUM_DTOA_SHORTEST, + // Return a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + BIGNUM_DTOA_FIXED, + // Return a fixed number of digits, no matter what the exponent is. + BIGNUM_DTOA_PRECISION +}; + +// Converts the given double 'v' to ascii. +// The result should be interpreted as buffer * 10^(point-length). +// The buffer will be null-terminated. +// +// The input v must be > 0 and different from NaN, and Infinity. +// +// The output depends on the given mode: +// - SHORTEST: produce the least amount of digits for which the internal +// identity requirement is still satisfied. If the digits are printed +// (together with the correct exponent) then reading this number will give +// 'v' again. 
The buffer will choose the representation that is closest to +// 'v'. If there are two at the same distance, than the number is round up. +// In this mode the 'requested_digits' parameter is ignored. +// - FIXED: produces digits necessary to print a given number with +// 'requested_digits' digits after the decimal point. The produced digits +// might be too short in which case the caller has to fill the gaps with '0's. +// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. +// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns +// buffer="2", point=0. +// Note: the length of the returned buffer has no meaning wrt the significance +// of its digits. That is, just because it contains '0's does not mean that +// any other digit would not satisfy the internal identity requirement. +// - PRECISION: produces 'requested_digits' where the first digit is not '0'. +// Even though the length of produced digits usually equals +// 'requested_digits', the function is allowed to return fewer digits, in +// which case the caller has to fill the missing digits with '0's. +// Halfway cases are again rounded up. +// 'BignumDtoa' expects the given buffer to be big enough to hold all digits +// and a terminating null-character. +void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, + Vector buffer, int* length, int* point); + +} } // namespace v8::internal + +#endif // V8_BIGNUM_DTOA_H_ diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc new file mode 100644 index 0000000000..dd1537a25a --- /dev/null +++ b/deps/v8/src/bignum.cc @@ -0,0 +1,767 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
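As a reminder of the output convention documented in the header above (the result is buffer * 10^(point - length)), here is a small hypothetical helper that turns such a digit string back into readable form. It is illustration only; nothing like it is added by this patch:

#include <cassert>
#include <string>

// Render digits with a decimal point so that the value equals
// digits * 10^(point - digits.length()), the convention used by BignumDtoa.
static std::string FormatDigits(const std::string& digits, int point) {
  std::string out;
  if (point <= 0) {
    out = "0.";
    out.append(-point, '0');
    out += digits;
  } else if (point >= static_cast<int>(digits.size())) {
    out = digits;
    out.append(point - digits.size(), '0');
  } else {
    out = digits.substr(0, point) + "." + digits.substr(point);
  }
  return out;
}

int main() {
  // toFixed(0.001, 5) may legitimately return buffer="1", point=-2.
  assert(FormatDigits("1", -2) == "0.001");
  // Shortest form of 0.3 is buffer="3", point=0.
  assert(FormatDigits("3", 0) == "0.3");
  // buffer="3375", point=1 stands for 3.375.
  assert(FormatDigits("3375", 1) == "3.375");
  return 0;
}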
+
+#include "v8.h"
+
+#include "bignum.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+Bignum::Bignum()
+    : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
+  for (int i = 0; i < kBigitCapacity; ++i) {
+    bigits_[i] = 0;
+  }
+}
+
+
+template<typename S>
+static int BitSize(S value) {
+  return 8 * sizeof(value);
+}
+
+// Guaranteed to lie in one Bigit.
+void Bignum::AssignUInt16(uint16_t value) {
+  ASSERT(kBigitSize >= BitSize(value));
+  Zero();
+  if (value == 0) return;
+
+  EnsureCapacity(1);
+  bigits_[0] = value;
+  used_digits_ = 1;
+}
+
+
+void Bignum::AssignUInt64(uint64_t value) {
+  const int kUInt64Size = 64;
+
+  Zero();
+  if (value == 0) return;
+
+  int needed_bigits = kUInt64Size / kBigitSize + 1;
+  EnsureCapacity(needed_bigits);
+  for (int i = 0; i < needed_bigits; ++i) {
+    bigits_[i] = value & kBigitMask;
+    value = value >> kBigitSize;
+  }
+  used_digits_ = needed_bigits;
+  Clamp();
+}
+
+
+void Bignum::AssignBignum(const Bignum& other) {
+  exponent_ = other.exponent_;
+  for (int i = 0; i < other.used_digits_; ++i) {
+    bigits_[i] = other.bigits_[i];
+  }
+  // Clear the excess digits (if there were any).
+  for (int i = other.used_digits_; i < used_digits_; ++i) {
+    bigits_[i] = 0;
+  }
+  used_digits_ = other.used_digits_;
+}
+
+
+static uint64_t ReadUInt64(Vector<const char> buffer,
+                           int from,
+                           int digits_to_read) {
+  uint64_t result = 0;
+  for (int i = from; i < from + digits_to_read; ++i) {
+    int digit = buffer[i] - '0';
+    ASSERT(0 <= digit && digit <= 9);
+    result = result * 10 + digit;
+  }
+  return result;
+}
+
+
+void Bignum::AssignDecimalString(Vector<const char> value) {
+  // 2^64 = 18446744073709551616 > 10^19
+  const int kMaxUint64DecimalDigits = 19;
+  Zero();
+  int length = value.length();
+  int pos = 0;
+  // Let's just say that each digit needs 4 bits.
+  while (length >= kMaxUint64DecimalDigits) {
+    uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
+    pos += kMaxUint64DecimalDigits;
+    length -= kMaxUint64DecimalDigits;
+    MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
+    AddUInt64(digits);
+  }
+  uint64_t digits = ReadUInt64(value, pos, length);
+  MultiplyByPowerOfTen(length);
+  AddUInt64(digits);
+  Clamp();
+}
+
+
+static int HexCharValue(char c) {
+  if ('0' <= c && c <= '9') return c - '0';
+  if ('a' <= c && c <= 'f') return 10 + c - 'a';
+  if ('A' <= c && c <= 'F') return 10 + c - 'A';
+  UNREACHABLE();
+  return 0;  // To make compiler happy.
+}
+
+
+void Bignum::AssignHexString(Vector<const char> value) {
+  Zero();
+  int length = value.length();
+
+  int needed_bigits = length * 4 / kBigitSize + 1;
+  EnsureCapacity(needed_bigits);
+  int string_index = length - 1;
+  for (int i = 0; i < needed_bigits - 1; ++i) {
+    // These bigits are guaranteed to be "full".
+ Chunk current_bigit = 0; + for (int j = 0; j < kBigitSize / 4; j++) { + current_bigit += HexCharValue(value[string_index--]) << (j * 4); + } + bigits_[i] = current_bigit; + } + used_digits_ = needed_bigits - 1; + + Chunk most_significant_bigit = 0; // Could be = 0; + for (int j = 0; j <= string_index; ++j) { + most_significant_bigit <<= 4; + most_significant_bigit += HexCharValue(value[j]); + } + if (most_significant_bigit != 0) { + bigits_[used_digits_] = most_significant_bigit; + used_digits_++; + } + Clamp(); +} + + +void Bignum::AddUInt64(uint64_t operand) { + if (operand == 0) return; + Bignum other; + other.AssignUInt64(operand); + AddBignum(other); +} + + +void Bignum::AddBignum(const Bignum& other) { + ASSERT(IsClamped()); + ASSERT(other.IsClamped()); + + // If this has a greater exponent than other append zero-bigits to this. + // After this call exponent_ <= other.exponent_. + Align(other); + + // There are two possibilities: + // aaaaaaaaaaa 0000 (where the 0s represent a's exponent) + // bbbbb 00000000 + // ---------------- + // ccccccccccc 0000 + // or + // aaaaaaaaaa 0000 + // bbbbbbbbb 0000000 + // ----------------- + // cccccccccccc 0000 + // In both cases we might need a carry bigit. + + EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_); + Chunk carry = 0; + int bigit_pos = other.exponent_ - exponent_; + ASSERT(bigit_pos >= 0); + for (int i = 0; i < other.used_digits_; ++i) { + Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry; + bigits_[bigit_pos] = sum & kBigitMask; + carry = sum >> kBigitSize; + bigit_pos++; + } + + while (carry != 0) { + Chunk sum = bigits_[bigit_pos] + carry; + bigits_[bigit_pos] = sum & kBigitMask; + carry = sum >> kBigitSize; + bigit_pos++; + } + used_digits_ = Max(bigit_pos, used_digits_); + ASSERT(IsClamped()); +} + + +void Bignum::SubtractBignum(const Bignum& other) { + ASSERT(IsClamped()); + ASSERT(other.IsClamped()); + // We require this to be bigger than other. + ASSERT(LessEqual(other, *this)); + + Align(other); + + int offset = other.exponent_ - exponent_; + Chunk borrow = 0; + int i; + for (i = 0; i < other.used_digits_; ++i) { + ASSERT((borrow == 0) || (borrow == 1)); + Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow; + bigits_[i + offset] = difference & kBigitMask; + borrow = difference >> (kChunkSize - 1); + } + while (borrow != 0) { + Chunk difference = bigits_[i + offset] - borrow; + bigits_[i + offset] = difference & kBigitMask; + borrow = difference >> (kChunkSize - 1); + ++i; + } + Clamp(); +} + + +void Bignum::ShiftLeft(int shift_amount) { + if (used_digits_ == 0) return; + exponent_ += shift_amount / kBigitSize; + int local_shift = shift_amount % kBigitSize; + EnsureCapacity(used_digits_ + 1); + BigitsShiftLeft(local_shift); +} + + +void Bignum::MultiplyByUInt32(uint32_t factor) { + if (factor == 1) return; + if (factor == 0) { + Zero(); + return; + } + if (used_digits_ == 0) return; + + // The product of a bigit with the factor is of size kBigitSize + 32. + // Assert that this number + 1 (for the carry) fits into double chunk. 
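// Concretely: 28 + 32 + 1 == 61 bits, which fits comfortably in the 64-bit
// DoubleChunk, so the product-plus-carry accumulation below can never overflow.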
+ ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1); + DoubleChunk carry = 0; + for (int i = 0; i < used_digits_; ++i) { + DoubleChunk product = static_cast(factor) * bigits_[i] + carry; + bigits_[i] = static_cast(product & kBigitMask); + carry = (product >> kBigitSize); + } + while (carry != 0) { + EnsureCapacity(used_digits_ + 1); + bigits_[used_digits_] = carry & kBigitMask; + used_digits_++; + carry >>= kBigitSize; + } +} + + +void Bignum::MultiplyByUInt64(uint64_t factor) { + if (factor == 1) return; + if (factor == 0) { + Zero(); + return; + } + ASSERT(kBigitSize < 32); + uint64_t carry = 0; + uint64_t low = factor & 0xFFFFFFFF; + uint64_t high = factor >> 32; + for (int i = 0; i < used_digits_; ++i) { + uint64_t product_low = low * bigits_[i]; + uint64_t product_high = high * bigits_[i]; + uint64_t tmp = (carry & kBigitMask) + product_low; + bigits_[i] = tmp & kBigitMask; + carry = (carry >> kBigitSize) + (tmp >> kBigitSize) + + (product_high << (32 - kBigitSize)); + } + while (carry != 0) { + EnsureCapacity(used_digits_ + 1); + bigits_[used_digits_] = carry & kBigitMask; + used_digits_++; + carry >>= kBigitSize; + } +} + + +void Bignum::MultiplyByPowerOfTen(int exponent) { + const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765c793, fa10079d); + const uint16_t kFive1 = 5; + const uint16_t kFive2 = kFive1 * 5; + const uint16_t kFive3 = kFive2 * 5; + const uint16_t kFive4 = kFive3 * 5; + const uint16_t kFive5 = kFive4 * 5; + const uint16_t kFive6 = kFive5 * 5; + const uint32_t kFive7 = kFive6 * 5; + const uint32_t kFive8 = kFive7 * 5; + const uint32_t kFive9 = kFive8 * 5; + const uint32_t kFive10 = kFive9 * 5; + const uint32_t kFive11 = kFive10 * 5; + const uint32_t kFive12 = kFive11 * 5; + const uint32_t kFive13 = kFive12 * 5; + const uint32_t kFive1_to_12[] = + { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6, + kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 }; + + ASSERT(exponent >= 0); + if (exponent == 0) return; + if (used_digits_ == 0) return; + + // We shift by exponent at the end just before returning. + int remaining_exponent = exponent; + while (remaining_exponent >= 27) { + MultiplyByUInt64(kFive27); + remaining_exponent -= 27; + } + while (remaining_exponent >= 13) { + MultiplyByUInt32(kFive13); + remaining_exponent -= 13; + } + if (remaining_exponent > 0) { + MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]); + } + ShiftLeft(exponent); +} + + +void Bignum::Square() { + ASSERT(IsClamped()); + int product_length = 2 * used_digits_; + EnsureCapacity(product_length); + + // Comba multiplication: compute each column separately. + // Example: r = a2a1a0 * b2b1b0. + // r = 1 * a0b0 + + // 10 * (a1b0 + a0b1) + + // 100 * (a2b0 + a1b1 + a0b2) + + // 1000 * (a2b1 + a1b2) + + // 10000 * a2b2 + // + // In the worst case we have to accumulate nb-digits products of digit*digit. + // + // Assert that the additional number of bits in a DoubleChunk are enough to + // sum up used_digits of Bigit*Bigit. + if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) { + UNIMPLEMENTED(); + } + DoubleChunk accumulator = 0; + // First shift the digits so we don't overwrite them. + int copy_offset = used_digits_; + for (int i = 0; i < used_digits_; ++i) { + bigits_[copy_offset + i] = bigits_[i]; + } + // We have two loops to avoid some 'if's in the loop. + for (int i = 0; i < used_digits_; ++i) { + // Process temporary digit i with power i. + // The sum of the two indices must be equal to i. + int bigit_index1 = i; + int bigit_index2 = 0; + // Sum all of the sub-products. 
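// For column i the loop accumulates the products bigits[j] * bigits[i - j]
// over all valid index pairs; the low kBigitSize bits of the accumulator
// become digit i of the result and the high bits carry into the next column.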
+ while (bigit_index1 >= 0) { + Chunk chunk1 = bigits_[copy_offset + bigit_index1]; + Chunk chunk2 = bigits_[copy_offset + bigit_index2]; + accumulator += static_cast(chunk1) * chunk2; + bigit_index1--; + bigit_index2++; + } + bigits_[i] = static_cast(accumulator) & kBigitMask; + accumulator >>= kBigitSize; + } + for (int i = used_digits_; i < product_length; ++i) { + int bigit_index1 = used_digits_ - 1; + int bigit_index2 = i - bigit_index1; + // Invariant: sum of both indices is again equal to i. + // Inner loop runs 0 times on last iteration, emptying accumulator. + while (bigit_index2 < used_digits_) { + Chunk chunk1 = bigits_[copy_offset + bigit_index1]; + Chunk chunk2 = bigits_[copy_offset + bigit_index2]; + accumulator += static_cast(chunk1) * chunk2; + bigit_index1--; + bigit_index2++; + } + // The overwritten bigits_[i] will never be read in further loop iterations, + // because bigit_index1 and bigit_index2 are always greater + // than i - used_digits_. + bigits_[i] = static_cast(accumulator) & kBigitMask; + accumulator >>= kBigitSize; + } + // Since the result was guaranteed to lie inside the number the + // accumulator must be 0 now. + ASSERT(accumulator == 0); + + // Don't forget to update the used_digits and the exponent. + used_digits_ = product_length; + exponent_ *= 2; + Clamp(); +} + + +void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) { + ASSERT(base != 0); + ASSERT(power_exponent >= 0); + if (power_exponent == 0) { + AssignUInt16(1); + return; + } + Zero(); + int shifts = 0; + // We expect base to be in range 2-32, and most often to be 10. + // It does not make much sense to implement different algorithms for counting + // the bits. + while ((base & 1) == 0) { + base >>= 1; + shifts++; + } + int bit_size = 0; + int tmp_base = base; + while (tmp_base != 0) { + tmp_base >>= 1; + bit_size++; + } + int final_size = bit_size * power_exponent; + // 1 extra bigit for the shifting, and one for rounded final_size. + EnsureCapacity(final_size / kBigitSize + 2); + + // Left to Right exponentiation. + int mask = 1; + while (power_exponent >= mask) mask <<= 1; + + // The mask is now pointing to the bit above the most significant 1-bit of + // power_exponent. + // Get rid of first 1-bit; + mask >>= 2; + uint64_t this_value = base; + + bool delayed_multipliciation = false; + const uint64_t max_32bits = 0xFFFFFFFF; + while (mask != 0 && this_value <= max_32bits) { + this_value = this_value * this_value; + // Verify that there is enough space in this_value to perform the + // multiplication. The first bit_size bits must be 0. + if ((power_exponent & mask) != 0) { + uint64_t base_bits_mask = + ~((static_cast(1) << (64 - bit_size)) - 1); + bool high_bits_zero = (this_value & base_bits_mask) == 0; + if (high_bits_zero) { + this_value *= base; + } else { + delayed_multipliciation = true; + } + } + mask >>= 1; + } + AssignUInt64(this_value); + if (delayed_multipliciation) { + MultiplyByUInt32(base); + } + + // Now do the same thing as a bignum. + while (mask != 0) { + Square(); + if ((power_exponent & mask) != 0) { + MultiplyByUInt32(base); + } + mask >>= 1; + } + + // And finally add the saved shifts. + ShiftLeft(shifts * power_exponent); +} + + +// Precondition: this/other < 16bit. +uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) { + ASSERT(IsClamped()); + ASSERT(other.IsClamped()); + ASSERT(other.used_digits_ > 0); + + // Easy case: if we have less digits than the divisor than the result is 0. + // Note: this handles the case where this == 0, too. 
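// Otherwise the quotient is built up in three stages below: whole leading
// bigits are stripped off while this number is longer than 'other', then a
// quotient estimate based on the most significant bigits is subtracted, and
// finally a few corrective subtractions bring the remainder below 'other'.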
+ if (BigitLength() < other.BigitLength()) { + return 0; + } + + Align(other); + + uint16_t result = 0; + + // Start by removing multiples of 'other' until both numbers have the same + // number of digits. + while (BigitLength() > other.BigitLength()) { + // This naive approach is extremely inefficient if the this divided other + // might be big. This function is implemented for doubleToString where + // the result should be small (less than 10). + ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16)); + // Remove the multiples of the first digit. + // Example this = 23 and other equals 9. -> Remove 2 multiples. + result += bigits_[used_digits_ - 1]; + SubtractTimes(other, bigits_[used_digits_ - 1]); + } + + ASSERT(BigitLength() == other.BigitLength()); + + // Both bignums are at the same length now. + // Since other has more than 0 digits we know that the access to + // bigits_[used_digits_ - 1] is safe. + Chunk this_bigit = bigits_[used_digits_ - 1]; + Chunk other_bigit = other.bigits_[other.used_digits_ - 1]; + + if (other.used_digits_ == 1) { + // Shortcut for easy (and common) case. + int quotient = this_bigit / other_bigit; + bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient; + result += quotient; + Clamp(); + return result; + } + + int division_estimate = this_bigit / (other_bigit + 1); + result += division_estimate; + SubtractTimes(other, division_estimate); + + if (other_bigit * (division_estimate + 1) > this_bigit) { + // No need to even try to subtract. Even if other's remaining digits were 0 + // another subtraction would be too much. + return result; + } + + while (LessEqual(other, *this)) { + SubtractBignum(other); + result++; + } + return result; +} + + +template +static int SizeInHexChars(S number) { + ASSERT(number > 0); + int result = 0; + while (number != 0) { + number >>= 4; + result++; + } + return result; +} + + +static char HexCharOfValue(int value) { + ASSERT(0 <= value && value <= 16); + if (value < 10) return value + '0'; + return value - 10 + 'A'; +} + + +bool Bignum::ToHexString(char* buffer, int buffer_size) const { + ASSERT(IsClamped()); + // Each bigit must be printable as separate hex-character. + ASSERT(kBigitSize % 4 == 0); + const int kHexCharsPerBigit = kBigitSize / 4; + + if (used_digits_ == 0) { + if (buffer_size < 2) return false; + buffer[0] = '0'; + buffer[1] = '\0'; + return true; + } + // We add 1 for the terminating '\0' character. + int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit + + SizeInHexChars(bigits_[used_digits_ - 1]) + 1; + if (needed_chars > buffer_size) return false; + int string_index = needed_chars - 1; + buffer[string_index--] = '\0'; + for (int i = 0; i < exponent_; ++i) { + for (int j = 0; j < kHexCharsPerBigit; ++j) { + buffer[string_index--] = '0'; + } + } + for (int i = 0; i < used_digits_ - 1; ++i) { + Chunk current_bigit = bigits_[i]; + for (int j = 0; j < kHexCharsPerBigit; ++j) { + buffer[string_index--] = HexCharOfValue(current_bigit & 0xF); + current_bigit >>= 4; + } + } + // And finally the last bigit. 
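// Unlike the full bigits above, the most significant bigit is written without
// leading zeros, matching the width that SizeInHexChars contributed to
// needed_chars.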
+ Chunk most_significant_bigit = bigits_[used_digits_ - 1]; + while (most_significant_bigit != 0) { + buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF); + most_significant_bigit >>= 4; + } + return true; +} + + +Bignum::Chunk Bignum::BigitAt(int index) const { + if (index >= BigitLength()) return 0; + if (index < exponent_) return 0; + return bigits_[index - exponent_]; +} + + +int Bignum::Compare(const Bignum& a, const Bignum& b) { + ASSERT(a.IsClamped()); + ASSERT(b.IsClamped()); + int bigit_length_a = a.BigitLength(); + int bigit_length_b = b.BigitLength(); + if (bigit_length_a < bigit_length_b) return -1; + if (bigit_length_a > bigit_length_b) return +1; + for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) { + Chunk bigit_a = a.BigitAt(i); + Chunk bigit_b = b.BigitAt(i); + if (bigit_a < bigit_b) return -1; + if (bigit_a > bigit_b) return +1; + // Otherwise they are equal up to this digit. Try the next digit. + } + return 0; +} + + +int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) { + ASSERT(a.IsClamped()); + ASSERT(b.IsClamped()); + ASSERT(c.IsClamped()); + if (a.BigitLength() < b.BigitLength()) { + return PlusCompare(b, a, c); + } + if (a.BigitLength() + 1 < c.BigitLength()) return -1; + if (a.BigitLength() > c.BigitLength()) return +1; + // The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than + // 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one + // of 'a'. + if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) { + return -1; + } + + Chunk borrow = 0; + // Starting at min_exponent all digits are == 0. So no need to compare them. + int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_); + for (int i = c.BigitLength() - 1; i >= min_exponent; --i) { + Chunk chunk_a = a.BigitAt(i); + Chunk chunk_b = b.BigitAt(i); + Chunk chunk_c = c.BigitAt(i); + Chunk sum = chunk_a + chunk_b; + if (sum > chunk_c + borrow) { + return +1; + } else { + borrow = chunk_c + borrow - sum; + if (borrow > 1) return -1; + borrow <<= kBigitSize; + } + } + if (borrow == 0) return 0; + return -1; +} + + +void Bignum::Clamp() { + while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) { + used_digits_--; + } + if (used_digits_ == 0) { + // Zero. + exponent_ = 0; + } +} + + +bool Bignum::IsClamped() const { + return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0; +} + + +void Bignum::Zero() { + for (int i = 0; i < used_digits_; ++i) { + bigits_[i] = 0; + } + used_digits_ = 0; + exponent_ = 0; +} + + +void Bignum::Align(const Bignum& other) { + if (exponent_ > other.exponent_) { + // If "X" represents a "hidden" digit (by the exponent) then we are in the + // following case (a == this, b == other): + // a: aaaaaaXXXX or a: aaaaaXXX + // b: bbbbbbX b: bbbbbbbbXX + // We replace some of the hidden digits (X) of a with 0 digits. 
+ // a: aaaaaa000X or a: aaaaa0XX + int zero_digits = exponent_ - other.exponent_; + EnsureCapacity(used_digits_ + zero_digits); + for (int i = used_digits_ - 1; i >= 0; --i) { + bigits_[i + zero_digits] = bigits_[i]; + } + for (int i = 0; i < zero_digits; ++i) { + bigits_[i] = 0; + } + used_digits_ += zero_digits; + exponent_ -= zero_digits; + ASSERT(used_digits_ >= 0); + ASSERT(exponent_ >= 0); + } +} + + +void Bignum::BigitsShiftLeft(int shift_amount) { + ASSERT(shift_amount < kBigitSize); + ASSERT(shift_amount >= 0); + Chunk carry = 0; + for (int i = 0; i < used_digits_; ++i) { + Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount); + bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask; + carry = new_carry; + } + if (carry != 0) { + bigits_[used_digits_] = carry; + used_digits_++; + } +} + + +void Bignum::SubtractTimes(const Bignum& other, int factor) { + ASSERT(exponent_ <= other.exponent_); + if (factor < 3) { + for (int i = 0; i < factor; ++i) { + SubtractBignum(other); + } + return; + } + Chunk borrow = 0; + int exponent_diff = other.exponent_ - exponent_; + for (int i = 0; i < other.used_digits_; ++i) { + DoubleChunk product = static_cast(factor) * other.bigits_[i]; + DoubleChunk remove = borrow + product; + Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask); + bigits_[i + exponent_diff] = difference & kBigitMask; + borrow = static_cast((difference >> (kChunkSize - 1)) + + (remove >> kBigitSize)); + } + for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) { + if (borrow == 0) return; + Chunk difference = bigits_[i] - borrow; + bigits_[i] = difference & kBigitMask; + borrow = difference >> (kChunkSize - 1); + ++i; + } + Clamp(); +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/bignum.h b/deps/v8/src/bignum.h new file mode 100644 index 0000000000..1d2bff61a5 --- /dev/null +++ b/deps/v8/src/bignum.h @@ -0,0 +1,140 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
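For orientation, here is a minimal sketch of how the class declared below is typically driven. It assumes the internal bignum.h header plus the utils header that provides CStrVector (returning a Vector<const char>), and it mirrors the divide-and-multiply-by-ten loop that bignum-dtoa.cc uses to emit digits; treat it as an illustration, not production code:

static void BignumDigitSketch() {
  using namespace v8::internal;
  Bignum numerator;
  Bignum denominator;
  numerator.AssignDecimalString(CStrVector("1234"));
  denominator.AssignPowerUInt16(10, 3);  // 10^3, built internally as 5^3 << 3.

  // DivideModuloIntBignum returns the (small) quotient and leaves the
  // remainder in the receiver, so alternating it with Times10 peels off one
  // decimal digit per step.
  uint16_t d0 = numerator.DivideModuloIntBignum(denominator);  // 1, remainder 234
  numerator.Times10();                                         // remainder -> 2340
  uint16_t d1 = numerator.DivideModuloIntBignum(denominator);  // 2, remainder 340
  (void) d0;
  (void) d1;
}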
+ +#ifndef V8_BIGNUM_H_ +#define V8_BIGNUM_H_ + +namespace v8 { +namespace internal { + +class Bignum { + public: + // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately. + // This bignum can encode much bigger numbers, since it contains an + // exponent. + static const int kMaxSignificantBits = 3584; + + Bignum(); + void AssignUInt16(uint16_t value); + void AssignUInt64(uint64_t value); + void AssignBignum(const Bignum& other); + + void AssignDecimalString(Vector value); + void AssignHexString(Vector value); + + void AssignPowerUInt16(uint16_t base, int exponent); + + void AddUInt16(uint16_t operand); + void AddUInt64(uint64_t operand); + void AddBignum(const Bignum& other); + // Precondition: this >= other. + void SubtractBignum(const Bignum& other); + + void Square(); + void ShiftLeft(int shift_amount); + void MultiplyByUInt32(uint32_t factor); + void MultiplyByUInt64(uint64_t factor); + void MultiplyByPowerOfTen(int exponent); + void Times10() { return MultiplyByUInt32(10); } + // Pseudocode: + // int result = this / other; + // this = this % other; + // In the worst case this function is in O(this/other). + uint16_t DivideModuloIntBignum(const Bignum& other); + + bool ToHexString(char* buffer, int buffer_size) const; + + static int Compare(const Bignum& a, const Bignum& b); + static bool Equal(const Bignum& a, const Bignum& b) { + return Compare(a, b) == 0; + } + static bool LessEqual(const Bignum& a, const Bignum& b) { + return Compare(a, b) <= 0; + } + static bool Less(const Bignum& a, const Bignum& b) { + return Compare(a, b) < 0; + } + // Returns Compare(a + b, c); + static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c); + // Returns a + b == c + static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) == 0; + } + // Returns a + b <= c + static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) <= 0; + } + // Returns a + b < c + static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) < 0; + } + private: + typedef uint32_t Chunk; + typedef uint64_t DoubleChunk; + + static const int kChunkSize = sizeof(Chunk) * 8; + static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8; + // With bigit size of 28 we loose some bits, but a double still fits easily + // into two chunks, and more importantly we can use the Comba multiplication. + static const int kBigitSize = 28; + static const Chunk kBigitMask = (1 << kBigitSize) - 1; + // Every instance allocates kBigitLength chunks on the stack. Bignums cannot + // grow. There are no checks if the stack-allocated space is sufficient. + static const int kBigitCapacity = kMaxSignificantBits / kBigitSize; + + void EnsureCapacity(int size) { + if (size > kBigitCapacity) { + UNREACHABLE(); + } + } + void Align(const Bignum& other); + void Clamp(); + bool IsClamped() const; + void Zero(); + // Requires this to have enough capacity (no tests done). + // Updates used_digits_ if necessary. + // by must be < kBigitSize. + void BigitsShiftLeft(int shift_amount); + // BigitLength includes the "hidden" digits encoded in the exponent. + int BigitLength() const { return used_digits_ + exponent_; } + Chunk BigitAt(int index) const; + void SubtractTimes(const Bignum& other, int factor); + + Chunk bigits_buffer_[kBigitCapacity]; + // A vector backed by bigits_buffer_. This way accesses to the array are + // checked for out-of-bounds errors. 
+ Vector bigits_; + int used_digits_; + // The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize). + int exponent_; + + DISALLOW_COPY_AND_ASSIGN(Bignum); +}; + +} } // namespace v8::internal + +#endif // V8_BIGNUM_H_ diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 0e49966bbd..f60a975dc1 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -39,6 +39,8 @@ #include "objects-visiting.h" #include "snapshot.h" #include "stub-cache.h" +#include "extensions/externalize-string-extension.h" +#include "extensions/gc-extension.h" namespace v8 { namespace internal { @@ -137,6 +139,8 @@ Handle Bootstrapper::NativesSourceLookup(int index) { void Bootstrapper::Initialize(bool create_heap_objects) { extensions_cache.Initialize(create_heap_objects); + GCExtension::Register(); + ExternalizeStringExtension::Register(); } diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc index b5df316d0f..1ab8802ec3 100644 --- a/deps/v8/src/checks.cc +++ b/deps/v8/src/checks.cc @@ -98,3 +98,12 @@ void API_Fatal(const char* location, const char* format, ...) { i::OS::PrintError("\n#\n\n"); i::OS::Abort(); } + + +namespace v8 { namespace internal { + + bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; } + + intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; } + +} } // namespace v8::internal diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h index 5ea59920ac..6b493225ad 100644 --- a/deps/v8/src/checks.h +++ b/deps/v8/src/checks.h @@ -30,8 +30,6 @@ #include -#include "flags.h" - extern "C" void V8_Fatal(const char* file, int line, const char* format, ...); void API_Fatal(const char* location, const char* format, ...); @@ -279,6 +277,12 @@ template class StaticAssertionHelper { }; SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) +namespace v8 { namespace internal { + +bool EnableSlowAsserts(); + +} } // namespace v8::internal + // The ASSERT macro is equivalent to CHECK except that it only // generates code in debug builds. #ifdef DEBUG @@ -287,7 +291,7 @@ template class StaticAssertionHelper { }; #define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2) #define ASSERT_NE(v1, v2) CHECK_NE(v1, v2) #define ASSERT_GE(v1, v2) CHECK_GE(v1, v2) -#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition) +#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition) #else #define ASSERT_RESULT(expr) (expr) #define ASSERT(condition) ((void) 0) @@ -303,11 +307,16 @@ template class StaticAssertionHelper { }; // and release compilation modes behaviour. 
#define STATIC_ASSERT(test) STATIC_CHECK(test) +namespace v8 { namespace internal { + +intptr_t HeapObjectTagMask(); + +} } // namespace v8::internal #define ASSERT_TAG_ALIGNED(address) \ - ASSERT((reinterpret_cast(address) & kHeapObjectTagMask) == 0) + ASSERT((reinterpret_cast(address) & HeapObjectTagMask()) == 0) -#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & kHeapObjectTagMask) == 0) +#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & HeapObjectTagMask()) == 0) #define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p) diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 787ec2a7a1..8b9198fb99 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -37,7 +37,6 @@ namespace v8 { namespace internal { bool CodeStub::FindCodeInCache(Code** code_out) { - if (has_custom_cache()) return GetCustomCache(code_out); int index = Heap::code_stubs()->FindEntry(GetKey()); if (index != NumberDictionary::kNotFound) { *code_out = Code::cast(Heap::code_stubs()->ValueAt(index)); @@ -105,17 +104,14 @@ Handle CodeStub::GetCode() { Handle new_object = Factory::NewCode(desc, flags, masm.CodeObject()); RecordCodeGeneration(*new_object, &masm); - if (has_custom_cache()) { - SetCustomCache(*new_object); - } else { - // Update the dictionary and the root in Heap. - Handle dict = - Factory::DictionaryAtNumberPut( - Handle(Heap::code_stubs()), - GetKey(), - new_object); - Heap::public_set_code_stubs(*dict); - } + // Update the dictionary and the root in Heap. + Handle dict = + Factory::DictionaryAtNumberPut( + Handle(Heap::code_stubs()), + GetKey(), + new_object); + Heap::public_set_code_stubs(*dict); + code = *new_object; } @@ -147,15 +143,11 @@ MaybeObject* CodeStub::TryGetCode() { code = Code::cast(new_object); RecordCodeGeneration(code, &masm); - if (has_custom_cache()) { - SetCustomCache(code); - } else { - // Try to update the code cache but do not fail if unable. - MaybeObject* maybe_new_object = - Heap::code_stubs()->AtNumberPut(GetKey(), code); - if (maybe_new_object->ToObject(&new_object)) { - Heap::public_set_code_stubs(NumberDictionary::cast(new_object)); - } + // Try to update the code cache but do not fail if unable. + MaybeObject* maybe_new_object = + Heap::code_stubs()->AtNumberPut(GetKey(), code); + if (maybe_new_object->ToObject(&new_object)) { + Heap::public_set_code_stubs(NumberDictionary::cast(new_object)); } } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index c0a8d3063c..b156647d5d 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -124,12 +124,6 @@ class CodeStub BASE_EMBEDDED { virtual ~CodeStub() {} - // Override these methods to provide a custom caching mechanism for - // an individual type of code stub. 
- virtual bool GetCustomCache(Code** code_out) { return false; } - virtual void SetCustomCache(Code* value) { } - virtual bool has_custom_cache() { return false; } - protected: static const int kMajorBits = 5; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits; @@ -524,32 +518,6 @@ class CEntryStub : public CodeStub { }; -class ApiGetterEntryStub : public CodeStub { - public: - ApiGetterEntryStub(Handle info, - ApiFunction* fun) - : info_(info), - fun_(fun) { } - void Generate(MacroAssembler* masm); - virtual bool has_custom_cache() { return true; } - virtual bool GetCustomCache(Code** code_out); - virtual void SetCustomCache(Code* value); - - static const int kStackSpace = 5; - static const int kArgc = 2; - private: - Handle info() { return info_; } - ApiFunction* fun() { return fun_; } - Major MajorKey() { return NoCache; } - int MinorKey() { return 0; } - const char* GetName() { return "ApiEntryStub"; } - // The accessor info associated with the function. - Handle info_; - // The function to be called. - ApiFunction* fun_; -}; - - class JSEntryStub : public CodeStub { public: JSEntryStub() { } diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index bda697abaf..fb8c5cd4a3 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -70,9 +70,10 @@ void CodeGenerator::ProcessDeferred() { DeferredCode* code = deferred_.RemoveLast(); ASSERT(masm_ == code->masm()); // Record position of deferred code stub. - masm_->RecordStatementPosition(code->statement_position()); + masm_->positions_recorder()->RecordStatementPosition( + code->statement_position()); if (code->position() != RelocInfo::kNoPosition) { - masm_->RecordPosition(code->position()); + masm_->positions_recorder()->RecordPosition(code->position()); } // Generate the code. Comment cmnt(masm_, code->comment()); @@ -251,39 +252,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) { #endif -Handle CodeGenerator::ComputeCallInitialize( - int argc, - InLoopFlag in_loop) { - if (in_loop == IN_LOOP) { - // Force the creation of the corresponding stub outside loops, - // because it may be used when clearing the ICs later - it is - // possible for a series of IC transitions to lose the in-loop - // information, and the IC clearing code can't generate a stub - // that it needs so we need to ensure it is generated already. - ComputeCallInitialize(argc, NOT_IN_LOOP); - } - CALL_HEAP_FUNCTION( - StubCache::ComputeCallInitialize(argc, in_loop, Code::CALL_IC), - Code); -} - - -Handle CodeGenerator::ComputeKeyedCallInitialize( - int argc, - InLoopFlag in_loop) { - if (in_loop == IN_LOOP) { - // Force the creation of the corresponding stub outside loops, - // because it may be used when clearing the ICs later - it is - // possible for a series of IC transitions to lose the in-loop - // information, and the IC clearing code can't generate a stub - // that it needs so we need to ensure it is generated already. 
- ComputeKeyedCallInitialize(argc, NOT_IN_LOOP); - } - CALL_HEAP_FUNCTION( - StubCache::ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC), - Code); -} - void CodeGenerator::ProcessDeclarations(ZoneList* declarations) { int length = declarations->length(); int globals = 0; @@ -402,10 +370,10 @@ bool CodeGenerator::RecordPositions(MacroAssembler* masm, int pos, bool right_here) { if (pos != RelocInfo::kNoPosition) { - masm->RecordStatementPosition(pos); - masm->RecordPosition(pos); + masm->positions_recorder()->RecordStatementPosition(pos); + masm->positions_recorder()->RecordPosition(pos); if (right_here) { - return masm->WriteRecordedPositions(); + return masm->positions_recorder()->WriteRecordedPositions(); } } return false; @@ -435,7 +403,7 @@ void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) { void CodeGenerator::CodeForSourcePosition(int pos) { if (FLAG_debug_info && pos != RelocInfo::kNoPosition) { - masm()->RecordPosition(pos); + masm()->positions_recorder()->RecordPosition(pos); } } @@ -481,20 +449,4 @@ int CEntryStub::MinorKey() { } -bool ApiGetterEntryStub::GetCustomCache(Code** code_out) { - Object* cache = info()->load_stub_cache(); - if (cache->IsUndefined()) { - return false; - } else { - *code_out = Code::cast(cache); - return true; - } -} - - -void ApiGetterEntryStub::SetCustomCache(Code* value) { - info()->set_load_stub_cache(value); -} - - } } // namespace v8::internal diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 8f923dd34b..66300d6caf 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -58,8 +58,6 @@ // Generate // ComputeLazyCompile // BuildFunctionInfo -// ComputeCallInitialize -// ComputeCallInitializeInLoop // ProcessDeclarations // DeclareGlobals // CheckForInlineRuntimeCall diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 6f02960dda..29bbbc7034 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -279,7 +279,6 @@ Handle Compiler::Compile(Handle source, // in that case too. ScriptDataImpl* pre_data = input_pre_data; if (pre_data == NULL - && FLAG_lazy && source_length >= FLAG_min_preparse_length) { pre_data = ParserApi::PartialPreParse(source, NULL, extension); } diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 790e807aef..c0dbf73ad0 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -33,7 +33,7 @@ #include "conversions-inl.h" #include "dtoa.h" #include "factory.h" -#include "scanner.h" +#include "scanner-base.h" #include "strtod.h" namespace v8 { @@ -121,7 +121,7 @@ static const double JUNK_STRING_VALUE = OS::nan_value(); template static inline bool AdvanceToNonspace(Iterator* current, EndMark end) { while (*current != end) { - if (!Scanner::kIsWhiteSpace.get(**current)) return true; + if (!ScannerConstants::kIsWhiteSpace.get(**current)) return true; ++*current; } return false; @@ -654,7 +654,7 @@ static double InternalStringToDouble(Iterator current, buffer[buffer_pos] = '\0'; double converted = Strtod(Vector(buffer, buffer_pos), exponent); - return sign? -converted: converted; + return sign ? 
-converted : converted; } @@ -711,11 +711,6 @@ double StringToDouble(Vector str, } -extern "C" char* dtoa(double d, int mode, int ndigits, - int* decpt, int* sign, char** rve); - -extern "C" void freedtoa(char* s); - const char* DoubleToCString(double v, Vector buffer) { StringBuilder builder(buffer.start(), buffer.length()); @@ -739,21 +734,13 @@ const char* DoubleToCString(double v, Vector buffer) { default: { int decimal_point; int sign; - char* decimal_rep; - bool used_gay_dtoa = false; const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1; - char v8_dtoa_buffer[kV8DtoaBufferCapacity]; + char decimal_rep[kV8DtoaBufferCapacity]; int length; - if (DoubleToAscii(v, DTOA_SHORTEST, 0, - Vector(v8_dtoa_buffer, kV8DtoaBufferCapacity), - &sign, &length, &decimal_point)) { - decimal_rep = v8_dtoa_buffer; - } else { - decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL); - used_gay_dtoa = true; - length = StrLength(decimal_rep); - } + DoubleToAscii(v, DTOA_SHORTEST, 0, + Vector(decimal_rep, kV8DtoaBufferCapacity), + &sign, &length, &decimal_point); if (sign) builder.AddCharacter('-'); @@ -787,8 +774,6 @@ const char* DoubleToCString(double v, Vector buffer) { if (exponent < 0) exponent = -exponent; builder.AddFormatted("%d", exponent); } - - if (used_gay_dtoa) freedtoa(decimal_rep); } } return builder.Finalize(); @@ -816,7 +801,7 @@ const char* IntToCString(int n, Vector buffer) { char* DoubleToFixedCString(double value, int f) { - const int kMaxDigitsBeforePoint = 20; + const int kMaxDigitsBeforePoint = 21; const double kFirstNonFixed = 1e21; const int kMaxDigitsAfterPoint = 20; ASSERT(f >= 0); @@ -840,16 +825,14 @@ char* DoubleToFixedCString(double value, int f) { // Find a sufficiently precise decimal representation of n. int decimal_point; int sign; - // Add space for the '.' and the '\0' byte. + // Add space for the '\0' byte. const int kDecimalRepCapacity = - kMaxDigitsBeforePoint + kMaxDigitsAfterPoint + 2; + kMaxDigitsBeforePoint + kMaxDigitsAfterPoint + 1; char decimal_rep[kDecimalRepCapacity]; int decimal_rep_length; - bool status = DoubleToAscii(value, DTOA_FIXED, f, - Vector(decimal_rep, kDecimalRepCapacity), - &sign, &decimal_rep_length, &decimal_point); - USE(status); - ASSERT(status); + DoubleToAscii(value, DTOA_FIXED, f, + Vector(decimal_rep, kDecimalRepCapacity), + &sign, &decimal_rep_length, &decimal_point); // Create a representation that is padded with zeros if needed. int zero_prefix_length = 0; @@ -935,8 +918,6 @@ char* DoubleToExponentialCString(double value, int f) { // Find a sufficiently precise decimal representation of n. int decimal_point; int sign; - char* decimal_rep = NULL; - bool used_gay_dtoa = false; // f corresponds to the digits after the point. There is always one digit // before the point. The number of requested_digits equals hence f + 1. // And we have to add one character for the null-terminator. @@ -944,31 +925,18 @@ char* DoubleToExponentialCString(double value, int f) { // Make sure that the buffer is big enough, even if we fall back to the // shortest representation (which happens when f equals -1). 
ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1); - char v8_dtoa_buffer[kV8DtoaBufferCapacity]; + char decimal_rep[kV8DtoaBufferCapacity]; int decimal_rep_length; if (f == -1) { - if (DoubleToAscii(value, DTOA_SHORTEST, 0, - Vector(v8_dtoa_buffer, kV8DtoaBufferCapacity), - &sign, &decimal_rep_length, &decimal_point)) { - f = decimal_rep_length - 1; - decimal_rep = v8_dtoa_buffer; - } else { - decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL); - decimal_rep_length = StrLength(decimal_rep); - f = decimal_rep_length - 1; - used_gay_dtoa = true; - } + DoubleToAscii(value, DTOA_SHORTEST, 0, + Vector(decimal_rep, kV8DtoaBufferCapacity), + &sign, &decimal_rep_length, &decimal_point); + f = decimal_rep_length - 1; } else { - if (DoubleToAscii(value, DTOA_PRECISION, f + 1, - Vector(v8_dtoa_buffer, kV8DtoaBufferCapacity), - &sign, &decimal_rep_length, &decimal_point)) { - decimal_rep = v8_dtoa_buffer; - } else { - decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL); - decimal_rep_length = StrLength(decimal_rep); - used_gay_dtoa = true; - } + DoubleToAscii(value, DTOA_PRECISION, f + 1, + Vector(decimal_rep, kV8DtoaBufferCapacity), + &sign, &decimal_rep_length, &decimal_point); } ASSERT(decimal_rep_length > 0); ASSERT(decimal_rep_length <= f + 1); @@ -977,10 +945,6 @@ char* DoubleToExponentialCString(double value, int f) { char* result = CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1); - if (used_gay_dtoa) { - freedtoa(decimal_rep); - } - return result; } @@ -1000,22 +964,14 @@ char* DoubleToPrecisionCString(double value, int p) { // Find a sufficiently precise decimal representation of n. int decimal_point; int sign; - char* decimal_rep = NULL; - bool used_gay_dtoa = false; // Add one for the terminating null character. const int kV8DtoaBufferCapacity = kMaximalDigits + 1; - char v8_dtoa_buffer[kV8DtoaBufferCapacity]; + char decimal_rep[kV8DtoaBufferCapacity]; int decimal_rep_length; - if (DoubleToAscii(value, DTOA_PRECISION, p, - Vector(v8_dtoa_buffer, kV8DtoaBufferCapacity), - &sign, &decimal_rep_length, &decimal_point)) { - decimal_rep = v8_dtoa_buffer; - } else { - decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL); - decimal_rep_length = StrLength(decimal_rep); - used_gay_dtoa = true; - } + DoubleToAscii(value, DTOA_PRECISION, p, + Vector(decimal_rep, kV8DtoaBufferCapacity), + &sign, &decimal_rep_length, &decimal_point); ASSERT(decimal_rep_length <= p); int exponent = decimal_point - 1; @@ -1059,9 +1015,6 @@ char* DoubleToPrecisionCString(double value, int p) { result = builder.Finalize(); } - if (used_gay_dtoa) { - freedtoa(decimal_rep); - } return result; } diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h index cae9b08d5b..28053f46d1 100644 --- a/deps/v8/src/dateparser.h +++ b/deps/v8/src/dateparser.h @@ -28,7 +28,8 @@ #ifndef V8_DATEPARSER_H_ #define V8_DATEPARSER_H_ -#include "scanner.h" +#include "char-predicates-inl.h" +#include "scanner-base.h" namespace v8 { namespace internal { @@ -99,10 +100,20 @@ class DateParser : public AllStatic { } // The skip methods return whether they actually skipped something. - bool Skip(uint32_t c) { return ch_ == c ? (Next(), true) : false; } + bool Skip(uint32_t c) { + if (ch_ == c) { + Next(); + return true; + } + return false; + } bool SkipWhiteSpace() { - return Scanner::kIsWhiteSpace.get(ch_) ? 
(Next(), true) : false; + if (ScannerConstants::kIsWhiteSpace.get(ch_)) { + Next(); + return true; + } + return false; } bool SkipParentheses() { diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index 0eab8d1b83..d091991a11 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -897,10 +897,6 @@ ExecutionState.prototype.frame = function(opt_index) { return new FrameMirror(this.break_id, opt_index); }; -ExecutionState.prototype.cframesValue = function(opt_from_index, opt_to_index) { - return %GetCFrames(this.break_id); -}; - ExecutionState.prototype.setSelectedFrame = function(index) { var i = %ToNumber(index); if (i < 0 || i >= this.frameCount()) throw new Error('Illegal frame index.'); @@ -1751,11 +1747,6 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) }; -DebugCommandProcessor.prototype.backtracec = function(cmd, args) { - return this.exec_state_.cframesValue(); -}; - - DebugCommandProcessor.prototype.frameRequest_ = function(request, response) { // No frames no source. if (this.exec_state_.frameCount() == 0) { @@ -2205,29 +2196,6 @@ function NumberToHex8Str(n) { return r; }; -DebugCommandProcessor.prototype.formatCFrames = function(cframes_value) { - var result = ""; - if (cframes_value == null || cframes_value.length == 0) { - result += "(stack empty)"; - } else { - for (var i = 0; i < cframes_value.length; ++i) { - if (i != 0) result += "\n"; - result += this.formatCFrame(cframes_value[i]); - } - } - return result; -}; - - -DebugCommandProcessor.prototype.formatCFrame = function(cframe_value) { - var result = ""; - result += "0x" + NumberToHex8Str(cframe_value.address); - if (!IS_UNDEFINED(cframe_value.text)) { - result += " " + cframe_value.text; - } - return result; -} - /** * Convert an Object to its debugger protocol representation. The representation diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 24f0409861..f3bf954da9 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -1839,6 +1839,7 @@ bool Debug::IsDebugGlobal(GlobalObject* global) { void Debug::ClearMirrorCache() { + PostponeInterruptsScope postpone; HandleScope scope; ASSERT(Top::context() == *Debug::debug_context()); diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h index e805173e07..65eded9989 100644 --- a/deps/v8/src/double.h +++ b/deps/v8/src/double.h @@ -54,18 +54,20 @@ class Double { explicit Double(DiyFp diy_fp) : d64_(DiyFpToUint64(diy_fp)) {} + // The value encoded by this Double must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). DiyFp AsDiyFp() const { + ASSERT(Sign() > 0); ASSERT(!IsSpecial()); return DiyFp(Significand(), Exponent()); } - // this->Significand() must not be 0. + // The value encoded by this Double must be strictly greater than 0. DiyFp AsNormalizedDiyFp() const { + ASSERT(value() > 0.0); uint64_t f = Significand(); int e = Exponent(); - ASSERT(f != 0); - // The current double could be a denormal. while ((f & kHiddenBit) == 0) { f <<= 1; @@ -82,6 +84,20 @@ class Double { return d64_; } + // Returns the next greater double. Returns +infinity on input +infinity. 
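// Concretely: -0.0 steps to +0.0, any other negative value steps toward zero
// by decrementing its raw bit pattern, and a positive value (or +0.0) steps
// away from zero by incrementing it -- one representable double per step.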
+ double NextDouble() const { + if (d64_ == kInfinity) return Double(kInfinity).value(); + if (Sign() < 0 && Significand() == 0) { + // -0.0 + return 0.0; + } + if (Sign() < 0) { + return Double(d64_ - 1).value(); + } else { + return Double(d64_ + 1).value(); + } + } + int Exponent() const { if (IsDenormal()) return kDenormalExponent; @@ -120,24 +136,30 @@ class Double { ((d64 & kSignificandMask) != 0); } - bool IsInfinite() const { uint64_t d64 = AsUint64(); return ((d64 & kExponentMask) == kExponentMask) && ((d64 & kSignificandMask) == 0); } - int Sign() const { uint64_t d64 = AsUint64(); return (d64 & kSignMask) == 0? 1: -1; } + // Precondition: the value encoded by this Double must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } // Returns the two boundaries of this. // The bigger boundary (m_plus) is normalized. The lower boundary has the same // exponent as m_plus. + // Precondition: the value encoded by this Double must be greater than 0. void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + ASSERT(value() > 0.0); DiyFp v = this->AsDiyFp(); bool significand_is_zero = (v.f() == kHiddenBit); DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc index f4141eb619..b857a5dc59 100644 --- a/deps/v8/src/dtoa.cc +++ b/deps/v8/src/dtoa.cc @@ -30,6 +30,7 @@ #include "v8.h" #include "dtoa.h" +#include "bignum-dtoa.h" #include "double.h" #include "fast-dtoa.h" #include "fixed-dtoa.h" @@ -37,7 +38,19 @@ namespace v8 { namespace internal { -bool DoubleToAscii(double v, DtoaMode mode, int requested_digits, +static BignumDtoaMode DtoaToBignumDtoaMode(DtoaMode dtoa_mode) { + switch (dtoa_mode) { + case DTOA_SHORTEST: return BIGNUM_DTOA_SHORTEST; + case DTOA_FIXED: return BIGNUM_DTOA_FIXED; + case DTOA_PRECISION: return BIGNUM_DTOA_PRECISION; + default: + UNREACHABLE(); + return BIGNUM_DTOA_SHORTEST; // To silence compiler. + } +} + + +void DoubleToAscii(double v, DtoaMode mode, int requested_digits, Vector buffer, int* sign, int* length, int* point) { ASSERT(!Double(v).IsSpecial()); ASSERT(mode == DTOA_SHORTEST || requested_digits >= 0); @@ -54,25 +67,37 @@ bool DoubleToAscii(double v, DtoaMode mode, int requested_digits, buffer[1] = '\0'; *length = 1; *point = 1; - return true; + return; } if (mode == DTOA_PRECISION && requested_digits == 0) { buffer[0] = '\0'; *length = 0; - return true; + return; } + bool fast_worked; switch (mode) { case DTOA_SHORTEST: - return FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point); + fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point); + break; case DTOA_FIXED: - return FastFixedDtoa(v, requested_digits, buffer, length, point); + fast_worked = FastFixedDtoa(v, requested_digits, buffer, length, point); + break; case DTOA_PRECISION: - return FastDtoa(v, FAST_DTOA_PRECISION, requested_digits, - buffer, length, point); + fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits, + buffer, length, point); + break; + default: + UNREACHABLE(); + fast_worked = false; } - return false; + if (fast_worked) return; + + // If the fast dtoa didn't succeed use the slower bignum version. 
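// FastDtoa and FastFixedDtoa give up on the small fraction of inputs where
// their limited-precision integer arithmetic cannot prove the digits correct;
// the bignum fallback is exact for every input, which is why DoubleToAscii can
// no longer fail and its return type changed from bool to void above.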
+ BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode); + BignumDtoa(v, bignum_mode, requested_digits, buffer, length, point); + buffer[*length] = '\0'; } } } // namespace v8::internal diff --git a/deps/v8/src/dtoa.h b/deps/v8/src/dtoa.h index be0d5456b2..b3e79afa48 100644 --- a/deps/v8/src/dtoa.h +++ b/deps/v8/src/dtoa.h @@ -32,13 +32,15 @@ namespace v8 { namespace internal { enum DtoaMode { - // 0.9999999999999999 becomes 0.1 + // Return the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate but + // correct) 0.3. DTOA_SHORTEST, - // Fixed number of digits after the decimal point. + // Return a fixed number of digits after the decimal point. // For instance fixed(0.1, 4) becomes 0.1000 // If the input number is big, the output will be big. DTOA_FIXED, - // Fixed number of digits (independent of the decimal point). + // Return a fixed number of digits, no matter what the exponent is. DTOA_PRECISION }; @@ -72,8 +74,10 @@ static const int kBase10MaximalLength = 17; // which case the caller has to fill the missing digits with '0's. // Halfway cases are again rounded away from 0. // 'DoubleToAscii' expects the given buffer to be big enough to hold all digits -// and a terminating null-character. -bool DoubleToAscii(double v, DtoaMode mode, int requested_digits, +// and a terminating null-character. In SHORTEST-mode it expects a buffer of +// at least kBase10MaximalLength + 1. Otherwise, the size of the output is +// limited to requested_digits digits plus the null terminator. +void DoubleToAscii(double v, DtoaMode mode, int requested_digits, Vector buffer, int* sign, int* length, int* point); } } // namespace v8::internal diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index 885bf63cf1..691d56954a 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -700,135 +700,4 @@ MaybeObject* Execution::HandleStackGuardInterrupt() { return Heap::undefined_value(); } -// --- G C E x t e n s i o n --- - -const char* const GCExtension::kSource = "native function gc();"; - - -v8::Handle GCExtension::GetNativeFunction( - v8::Handle str) { - return v8::FunctionTemplate::New(GCExtension::GC); -} - - -v8::Handle GCExtension::GC(const v8::Arguments& args) { - // All allocation spaces other than NEW_SPACE have the same effect. - Heap::CollectAllGarbage(false); - return v8::Undefined(); -} - - -static GCExtension gc_extension; -static v8::DeclareExtension gc_extension_declaration(&gc_extension); - - -// --- E x t e r n a l i z e S t r i n g E x t e n s i o n --- - - -template -class SimpleStringResource : public Base { - public: - // Takes ownership of |data|. 
- SimpleStringResource(Char* data, size_t length) - : data_(data), - length_(length) {} - - virtual ~SimpleStringResource() { delete[] data_; } - - virtual const Char* data() const { return data_; } - - virtual size_t length() const { return length_; } - - private: - Char* const data_; - const size_t length_; -}; - - -typedef SimpleStringResource - SimpleAsciiStringResource; -typedef SimpleStringResource - SimpleTwoByteStringResource; - - -const char* const ExternalizeStringExtension::kSource = - "native function externalizeString();" - "native function isAsciiString();"; - - -v8::Handle ExternalizeStringExtension::GetNativeFunction( - v8::Handle str) { - if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) { - return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize); - } else { - ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0); - return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii); - } -} - - -v8::Handle ExternalizeStringExtension::Externalize( - const v8::Arguments& args) { - if (args.Length() < 1 || !args[0]->IsString()) { - return v8::ThrowException(v8::String::New( - "First parameter to externalizeString() must be a string.")); - } - bool force_two_byte = false; - if (args.Length() >= 2) { - if (args[1]->IsBoolean()) { - force_two_byte = args[1]->BooleanValue(); - } else { - return v8::ThrowException(v8::String::New( - "Second parameter to externalizeString() must be a boolean.")); - } - } - bool result = false; - Handle string = Utils::OpenHandle(*args[0].As()); - if (string->IsExternalString()) { - return v8::ThrowException(v8::String::New( - "externalizeString() can't externalize twice.")); - } - if (string->IsAsciiRepresentation() && !force_two_byte) { - char* data = new char[string->length()]; - String::WriteToFlat(*string, data, 0, string->length()); - SimpleAsciiStringResource* resource = new SimpleAsciiStringResource( - data, string->length()); - result = string->MakeExternal(resource); - if (result && !string->IsSymbol()) { - i::ExternalStringTable::AddString(*string); - } - if (!result) delete resource; - } else { - uc16* data = new uc16[string->length()]; - String::WriteToFlat(*string, data, 0, string->length()); - SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource( - data, string->length()); - result = string->MakeExternal(resource); - if (result && !string->IsSymbol()) { - i::ExternalStringTable::AddString(*string); - } - if (!result) delete resource; - } - if (!result) { - return v8::ThrowException(v8::String::New("externalizeString() failed.")); - } - return v8::Undefined(); -} - - -v8::Handle ExternalizeStringExtension::IsAscii( - const v8::Arguments& args) { - if (args.Length() != 1 || !args[0]->IsString()) { - return v8::ThrowException(v8::String::New( - "isAsciiString() requires a single string argument.")); - } - return Utils::OpenHandle(*args[0].As())->IsAsciiRepresentation() ? 
- v8::True() : v8::False(); -} - - -static ExternalizeStringExtension externalize_extension; -static v8::DeclareExtension externalize_extension_declaration( - &externalize_extension); - } } // namespace v8::internal diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index 5547803be7..a2ddc41a15 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -189,6 +189,9 @@ class StackGuard : public AllStatic { static uintptr_t climit() { return thread_local_.climit_; } + static uintptr_t real_climit() { + return thread_local_.real_climit_; + } static uintptr_t jslimit() { return thread_local_.jslimit_; } @@ -313,29 +316,6 @@ class PostponeInterruptsScope BASE_EMBEDDED { } }; - -class GCExtension : public v8::Extension { - public: - GCExtension() : v8::Extension("v8/gc", kSource) {} - virtual v8::Handle GetNativeFunction( - v8::Handle name); - static v8::Handle GC(const v8::Arguments& args); - private: - static const char* const kSource; -}; - - -class ExternalizeStringExtension : public v8::Extension { - public: - ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {} - virtual v8::Handle GetNativeFunction( - v8::Handle name); - static v8::Handle Externalize(const v8::Arguments& args); - static v8::Handle IsAscii(const v8::Arguments& args); - private: - static const char* const kSource; -}; - } } // namespace v8::internal #endif // V8_EXECUTION_H_ diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc new file mode 100644 index 0000000000..8b4bdbd88e --- /dev/null +++ b/deps/v8/src/extensions/externalize-string-extension.cc @@ -0,0 +1,141 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "externalize-string-extension.h" + +namespace v8 { +namespace internal { + +template +class SimpleStringResource : public Base { + public: + // Takes ownership of |data|. 
+ SimpleStringResource(Char* data, size_t length) + : data_(data), + length_(length) {} + + virtual ~SimpleStringResource() { delete[] data_; } + + virtual const Char* data() const { return data_; } + + virtual size_t length() const { return length_; } + + private: + Char* const data_; + const size_t length_; +}; + + +typedef SimpleStringResource + SimpleAsciiStringResource; +typedef SimpleStringResource + SimpleTwoByteStringResource; + + +const char* const ExternalizeStringExtension::kSource = + "native function externalizeString();" + "native function isAsciiString();"; + + +v8::Handle ExternalizeStringExtension::GetNativeFunction( + v8::Handle str) { + if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) { + return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize); + } else { + ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0); + return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii); + } +} + + +v8::Handle ExternalizeStringExtension::Externalize( + const v8::Arguments& args) { + if (args.Length() < 1 || !args[0]->IsString()) { + return v8::ThrowException(v8::String::New( + "First parameter to externalizeString() must be a string.")); + } + bool force_two_byte = false; + if (args.Length() >= 2) { + if (args[1]->IsBoolean()) { + force_two_byte = args[1]->BooleanValue(); + } else { + return v8::ThrowException(v8::String::New( + "Second parameter to externalizeString() must be a boolean.")); + } + } + bool result = false; + Handle string = Utils::OpenHandle(*args[0].As()); + if (string->IsExternalString()) { + return v8::ThrowException(v8::String::New( + "externalizeString() can't externalize twice.")); + } + if (string->IsAsciiRepresentation() && !force_two_byte) { + char* data = new char[string->length()]; + String::WriteToFlat(*string, data, 0, string->length()); + SimpleAsciiStringResource* resource = new SimpleAsciiStringResource( + data, string->length()); + result = string->MakeExternal(resource); + if (result && !string->IsSymbol()) { + i::ExternalStringTable::AddString(*string); + } + if (!result) delete resource; + } else { + uc16* data = new uc16[string->length()]; + String::WriteToFlat(*string, data, 0, string->length()); + SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource( + data, string->length()); + result = string->MakeExternal(resource); + if (result && !string->IsSymbol()) { + i::ExternalStringTable::AddString(*string); + } + if (!result) delete resource; + } + if (!result) { + return v8::ThrowException(v8::String::New("externalizeString() failed.")); + } + return v8::Undefined(); +} + + +v8::Handle ExternalizeStringExtension::IsAscii( + const v8::Arguments& args) { + if (args.Length() != 1 || !args[0]->IsString()) { + return v8::ThrowException(v8::String::New( + "isAsciiString() requires a single string argument.")); + } + return Utils::OpenHandle(*args[0].As())->IsAsciiRepresentation() ? + v8::True() : v8::False(); +} + + +void ExternalizeStringExtension::Register() { + static ExternalizeStringExtension externalize_extension; + static v8::DeclareExtension externalize_extension_declaration( + &externalize_extension); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h new file mode 100644 index 0000000000..b97b4962cf --- /dev/null +++ b/deps/v8/src/extensions/externalize-string-extension.h @@ -0,0 +1,50 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_ +#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_ + +#include "v8.h" + +namespace v8 { +namespace internal { + +class ExternalizeStringExtension : public v8::Extension { + public: + ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {} + virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction( + v8::Handle<v8::String> name); + static v8::Handle<v8::Value> Externalize(const v8::Arguments& args); + static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args); + static void Register(); + private: + static const char* const kSource; +}; + +} } // namespace v8::internal + +#endif // V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_ diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc new file mode 100644 index 0000000000..b8f081c54d --- /dev/null +++ b/deps/v8/src/extensions/gc-extension.cc @@ -0,0 +1,54 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "gc-extension.h" + +namespace v8 { +namespace internal { + +const char* const GCExtension::kSource = "native function gc();"; + + +v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction( + v8::Handle<v8::String> str) { + return v8::FunctionTemplate::New(GCExtension::GC); +} + + +v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) { + // All allocation spaces other than NEW_SPACE have the same effect. + Heap::CollectAllGarbage(false); + return v8::Undefined(); +} + + +void GCExtension::Register() { + static GCExtension gc_extension; + static v8::DeclareExtension gc_extension_declaration(&gc_extension); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h new file mode 100644 index 0000000000..06ea4ed21a --- /dev/null +++ b/deps/v8/src/extensions/gc-extension.h @@ -0,0 +1,49 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +#ifndef V8_EXTENSIONS_GC_EXTENSION_H_ +#define V8_EXTENSIONS_GC_EXTENSION_H_ + +#include "v8.h" + +namespace v8 { +namespace internal { + +class GCExtension : public v8::Extension { + public: + GCExtension() : v8::Extension("v8/gc", kSource) {} + virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction( + v8::Handle<v8::String> name); + static v8::Handle<v8::Value> GC(const v8::Arguments& args); + static void Register(); + private: + static const char* const kSource; +}; + +} } // namespace v8::internal + +#endif // V8_EXTENSIONS_GC_EXTENSION_H_ diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 54501ec95d..46feea77ac 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -186,6 +186,7 @@ DEFINE_bool(always_inline_smi_code, false, // heap.cc DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)") +DEFINE_int(max_executable_size, 0, "max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false, "always perform global GCs") DEFINE_int(gc_interval, -1, "garbage collect after allocations") DEFINE_bool(trace_gc, false, diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 97987c27a8..8592472524 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -301,11 +301,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { } -MemOperand FullCodeGenerator::ContextOperand(Register context, int index) { - return CodeGenerator::ContextOperand(context, index); -} - - int FullCodeGenerator::SlotOffset(Slot* slot) { ASSERT(slot != NULL); // Offset is negative because higher indexes are at lower addresses. @@ -563,9 +558,10 @@ void FullCodeGenerator::SetStatementPosition(int pos) { } -void FullCodeGenerator::SetSourcePosition(int pos) { +void FullCodeGenerator::SetSourcePosition( + int pos, PositionRecordingType recording_type) { if (FLAG_debug_info && pos != RelocInfo::kNoPosition) { - masm_->RecordPosition(pos); + masm_->positions_recorder()->RecordPosition(pos, recording_type); } } @@ -1225,13 +1221,6 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) { } -void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) { - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kRegExpCloneResult, 1); - context()->Plug(result_register()); -} - #undef __ diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index 201507b2af..6a1def6ee1 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -423,7 +423,9 @@ class FullCodeGenerator: public AstVisitor { void SetStatementPosition(Statement* stmt); void SetExpressionPosition(Expression* expr, int pos); void SetStatementPosition(int pos); - void SetSourcePosition(int pos); + void SetSourcePosition( + int pos, + PositionRecordingType recording_type = NORMAL_POSITION); // Non-local control flow support. void EnterFinallyBlock(); @@ -462,9 +464,6 @@ class FullCodeGenerator: public AstVisitor { // in v8::internal::Context. void LoadContextField(Register dst, int context_index); - // Create an operand for a context field. - MemOperand ContextOperand(Register context, int context_index); - // AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node); AST_NODE_LIST(DECLARE_VISIT) diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc index 9ede908528..5339840988 100644 --- a/deps/v8/src/global-handles.cc +++ b/deps/v8/src/global-handles.cc @@ -372,13 +372,14 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) { int post_gc_processing_count = 0; -void GlobalHandles::PostGarbageCollectionProcessing() { +bool GlobalHandles::PostGarbageCollectionProcessing() { // Process weak global handle callbacks. This must be done after the // GC is completely done, because the callbacks may invoke arbitrary // API functions. // At the same time deallocate all DESTROYED nodes. ASSERT(Heap::gc_state() == Heap::NOT_IN_GC); const int initial_post_gc_processing_count = ++post_gc_processing_count; + bool next_gc_likely_to_collect_more = false; Node** p = &head_; while (*p != NULL) { if ((*p)->PostGarbageCollectionProcessing()) { @@ -399,6 +400,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() { } node->set_next_free(first_deallocated()); set_first_deallocated(node); + next_gc_likely_to_collect_more = true; } else { p = (*p)->next_addr(); } @@ -407,6 +409,8 @@ void GlobalHandles::PostGarbageCollectionProcessing() { if (first_deallocated()) { first_deallocated()->set_next(head()); } + + return next_gc_likely_to_collect_more; } diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h index 659f86eca7..37b2b44522 100644 --- a/deps/v8/src/global-handles.h +++ b/deps/v8/src/global-handles.h @@ -96,7 +96,8 @@ class GlobalHandles : public AllStatic { static bool IsWeak(Object** location); // Process pending weak handles. - static void PostGarbageCollectionProcessing(); + // Returns true if next major GC is likely to collect more garbage. + static bool PostGarbageCollectionProcessing(); // Iterates over all strong handles. static void IterateStrongRoots(ObjectVisitor* v); diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index c218f80dc1..88c3e780d9 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -1,4 +1,4 @@ -// Copyright 2006-2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -195,8 +195,8 @@ const int kCharSize = sizeof(char); // NOLINT const int kShortSize = sizeof(short); // NOLINT const int kIntSize = sizeof(int); // NOLINT const int kDoubleSize = sizeof(double); // NOLINT -const int kPointerSize = sizeof(void*); // NOLINT const int kIntptrSize = sizeof(intptr_t); // NOLINT +const int kPointerSize = sizeof(void*); // NOLINT #if V8_HOST_ARCH_64_BIT const int kPointerSizeLog2 = 3; @@ -208,38 +208,6 @@ const intptr_t kIntptrSignBit = 0x80000000; const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu; #endif -// Mask for the sign bit in a smi. -const intptr_t kSmiSignMask = kIntptrSignBit; - -const int kObjectAlignmentBits = kPointerSizeLog2; -const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits; -const intptr_t kObjectAlignmentMask = kObjectAlignment - 1; - -// Desired alignment for pointers. -const intptr_t kPointerAlignment = (1 << kPointerSizeLog2); -const intptr_t kPointerAlignmentMask = kPointerAlignment - 1; - -// Desired alignment for maps. 
-#if V8_HOST_ARCH_64_BIT -const intptr_t kMapAlignmentBits = kObjectAlignmentBits; -#else -const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3; -#endif -const intptr_t kMapAlignment = (1 << kMapAlignmentBits); -const intptr_t kMapAlignmentMask = kMapAlignment - 1; - -// Desired alignment for generated code is 32 bytes (to improve cache line -// utilization). -const int kCodeAlignmentBits = 5; -const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits; -const intptr_t kCodeAlignmentMask = kCodeAlignment - 1; - -// Tag information for Failure. -const int kFailureTag = 3; -const int kFailureTagSize = 2; -const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1; - - const int kBitsPerByte = 8; const int kBitsPerByteLog2 = 3; const int kBitsPerPointer = kPointerSize * kBitsPerByte; @@ -255,364 +223,6 @@ const int kBinary32MinExponent = 0x01; const int kBinary32MantissaBits = 23; const int kBinary32ExponentShift = 23; -// Zap-value: The value used for zapping dead objects. -// Should be a recognizable hex value tagged as a heap object pointer. -#ifdef V8_HOST_ARCH_64_BIT -const Address kZapValue = - reinterpret_cast
<Address>(V8_UINT64_C(0xdeadbeedbeadbeed)); -const Address kHandleZapValue = - reinterpret_cast
<Address>(V8_UINT64_C(0x1baddead0baddead)); -const Address kFromSpaceZapValue = - reinterpret_cast
<Address>(V8_UINT64_C(0x1beefdad0beefdad)); -const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb; -#else -const Address kZapValue = reinterpret_cast
<Address>(0xdeadbeed); -const Address kHandleZapValue = reinterpret_cast
<Address>(0xbaddead); -const Address kFromSpaceZapValue = reinterpret_cast<Address>
(0xbeefdad); -const uint32_t kDebugZapValue = 0xbadbaddb; -#endif - - -// Number of bits to represent the page size for paged spaces. The value of 13 -// gives 8K bytes per page. -const int kPageSizeBits = 13; - -// On Intel architecture, cache line size is 64 bytes. -// On ARM it may be less (32 bytes), but as far this constant is -// used for aligning data, it doesn't hurt to align on a greater value. -const int kProcessorCacheLineSize = 64; - -// Constants relevant to double precision floating point numbers. - -// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no -// other bits set. -const uint64_t kQuietNaNMask = static_cast(0xfff) << 51; -// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30. -const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32); - - -// ----------------------------------------------------------------------------- -// Forward declarations for frequently used classes -// (sorted alphabetically) - -class AccessorInfo; -class Allocation; -class Arguments; -class Assembler; -class AssertNoAllocation; -class BreakableStatement; -class Code; -class CodeGenerator; -class CodeStub; -class Context; -class Debug; -class Debugger; -class DebugInfo; -class Descriptor; -class DescriptorArray; -class Expression; -class ExternalReference; -class FixedArray; -class FunctionEntry; -class FunctionLiteral; -class FunctionTemplateInfo; -class NumberDictionary; -class StringDictionary; -class FreeStoreAllocationPolicy; -template class Handle; -class Heap; -class HeapObject; -class IC; -class InterceptorInfo; -class IterationStatement; -class JSArray; -class JSFunction; -class JSObject; -class LargeObjectSpace; -template class List; -class LookupResult; -class MacroAssembler; -class Map; -class MapSpace; -class MarkCompactCollector; -class NewSpace; -class NodeVisitor; -class Object; -class MaybeObject; -class OldSpace; -class Property; -class Proxy; -class RegExpNode; -struct RegExpCompileData; -class RegExpTree; -class RegExpCompiler; -class RegExpVisitor; -class Scope; -template class ScopeInfo; -class SerializedScopeInfo; -class Script; -class Slot; -class Smi; -template - class SplayTree; -class Statement; -class String; -class Struct; -class SwitchStatement; -class AstVisitor; -class Variable; -class VariableProxy; -class RelocInfo; -class Deserializer; -class MessageLocation; -class ObjectGroup; -class TickSample; -class VirtualMemory; -class Mutex; - -typedef bool (*WeakSlotCallback)(Object** pointer); - -// ----------------------------------------------------------------------------- -// Miscellaneous - -// NOTE: SpaceIterator depends on AllocationSpace enumeration values being -// consecutive. -enum AllocationSpace { - NEW_SPACE, // Semispaces collected with copying collector. - OLD_POINTER_SPACE, // May contain pointers to new space. - OLD_DATA_SPACE, // Must not have pointers to new space. - CODE_SPACE, // No pointers to new space, marked executable. - MAP_SPACE, // Only and all map objects. - CELL_SPACE, // Only and all cell objects. - LO_SPACE, // Promoted large objects. - - FIRST_SPACE = NEW_SPACE, - LAST_SPACE = LO_SPACE, - FIRST_PAGED_SPACE = OLD_POINTER_SPACE, - LAST_PAGED_SPACE = CELL_SPACE -}; -const int kSpaceTagSize = 3; -const int kSpaceTagMask = (1 << kSpaceTagSize) - 1; - - -// A flag that indicates whether objects should be pretenured when -// allocated (allocated directly into the old generation) or not -// (allocated in the young generation if the object size and type -// allows). 
-enum PretenureFlag { NOT_TENURED, TENURED }; - -enum GarbageCollector { SCAVENGER, MARK_COMPACTOR }; - -enum Executability { NOT_EXECUTABLE, EXECUTABLE }; - -enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG }; - -// Flag indicating whether code is built into the VM (one of the natives files). -enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE }; - - -// A CodeDesc describes a buffer holding instructions and relocation -// information. The instructions start at the beginning of the buffer -// and grow forward, the relocation information starts at the end of -// the buffer and grows backward. -// -// |<--------------- buffer_size ---------------->| -// |<-- instr_size -->| |<-- reloc_size -->| -// +==================+========+==================+ -// | instructions | free | reloc info | -// +==================+========+==================+ -// ^ -// | -// buffer - -struct CodeDesc { - byte* buffer; - int buffer_size; - int instr_size; - int reloc_size; - Assembler* origin; -}; - - -// Callback function on object slots, used for iterating heap object slots in -// HeapObjects, global pointers to heap objects, etc. The callback allows the -// callback function to change the value of the slot. -typedef void (*ObjectSlotCallback)(HeapObject** pointer); - - -// Callback function used for iterating objects in heap spaces, -// for example, scanning heap objects. -typedef int (*HeapObjectCallback)(HeapObject* obj); - - -// Callback function used for checking constraints when copying/relocating -// objects. Returns true if an object can be copied/relocated from its -// old_addr to a new_addr. -typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr); - - -// Callback function on inline caches, used for iterating over inline caches -// in compiled code. -typedef void (*InlineCacheCallback)(Code* code, Address ic); - - -// State for inline cache call sites. Aliased as IC::State. -enum InlineCacheState { - // Has never been executed. - UNINITIALIZED, - // Has been executed but monomorhic state has been delayed. - PREMONOMORPHIC, - // Has been executed and only one receiver type has been seen. - MONOMORPHIC, - // Like MONOMORPHIC but check failed due to prototype. - MONOMORPHIC_PROTOTYPE_FAILURE, - // Multiple receiver types have been seen. - MEGAMORPHIC, - // Special states for debug break or step in prepare stubs. - DEBUG_BREAK, - DEBUG_PREPARE_STEP_IN -}; - - -enum InLoopFlag { - NOT_IN_LOOP, - IN_LOOP -}; - - -enum CallFunctionFlags { - NO_CALL_FUNCTION_FLAGS = 0, - RECEIVER_MIGHT_BE_VALUE = 1 << 0 // Receiver might not be a JSObject. -}; - - -enum InlineCacheHolderFlag { - OWN_MAP, // For fast properties objects. - PROTOTYPE_MAP // For slow properties objects (except GlobalObjects). -}; - - -// Type of properties. -// Order of properties is significant. -// Must fit in the BitField PropertyDetails::TypeField. -// A copy of this is in mirror-debugger.js. -enum PropertyType { - NORMAL = 0, // only in slow mode - FIELD = 1, // only in fast mode - CONSTANT_FUNCTION = 2, // only in fast mode - CALLBACKS = 3, - INTERCEPTOR = 4, // only in lookup results, not in descriptors. - MAP_TRANSITION = 5, // only in fast mode - CONSTANT_TRANSITION = 6, // only in fast mode - NULL_DESCRIPTOR = 7, // only in fast mode - // All properties before MAP_TRANSITION are real. - FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION, - // There are no IC stubs for NULL_DESCRIPTORS. Therefore, - // NULL_DESCRIPTOR can be used as the type flag for IC stubs for - // nonexistent properties. 
- NONEXISTENT = NULL_DESCRIPTOR -}; - - -// Whether to remove map transitions and constant transitions from a -// DescriptorArray. -enum TransitionFlag { - REMOVE_TRANSITIONS, - KEEP_TRANSITIONS -}; - - -// Union used for fast testing of specific double values. -union DoubleRepresentation { - double value; - int64_t bits; - DoubleRepresentation(double x) { value = x; } -}; - - -// Union used for customized checking of the IEEE double types -// inlined within v8 runtime, rather than going to the underlying -// platform headers and libraries -union IeeeDoubleLittleEndianArchType { - double d; - struct { - unsigned int man_low :32; - unsigned int man_high :20; - unsigned int exp :11; - unsigned int sign :1; - } bits; -}; - - -union IeeeDoubleBigEndianArchType { - double d; - struct { - unsigned int sign :1; - unsigned int exp :11; - unsigned int man_high :20; - unsigned int man_low :32; - } bits; -}; - - -// AccessorCallback -struct AccessorDescriptor { - MaybeObject* (*getter)(Object* object, void* data); - MaybeObject* (*setter)(JSObject* object, Object* value, void* data); - void* data; -}; - - -// Logging and profiling. -// A StateTag represents a possible state of the VM. When compiled with -// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these. -// Creating a VMState object enters a state by pushing on the stack, and -// destroying a VMState object leaves a state by popping the current state -// from the stack. - -#define STATE_TAG_LIST(V) \ - V(JS) \ - V(GC) \ - V(COMPILER) \ - V(OTHER) \ - V(EXTERNAL) - -enum StateTag { -#define DEF_STATE_TAG(name) name, - STATE_TAG_LIST(DEF_STATE_TAG) -#undef DEF_STATE_TAG - // Pseudo-types. - state_tag_count -}; - - -// ----------------------------------------------------------------------------- -// Macros - -// Testers for test. - -#define HAS_SMI_TAG(value) \ - ((reinterpret_cast(value) & kSmiTagMask) == kSmiTag) - -#define HAS_FAILURE_TAG(value) \ - ((reinterpret_cast(value) & kFailureTagMask) == kFailureTag) - -// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer -#define OBJECT_POINTER_ALIGN(value) \ - (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask) - -// POINTER_SIZE_ALIGN returns the value aligned as a pointer. -#define POINTER_SIZE_ALIGN(value) \ - (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask) - -// MAP_POINTER_ALIGN returns the value aligned as a map pointer. -#define MAP_POINTER_ALIGN(value) \ - (((value) + kMapAlignmentMask) & ~kMapAlignmentMask) - -// CODE_POINTER_ALIGN returns the value aligned as a generated code segment. -#define CODE_POINTER_ALIGN(value) \ - (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask) - // The expression OFFSET_OF(type, field) computes the byte-offset // of the specified field relative to the containing type. This // corresponds to 'offsetof' (in stddef.h), except that it doesn't @@ -669,26 +279,6 @@ F FUNCTION_CAST(Address addr) { DISALLOW_COPY_AND_ASSIGN(TypeName) -// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk") -// inside a C++ class and new and delete will be overloaded so logging is -// performed. -// This file (globals.h) is included before log.h, so we use direct calls to -// the Logger rather than the LOG macro. 
-#ifdef DEBUG -#define TRACK_MEMORY(name) \ - void* operator new(size_t size) { \ - void* result = ::operator new(size); \ - Logger::NewEvent(name, result, size); \ - return result; \ - } \ - void operator delete(void* object) { \ - Logger::DeleteEvent(name, object); \ - ::operator delete(object); \ - } -#else -#define TRACK_MEMORY(name) -#endif - // Define used for helping GCC to make better inlining. Don't bother for debug // builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation // errors in debug build. @@ -712,20 +302,12 @@ F FUNCTION_CAST(Address addr) { #define MUST_USE_RESULT #endif +// ----------------------------------------------------------------------------- +// Forward declarations for frequently used classes +// (sorted alphabetically) -// Feature flags bit positions. They are mostly based on the CPUID spec. -// (We assign CPUID itself to one of the currently reserved bits -- -// feel free to change this if needed.) -// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX. -enum CpuFeature { SSE4_1 = 32 + 19, // x86 - SSE3 = 32 + 0, // x86 - SSE2 = 26, // x86 - CMOV = 15, // x86 - RDTSC = 4, // x86 - CPUID = 10, // x86 - VFP3 = 1, // ARM - ARMv7 = 2, // ARM - SAHF = 0}; // x86 +class FreeStoreAllocationPolicy; +template class List; } } // namespace v8::internal diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 7a46bc3ea2..1364951b71 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -499,6 +499,10 @@ void InitScriptLineEnds(Handle