
v8: upgrade 3.21.18.3

v0.11.8-release
Timothy J Fontaine 11 years ago
parent commit a53c763c16
Changed files (lines changed in parentheses):

  1. deps/v8/ChangeLog (215)
  2. deps/v8/build/toolchain.gypi (22)
  3. deps/v8/include/v8-debug.h (46)
  4. deps/v8/include/v8-profiler.h (54)
  5. deps/v8/include/v8.h (1519)
  6. deps/v8/include/v8config.h (451)
  7. deps/v8/include/v8stdint.h (6)
  8. deps/v8/samples/process.cc (5)
  9. deps/v8/src/accessors.cc (175)
  10. deps/v8/src/accessors.h (98)
  11. deps/v8/src/api.cc (951)
  12. deps/v8/src/api.h (12)
  13. deps/v8/src/apinatives.js (65)
  14. deps/v8/src/arguments.cc (138)
  15. deps/v8/src/arguments.h (118)
  16. deps/v8/src/arm/assembler-arm-inl.h (5)
  17. deps/v8/src/arm/assembler-arm.cc (162)
  18. deps/v8/src/arm/assembler-arm.h (9)
  19. deps/v8/src/arm/builtins-arm.cc (182)
  20. deps/v8/src/arm/code-stubs-arm.cc (529)
  21. deps/v8/src/arm/code-stubs-arm.h (38)
  22. deps/v8/src/arm/codegen-arm.h (6)
  23. deps/v8/src/arm/constants-arm.h (2)
  24. deps/v8/src/arm/cpu-arm.cc (9)
  25. deps/v8/src/arm/debug-arm.cc (6)
  26. deps/v8/src/arm/deoptimizer-arm.cc (203)
  27. deps/v8/src/arm/disasm-arm.cc (3)
  28. deps/v8/src/arm/full-codegen-arm.cc (38)
  29. deps/v8/src/arm/ic-arm.cc (8)
  30. deps/v8/src/arm/lithium-arm.cc (152)
  31. deps/v8/src/arm/lithium-arm.h (507)
  32. deps/v8/src/arm/lithium-codegen-arm.cc (333)
  33. deps/v8/src/arm/lithium-codegen-arm.h (12)
  34. deps/v8/src/arm/lithium-gap-resolver-arm.h (2)
  35. deps/v8/src/arm/macro-assembler-arm.cc (242)
  36. deps/v8/src/arm/macro-assembler-arm.h (67)
  37. deps/v8/src/arm/regexp-macro-assembler-arm.cc (119)
  38. deps/v8/src/arm/regexp-macro-assembler-arm.h (11)
  39. deps/v8/src/arm/simulator-arm.cc (95)
  40. deps/v8/src/arm/stub-cache-arm.cc (678)
  41. deps/v8/src/assembler.cc (11)
  42. deps/v8/src/assembler.h (34)
  43. deps/v8/src/ast.cc (38)
  44. deps/v8/src/ast.h (518)
  45. deps/v8/src/bootstrapper.cc (163)
  46. deps/v8/src/bootstrapper.h (4)
  47. deps/v8/src/builtins.cc (51)
  48. deps/v8/src/builtins.h (28)
  49. deps/v8/src/checks.cc (28)
  50. deps/v8/src/checks.h (17)
  51. deps/v8/src/circular-queue-inl.h (62)
  52. deps/v8/src/circular-queue.cc (125)
  53. deps/v8/src/circular-queue.h (72)
  54. deps/v8/src/code-stubs-hydrogen.cc (309)
  55. deps/v8/src/code-stubs.cc (34)
  56. deps/v8/src/code-stubs.h (136)
  57. deps/v8/src/codegen.cc (17)
  58. deps/v8/src/compiler.cc (186)
  59. deps/v8/src/compiler.h (54)
  60. deps/v8/src/contexts.cc (43)
  61. deps/v8/src/contexts.h (22)
  62. deps/v8/src/counters.cc (10)
  63. deps/v8/src/counters.h (14)
  64. deps/v8/src/cpu-profiler-inl.h (21)
  65. deps/v8/src/cpu-profiler.cc (137)
  66. deps/v8/src/cpu-profiler.h (37)
  67. deps/v8/src/cpu.cc (466)
  68. deps/v8/src/cpu.h (91)
  69. deps/v8/src/d8-debug.cc (22)
  70. deps/v8/src/d8-debug.h (7)
  71. deps/v8/src/d8.cc (41)
  72. deps/v8/src/d8.h (14)
  73. deps/v8/src/debug-agent.cc (111)
  74. deps/v8/src/debug-agent.h (36)
  75. deps/v8/src/debug-debugger.js (9)
  76. deps/v8/src/debug.cc (179)
  77. deps/v8/src/debug.h (38)
  78. deps/v8/src/deoptimizer.cc (710)
  79. deps/v8/src/deoptimizer.h (128)
  80. deps/v8/src/disassembler.cc (6)
  81. deps/v8/src/effects.h (2)
  82. deps/v8/src/elements.cc (12)
  83. deps/v8/src/execution.cc (119)
  84. deps/v8/src/execution.h (51)
  85. deps/v8/src/extensions/externalize-string-extension.cc (6)
  86. deps/v8/src/extensions/gc-extension.cc (5)
  87. deps/v8/src/extensions/i18n/break-iterator.cc (333)
  88. deps/v8/src/extensions/i18n/break-iterator.h (85)
  89. deps/v8/src/extensions/i18n/break-iterator.js (197)
  90. deps/v8/src/extensions/i18n/collator.js (209)
  91. deps/v8/src/extensions/i18n/date-format.js (474)
  92. deps/v8/src/extensions/i18n/globals.js (168)
  93. deps/v8/src/extensions/i18n/i18n-extension.cc (77)
  94. deps/v8/src/extensions/i18n/i18n-utils.cc (177)
  95. deps/v8/src/extensions/i18n/i18n-utils.h (91)
  96. deps/v8/src/extensions/i18n/i18n-utils.js (536)
  97. deps/v8/src/extensions/i18n/locale.js (190)
  98. deps/v8/src/extensions/i18n/number-format.js (289)
  99. deps/v8/src/extensions/i18n/overrides.js (220)
  100. deps/v8/src/extensions/statistics-extension.cc (2)

deps/v8/ChangeLog (215 lines changed)

@@ -1,3 +1,216 @@
2013-09-18: Version 3.21.17
Implemented local load/store elimination on basic blocks.
Added mutex when accessing concurrent recompilation output queue.
(Chromium issue 291236)
Don't lookup the cache for the result of Function::New.
(Chromium issue 272579)
Tweaked HConstant::EmitAtUses() to eliminate useless constant
generation.
(Chromium issue 2881)
Performance and stability improvements on all platforms.

2013-09-16: Version 3.21.16
Every place where AllocationMemento is initialized with an
AllocationSite is now checked to be sure a valid Site goes in. This is
temporary code to diagnose chromium bug 284577.
Performance and stability improvements on all platforms.

2013-09-13: Version 3.21.15
Non-JSObject heap objects are now handled using slow-path IC stub
guarded by the map.
(Chromium issue 280632)
i18n Javascript code added to the snapshot.
(V8 issue 2745)
Performance and stability improvements on all platforms.

2013-09-12: Version 3.21.14
Added access check for observed objects.
(V8 issue 2778)
Cleaned up v8::ArrayBuffer::Allocator interface.
(V8 issue 2823)
Performance and stability improvements on all platforms.

2013-09-11: Version 3.21.13
Added a ResourceConstraint for the embedder to specify that V8 is
running on a memory constrained device.
(Chromium issue 280984)
Removed HandleScope default ctor.
(Chromium issue 236173)
Enabled escape analysis for Hydrogen.
Correctly stringified mixed encoding indirect strings.
(Chromium issue 287476)
Performance and stability improvements on all platforms.

2013-09-09: Version 3.21.12
Fixed bitwise negation on x64.
(Chromium issue 285355)
Dropped GetCurrentThreadId() and TerminateExecution(int) from
the external API.
Fixed polymorphic INTERCEPTOR StoreICs on ARM/MIPS.
(Chromium issue 284998)
Added check if timeout has expired after processing each sample.
(issue 2814, v8:2871)
Removed obsolete global V8::has_been_fooed flags.
(issue 2744)
Performance and stability improvements on all platforms.

2013-09-05: Version 3.21.11
Performance and stability improvements on all platforms.

2013-09-04: Version 3.21.10
Fixed Eternal::IsEmpty logic (issue 2870).
Performance and stability improvements on all platforms.

2013-09-03: Version 3.21.9
Deprecated Persistent functions which were marked to be deprecated.
Allowed uncacheable identifiers to go generic (issue 2867).
Performance and stability improvements on all platforms.

2013-09-02: Version 3.21.8
Added scriptId to StackTrace frames (issue 2865).
Performance and stability improvements on all platforms.

2013-08-30: Version 3.21.7
Fixed casts of eternal handles.
Turned on global handle zapping.
Always visit branches during HGraph building (Chromium issue 280333).
Profiler changes: removed deprecated API, support higher sampling
rate on Windows.
Performance and stability improvements on all platforms.

2013-08-29: Version 3.21.6
Fixed inlined 'throw' statements interfering with live range
computation. (issue 2843)
Performance and stability improvements on all platforms.

2013-08-28: Version 3.21.5
Fixed compilation with recent MinGW64 versions. (issue 2300)
Added RemovePrototype to FunctionTemplate. (Chromium issue 272440)
Performance and stability improvements on all platforms.

2013-08-26: Version 3.21.4
Lowered kInitialMaxFastElementArray constant to 95K (issue 2790).
Use signals for cpu profiling on Mac OS X (issue 2814).
Deprecated CpuProfileNode::GetSelfSamplesCount (Chromium issue 267595).
Added support for higher CPU profiler sampling rate on posix systems
(issue 2814).
Worked around 'inlining failed' build error with older GCC 4.x releases.
Added source map support to tick processor.
Stability improvements on all platforms.

2013-08-23: Version 3.21.3
Temporarily disabled optimization for StringWrappers to use native
valueOf. (issue 2855)
Fixed crash on function declarations in eval inside non-trivial local
scope. (issue 2594)
Rewrote SamplingCircularQueue. (issue 2814)
Fixed hidden properties on object with frozen prototype. (issue 2829)
Fix deoptimization bug. (Chromium issue 274164)
Stability improvements on all platforms.

2013-08-22: Version 3.21.2
Stability improvements on all platforms.

2013-08-21: Version 3.21.1
Promoted ArrayBuffer, DataView and typed arrays to non-experimental.
(Chromium issue 270527)
Replaced OS::MemCopy with memcpy in typed array initialization.
(Chromium issue 270642)
Moved i18n break iterator C++ code to runtime (issue 2745)
Fixed invalid out-of-bounds store in MacroAssembler::Allocate.
(Chromium issue 263515)
Fixed register misuse in Allocate() on ARM. (issue 2851)
Fixed empty handle dereference in Runtime_InternalNumberFormat.
(Chromium issue 275467)
Performance and stability improvements on all platforms.

2013-08-19: Version 3.21.0
Fixed GC-related crasher (Chromium issue 274438)
Reverted making Intl non-enumerable.
Performance and stability improvements on all platforms.

2013-08-14: Version 3.20.17
Fixed Math.round/floor that had bogus Smi representation

@@ -31,7 +244,7 @@
Performance and stability improvements on all platforms.

2013-08-07: Version 3.20.14
2013-08-07: Version 3.20.15
Exposed eternal handle api.
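
The "eternal handle api" above refers to v8::Eternal, a handle created once and kept alive for the rest of the Isolate's lifetime (its IsEmpty() logic is the fix noted in 3.21.10). A minimal embedder-side sketch, assuming the 3.21-era API; the cached string and function names are invented for illustration:

#include <v8.h>

// Hypothetical process-wide cache of a string that must never be collected.
// Eternal handles are never disposed, so there is no cleanup code.
static v8::Eternal<v8::String> g_cached_key;

void EnsureKey(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  if (g_cached_key.IsEmpty()) {
    g_cached_key.Set(isolate, v8::String::New("cached-key"));
  }
}

v8::Local<v8::String> Key(v8::Isolate* isolate) {
  return g_cached_key.Get(isolate);
}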

deps/v8/build/toolchain.gypi (22 lines changed)

@@ -561,13 +561,21 @@
'cflags!': [
'-O0',
'-O1',
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O3',
],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
'cflags': ['-O3'],
'cflags!': ['-O2'],
}, {
'cflags': ['-O2'],
'cflags!': ['-O3'],
}],
],
}],
['v8_optimized_debug!=0 and gcc_version==44 and clang==0', {
@@ -614,13 +622,11 @@
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags!': [
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O3',
'<(wno_array_bounds)',
],
'conditions': [
@@ -630,6 +636,14 @@
'-fno-tree-vrp',
],
}],
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
'cflags': ['-O3'],
'cflags!': ['-O2'],
}, {
'cflags': ['-O2'],
'cflags!': ['-O3'],
}],
],
}],
['OS=="android"', {

deps/v8/include/v8-debug.h (46 lines changed)

@@ -106,6 +106,8 @@ class V8_EXPORT Debug {
*/
virtual ClientData* GetClientData() const = 0;
virtual Isolate* GetIsolate() const = 0;
virtual ~Message() {}
};
@@ -150,21 +152,6 @@ class V8_EXPORT Debug {
virtual ~EventDetails() {}
};
/**
* Debug event callback function.
*
* \param event the type of the debug event that triggered the callback
* (enum DebugEvent)
* \param exec_state execution state (JavaScript object)
* \param event_data event specific data (JavaScript object)
* \param data value passed by the user to SetDebugEventListener
*/
typedef void (*EventCallback)(DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<Value> data);
/**
* Debug event callback function.
*
@@ -175,27 +162,12 @@ class V8_EXPORT Debug {
*/
typedef void (*EventCallback2)(const EventDetails& event_details);
/**
* Debug message callback function.
*
* \param message the debug message handler message object
* \param length length of the message
* \param client_data the data value passed when registering the message handler
* A MessageHandler does not take possession of the message string,
* and must not rely on the data persisting after the handler returns.
*
* This message handler is deprecated. Use MessageHandler2 instead.
*/
typedef void (*MessageHandler)(const uint16_t* message, int length,
ClientData* client_data);
/**
* Debug message callback function.
*
* \param message the debug message handler message object
*
* A MessageHandler does not take possession of the message data,
* A MessageHandler2 does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*MessageHandler2)(const Message& message);
@@ -210,10 +182,6 @@ class V8_EXPORT Debug {
*/
typedef void (*DebugMessageDispatchHandler)();
// Set a C debug event listener.
V8_DEPRECATED(static bool SetDebugEventListener(
EventCallback that,
Handle<Value> data = Handle<Value>()));
static bool SetDebugEventListener2(EventCallback2 that,
Handle<Value> data = Handle<Value>());
@@ -234,16 +202,12 @@ class V8_EXPORT Debug {
// Break execution of JavaScript in the given isolate (this method
// can be invoked from a non-VM thread) for further client command
// execution on a VM thread. Client data is then passed in
// EventDetails to EventCallback at the moment when the VM actually
// EventDetails to EventCallback2 at the moment when the VM actually
// stops. If no isolate is provided the default isolate is used.
static void DebugBreakForCommand(ClientData* data = NULL,
Isolate* isolate = NULL);
// Message based interface. The message protocol is JSON. NOTE the message
// handler thread is not supported any more; the parameter must be false.
V8_DEPRECATED(static void SetMessageHandler(
MessageHandler handler,
bool message_handler_thread = false));
// Message based interface. The message protocol is JSON.
static void SetMessageHandler2(MessageHandler2 handler);
// If no isolate is provided the default isolate is
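
With the old uint16_t-buffer MessageHandler gone, only the Message-based form remains. A minimal sketch of registering one, assuming the 3.21-era API (the frontend hand-off is hypothetical); note the handler must copy whatever it needs before returning:

#include <string>
#include <v8.h>
#include <v8-debug.h>

// New-style handler: receives the whole Message object; GetJSON() yields
// the JSON protocol payload.
static void OnDebugMessage(const v8::Debug::Message& message) {
  v8::HandleScope scope(message.GetIsolate());
  v8::String::Utf8Value json(message.GetJSON());
  std::string copy(*json, json.length());  // copy: data does not persist
  // HandOffToFrontend(copy);              // hypothetical consumer
}

void InstallDebugHandler() {
  v8::Debug::SetMessageHandler2(OnDebugMessage);
}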

deps/v8/include/v8-profiler.h (54 lines changed)

@@ -57,25 +57,15 @@ class V8_EXPORT CpuProfileNode {
*/
int GetLineNumber() const;
/**
* Returns total (self + children) execution time of the function,
* in milliseconds, estimated by samples count.
*/
V8_DEPRECATED(double GetTotalTime() const);
/**
* Returns self execution time of the function, in milliseconds,
* estimated by samples count.
*/
V8_DEPRECATED(double GetSelfTime() const);
/** Returns the count of samples where function exists. */
V8_DEPRECATED(double GetTotalSamplesCount() const);
/** Returns bailout reason for the function
* if the optimization was disabled for it.
*/
const char* GetBailoutReason() const;
/** DEPRECATED. Please use GetHitCount instead.
* Returns the count of samples where function was currently executing.
*/
double GetSelfSamplesCount() const;
V8_DEPRECATED(double GetSelfSamplesCount() const);
/**
* Returns the count of samples where the function was currently executing.
@@ -156,13 +146,11 @@ class V8_EXPORT CpuProfile {
class V8_EXPORT CpuProfiler {
public:
/**
* A note on security tokens usage. As scripts from different
* origins can run inside a single V8 instance, it is possible to
* have functions from different security contexts intermixed in a
* single CPU profile. To avoid exposing function names belonging to
* other contexts, filtering by security token is performed while
* obtaining profiling results.
* Changes default CPU profiler sampling interval to the specified number
* of microseconds. Default interval is 1000us. This method must be called
* when there are no profiles being recorded.
*/
void SetSamplingInterval(int us);
/**
* Returns the number of profiles collected (doesn't include
@@ -258,17 +246,19 @@ class V8_EXPORT HeapGraphEdge {
class V8_EXPORT HeapGraphNode {
public:
enum Type {
kHidden = 0, // Hidden node, may be filtered when shown to user.
kArray = 1, // An array of elements.
kString = 2, // A string.
kObject = 3, // A JS object (except for arrays and strings).
kCode = 4, // Compiled code.
kClosure = 5, // Function closure.
kRegExp = 6, // RegExp.
kHeapNumber = 7, // Number stored in the heap.
kNative = 8, // Native object (not from V8 heap).
kSynthetic = 9 // Synthetic object, usually used for grouping
// snapshot items together.
kHidden = 0, // Hidden node, may be filtered when shown to user.
kArray = 1, // An array of elements.
kString = 2, // A string.
kObject = 3, // A JS object (except for arrays and strings).
kCode = 4, // Compiled code.
kClosure = 5, // Function closure.
kRegExp = 6, // RegExp.
kHeapNumber = 7, // Number stored in the heap.
kNative = 8, // Native object (not from V8 heap).
kSynthetic = 9, // Synthetic object, usually used for grouping
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11 // Sliced string. A fragment of another string.
};
/** Returns node type (see HeapGraphNode::Type). */
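
A short sketch of the SetSamplingInterval contract documented above, assuming the 3.21-era profiler entry points (the profile title and workload are illustrative). The interval must be changed before any profile is started:

#include <v8.h>
#include <v8-profiler.h>

void RunStartupCode();  // hypothetical workload, defined elsewhere

const v8::CpuProfile* ProfileStartup(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  profiler->SetSamplingInterval(500);  // halve the 1000us default
  v8::Local<v8::String> title = v8::String::New("startup");
  profiler->StartCpuProfiling(title);
  RunStartupCode();
  return profiler->StopCpuProfiling(title);
}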

deps/v8/include/v8.h (1519 lines changed)

File diff suppressed because it is too large

deps/v8/include/v8config.h (451 lines changed)

@@ -0,0 +1,451 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8CONFIG_H_
#define V8CONFIG_H_
// Platform headers for feature detection below.
#if defined(__ANDROID__)
# include <sys/cdefs.h>
#elif defined(__APPLE__)
# include <TargetConditionals.h>
#elif defined(__linux__)
# include <features.h>
#endif
// This macro allows to test for the version of the GNU C library (or
// a compatible C library that masquerades as glibc). It evaluates to
// 0 if libc is not GNU libc or compatible.
// Use like:
// #if V8_GLIBC_PREREQ(2, 3)
// ...
// #endif
#if defined(__GLIBC__) && defined(__GLIBC_MINOR__)
# define V8_GLIBC_PREREQ(major, minor) \
((__GLIBC__ * 100 + __GLIBC_MINOR__) >= ((major) * 100 + (minor)))
#else
# define V8_GLIBC_PREREQ(major, minor) 0
#endif
// This macro allows to test for the version of the GNU C++ compiler.
// Note that this also applies to compilers that masquerade as GCC,
// for example clang and the Intel C++ compiler for Linux.
// Use like:
// #if V8_GNUC_PREREQ(4, 3, 1)
// ...
// #endif
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
# define V8_GNUC_PREREQ(major, minor, patchlevel) \
((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
((major) * 10000 + (minor) * 100 + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
# define V8_GNUC_PREREQ(major, minor, patchlevel) \
((__GNUC__ * 10000 + __GNUC_MINOR__) >= \
((major) * 10000 + (minor) * 100 + (patchlevel)))
#else
# define V8_GNUC_PREREQ(major, minor, patchlevel) 0
#endif
// -----------------------------------------------------------------------------
// Operating system detection
//
// V8_OS_ANDROID - Android
// V8_OS_BSD - BSDish (Mac OS X, Net/Free/Open/DragonFlyBSD)
// V8_OS_CYGWIN - Cygwin
// V8_OS_DRAGONFLYBSD - DragonFlyBSD
// V8_OS_FREEBSD - FreeBSD
// V8_OS_LINUX - Linux
// V8_OS_MACOSX - Mac OS X
// V8_OS_NACL - Native Client
// V8_OS_NETBSD - NetBSD
// V8_OS_OPENBSD - OpenBSD
// V8_OS_POSIX - POSIX compatible (mostly everything except Windows)
// V8_OS_SOLARIS - Sun Solaris and OpenSolaris
// V8_OS_WIN - Microsoft Windows
#if defined(__ANDROID__)
# define V8_OS_ANDROID 1
# define V8_OS_LINUX 1
# define V8_OS_POSIX 1
#elif defined(__APPLE__)
# define V8_OS_BSD 1
# define V8_OS_MACOSX 1
# define V8_OS_POSIX 1
#elif defined(__native_client__)
# define V8_OS_NACL 1
# define V8_OS_POSIX 1
#elif defined(__CYGWIN__)
# define V8_OS_CYGWIN 1
# define V8_OS_POSIX 1
#elif defined(__linux__)
# define V8_OS_LINUX 1
# define V8_OS_POSIX 1
#elif defined(__sun)
# define V8_OS_POSIX 1
# define V8_OS_SOLARIS 1
#elif defined(__FreeBSD__)
# define V8_OS_BSD 1
# define V8_OS_FREEBSD 1
# define V8_OS_POSIX 1
#elif defined(__DragonFly__)
# define V8_OS_BSD 1
# define V8_OS_DRAGONFLYBSD 1
# define V8_OS_POSIX 1
#elif defined(__NetBSD__)
# define V8_OS_BSD 1
# define V8_OS_NETBSD 1
# define V8_OS_POSIX 1
#elif defined(__OpenBSD__)
# define V8_OS_BSD 1
# define V8_OS_OPENBSD 1
# define V8_OS_POSIX 1
#elif defined(_WIN32)
# define V8_OS_WIN 1
#endif
// -----------------------------------------------------------------------------
// C library detection
//
// V8_LIBC_BIONIC - Bionic libc
// V8_LIBC_BSD - BSD libc derivate
// V8_LIBC_GLIBC - GNU C library
// V8_LIBC_UCLIBC - uClibc
//
// Note that testing for libc must be done using #if not #ifdef. For example,
// to test for the GNU C library, use:
// #if V8_LIBC_GLIBC
// ...
// #endif
#if defined(__BIONIC__)
# define V8_LIBC_BIONIC 1
# define V8_LIBC_BSD 1
#elif defined(__UCLIBC__)
# define V8_LIBC_UCLIBC 1
#elif defined(__GLIBC__) || defined(__GNU_LIBRARY__)
# define V8_LIBC_GLIBC 1
#else
# define V8_LIBC_BSD V8_OS_BSD
#endif
// -----------------------------------------------------------------------------
// Compiler detection
//
// V8_CC_CLANG - Clang
// V8_CC_GNU - GNU C++
// V8_CC_INTEL - Intel C++
// V8_CC_MINGW - Minimalist GNU for Windows
// V8_CC_MINGW32 - Minimalist GNU for Windows (mingw32)
// V8_CC_MINGW64 - Minimalist GNU for Windows (mingw-w64)
// V8_CC_MSVC - Microsoft Visual C/C++
//
// C++11 feature detection
//
// V8_HAS_CXX11_ALIGNAS - alignas specifier supported
// V8_HAS_CXX11_ALIGNOF - alignof(type) operator supported
// V8_HAS_CXX11_STATIC_ASSERT - static_assert() supported
// V8_HAS_CXX11_DELETE - deleted functions supported
// V8_HAS_CXX11_FINAL - final marker supported
// V8_HAS_CXX11_OVERRIDE - override marker supported
//
// Compiler-specific feature detection
//
// V8_HAS___ALIGNOF - __alignof(type) operator supported
// V8_HAS___ALIGNOF__ - __alignof__(type) operator supported
// V8_HAS_ATTRIBUTE_ALIGNED - __attribute__((aligned(n))) supported
// V8_HAS_ATTRIBUTE_ALWAYS_INLINE - __attribute__((always_inline))
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported
// V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
// V8_HAS___FINAL - __final supported in non-C++11 mode
// V8_HAS___FORCEINLINE - __forceinline supported
// V8_HAS_SEALED - MSVC style sealed marker supported
//
// Note that testing for compilers and/or features must be done using #if
// not #ifdef. For example, to test for Intel C++ Compiler, use:
// #if V8_CC_INTEL
// ...
// #endif
#if defined(__clang__)
# define V8_CC_CLANG 1
// Clang defines __alignof__ as alias for __alignof
# define V8_HAS___ALIGNOF 1
# define V8_HAS___ALIGNOF__ V8_HAS___ALIGNOF
# define V8_HAS_ATTRIBUTE_ALIGNED (__has_attribute(aligned))
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
# define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
# define V8_HAS_CXX11_DELETE (__has_feature(cxx_deleted_functions))
# define V8_HAS_CXX11_FINAL (__has_feature(cxx_override_control))
# define V8_HAS_CXX11_OVERRIDE (__has_feature(cxx_override_control))
#elif defined(__GNUC__)
# define V8_CC_GNU 1
// Intel C++ also masquerades as GCC 3.2.0
# define V8_CC_INTEL (defined(__INTEL_COMPILER))
# define V8_CC_MINGW32 (defined(__MINGW32__))
# define V8_CC_MINGW64 (defined(__MINGW64__))
# define V8_CC_MINGW (V8_CC_MINGW32 || V8_CC_MINGW64)
# define V8_HAS___ALIGNOF__ (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_ALIGNED (V8_GNUC_PREREQ(2, 95, 0))
// always_inline is available in gcc 4.0 but not very reliable until 4.4.
// Works around "sorry, unimplemented: inlining failed" build errors with
// older compilers.
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
# define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0))
// g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
// without warnings (functionality used by the macros below). These modes
// are detectable by checking whether __GXX_EXPERIMENTAL_CXX0X__ is defined or,
// more standardly, by checking whether __cplusplus has a C++11 or greater
// value. Current versions of g++ do not correctly set __cplusplus, so we check
// both for forward compatibility.
# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_CXX11_DELETE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_CXX11_OVERRIDE (V8_GNUC_PREREQ(4, 7, 0))
# define V8_HAS_CXX11_FINAL (V8_GNUC_PREREQ(4, 7, 0))
# else
// '__final' is a non-C++11 GCC synonym for 'final', per GCC r176655.
# define V8_HAS___FINAL (V8_GNUC_PREREQ(4, 7, 0))
# endif
#elif defined(_MSC_VER)
# define V8_CC_MSVC 1
# define V8_HAS___ALIGNOF 1
// Override control was added with Visual Studio 2005, but
// Visual Studio 2010 and earlier spell "final" as "sealed".
# define V8_HAS_CXX11_FINAL (_MSC_VER >= 1700)
# define V8_HAS_CXX11_OVERRIDE (_MSC_VER >= 1400)
# define V8_HAS_SEALED (_MSC_VER >= 1400)
# define V8_HAS_DECLSPEC_ALIGN 1
# define V8_HAS_DECLSPEC_DEPRECATED (_MSC_VER >= 1300)
# define V8_HAS_DECLSPEC_NOINLINE 1
# define V8_HAS___FORCEINLINE 1
#endif
// -----------------------------------------------------------------------------
// Helper macros
// A macro used to make better inlining. Don't bother for debug builds.
// Use like:
// V8_INLINE int GetZero() { return 0; }
#if !defined(DEBUG) && V8_HAS_ATTRIBUTE_ALWAYS_INLINE
# define V8_INLINE inline __attribute__((always_inline))
#elif !defined(DEBUG) && V8_HAS___FORCEINLINE
# define V8_INLINE __forceinline
#else
# define V8_INLINE inline
#endif
// A macro used to tell the compiler to never inline a particular function.
// Don't bother for debug builds.
// Use like:
// V8_NOINLINE int GetMinusOne() { return -1; }
#if !defined(DEBUG) && V8_HAS_ATTRIBUTE_NOINLINE
# define V8_NOINLINE __attribute__((noinline))
#elif !defined(DEBUG) && V8_HAS_DECLSPEC_NOINLINE
# define V8_NOINLINE __declspec(noinline)
#else
# define V8_NOINLINE /* NOT SUPPORTED */
#endif
// A macro to mark classes or functions as deprecated.
#if !V8_DISABLE_DEPRECATIONS && V8_HAS_ATTRIBUTE_DEPRECATED
# define V8_DEPRECATED(declarator) declarator __attribute__((deprecated))
#elif !V8_DISABLE_DEPRECATIONS && V8_HAS_DECLSPEC_DEPRECATED
# define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
#else
# define V8_DEPRECATED(declarator) declarator
#endif
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
# define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
# define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
// A macro to provide the compiler with branch prediction information.
#if V8_HAS_BUILTIN_EXPECT
# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
# define V8_LIKELY(condition) (__builtin_expect(!!(condition), 1))
#else
# define V8_UNLIKELY(condition) (condition)
# define V8_LIKELY(condition) (condition)
#endif
// A macro to specify that a method is deleted from the corresponding class.
// Any attempt to use the method will always produce an error at compile time
// when this macro can be implemented (i.e. if the compiler supports C++11).
// If the current compiler does not support C++11, use of the annotated method
// will still cause an error, but the error will most likely occur at link time
// rather than at compile time. As a backstop, method declarations using this
// macro should be private.
// Use like:
// class A {
// private:
// A(const A& other) V8_DELETE;
// A& operator=(const A& other) V8_DELETE;
// };
#if V8_HAS_CXX11_DELETE
# define V8_DELETE = delete
#else
# define V8_DELETE /* NOT SUPPORTED */
#endif
// Annotate a virtual method indicating it must be overriding a virtual
// method in the parent class.
// Use like:
// virtual void bar() V8_OVERRIDE;
#if V8_HAS_CXX11_OVERRIDE
# define V8_OVERRIDE override
#else
# define V8_OVERRIDE /* NOT SUPPORTED */
#endif
// Annotate a virtual method indicating that subclasses must not override it,
// or annotate a class to indicate that it cannot be subclassed.
// Use like:
// class B V8_FINAL : public A {};
// virtual void bar() V8_FINAL;
#if V8_HAS_CXX11_FINAL
# define V8_FINAL final
#elif V8_HAS___FINAL
# define V8_FINAL __final
#elif V8_HAS_SEALED
# define V8_FINAL sealed
#else
# define V8_FINAL /* NOT SUPPORTED */
#endif
// This macro allows to specify memory alignment for structs, classes, etc.
// Use like:
// class V8_ALIGNED(16) MyClass { ... };
// V8_ALIGNED(32) int array[42];
#if V8_HAS_CXX11_ALIGNAS
# define V8_ALIGNED(n) alignas(n)
#elif V8_HAS_ATTRIBUTE_ALIGNED
# define V8_ALIGNED(n) __attribute__((aligned(n)))
#elif V8_HAS_DECLSPEC_ALIGN
# define V8_ALIGNED(n) __declspec(align(n))
#else
# define V8_ALIGNED(n) /* NOT SUPPORTED */
#endif
// This macro is similar to V8_ALIGNED(), but takes a type instead of size
// in bytes. If the compiler does not support using the alignment of the
// |type|, it will align according to the |alignment| instead. For example,
// Visual Studio C++ cannot combine __declspec(align) and __alignof. The
// |alignment| must be a literal that is used as a kind of worst-case fallback
// alignment.
// Use like:
// struct V8_ALIGNAS(AnotherClass, 16) NewClass { ... };
// V8_ALIGNAS(double, 8) int array[100];
#if V8_HAS_CXX11_ALIGNAS
# define V8_ALIGNAS(type, alignment) alignas(type)
#elif V8_HAS___ALIGNOF__ && V8_HAS_ATTRIBUTE_ALIGNED
# define V8_ALIGNAS(type, alignment) __attribute__((aligned(__alignof__(type))))
#else
# define V8_ALIGNAS(type, alignment) V8_ALIGNED(alignment)
#endif
// This macro returns alignment in bytes (an integer power of two) required for
// any instance of the given type, which is either complete type, an array type,
// or a reference type.
// Use like:
// size_t alignment = V8_ALIGNOF(double);
#if V8_HAS_CXX11_ALIGNOF
# define V8_ALIGNOF(type) alignof(type)
#elif V8_HAS___ALIGNOF
# define V8_ALIGNOF(type) __alignof(type)
#elif V8_HAS___ALIGNOF__
# define V8_ALIGNOF(type) __alignof__(type)
#else
// Note that alignment of a type within a struct can be less than the
// alignment of the type stand-alone (because of ancient ABIs), so this
// should only be used as a last resort.
namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
# define V8_ALIGNOF(type) (sizeof(::v8::AlignOfHelper<type>) - sizeof(type))
#endif
#endif // V8CONFIG_H_
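
Two quick illustrations of the conventions this new header establishes. First, the V8_OS_* and V8_CC_* macros are defined to 1 or left undefined, so they are tested with #if (an undefined macro evaluates to 0 there). Second, the helper macros compose as their "Use like" comments show. Note also the last-resort V8_ALIGNOF fallback above: sizeof(AlignOfHelper<type>) - sizeof(type) measures the padding the compiler inserts between a leading char and the T member, which matches T's alignment on common ABIs. Both sketches below are hypothetical embedder-style code with all names invented; definitions of Record/Reset are omitted:

#include "v8config.h"

#if V8_OS_WIN
static const char kPathSeparator = '\\';
#elif V8_OS_POSIX
static const char kPathSeparator = '/';
#endif

class V8_FINAL Histogram {  // not subclassable where the compiler supports it
 public:
  Histogram() : count_(0) {}

  V8_INLINE int count() const { return count_; }   // aggressively inlined
  bool Record(int sample) V8_WARN_UNUSED_RESULT;   // caller must check result
  V8_DEPRECATED(void Reset());                     // warns at call sites

 private:
  // Copying forbidden: '= delete' under C++11, a link error otherwise.
  Histogram(const Histogram&) V8_DELETE;
  Histogram& operator=(const Histogram&) V8_DELETE;

  V8_ALIGNED(16) int buckets_[4];                  // 16-byte alignment
  int count_;
};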

deps/v8/include/v8stdint.h (6 lines changed)

@@ -33,7 +33,9 @@
#include <stddef.h>
#include <stdio.h>
#if defined(_WIN32) && !defined(__MINGW32__)
#include "v8config.h"
#if V8_OS_WIN && !V8_CC_MINGW
typedef signed char int8_t;
typedef unsigned char uint8_t;
@@ -47,7 +49,7 @@ typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#include <stdint.h> // NOLINT
#endif

deps/v8/samples/process.cc (5 lines changed)

@@ -291,9 +291,8 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
// Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles they will be
// automatically reclaimed.
Isolate* isolate = GetIsolate();
context_.Dispose(isolate);
process_.Dispose(isolate);
context_.Dispose();
process_.Dispose();
}
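
The change above drops the explicit isolate argument: in this V8, the argument-less Persistent::Dispose() recovers the isolate from the handle itself, while the Dispose(Isolate*) overload is being phased out. A minimal sketch of the same teardown pattern (the class and its members are stand-ins for the sample's):

#include <v8.h>

class RequestProcessor {
 public:
  ~RequestProcessor() {
    context_.Dispose();  // was: context_.Dispose(isolate)
    process_.Dispose();
  }

 private:
  v8::Persistent<v8::Context> context_;
  v8::Persistent<v8::Function> process_;
};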

deps/v8/src/accessors.cc (175 lines changed)

@@ -51,19 +51,27 @@ static C* FindInstanceOf(Isolate* isolate, Object* obj) {
// Entry point that never should be called.
MaybeObject* Accessors::IllegalSetter(JSObject*, Object*, void*) {
MaybeObject* Accessors::IllegalSetter(Isolate* isolate,
JSObject*,
Object*,
void*) {
UNREACHABLE();
return NULL;
}
Object* Accessors::IllegalGetAccessor(Object* object, void*) {
Object* Accessors::IllegalGetAccessor(Isolate* isolate,
Object* object,
void*) {
UNREACHABLE();
return object;
}
MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
JSObject*,
Object* value,
void*) {
// According to ECMA-262, section 8.6.2.2, page 28, setting
// read-only properties must be silently ignored.
return value;
@@ -75,38 +83,41 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
//
MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
Object* object,
void*) {
// Traverse the prototype chain until we reach an array.
JSArray* holder = FindInstanceOf<JSArray>(Isolate::Current(), object);
JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
return holder == NULL ? Smi::FromInt(0) : holder->length();
}
// The helper function will 'flatten' Number objects.
Object* Accessors::FlattenNumber(Object* value) {
Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
ASSERT(Isolate::Current()->context()->native_context()->number_function()->
ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
Map* number_map = Isolate::Current()->context()->native_context()->
Map* number_map = isolate->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Isolate* isolate = object->GetIsolate();
MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
JSObject* object,
Object* value,
void*) {
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
return object->SetLocalPropertyIgnoreAttributes(
return object->SetLocalPropertyIgnoreAttributesTrampoline(
isolate->heap()->length_string(), value, NONE);
}
value = FlattenNumber(value);
value = FlattenNumber(isolate, value);
// Need to call methods that may trigger GC.
HandleScope scope(isolate);
@@ -116,9 +127,11 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Handle<Object> value_handle(value, isolate);
bool has_exception;
Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
Handle<Object> uint32_v =
Execution::ToUint32(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
Handle<Object> number_v =
Execution::ToNumber(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
@@ -142,7 +155,9 @@ const AccessorDescriptor Accessors::ArrayLength = {
//
MaybeObject* Accessors::StringGetLength(Object* object, void*) {
MaybeObject* Accessors::StringGetLength(Isolate* isolate,
Object* object,
void*) {
Object* value = object;
if (object->IsJSValue()) value = JSValue::cast(object)->value();
if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
@@ -164,7 +179,9 @@ const AccessorDescriptor Accessors::StringLength = {
//
MaybeObject* Accessors::ScriptGetSource(Object* object, void*) {
MaybeObject* Accessors::ScriptGetSource(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->source();
}
@@ -182,7 +199,9 @@ const AccessorDescriptor Accessors::ScriptSource = {
//
MaybeObject* Accessors::ScriptGetName(Object* object, void*) {
MaybeObject* Accessors::ScriptGetName(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->name();
}
@@ -200,7 +219,7 @@ const AccessorDescriptor Accessors::ScriptName = {
//
MaybeObject* Accessors::ScriptGetId(Object* object, void*) {
MaybeObject* Accessors::ScriptGetId(Isolate* isolate, Object* object, void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->id();
}
@@ -218,7 +237,9 @@ const AccessorDescriptor Accessors::ScriptId = {
//
MaybeObject* Accessors::ScriptGetLineOffset(Object* object, void*) {
MaybeObject* Accessors::ScriptGetLineOffset(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->line_offset();
}
@@ -236,7 +257,9 @@ const AccessorDescriptor Accessors::ScriptLineOffset = {
//
MaybeObject* Accessors::ScriptGetColumnOffset(Object* object, void*) {
MaybeObject* Accessors::ScriptGetColumnOffset(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->column_offset();
}
@@ -254,7 +277,9 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
//
MaybeObject* Accessors::ScriptGetData(Object* object, void*) {
MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->data();
}
@@ -272,7 +297,9 @@ const AccessorDescriptor Accessors::ScriptData = {
//
MaybeObject* Accessors::ScriptGetType(Object* object, void*) {
MaybeObject* Accessors::ScriptGetType(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->type();
}
@@ -290,7 +317,9 @@ const AccessorDescriptor Accessors::ScriptType = {
//
MaybeObject* Accessors::ScriptGetCompilationType(Object* object, void*) {
MaybeObject* Accessors::ScriptGetCompilationType(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Smi::FromInt(Script::cast(script)->compilation_type());
}
@@ -308,9 +337,10 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
//
MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
MaybeObject* Accessors::ScriptGetLineEnds(Isolate* isolate,
Object* object,
void*) {
JSValue* wrapper = JSValue::cast(object);
Isolate* isolate = wrapper->GetIsolate();
HandleScope scope(isolate);
Handle<Script> script(Script::cast(wrapper->value()), isolate);
InitScriptLineEnds(script);
@@ -337,7 +367,9 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
//
MaybeObject* Accessors::ScriptGetContextData(Object* object, void*) {
MaybeObject* Accessors::ScriptGetContextData(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->context_data();
}
@@ -355,7 +387,9 @@ const AccessorDescriptor Accessors::ScriptContextData = {
//
MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
MaybeObject* Accessors::ScriptGetEvalFromScript(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
Handle<SharedFunctionInfo> eval_from_shared(
@@ -366,7 +400,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
return *GetScriptWrapper(eval_from_script);
}
}
return HEAP->undefined_value();
return isolate->heap()->undefined_value();
}
@@ -382,9 +416,11 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = {
//
MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Isolate* isolate,
Object* object,
void*) {
Script* raw_script = Script::cast(JSValue::cast(object)->value());
HandleScope scope(raw_script->GetIsolate());
HandleScope scope(isolate);
Handle<Script> script(raw_script);
// If this is not a script compiled through eval there is no eval position.
@@ -413,7 +449,9 @@ const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
//
MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
Script::cast(script)->eval_from_shared()));
@@ -440,15 +478,30 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
//
Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
isolate, Accessors::FunctionGetPrototype(*object, 0), Object);
Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
CALL_HEAP_FUNCTION(function->GetIsolate(),
Accessors::FunctionGetPrototype(function->GetIsolate(),
*function,
NULL),
Object);
}
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
Isolate* isolate = Isolate::Current();
Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
ASSERT(function->should_have_prototype());
CALL_HEAP_FUNCTION(function->GetIsolate(),
Accessors::FunctionSetPrototype(function->GetIsolate(),
*function,
*prototype,
NULL),
Object);
}
MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
Object* object,
void*) {
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return isolate->heap()->undefined_value();
while (!function_raw->should_have_prototype()) {
@@ -469,18 +522,17 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
}
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
JSObject* object,
Object* value_raw,
void*) {
Isolate* isolate = object->GetIsolate();
Heap* heap = isolate->heap();
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return heap->undefined_value();
if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_string(),
value_raw,
NONE);
return object->SetLocalPropertyIgnoreAttributesTrampoline(
heap->prototype_string(), value_raw, NONE);
}
HandleScope scope(isolate);
@@ -523,8 +575,9 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
//
MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
Isolate* isolate = Isolate::Current();
MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
Object* object,
void*) {
JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return Smi::FromInt(0);
// Check if already compiled.
@@ -554,8 +607,9 @@ const AccessorDescriptor Accessors::FunctionLength = {
//
MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
Isolate* isolate = Isolate::Current();
MaybeObject* Accessors::FunctionGetName(Isolate* isolate,
Object* object,
void*) {
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
return holder == NULL
? isolate->heap()->undefined_value()
@@ -575,10 +629,12 @@ const AccessorDescriptor Accessors::FunctionName = {
//
Handle<Object> Accessors::FunctionGetArguments(Handle<Object> object) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
isolate, Accessors::FunctionGetArguments(*object, 0), Object);
Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
CALL_HEAP_FUNCTION(function->GetIsolate(),
Accessors::FunctionGetArguments(function->GetIsolate(),
*function,
NULL),
Object);
}
@@ -609,8 +665,9 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
}
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
Isolate* isolate = Isolate::Current();
MaybeObject* Accessors::FunctionGetArguments(Isolate* isolate,
Object* object,
void*) {
HandleScope scope(isolate);
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
@@ -732,8 +789,9 @@ class FrameFunctionIterator {
};
MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
Isolate* isolate = Isolate::Current();
MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
Object* object,
void*) {
HandleScope scope(isolate);
DisallowHeapAllocation no_allocation;
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
@@ -839,15 +897,16 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
Handle<String> name,
int index,
PropertyAttributes attributes) {
Factory* factory = name->GetIsolate()->factory();
Isolate* isolate = name->GetIsolate();
Factory* factory = isolate->factory();
Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(true);
info->set_all_can_write(true);
info->set_name(*name);
info->set_data(Smi::FromInt(index));
Handle<Object> getter = v8::FromCData(&ModuleGetExport);
Handle<Object> setter = v8::FromCData(&ModuleSetExport);
Handle<Object> getter = v8::FromCData(isolate, &ModuleGetExport);
Handle<Object> setter = v8::FromCData(isolate, &ModuleSetExport);
info->set_getter(*getter);
if (!(attributes & ReadOnly)) info->set_setter(*setter);
return info;

deps/v8/src/accessors.h (98 lines changed)

@@ -77,12 +77,10 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
static Handle<Object> FunctionGetPrototype(Handle<Object> object);
static Handle<Object> FunctionGetArguments(Handle<Object> object);
MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
Object* value,
void*);
static Handle<Object> FunctionSetPrototype(Handle<JSFunction> object,
Handle<Object> value);
static Handle<Object> FunctionGetPrototype(Handle<JSFunction> object);
static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
@@ -90,34 +88,70 @@ class Accessors : public AllStatic {
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionGetPrototype(Object* object, void*);
static MaybeObject* FunctionGetLength(Object* object, void*);
static MaybeObject* FunctionGetName(Object* object, void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
static MaybeObject* FunctionGetCaller(Object* object, void*);
MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
Object* value, void*);
static MaybeObject* ArrayGetLength(Object* object, void*);
static MaybeObject* StringGetLength(Object* object, void*);
static MaybeObject* ScriptGetName(Object* object, void*);
static MaybeObject* ScriptGetId(Object* object, void*);
static MaybeObject* ScriptGetSource(Object* object, void*);
static MaybeObject* ScriptGetLineOffset(Object* object, void*);
static MaybeObject* ScriptGetColumnOffset(Object* object, void*);
static MaybeObject* ScriptGetData(Object* object, void*);
static MaybeObject* ScriptGetType(Object* object, void*);
static MaybeObject* ScriptGetCompilationType(Object* object, void*);
static MaybeObject* ScriptGetLineEnds(Object* object, void*);
static MaybeObject* ScriptGetContextData(Object* object, void*);
static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
static MaybeObject* FunctionSetPrototype(Isolate* isolate,
JSObject* object,
Object*,
void*);
static MaybeObject* FunctionGetPrototype(Isolate* isolate,
Object* object,
void*);
static MaybeObject* FunctionGetLength(Isolate* isolate,
Object* object,
void*);
static MaybeObject* FunctionGetName(Isolate* isolate, Object* object, void*);
static MaybeObject* FunctionGetArguments(Isolate* isolate,
Object* object,
void*);
static MaybeObject* FunctionGetCaller(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ArraySetLength(Isolate* isolate,
JSObject* object,
Object*,
void*);
static MaybeObject* ArrayGetLength(Isolate* isolate, Object* object, void*);
static MaybeObject* StringGetLength(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetName(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetId(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetSource(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetLineOffset(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetLineEnds(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetContextData(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetEvalFromScript(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetEvalFromScriptPosition(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetEvalFromFunctionName(Isolate* isolate,
Object* object,
void*);
// Helper functions.
static Object* FlattenNumber(Object* value);
static MaybeObject* IllegalSetter(JSObject*, Object*, void*);
static Object* IllegalGetAccessor(Object* object, void*);
static MaybeObject* ReadOnlySetAccessor(JSObject*, Object* value, void*);
static Object* FlattenNumber(Isolate* isolate, Object* value);
static MaybeObject* IllegalSetter(Isolate* isolate,
JSObject*,
Object*,
void*);
static Object* IllegalGetAccessor(Isolate* isolate, Object* object, void*);
static MaybeObject* ReadOnlySetAccessor(Isolate* isolate,
JSObject*,
Object* value,
void*);
};
} } // namespace v8::internal
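
The pattern running through accessors.cc/h: every callback gains a leading Isolate* so it stops rediscovering its isolate via Isolate::Current(), a thread-local lookup. A generic before/after sketch with stand-in types (not the real v8::internal classes):

struct Isolate;
struct Object;

Isolate* CurrentIsolate();  // stand-in for the TLS-based Isolate::Current()

// Before: the callback pays for a hidden thread-local read on every call.
Object* GetOld(Object* object, void*) {
  Isolate* isolate = CurrentIsolate();
  // ... use isolate ...
  return object;
}

// After: the caller, which already knows the isolate, passes it down.
Object* GetNew(Isolate* isolate, Object* object, void*) {
  // ... use isolate directly ...
  return object;
}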

deps/v8/src/api.cc (951 lines changed)

File diff suppressed because it is too large

deps/v8/src/api.h (12 lines changed)

@@ -125,8 +125,8 @@ template <typename T> inline T ToCData(v8::internal::Object* obj) {
template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
inline v8::internal::Handle<v8::internal::Object> FromCData(
v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
@@ -690,19 +690,11 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
// Interceptor functions called from generated inline caches to notify
// CPU profiler that external callbacks are invoked.
v8::Handle<v8::Value> InvokeAccessorGetter(
v8::Local<v8::String> property,
const v8::AccessorInfo& info,
v8::AccessorGetter getter);
void InvokeAccessorGetterCallback(
v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
v8::AccessorGetterCallback getter);
v8::Handle<v8::Value> InvokeInvocationCallback(const v8::Arguments& args,
v8::InvocationCallback callback);
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback);
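
These helpers now wrap only the new-style, void-returning callbacks; results travel through GetReturnValue() rather than a returned handle. A sketch of the embedder-facing signatures in the 3.21 API (names and values invented):

#include <v8.h>

// Replaces v8::AccessorGetter.
void VersionGetter(v8::Local<v8::String> property,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(v8::String::New("3.21.18.3"));
}

// Replaces v8::InvocationCallback.
void ArgCount(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(info.Length());
}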

deps/v8/src/apinatives.js (65 lines changed)

@@ -74,25 +74,31 @@ function InstantiateFunction(data, name) {
cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
cache[serialNumber] = fun;
var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
var flags = %GetTemplateField(data, kApiFlagOffset);
// Note: Do not directly use an object template as a condition, our
// internal ToBoolean doesn't handle that!
fun.prototype = typeof prototype === 'undefined' ?
{} : Instantiate(prototype);
if (flags & (1 << kReadOnlyPrototypeBit)) {
%FunctionSetReadOnlyPrototype(fun);
}
%SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
var parent = %GetTemplateField(data, kApiParentTemplateOffset);
// Note: Do not directly use a function template as a condition, our
// internal ToBoolean doesn't handle that!
if (!(typeof parent === 'undefined')) {
var parent_fun = Instantiate(parent);
%SetPrototype(fun.prototype, parent_fun.prototype);
var doNotCache = flags & (1 << kDoNotCacheBit);
if (!doNotCache) cache[serialNumber] = fun;
if (flags & (1 << kRemovePrototypeBit)) {
%FunctionRemovePrototype(fun);
} else {
var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
// Note: Do not directly use an object template as a condition, our
// internal ToBoolean doesn't handle that!
fun.prototype = typeof prototype === 'undefined' ?
{} : Instantiate(prototype);
if (flags & (1 << kReadOnlyPrototypeBit)) {
%FunctionSetReadOnlyPrototype(fun);
}
%SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
var parent = %GetTemplateField(data, kApiParentTemplateOffset);
// Note: Do not directly use a function template as a condition, our
// internal ToBoolean doesn't handle that!
if (!(typeof parent === 'undefined')) {
var parent_fun = Instantiate(parent);
%SetPrototype(fun.prototype, parent_fun.prototype);
}
}
ConfigureTemplateInstance(fun, data);
if (doNotCache) return fun;
} catch (e) {
cache[serialNumber] = kUninitialized;
throw e;
@@ -104,19 +110,32 @@ function ConfigureTemplateInstance(obj, data) {
function ConfigureTemplateInstance(obj, data) {
var properties = %GetTemplateField(data, kApiPropertyListOffset);
if (properties) {
// Disable access checks while instantiating the object.
var requires_access_checks = %DisableAccessChecks(obj);
try {
for (var i = 0; i < properties[0]; i += 3) {
if (!properties) return;
// Disable access checks while instantiating the object.
var requires_access_checks = %DisableAccessChecks(obj);
try {
for (var i = 1; i < properties[0];) {
var length = properties[i];
if (length == 3) {
var name = properties[i + 1];
var prop_data = properties[i + 2];
var attributes = properties[i + 3];
var value = Instantiate(prop_data, name);
%SetProperty(obj, name, value, attributes);
} else if (length == 5) {
var name = properties[i + 1];
var getter = properties[i + 2];
var setter = properties[i + 3];
var attribute = properties[i + 4];
var access_control = properties[i + 5];
%SetAccessorProperty(
obj, name, getter, setter, attribute, access_control);
} else {
throw "Bad properties array";
}
} finally {
if (requires_access_checks) %EnableAccessChecks(obj);
i += length + 1;
}
} finally {
if (requires_access_checks) %EnableAccessChecks(obj);
}
}
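
The rewritten loop reads a length-prefixed records array: properties[0] holds the payload length, and each record begins with its own field count, 3 for a plain property (name, value template, attributes) and 5 for an accessor pair (name, getter, setter, attribute, access control). A C++ rendering of the same walk, with integers standing in for the actual template values:

#include <cstdio>
#include <vector>

void WalkProperties(const std::vector<int>& properties) {
  for (int i = 1; i < properties[0];) {
    int length = properties[i];
    if (length == 3) {
      // name, value template, attributes
      std::printf("data property: name=%d attrs=%d\n",
                  properties[i + 1], properties[i + 3]);
    } else if (length == 5) {
      // name, getter, setter, attribute, access control
      std::printf("accessor property: name=%d access=%d\n",
                  properties[i + 1], properties[i + 5]);
    } else {
      std::printf("bad properties array\n");
      return;
    }
    i += length + 1;  // advance past the length slot plus the record
  }
}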

deps/v8/src/arguments.cc (138 lines changed)

@@ -34,49 +34,6 @@ namespace v8 {
namespace internal {
static bool Match(void* a, void* b) {
return a == b;
}
static uint32_t Hash(void* function) {
uintptr_t as_int = reinterpret_cast<uintptr_t>(function);
if (sizeof(function) == 4) return static_cast<uint32_t>(as_int);
uint64_t as_64 = static_cast<uint64_t>(as_int);
return
static_cast<uint32_t>(as_64 >> 32) ^
static_cast<uint32_t>(as_64);
}
CallbackTable::CallbackTable(): map_(Match, 64) {}
bool CallbackTable::Contains(void* function) {
ASSERT(function != NULL);
return map_.Lookup(function, Hash(function), false) != NULL;
}
void CallbackTable::InsertCallback(Isolate* isolate,
void* function,
bool returns_void) {
if (function == NULL) return;
// Don't store for performance.
if (kStoreVoidFunctions != returns_void) return;
CallbackTable* table = isolate->callback_table();
if (table == NULL) {
table = new CallbackTable();
isolate->set_callback_table(table);
}
typedef HashMap::Entry Entry;
Entry* entry = table->map_.Lookup(function, Hash(function), true);
ASSERT(entry != NULL);
ASSERT(entry->value == NULL || entry->value == function);
entry->value = function;
}
template<typename T>
template<typename V>
v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
@ -88,110 +45,67 @@ v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
}
v8::Handle<v8::Value> FunctionCallbackArguments::Call(InvocationCallback f) {
v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
void* f_as_void = CallbackTable::FunctionToVoidPtr(f);
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
if (new_style) {
FunctionCallback c = reinterpret_cast<FunctionCallback>(f);
FunctionCallbackInfo<v8::Value> info(end(),
argv_,
argc_,
is_construct_call_);
c(info);
} else {
v8::Arguments args(end(),
argv_,
argc_,
is_construct_call_);
v8::Handle<v8::Value> return_value = f(args);
if (!return_value.IsEmpty()) return return_value;
}
FunctionCallbackInfo<v8::Value> info(end(),
argv_,
argc_,
is_construct_call_);
f(info);
return GetReturnValue<v8::Value>(isolate);
}
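Under the new API a callback is void and reports its result through the ReturnValue slot that GetReturnValue<T>() reads back afterwards. EchoFirstArg below is a hypothetical example of the new-style shape, not code from this patch:

#include "v8.h"

// Hypothetical new-style callback: void return; the result travels through
// info.GetReturnValue() instead of a returned Handle.
static void EchoFirstArg(const v8::FunctionCallbackInfo<v8::Value>& info) {
  if (info.Length() > 0) {
    info.GetReturnValue().Set(info[0]);  // later read by GetReturnValue<T>()
  }
  // Falling through leaves the default return value (undefined) untouched.
}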
#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f) { \
#define WRITE_CALL_0(Function, ReturnValue) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
c(info); \
} else { \
v8::AccessorInfo info(end()); \
v8::Handle<ReturnValue> return_value = f(info); \
if (!return_value.IsEmpty()) return return_value; \
} \
PropertyCallbackInfo<ReturnValue> info(end()); \
f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1) { \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
c(arg1, info); \
} else { \
v8::AccessorInfo info(end()); \
v8::Handle<ReturnValue> return_value = f(arg1, info); \
if (!return_value.IsEmpty()) return return_value; \
} \
PropertyCallbackInfo<ReturnValue> info(end()); \
f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1, \
Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
c(arg1, arg2, info); \
} else { \
v8::AccessorInfo info(end()); \
v8::Handle<ReturnValue> return_value = f(arg1, arg2, info); \
if (!return_value.IsEmpty()) return return_value; \
} \
PropertyCallbackInfo<ReturnValue> info(end()); \
f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
void PropertyCallbackArguments::Call(OldFunction f, \
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
void PropertyCallbackArguments::Call(Function f, \
Arg1 arg1, \
Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
c(arg1, arg2, info); \
} else { \
v8::AccessorInfo info(end()); \
f(arg1, arg2, info); \
} \
PropertyCallbackInfo<ReturnValue> info(end()); \
f(arg1, arg2, info); \
}
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
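Each F(...) row in the mapping lists (defined in arguments.h below) is expanded by one of these macros. For the AccessorGetterCallback row of FOR_EACH_CALLBACK_TABLE_MAPPING_1, WRITE_CALL_1 expands to roughly the following (whitespace adjusted):

v8::Handle<v8::Value> PropertyCallbackArguments::Call(
    AccessorGetterCallback f, v8::Local<v8::String> arg1) {
  Isolate* isolate = this->isolate();
  VMState<EXTERNAL> state(isolate);
  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
  PropertyCallbackInfo<v8::Value> info(end());
  f(arg1, info);
  return GetReturnValue<v8::Value>(isolate);
}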

118
deps/v8/src/arguments.h

@ -83,116 +83,49 @@ class Arguments BASE_EMBEDDED {
};
// mappings from old property callbacks to new ones
// F(old name, new name, return value, parameters...)
//
// For each type of callback, we have a list of arguments
// They are used to generate the Call() functions below
// These aren't included in the list as they have duplicate signatures
// F(NamedPropertyEnumerator, NamedPropertyEnumeratorCallback, ...)
// F(NamedPropertyGetter, NamedPropertyGetterCallback, ...)
// F(NamedPropertyEnumeratorCallback, ...)
// F(NamedPropertyGetterCallback, ...)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback, v8::Array) \
F(IndexedPropertyEnumeratorCallback, v8::Array) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
F(AccessorGetter, AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
F(NamedPropertyQuery, \
NamedPropertyQueryCallback, \
F(AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
F(NamedPropertyQueryCallback, \
v8::Integer, \
v8::Local<v8::String>) \
F(NamedPropertyDeleter, \
NamedPropertyDeleterCallback, \
F(NamedPropertyDeleterCallback, \
v8::Boolean, \
v8::Local<v8::String>) \
F(IndexedPropertyGetter, \
IndexedPropertyGetterCallback, \
F(IndexedPropertyGetterCallback, \
v8::Value, \
uint32_t) \
F(IndexedPropertyQuery, \
IndexedPropertyQueryCallback, \
F(IndexedPropertyQueryCallback, \
v8::Integer, \
uint32_t) \
F(IndexedPropertyDeleter, \
IndexedPropertyDeleterCallback, \
F(IndexedPropertyDeleterCallback, \
v8::Boolean, \
uint32_t) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
F(NamedPropertySetter, \
NamedPropertySetterCallback, \
F(NamedPropertySetterCallback, \
v8::Value, \
v8::Local<v8::String>, \
v8::Local<v8::Value>) \
F(IndexedPropertySetter, \
IndexedPropertySetterCallback, \
F(IndexedPropertySetterCallback, \
v8::Value, \
uint32_t, \
v8::Local<v8::Value>) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
F(AccessorSetter, \
AccessorSetterCallback, \
F(AccessorSetterCallback, \
void, \
v8::Local<v8::String>, \
v8::Local<v8::Value>) \
// All property callbacks as well as invocation callbacks
#define FOR_EACH_CALLBACK_TABLE_MAPPING(F) \
F(InvocationCallback, FunctionCallback) \
F(AccessorGetter, AccessorGetterCallback) \
F(AccessorSetter, AccessorSetterCallback) \
F(NamedPropertySetter, NamedPropertySetterCallback) \
F(NamedPropertyQuery, NamedPropertyQueryCallback) \
F(NamedPropertyDeleter, NamedPropertyDeleterCallback) \
F(IndexedPropertyGetter, IndexedPropertyGetterCallback) \
F(IndexedPropertySetter, IndexedPropertySetterCallback) \
F(IndexedPropertyQuery, IndexedPropertyQueryCallback) \
F(IndexedPropertyDeleter, IndexedPropertyDeleterCallback) \
F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback) \
// TODO(dcarney): Remove this class when old callbacks are gone.
class CallbackTable {
public:
static const bool kStoreVoidFunctions = false;
static inline bool ReturnsVoid(Isolate* isolate, void* function) {
CallbackTable* table = isolate->callback_table();
bool contains =
table != NULL &&
table->map_.occupancy() != 0 &&
table->Contains(function);
return contains == kStoreVoidFunctions;
}
STATIC_ASSERT(sizeof(intptr_t) == sizeof(AccessorGetterCallback));
template<typename F>
static inline void* FunctionToVoidPtr(F function) {
return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(function));
}
#define WRITE_REGISTER(OldFunction, NewFunction) \
static NewFunction Register(Isolate* isolate, OldFunction f) { \
InsertCallback(isolate, FunctionToVoidPtr(f), false); \
return reinterpret_cast<NewFunction>(f); \
} \
\
static NewFunction Register(Isolate* isolate, NewFunction f) { \
InsertCallback(isolate, FunctionToVoidPtr(f), true); \
return f; \
}
FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER)
#undef WRITE_REGISTER
private:
CallbackTable();
bool Contains(void* function);
static void InsertCallback(Isolate* isolate,
void* function,
bool returns_void);
HashMap map_;
DISALLOW_COPY_AND_ASSIGN(CallbackTable);
};
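The kStoreVoidFunctions test reads as a double negative; distilled into a sketch (ReturnsVoidSketch is ours, not V8's):

// With kStoreVoidFunctions == false, the deleted table held only old-style,
// Handle-returning callbacks, so membership meant "does NOT return void".
bool ReturnsVoidSketch(bool contained_in_table) {
  const bool kStoreVoidFunctions = false;
  return contained_in_table == kStoreVoidFunctions;  // i.e. !contained_in_table
}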
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
@ -218,7 +151,6 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
// TODO(dcarney): create a new zap value for this.
this->end()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
@ -243,6 +175,10 @@ class PropertyCallbackArguments
static const int kArgsLength = T::kArgsLength;
static const int kThisIndex = T::kThisIndex;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
PropertyCallbackArguments(Isolate* isolate,
Object* data,
@ -271,17 +207,17 @@ class PropertyCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
v8::Handle<ReturnValue> Call(OldFunction f); \
#define WRITE_CALL_0(Function, ReturnValue) \
v8::Handle<ReturnValue> Call(Function f); \
#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1); \
#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
v8::Handle<ReturnValue> Call(Function f, Arg1 arg1); \
#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
v8::Handle<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2); \
#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
void Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
void Call(Function f, Arg1 arg1, Arg2 arg2); \
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
@ -336,7 +272,7 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
v8::Handle<v8::Value> Call(InvocationCallback f);
v8::Handle<v8::Value> Call(FunctionCallback f);
private:
internal::Object** argv_;

5
deps/v8/src/arm/assembler-arm-inl.h

@ -279,7 +279,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
void RelocInfo::Visit(ObjectVisitor* visitor) {
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@ -292,12 +292,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {

162
deps/v8/src/arm/assembler-arm.cc

@ -39,6 +39,7 @@
#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#include "macro-assembler.h"
#include "serialize.h"
namespace v8 {
@ -152,7 +153,8 @@ void CpuFeatures::Probe() {
#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) {
CPU cpu;
if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
@ -161,38 +163,40 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << ARMv7;
}
if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) {
if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
found_by_runtime_probing_only_ |= 1u << NEON;
}
if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) {
if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
}
if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
&& OS::ArmCpuHasFeature(ARMv7)) {
&& cpu.architecture() >= 7) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
CpuImplementer implementer = OS::GetCpuImplementer();
if (implementer == QUALCOMM_IMPLEMENTER &&
FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
// Use movw/movt for QUALCOMM ARMv7 cores.
if (cpu.implementer() == CPU::QUALCOMM &&
cpu.architecture() >= 7 &&
FLAG_enable_movw_movt) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
CpuPart part = OS::GetCpuPart(implementer);
if ((part == CORTEX_A9) || (part == CORTEX_A5)) {
// ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
if (cpu.implementer() == CPU::ARM &&
(cpu.part() == CPU::ARM_CORTEX_A5 ||
cpu.part() == CPU::ARM_CORTEX_A9)) {
cache_line_size_ = 32;
}
if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
&& OS::ArmCpuHasFeature(VFP32DREGS)) {
if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
}
@ -321,15 +325,12 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// See assembler-arm-inl.h for inlined constructors
Operand::Operand(Handle<Object> handle) {
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@ -775,9 +776,9 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
if ((instr & ~kImm24Mask) == 0) {
// Emitted label constant, not part of a branch.
return instr - (Code::kHeaderSize - kHeapObjectTag);
if (is_uint24(instr)) {
// Emitted link to a label, not part of a branch.
return instr;
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
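The ((instr & kImm24Mask) << 8) >> 6 idiom sign-extends the 24-bit branch field and converts it from words to bytes in one step; a standalone sketch:

#include <cstdint>

// Shifting the 24-bit field up by 8 moves its sign bit into bit 31; the
// arithmetic shift down by 6 then sign-extends while multiplying by 4
// (word offset -> byte offset). Assumes two's complement, as the assembler
// itself does.
int32_t DecodeImm26(uint32_t instr) {
  const uint32_t kImm24Mask = (1u << 24) - 1;
  return static_cast<int32_t>((instr & kImm24Mask) << 8) >> 6;
}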
@ -792,11 +793,72 @@ int Assembler::target_at(int pos) {
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
if ((instr & ~kImm24Mask) == 0) {
if (is_uint24(instr)) {
ASSERT(target_pos == pos || target_pos >= 0);
// Emitted label constant, not part of a branch.
// Make label relative to Code* of generated Code object.
instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
// Emitted link to a label, not part of a branch.
// Load the position of the label relative to the generated code object
// pointer in a register.
// Here are the instructions we need to emit:
// For ARMv7: target24 => target16_1:target16_0
// movw dst, #target16_0
// movt dst, #target16_1
// For ARMv6: target24 => target8_2:target8_1:target8_0
// mov dst, #target8_0
// orr dst, dst, #target8_1 << 8
// orr dst, dst, #target8_2 << 16
// We extract the destination register from the emitted nop instruction.
Register dst = Register::from_code(
Instruction::RmValue(instr_at(pos + kInstrSize)));
ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
ASSERT(is_uint24(target24));
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
1,
CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target24));
} else {
uint16_t target16_0 = target24 & kImm16Mask;
uint16_t target16_1 = target24 >> 16;
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
1,
CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
} else {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
2,
CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
patcher.masm()->movt(dst, target16_1);
}
} else {
// Patch with a sequence of mov/orr/orr instructions.
uint8_t target8_0 = target16_0 & kImm8Mask;
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
2,
CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
} else {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
3,
CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
}
}
}
return;
}
int imm26 = target_pos - (pos + kPcLoadDelta);
@ -1229,21 +1291,6 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
ASSERT(!L->is_bound());
if (L->is_linked()) {
// Point to previous instruction that uses the link.
target_pos = L->pos();
} else {
// First entry of the link chain points to itself.
target_pos = at_offset;
}
L->link_to(at_offset);
instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
}
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
@ -1386,6 +1433,45 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
}
void Assembler::mov_label_offset(Register dst, Label* label) {
if (label->is_bound()) {
mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
} else {
// Emit the link to the label in the code stream followed by extra nop
// instructions.
// If the label is not linked, then start a new link chain by linking it to
// itself, emitting pc_offset().
int link = label->is_linked() ? label->pos() : pc_offset();
label->link_to(pc_offset());
// When the label is bound, these instructions will be patched with a
// sequence of movw/movt or mov/orr/orr instructions. They will load the
// destination register with the position of the label from the beginning
// of the code.
//
// The link will be extracted from the first instruction and the destination
// register from the second.
// For ARMv7:
// link
// mov dst, dst
// For ARMv6:
// link
// mov dst, dst
// mov dst, dst
//
// When the label gets bound: target_at extracts the link and target_at_put
// patches the instructions.
ASSERT(is_uint24(link));
BlockConstPoolScope block_const_pool(this);
emit(link);
nop(dst.code());
if (!CpuFeatures::IsSupported(ARMv7)) {
nop(dst.code());
}
}
}
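When the label is bound, target_at_put splits the 24-bit position exactly as the comments above describe; a minimal sketch of the split, using the same masks as the kImm16Mask/kImm8Mask constants added in constants-arm.h:

#include <cstdint>

// Pieces target_at_put patches in: movw/movt halves on ARMv7, or three
// byte-sized chunks for the ARMv6 mov/orr/orr sequence.
struct Target24Split {
  uint16_t low16;   // movw operand: target24 & kImm16Mask
  uint16_t high16;  // movt operand: target24 >> 16
  uint8_t byte0;    // mov operand
  uint8_t byte1;    // first orr operand (emitted shifted left by 8)
  uint8_t byte2;    // second orr operand (emitted shifted left by 16)
};

Target24Split SplitTarget24(uint32_t target24) {
  Target24Split s;
  s.low16 = static_cast<uint16_t>(target24 & 0xFFFF);
  s.high16 = static_cast<uint16_t>(target24 >> 16);
  s.byte0 = static_cast<uint8_t>(s.low16 & 0xFF);
  s.byte1 = static_cast<uint8_t>(s.low16 >> 8);
  s.byte2 = static_cast<uint8_t>(s.high16 & 0xFF);
  return s;
}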
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
ASSERT(immediate < 0x10000);
// May use movw if supported, but on unsupported platforms will try to use

9
deps/v8/src/arm/assembler-arm.h

@ -748,10 +748,6 @@ class Assembler : public AssemblerBase {
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
@ -903,6 +899,10 @@ class Assembler : public AssemblerBase {
mov(dst, Operand(src), s, cond);
}
// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);
// ARMv7 instructions for loading a 32 bit immediate in two instructions.
// This may actually emit a different mov instruction, but on an ARMv7 it
// is guaranteed to only emit one instruction.
@ -1561,7 +1561,6 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(double data);
void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;

182
deps/v8/src/arm/builtins-arm.cc

@ -291,68 +291,55 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push call kind information.
__ push(r5);
// Function is also the parameter to the runtime call.
__ push(r1);
__ CallRuntime(function_id, 1);
// Restore call kind information.
__ pop(r5);
// Restore receiver.
__ pop(r1);
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mov(pc, r2);
__ Jump(r2);
}
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// the stack limit as a cue for an interrupt signal.
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
// Tail call to returned code.
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r0);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve the function.
__ push(r1);
// Push call kind information.
__ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kInstallRecompiledCode, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore call kind information.
__ pop(r5);
// Restore saved function.
__ pop(r1);
// Tear down internal frame.
}
// Do a tail-call of the compiled function.
__ Jump(r2);
}
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push call kind information.
__ push(r5);
__ push(r1); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kParallelRecompile, 1);
// Restore call kind information.
__ pop(r5);
// Restore receiver.
__ pop(r1);
// Tear down internal frame.
}
void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@ -795,59 +782,17 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve the function.
__ push(r1);
// Push call kind information.
__ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyCompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore call kind information.
__ pop(r5);
// Restore saved function.
__ pop(r1);
// Tear down internal frame.
}
CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve the function.
__ push(r1);
// Push call kind information.
__ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyRecompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore call kind information.
__ pop(r5);
// Restore saved function.
__ pop(r1);
// Tear down internal frame.
}
CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
@ -966,31 +911,48 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Lookup and calculate pc offset.
__ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sub(r1, r1, r2);
__ SmiTag(r1);
// Pass both function and pc offset as arguments.
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ push(r1);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
// If the code object is null, just return to the unoptimized code.
Label skip;
__ cmp(r0, Operand(Smi::FromInt(-1)));
__ cmp(r0, Operand(Smi::FromInt(0)));
__ b(ne, &skip);
__ Ret();
__ bind(&skip);
// Untag the AST id and push it on the stack.
__ SmiUntag(r0);
__ push(r0);
// Generate the code for doing the frame-to-frame translation using
// the deoptimizer infrastructure.
Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
generator.Generate();
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ add(r0, r0, Operand::SmiUntag(r1));
__ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// And "return" to the OSR entry point of the function.
__ Ret();
}
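In plain arithmetic, the tail of this builtin computes the following (an illustrative sketch; the parameter names are ours):

#include <stdint.h>

// entry = code_object + osr_pc_offset + Code::kHeaderSize - kHeapObjectTag;
// the builtin then "returns" there by loading the result into lr.
uintptr_t OsrEntry(uintptr_t tagged_code_object, intptr_t osr_pc_offset,
                   intptr_t header_size, intptr_t heap_object_tag) {
  return tagged_code_object + osr_pc_offset + header_size - heap_object_tag;
}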

529
deps/v8/src/arm/code-stubs-arm.cc

@ -38,6 +38,17 @@ namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r2 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@ -309,134 +320,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
Counters* counters = masm->isolate()->counters();
Label gc;
// Pop the function info from the stack.
__ pop(r3);
// Attempt to allocate new JSFunction in new space.
__ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
__ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
__ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
// But first check if there is an optimized version for our context.
Label check_optimized;
Label install_unoptimized;
if (FLAG_cache_optimized_code) {
__ ldr(r1,
FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ tst(r1, r1);
__ b(ne, &check_optimized);
}
__ bind(&install_unoptimized);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
// Return result. The argument function info has been popped already.
__ Ret();
__ bind(&check_optimized);
__ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
// r2 holds native context, r1 points to fixed array of 3-element entries
// (native context, optimized code, literals).
// The optimized code map must never be empty, so check the first elements.
Label install_optimized;
// Speculatively move code object into r4.
__ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
__ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
__ cmp(r2, r5);
__ b(eq, &install_optimized);
// Iterate through the rest of map backwards. r4 holds an index as a Smi.
Label loop;
__ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
__ bind(&loop);
// Do not double check first entry.
__ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
__ b(eq, &install_unoptimized);
__ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
__ ldr(r5, MemOperand(r5));
__ cmp(r2, r5);
__ b(ne, &loop);
// Hit: fetch the optimized code.
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
__ add(r5, r5, Operand(kPointerSize));
__ ldr(r4, MemOperand(r5));
__ bind(&install_optimized);
__ IncrementCounter(counters->fast_new_closure_install_optimized(),
1, r6, r7);
// TODO(fschneider): Idea: store proper code pointers in the map and either
// unmangle them on marking or do nothing as the whole map is discarded on
// major GC anyway.
__ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
// Now link a function into a list of optimized functions.
__ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// No need for a write barrier as the JSFunction (r0) is in new space.
__ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
// Copy the JSFunction (r0) into r4 before issuing the write barrier, as
// it clobbers all the registers passed.
__ mov(r4, r0);
__ RecordWriteContextSlot(
r2,
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
r4,
r1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
// Return result. The argument function info has been popped already.
__ Ret();
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ LoadRoot(r4, Heap::kFalseValueRootIndex);
__ Push(cp, r3, r4);
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@ -634,7 +517,112 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
bool WriteInt32ToHeapNumberStub::IsPregenerated() {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
int double_offset = offset();
// Account for saved regs if input is sp.
if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
// Immediate values for this stub fit in instructions, so it's safe to use ip.
Register scratch = ip;
Register scratch_low =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch_high =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
LowDwVfpRegister double_scratch = kScratchDoubleReg;
__ Push(scratch_high, scratch_low);
if (!skip_fastpath()) {
// Load double input.
__ vldr(double_scratch, MemOperand(input_reg, double_offset));
__ vmov(scratch_low, scratch_high, double_scratch);
// Do fast-path convert from double to int.
__ vcvt_s32_f64(double_scratch.low(), double_scratch);
__ vmov(result_reg, double_scratch.low());
// If result is not saturated (0x7fffffff or 0x80000000), we are done.
__ sub(scratch, result_reg, Operand(1));
__ cmp(scratch, Operand(0x7ffffffe));
__ b(lt, &done);
} else {
// We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
// know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
if (double_offset == 0) {
__ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
} else {
__ ldr(scratch_low, MemOperand(input_reg, double_offset));
__ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
}
}
__ Ubfx(scratch, scratch_high,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Load scratch with exponent - 1. This is faster than loading
// with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
__ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
// If exponent is greater than or equal to 84, the 32 less significant
// bits are all 0s (84 = 52 significant mantissa bits + 32 uncoded low bits),
// so the result is 0.
// Compare exponent with 84 (compare exponent - 1 with 83).
__ cmp(scratch, Operand(83));
__ b(ge, &out_of_range);
// If we reach this code, 31 <= exponent <= 83.
// So, we don't have to handle cases where 0 <= exponent <= 20 for
// which we would need to shift right the high part of the mantissa.
// Scratch contains exponent - 1.
// Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
__ rsb(scratch, scratch, Operand(51), SetCC);
__ b(ls, &only_low);
// 21 <= exponent <= 51, shift scratch_low and scratch_high
// to generate the result.
__ mov(scratch_low, Operand(scratch_low, LSR, scratch));
// Scratch contains: 52 - exponent.
// We need: exponent - 20.
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
__ rsb(scratch, scratch, Operand(32));
__ Ubfx(result_reg, scratch_high,
0, HeapNumber::kMantissaBitsInTopWord);
// Set the implicit 1 before the mantissa part in scratch_high.
__ orr(result_reg, result_reg,
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
__ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
__ b(&negate);
__ bind(&out_of_range);
__ mov(result_reg, Operand::Zero());
__ b(&done);
__ bind(&only_low);
// 52 <= exponent <= 83, shift only scratch_low.
// On entry, scratch contains: 52 - exponent.
__ rsb(scratch, scratch, Operand::Zero());
__ mov(result_reg, Operand(scratch_low, LSL, scratch));
__ bind(&negate);
// If input was positive, scratch_high ASR 31 equals 0 and
// scratch_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
// scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
// New result = (result eor 0xffffffff) + 1 = 0 - result.
__ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
__ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
__ bind(&done);
__ Pop(scratch_high, scratch_low);
__ Ret();
}
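The slow path above implements double-to-int32 truncation from the raw IEEE-754 words alone. A self-contained C++ sketch of the same algorithm (a hypothetical helper, valid only for the exponent > 31 inputs that reach this path):

#include <cstdint>
#include <cstring>

// Hypothetical mirror of the stub's slow path: truncate a double to int32
// using only integer operations, assuming the unbiased exponent is known to
// exceed 31 (smaller exponents take the vcvt fast path).
int32_t TruncateLargeDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  uint32_t low = static_cast<uint32_t>(bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);

  int exponent = static_cast<int>((high >> 20) & 0x7FF) - 1023;
  if (exponent >= 84) return 0;  // 52 mantissa bits + 32 zeroed low bits

  uint32_t result;
  if (exponent <= 51) {
    // 31 < exponent <= 51: combine the mantissa's top word (with the
    // implicit leading 1) and the surviving bits of the low word.
    uint32_t mantissa_high = (high & 0xFFFFF) | (1u << 20);
    result = (mantissa_high << (exponent - 20)) | (low >> (52 - exponent));
  } else {
    // 52 <= exponent <= 83: only the low word contributes.
    result = low << (exponent - 52);
  }

  // Two's-complement negation via eor/add, as in the stub: the mask is all
  // ones when the sign bit is set, zero otherwise.
  uint32_t sign = high >> 31;
  uint32_t mask = 0u - sign;
  return static_cast<int32_t>((result ^ mask) + sign);
}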
bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
return true;
@ -1591,7 +1579,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
Register right = r0;
Register scratch1 = r6;
Register scratch2 = r7;
Register scratch3 = r4;
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands) {
@ -1689,12 +1676,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ SmiUntag(r2, right);
} else {
// Convert operands to 32-bit integers. Right in r2 and left in r3.
__ ConvertNumberToInt32(
left, r3, heap_number_map,
scratch1, scratch2, scratch3, d0, d1, not_numbers);
__ ConvertNumberToInt32(
right, r2, heap_number_map,
scratch1, scratch2, scratch3, d0, d1, not_numbers);
__ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
__ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
}
Label result_not_a_smi;
@ -2508,16 +2491,6 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
void StackCheckStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
void InterruptStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kInterrupt, 0, 1);
}
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
const Register exponent = r2;
@ -2721,8 +2694,8 @@ bool CEntryStub::NeedsImmovableCode() {
}
bool CEntryStub::IsPregenerated() {
return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
bool CEntryStub::IsPregenerated(Isolate* isolate) {
return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
@ -5817,7 +5790,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ b(lt, &done);
// Check the number to string cache.
Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@ -5826,26 +5798,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
&not_cached);
slow);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
__ jmp(&done);
// Check if the argument is a safe string wrapper.
__ bind(&not_cached);
__ JumpIfSmi(arg, slow);
__ CompareObjectType(
arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
__ b(ne, slow);
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
__ and_(scratch2,
scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ cmp(scratch2,
Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ b(ne, slow);
__ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
__ str(arg, MemOperand(sp, stack_offset));
__ bind(&done);
}
@ -6170,6 +6125,11 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
__ str(lr, MemOperand(sp, 0));
__ blx(ip); // Call the C++ function.
__ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}
@ -6178,21 +6138,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t code =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
__ Move(ip, target);
__ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
// Prevent literal pool emission during calculation of return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
// Push return address (accessible to GC through exit frame pc).
// Note that using pc with str is deprecated.
Label start;
__ bind(&start);
__ add(ip, pc, Operand(Assembler::kInstrSize));
__ str(ip, MemOperand(sp, 0));
__ Jump(target); // Call the C++ function.
ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
masm->SizeOfCodeGeneratedSince(&start));
__ VFPEnsureFPSCRState(r2);
__ blx(lr); // Call the stub.
}
@ -6458,8 +6406,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
{ REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
{ REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
@ -6491,7 +6437,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
bool RecordWriteStub::IsPregenerated() {
bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@ -6870,6 +6816,9 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter
__ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
@ -6888,90 +6837,128 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
static void CreateArrayDispatch(MacroAssembler* masm) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
__ b(ne, &next);
T stub(kind);
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
T stub(GetInitialFastElementsKind(),
CONTEXT_CHECK_REQUIRED,
mode);
__ TailCallStub(&stub);
__ bind(&next);
}
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
__ b(ne, &next);
T stub(kind);
__ TailCallStub(&stub);
__ bind(&next);
}
// If we reached this point there is a problem.
__ Abort(kUnexpectedElementsKindInArrayConstructor);
// If we reached this point there is a problem.
__ Abort(kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
}
static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
// r2 - type info cell
// r3 - kind
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
// r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
// r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// r0 - number of arguments
// r1 - constructor?
// sp[0] - last argument
ASSERT(FAST_SMI_ELEMENTS == 0);
ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ASSERT(FAST_ELEMENTS == 2);
ASSERT(FAST_HOLEY_ELEMENTS == 3);
ASSERT(FAST_DOUBLE_ELEMENTS == 4);
ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
Label normal_sequence;
__ b(ne, &normal_sequence);
if (mode == DONT_OVERRIDE) {
ASSERT(FAST_SMI_ELEMENTS == 0);
ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ASSERT(FAST_ELEMENTS == 2);
ASSERT(FAST_HOLEY_ELEMENTS == 3);
ASSERT(FAST_DOUBLE_ELEMENTS == 4);
ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
__ b(ne, &normal_sequence);
}
// look at the first argument
__ ldr(r5, MemOperand(sp, 0));
__ cmp(r5, Operand::Zero());
__ b(eq, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the cell).
__ add(r3, r3, Operand(1));
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &normal_sequence);
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r5, FieldMemOperand(r5, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
// Save the resulting elements kind in type info
__ SmiTag(r3);
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
__ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
ArraySingleArgumentConstructorStub stub_holey(holey_initial,
CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
__ b(ne, &next);
ArraySingleArgumentConstructorStub stub(kind);
__ bind(&normal_sequence);
ArraySingleArgumentConstructorStub stub(initial,
CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
__ bind(&next);
}
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the cell).
__ add(r3, r3, Operand(1));
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
if (FLAG_debug_code) {
__ ldr(r5, FieldMemOperand(r5, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSiteInCell);
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
}
// If we reached this point there is a problem.
__ Abort(kUnexpectedElementsKindInArrayConstructor);
// Save the resulting elements kind in type info
__ SmiTag(r3);
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
__ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
__ b(ne, &next);
ArraySingleArgumentConstructorStub stub(kind);
__ TailCallStub(&stub);
__ bind(&next);
}
// If we reached this point there is a problem.
__ Abort(kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
}
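The ASSERTs encode the invariant this dispatch relies on: packed kinds are even and each holey variant is the next odd value, so holeyness is a single bit test and the packed-to-holey transition is kind + 1. Distilled into a sketch:

// Sketch of the elements-kind encoding checked by "tst r3, #1" above.
enum ElementsKindSketch {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

inline bool IsHoley(ElementsKindSketch kind) { return (kind & 1) != 0; }
inline ElementsKindSketch ToHoley(ElementsKindSketch kind) {
  return static_cast<ElementsKindSketch>(kind | 1);
}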
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind initial_kind = GetInitialFastElementsKind();
ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
(!FLAG_track_allocation_sites &&
(kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@ -7004,6 +6991,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case);
__ cmp(r0, Operand(1));
__ b(gt, &not_one_case);
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
}
}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
@ -7035,50 +7050,24 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
Label no_info, switch_ready;
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
// The type cell may have undefined in its value.
__ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
// The type cell has either an AllocationSite or a JSFunction
// If the type cell is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ ldr(r4, FieldMemOperand(r3, 0));
__ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &no_info);
__ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);
__ mov(r3, Operand(GetInitialFastElementsKind()));
__ bind(&switch_ready);
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
__ bind(&not_zero_case);
__ cmp(r0, Operand(1));
__ b(gt, &not_one_case);
CreateArrayDispatchOneArgument(masm);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
UNREACHABLE();
}
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}

38
deps/v8/src/arm/code-stubs-arm.h

@ -68,7 +68,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated() { return true; }
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@ -232,7 +232,7 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
bool IsPregenerated();
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@ -305,7 +305,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated();
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@ -376,7 +376,7 @@ class RecordWriteStub: public PlatformCodeStub {
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
@ -419,19 +419,6 @@ class RecordWriteStub: public PlatformCodeStub {
Register scratch0_;
Register scratch1_;
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
friend class RecordWriteStub;
};
@ -478,23 +465,6 @@ class RecordWriteStub: public PlatformCodeStub {
};
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return RegExpCEntry; }
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
};
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the

6
deps/v8/src/arm/codegen-arm.h

@ -44,8 +44,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
CodeGenerator() {
InitializeAstVisitor();
explicit CodeGenerator(Isolate* isolate) {
InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@ -61,7 +61,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
static bool ShouldGenerateLog(Expression* type);
static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,

2
deps/v8/src/arm/constants-arm.h

@ -220,6 +220,8 @@ enum {
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
kImm16Mask = (1 << 16) - 1,
kImm8Mask = (1 << 8) - 1,
kOff12Mask = (1 << 12) - 1,
kOff8Mask = (1 << 8) - 1
};

9
deps/v8/src/arm/cpu-arm.cc

@ -106,15 +106,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
void CPU::DebugBreak() {
#if !defined (__arm__)
UNIMPLEMENTED(); // when building ARM emulator target
#else
asm volatile("bkpt 0");
#endif
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

6
deps/v8/src/arm/debug-arm.cc

@ -55,7 +55,8 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
patcher.Emit(
debug_info_->GetIsolate()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
@ -95,7 +96,8 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
patcher.Emit(
debug_info_->GetIsolate()->debug()->debug_break_slot()->entry());
}

203
deps/v8/src/arm/deoptimizer-arm.cc

@ -101,12 +101,7 @@ static const int32_t kBranchBeforeInterrupt = 0x5a000004;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
ASSERT(!InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@ -125,12 +120,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
ASSERT(InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
replacement_code));
Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@ -150,10 +140,10 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
@ -164,185 +154,27 @@ bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return true;
return PATCHED_FOR_OSR;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return false;
return NOT_PATCHED;
}
}
#endif // DEBUG
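The tri-state query above distinguishes the two call targets that can sit behind the same back-edge call site. A minimal C++ model of that decision (the branch_is_nop flag and the entry parameters are hypothetical stand-ins; the real code inspects the ARM instruction stream, as the asserts show):

#include <cassert>
#include <cstdint>

enum InterruptPatchState { NOT_PATCHED, PATCHED_FOR_OSR };

// A back edge ends in: <branch-or-nop>; ldr ip, [pc, #offset]; blx ip.
// Patching for OSR turns the conditional branch into a nop and retargets
// the loaded address from InterruptCheck to OnStackReplacement.
InterruptPatchState GetInterruptPatchState(bool branch_is_nop,
                                           uint32_t call_target,
                                           uint32_t osr_entry,
                                           uint32_t interrupt_entry) {
  if (branch_is_nop) {
    assert(call_target == osr_entry);
    return PATCHED_FOR_OSR;
  }
  assert(call_target == interrupt_entry);
  return NOT_PATCHED;
}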
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
// Read the number of frames.
value = it.Next();
if (value == 1) return i;
}
}
UNREACHABLE();
return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
TranslationIterator iterator(translations, translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
int closure_id = iterator.Next();
USE(closure_id);
ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
output_frame_size);
}
// There's only one output frame in the OSR case.
output_count_ = 1;
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
unsigned output_offset = output_frame_size - kPointerSize;
int parameter_count = function_->shared()->formal_parameter_count() + 1;
for (int i = 0; i < parameter_count; ++i) {
output_[0]->SetFrameSlot(output_offset, 0);
output_offset -= kPointerSize;
}
// Translate the incoming parameters. This may overwrite some of the
// incoming argument slots we've just cleared.
int input_offset = input_frame_size - kPointerSize;
bool ok = true;
int limit = input_offset - (parameter_count * kPointerSize);
while (ok && input_offset > limit) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
for (int i = StandardFrameConstants::kCallerPCOffset;
ok && i >= StandardFrameConstants::kMarkerOffset;
i -= kPointerSize) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
const char* name = "UNKNOWN";
switch (i) {
case StandardFrameConstants::kCallerPCOffset:
name = "caller's pc";
break;
case StandardFrameConstants::kCallerFPOffset:
name = "fp";
break;
case StandardFrameConstants::kContextOffset:
name = "context";
break;
case StandardFrameConstants::kMarkerOffset:
name = "function";
break;
}
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
input_offset,
name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
}
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// If translation of any command failed, continue using the input frame.
if (!ok) {
delete output_[0];
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@ -555,11 +387,8 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
__ push(r6);
}
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));

3
deps/v8/src/arm/disasm-arm.cc

@ -50,9 +50,6 @@
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#ifndef WIN32
#include <stdint.h>
#endif
#include "v8.h"

38
deps/v8/src/arm/full-codegen-arm.cc

@ -296,8 +296,7 @@ void FullCodeGenerator::Generate() {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
StackCheckStub stub;
__ CallStub(&stub);
__ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@ -366,8 +365,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
InterruptStub stub;
__ CallStub(&stub);
__ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@ -416,8 +414,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
InterruptStub stub;
__ CallStub(&stub);
__ Call(isolate()->builtins()->InterruptCheck(),
RelocInfo::CODE_TARGET);
}
__ pop(r0);
EmitProfilingCounterReset();
@ -1330,8 +1328,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ mov(r0, Operand(info));
__ push(r0);
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
__ mov(r0, Operand(info));
@ -3010,7 +3007,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@ -3022,7 +3019,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
__ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ b(ne, if_true);
__ b(ne, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@ -3068,6 +3065,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ b(ne, &loop);
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
__ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
__ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
__ bind(&skip_lookup);
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
@ -3077,16 +3082,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
__ b(ne, if_false);
// Set the bit in the map to indicate that it has been checked safe for
// default valueOf, and set the result to true.
__ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
__ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
__ jmp(if_true);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
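The reordering above also changes what the cached map bit means: it now memoizes only "no own valueOf was found", and the prototype comparison runs on every query instead of being skipped on a cache hit. A runnable sketch of the fixed logic (types and names are hypothetical stand-ins for the V8 internals):

#include <functional>

struct Map { bool no_own_value_of = false; };

bool IsStringWrapperSafeForDefaultValueOf(
    Map& map,
    const std::function<bool()>& has_own_value_of,  // the slow descriptor scan
    bool prototype_is_unmodified) {
  if (!map.no_own_value_of) {           // the &skip_lookup fast path not taken
    if (has_own_value_of()) return false;
    map.no_own_value_of = true;         // set the bit right after the scan
  }
  // The prototype check is no longer skipped when the bit was already set.
  return prototype_is_unmodified;
}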
@ -3320,7 +3318,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);

8
deps/v8/src/arm/ic-arm.cc

@ -354,7 +354,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
@ -393,7 +393,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
Isolate::Current()->stub_cache()->GenerateProbe(
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
@ -658,7 +658,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
@ -1490,7 +1490,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.

152
deps/v8/src/arm/lithium-arm.cc

@ -30,6 +30,7 @@
#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@ -260,6 +261,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
function()->PrintTo(stream);
stream->Add(".code_entry = ");
code_object()->PrintTo(stream);
}
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@ -425,6 +434,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
// If compiling for OSR, reserve space for the unoptimized frame,
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
chunk_->GetNextSpillIndex(false);
}
}
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@ -718,12 +736,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToSmi)) {
does_deopt = true;
break;
}
}
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseRegisterAtStart(right_value);
@ -735,12 +748,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
does_deopt = true;
break;
}
}
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
@ -1089,6 +1097,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
LInstruction* LChunkBuilder::DoStoreCodeEntry(
HStoreCodeEntry* store_code_entry) {
LOperand* function = UseRegister(store_code_entry->function());
LOperand* code_object = UseTempRegister(store_code_entry->code_object());
return new(zone()) LStoreCodeEntry(function, code_object);
}
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@ -1505,20 +1521,39 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left;
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
left = UseRegister(instr->BetterLeftOperand());
temp = TempRegister();
HValue* left = instr->BetterLeftOperand();
HValue* right = instr->BetterRightOperand();
LOperand* left_op;
LOperand* right_op;
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
if (right->IsConstant()) {
HConstant* constant = HConstant::cast(right);
int32_t constant_value = constant->Integer32Value();
// The constants -1, 0 and 1 can be optimized even if the result can overflow.
// Other constants can be optimized only when overflow is impossible.
if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
left_op = UseRegisterAtStart(left);
right_op = UseConstant(right);
} else {
if (bailout_on_minus_zero) {
left_op = UseRegister(left);
} else {
left_op = UseRegisterAtStart(left);
}
right_op = UseRegister(right);
}
} else {
left = UseRegisterAtStart(instr->BetterLeftOperand());
if (bailout_on_minus_zero) {
left_op = UseRegister(left);
} else {
left_op = UseRegisterAtStart(left);
}
right_op = UseRegister(right);
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
LMulI* mul = new(zone()) LMulI(left_op, right_op);
if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
@ -1689,9 +1724,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
LOperand* global_object = UseFixed(instr->global_object(), r0);
LRandom* result = new(zone()) LRandom(global_object);
return MarkAsCall(DefineFixedDouble(result, d7), instr);
LOperand* global_object = UseTempRegister(instr->global_object());
LOperand* scratch = TempRegister();
LOperand* scratch2 = TempRegister();
LOperand* scratch3 = TempRegister();
LRandom* result = new(zone()) LRandom(
global_object, scratch, scratch2, scratch3);
return DefineFixedDouble(result, d7);
}
@ -1912,19 +1951,17 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
LInstruction* res = NULL;
if (instr->value()->type().IsSmi()) {
value = UseRegisterAtStart(instr->value());
HValue* val = instr->value();
if (val->type().IsSmi() || val->representation().IsSmi()) {
value = UseRegisterAtStart(val);
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
value = UseRegister(instr->value());
value = UseRegister(val);
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
LOperand* temp3 = FixedTemp(d11);
LOperand* temp2 = FixedTemp(d11);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
temp2,
temp3));
temp2));
res = AssignEnvironment(res);
}
return res;
@ -1944,14 +1981,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(result);
} else if (to.IsSmi()) {
LOperand* value = UseRegister(instr->value());
return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
TempRegister(), TempRegister())));
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
LDoubleToI* res = new(zone()) LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@ -2018,9 +2053,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
return AssignEnvironment(new(zone()) LCheckValue(value));
}
@ -2418,10 +2453,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
// Use an index that corresponds to the location in the unoptimized frame,
// which the optimized frame will subsume.
int env_index = instr->index();
int spill_index = 0;
if (instr->environment()->is_parameter_index(env_index)) {
spill_index = chunk()->GetParameterStackSlot(env_index);
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
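Together with the spill-slot reservation added to Build() above, unknown OSR values now land at fixed offsets that line up with the unoptimized frame being subsumed. A small sketch of the local-variable case, assuming the environment lays out parameters before locals as first_local_index() implies:

// Map an environment index for a local onto one of the spill slots reserved
// for the unoptimized frame (parameters instead go through
// GetParameterStackSlot(), as in the code above).
int OsrSpillIndexForLocal(int env_index, int first_local_index) {
  return env_index - first_local_index;
}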
@ -2443,6 +2486,8 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
// There are no real uses of a captured object.
return NULL;
}
@ -2489,20 +2534,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
} else {
env->Push(value);
}
}
instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending a deoptimization environment, create
// a lazy bailout instruction to capture the environment.

507
deps/v8/src/arm/lithium-arm.h

File diff suppressed because it is too large

333
deps/v8/src/arm/lithium-codegen-arm.cc

@ -31,12 +31,13 @@
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
class SafepointGenerator : public CallWrapper {
class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@ -44,11 +45,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
virtual ~SafepointGenerator() { }
virtual ~SafepointGenerator() {}
virtual void BeforeCall(int call_size) const { }
virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
virtual void AfterCall() const {
virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@ -253,6 +254,21 @@ bool LCodeGen::GeneratePrologue() {
}
void LCodeGen::GenerateOsrPrologue() {
// Generate the OSR entry prologue at the first unknown OSR value, or if there
// are none, at the OSR entrypoint instruction.
if (osr_pc_offset_ >= 0) return;
osr_pc_offset_ = masm()->pc_offset();
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
ASSERT(slots >= 0);
__ sub(sp, sp, Operand(slots * kPointerSize));
}
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@ -423,7 +439,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
Handle<Object> literal = constant->handle();
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@ -431,7 +447,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (r.IsDouble()) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
ASSERT(r.IsTagged());
ASSERT(r.IsSmiOrTagged());
__ LoadObject(scratch, literal);
}
return scratch;
@ -458,7 +474,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
Handle<Object> literal = constant->handle();
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@ -486,7 +502,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle();
return constant->handle(isolate());
}
@ -543,7 +559,7 @@ Operand LCodeGen::ToOperand(LOperand* op) {
Abort(kToOperandUnsupportedDoubleImmediate);
}
ASSERT(r.IsTagged());
return Operand(constant->handle());
return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@ -690,7 +706,7 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle());
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@ -1098,8 +1114,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
// Record the address of the first unknown OSR value as the place to enter.
if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
GenerateOsrPrologue();
}
@ -1573,21 +1588,17 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
Register left = ToRegister(instr->left());
LOperand* right_op = instr->right();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (right_op->IsConstantOperand() && !can_overflow) {
// Use optimized code for specific constants.
int32_t constant = ToRepresentation(
LConstantOperand::cast(right_op),
instr->hydrogen()->right()->representation());
if (right_op->IsConstantOperand()) {
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
@ -1598,7 +1609,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
__ rsb(result, left, Operand::Zero());
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
DeoptimizeIf(vs, instr->environment());
} else {
__ rsb(result, left, Operand::Zero());
}
break;
case 0:
if (bailout_on_minus_zero) {
@ -1619,23 +1635,21 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
if (IsPowerOf2(constant_abs) ||
IsPowerOf2(constant_abs - 1) ||
IsPowerOf2(constant_abs + 1)) {
if (IsPowerOf2(constant_abs)) {
int32_t shift = WhichPowerOf2(constant_abs);
__ mov(result, Operand(left, LSL, shift));
} else if (IsPowerOf2(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ add(result, left, Operand(left, LSL, shift));
} else if (IsPowerOf2(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ rsb(result, left, Operand(left, LSL, shift));
}
if (IsPowerOf2(constant_abs)) {
int32_t shift = WhichPowerOf2(constant_abs);
__ mov(result, Operand(left, LSL, shift));
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
} else if (IsPowerOf2(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ add(result, left, Operand(left, LSL, shift));
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
} else if (IsPowerOf2(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ rsb(result, left, Operand(left, LSL, shift));
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
} else {
// Generate standard code.
__ mov(ip, Operand(constant));
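The constant paths above are classic strength reduction: when |constant| is 2^k, 2^k + 1, or 2^k - 1, the multiply becomes a shift plus at most one add or reverse-subtract, with a final negation for negative constants. A self-contained C++ model of the same arithmetic (wrap-around on overflow, matching the unchecked path; constants outside the three forms are assumed to take the generic mul path, and 0/±1 are handled by the switch above):

#include <cstdint>

static int WhichPowerOf2(uint32_t x) {  // assumes x is a power of two
  int n = 0;
  while (x >>= 1) n++;
  return n;
}

int32_t MulByConstant(int32_t left, int32_t constant) {
  int32_t mask = constant >> 31;                       // 0 or -1
  uint32_t abs = static_cast<uint32_t>((constant + mask) ^ mask);
  uint32_t l = static_cast<uint32_t>(left);
  uint32_t result;
  if ((abs & (abs - 1)) == 0) {                        // 2^k: one shift
    result = l << WhichPowerOf2(abs);
  } else if (((abs - 1) & (abs - 2)) == 0) {           // 2^k + 1: shift + add
    result = l + (l << WhichPowerOf2(abs - 1));
  } else {                                             // 2^k - 1: shift + rsb
    result = (l << WhichPowerOf2(abs + 1)) - l;
  }
  if (constant < 0) result = 0u - result;              // correct the sign
  return static_cast<int32_t>(result);
}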
@ -1644,12 +1658,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
Register right = EmitLoadRegister(right_op, scratch);
if (bailout_on_minus_zero) {
__ orr(ToRegister(instr->temp()), left, right);
}
ASSERT(right_op->IsRegister());
Register right = ToRegister(right_op);
if (can_overflow) {
if (overflow) {
Register scratch = scratch0();
// scratch:result = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@ -1669,12 +1682,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (bailout_on_minus_zero) {
// Bail out if the result is supposed to be negative zero.
Label done;
__ teq(left, Operand(right));
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(ToRegister(instr->temp()), Operand::Zero());
DeoptimizeIf(mi, instr->environment());
DeoptimizeIf(eq, instr->environment());
__ bind(&done);
}
}
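The reworked minus-zero bailout no longer needs the extra temp register: a zero product can only stand for -0 when the operands' signs differ, which the teq/b(pl) pair establishes before the result is compared against zero. As a boolean sketch:

#include <cstdint>

bool NeedsMinusZeroDeopt(int32_t left, int32_t right, int32_t result) {
  // Signs differ exactly when left ^ right is negative (the teq + pl test);
  // only then can a zero result represent -0.
  return ((left ^ right) < 0) && result == 0;
}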
@ -1871,7 +1884,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@ -2735,15 +2748,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
virtual LInstruction* instr() { return instr_; }
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@ -3722,14 +3735,14 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@ -3878,80 +3891,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LRandom* instr_;
};
DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(d7));
ASSERT(ToRegister(instr->global_object()).is(r0));
// Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
// Load native context
Register global_object = ToRegister(instr->global_object());
Register native_context = global_object;
__ ldr(native_context, FieldMemOperand(
global_object, GlobalObject::kNativeContextOffset));
// Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
// r2: FixedArray of the native context's random seeds
Register state = native_context;
__ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
// Load state[0].
__ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
__ cmp(r1, Operand::Zero());
__ b(eq, deferred->entry());
Register state0 = ToRegister(instr->scratch());
__ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// Load state[1].
__ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
// r1: state[0].
// r0: state[1].
Register state1 = ToRegister(instr->scratch2());
__ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
__ and_(r3, r1, Operand(0xFFFF));
__ mov(r4, Operand(18273));
__ mul(r3, r3, r4);
__ add(r1, r3, Operand(r1, LSR, 16));
Register scratch3 = ToRegister(instr->scratch3());
Register scratch4 = scratch0();
__ and_(scratch3, state0, Operand(0xFFFF));
__ mov(scratch4, Operand(18273));
__ mul(scratch3, scratch3, scratch4);
__ add(state0, scratch3, Operand(state0, LSR, 16));
// Save state[0].
__ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
__ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
__ and_(r3, r0, Operand(0xFFFF));
__ mov(r4, Operand(36969));
__ mul(r3, r3, r4);
__ add(r0, r3, Operand(r0, LSR, 16));
__ and_(scratch3, state1, Operand(0xFFFF));
__ mov(scratch4, Operand(36969));
__ mul(scratch3, scratch3, scratch4);
__ add(state1, scratch3, Operand(state1, LSR, 16));
// Save state[1].
__ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
__ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
__ and_(r0, r0, Operand(0x3FFFF));
__ add(r0, r0, Operand(r1, LSL, 14));
Register random = scratch4;
__ and_(random, state1, Operand(0x3FFFF));
__ add(random, random, Operand(state0, LSL, 14));
__ bind(deferred->exit());
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
__ orr(r1, r1, Operand(0x300000));
__ mov(scratch3, Operand(0x41000000));
__ orr(scratch3, scratch3, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vmov(result, random, scratch3);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
}
void LCodeGen::DoDeferredRandom(LRandom* instr) {
__ PrepareCallCFunction(1, scratch0());
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Return value is in r0.
__ mov(scratch4, Operand::Zero());
DwVfpRegister scratch5 = double_scratch0();
__ vmov(scratch5, scratch4, scratch3);
__ vsub(result, result, scratch5);
}
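The rewritten DoRandom keeps the whole fast path inline: two coupled 16-bit multiply-with-carry generators produce 32 random bits, and the exponent-splicing trick maps them to a double in [0, 1) without an integer-to-float conversion. A C++ model of exactly this sequence:

#include <cstdint>
#include <cstring>

double MwcRandom(uint32_t state[2]) {
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  state[0] = 18273u * (state[0] & 0xFFFFu) + (state[0] >> 16);
  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  state[1] = 36969u * (state[1] & 0xFFFFu) + (state[1] >> 16);
  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  uint32_t random = (state[0] << 14) + (state[1] & 0x3FFFFu);
  // 0x41300000_xxxxxxxx is 2^20 + random * 2^-32 as a double, so
  // subtracting 2^20 leaves a value in [0, 1).
  uint64_t bits = (UINT64_C(0x41300000) << 32) | random;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result - 1048576.0;  // 1048576.0 == 2^20
}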
@ -4146,6 +4143,15 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
__ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(code_object,
FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@ -4520,12 +4526,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@ -4573,12 +4581,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
class DeferredStringCharFromCode: public LDeferredCode {
class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@ -4661,16 +4671,16 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
SIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@ -4686,16 +4696,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
class DeferredNumberTagU: public LDeferredCode {
class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
UNSIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@ -4768,12 +4778,14 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD: public LDeferredCode {
class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@ -4902,7 +4914,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
LowDwVfpRegister double_scratch = double_scratch0();
DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@ -4913,18 +4925,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// The carry flag is set when we reach this deferred code as we just executed
// SmiUntag(heap_object, SetCC)
STATIC_ASSERT(kHeapObjectTag == 1);
__ adc(input_reg, input_reg, Operand(input_reg));
__ adc(scratch2, input_reg, Operand(input_reg));
// Heap number map check.
__ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label heap_number;
@ -4932,23 +4940,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
__ cmp(scratch2, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ mov(input_reg, Operand::Zero());
__ b(&done);
__ bind(&heap_number);
__ sub(scratch1, input_reg, Operand(kHeapObjectTag));
__ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
__ ECMAToInt32(input_reg, double_scratch2,
scratch1, scratch2, scratch3, double_scratch);
__ TruncateHeapNumberToI(input_reg, scratch2);
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
__ sub(ip, input_reg, Operand(kHeapObjectTag));
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
DeoptimizeIf(ne, instr->environment());
@ -4966,12 +4969,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@ -5018,14 +5023,11 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
__ ECMAToInt32(result_reg, double_input,
scratch1, scratch2, scratch3, double_scratch);
__ TruncateDoubleToI(result_reg, double_input);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
@ -5046,14 +5048,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
__ ECMAToInt32(result_reg, double_input,
scratch1, scratch2, scratch3, double_scratch);
__ TruncateDoubleToI(result_reg, double_input);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
@ -5132,18 +5131,18 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
Handle<HeapObject> object = instr->hydrogen()->object();
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(target);
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
__ cmp(reg, Operand(target));
__ cmp(reg, Operand(object));
}
DeoptimizeIf(ne, instr->environment());
}
@ -5162,17 +5161,17 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
class DeferredCheckMaps: public LDeferredCode {
class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
virtual void Generate() {
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
virtual LInstruction* instr() { return instr_; }
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@ -5265,12 +5264,14 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
virtual LInstruction* instr() { return instr_; }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@ -5422,8 +5423,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
__ mov(r1, Operand(instr->hydrogen()->shared_info()));
__ push(r1);
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
@ -5621,12 +5621,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
class DeferredStackCheck: public LDeferredCode {
class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@ -5641,9 +5643,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &done);
StackCheckStub stub;
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
EnsureSpaceForLazyDeopt();
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@ -5680,9 +5683,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
// Normally we record the first unknown OSR value as the entrypoint to the OSR
// code, but if there were none, record the entrypoint here.
if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
GenerateOsrPrologue();
}

12
deps/v8/src/arm/lithium-codegen-arm.h

@ -43,7 +43,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@ -149,7 +149,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@ -227,6 +226,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@ -420,7 +422,7 @@ class LCodeGen BASE_EMBEDDED {
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
@ -468,7 +470,7 @@ class LCodeGen BASE_EMBEDDED {
};
class LDeferredCode: public ZoneObject {
class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@ -477,7 +479,7 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;

2
deps/v8/src/arm/lithium-gap-resolver-arm.h

@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);

242
deps/v8/src/arm/macro-assembler-arm.cc

@ -829,26 +829,6 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void MacroAssembler::ConvertNumberToInt32(Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
LowDwVfpRegister double_scratch2,
Label* not_number) {
Label done;
UntagAndJumpIfSmi(dst, object, &done);
JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset));
ECMAToInt32(dst, double_scratch1,
scratch1, scratch2, scratch3, double_scratch2);
bind(&done);
}
void MacroAssembler::LoadNumber(Register object,
LowDwVfpRegister dst,
Register heap_number_map,
@ -1702,15 +1682,9 @@ void MacroAssembler::Allocate(int object_size,
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
// Set up allocation top address and object size registers.
// Set up allocation top address register.
Register topaddr = scratch1;
Register obj_size_reg = scratch2;
mov(topaddr, Operand(allocation_top));
Operand obj_size_operand = Operand(object_size);
if (!obj_size_operand.is_single_instruction(this)) {
// We are about to steal IP, so we need to load this value first
mov(obj_size_reg, obj_size_operand);
}
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@ -1734,7 +1708,7 @@ void MacroAssembler::Allocate(int object_size,
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
@ -1748,13 +1722,25 @@ void MacroAssembler::Allocate(int object_size,
}
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
if (obj_size_operand.is_single_instruction(this)) {
// We can add the size as an immediate
add(scratch2, result, obj_size_operand, SetCC);
} else {
// Doesn't fit in an immediate, we have to use the register
add(scratch2, result, obj_size_reg, SetCC);
// to calculate the new top. We must preserve the ip register at this
// point, so we cannot just use add().
ASSERT(object_size > 0);
Register source = result;
Condition cond = al;
int shift = 0;
while (object_size != 0) {
if (((object_size >> shift) & 0x03) == 0) {
shift += 2;
} else {
int bits = object_size & (0xff << shift);
object_size -= bits;
shift += 8;
Operand bits_operand(bits);
ASSERT(bits_operand.is_single_instruction(this));
add(scratch2, source, bits_operand, SetCC, cond);
source = scratch2;
cond = cc;
}
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
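The loop above unrolls the size addition at assembly time: an ARM data-processing immediate is an 8-bit value rotated by an even amount, so any object size can be added in a handful of single-instruction adds without borrowing ip as a scratch register. A runnable model of the decomposition:

#include <cassert>
#include <cstdint>

uint32_t AddObjectSize(uint32_t top, uint32_t object_size) {
  assert(object_size > 0);
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;  // rotations are even, so skip zero bit pairs
    } else {
      uint32_t bits = object_size & (0xFFu << shift);  // one 8-bit chunk
      object_size -= bits;
      shift += 8;
      top += bits;  // emitted as one add with a single-instruction immediate
    }
  }
  return top;  // the caller still checks carry and the allocation limit
}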
@ -2299,7 +2285,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
@ -2368,15 +2353,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Label leave_exit_frame;
Label return_value_loaded;
if (returns_handle) {
Label load_return_value;
cmp(r0, Operand::Zero());
b(eq, &load_return_value);
// dereference the returned value
ldr(r0, MemOperand(r0));
b(&return_value_loaded);
bind(&load_return_value);
}
// load value from ReturnValue
ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
bind(&return_value_loaded);
@ -2532,84 +2508,76 @@ void MacroAssembler::TryInt32Floor(Register result,
bind(&exception);
}
void MacroAssembler::ECMAToInt32(Register result,
DwVfpRegister double_input,
Register scratch,
Register scratch_high,
Register scratch_low,
LowDwVfpRegister double_scratch) {
ASSERT(!scratch_high.is(result));
ASSERT(!scratch_low.is(result));
ASSERT(!scratch_low.is(scratch_high));
ASSERT(!scratch.is(result) &&
!scratch.is(scratch_high) &&
!scratch.is(scratch_low));
ASSERT(!double_input.is(double_scratch));
Label out_of_range, only_low, negate, done;
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
LowDwVfpRegister double_scratch = kScratchDoubleReg;
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
// If result is not saturated (0x7fffffff or 0x80000000), we are done.
sub(scratch, result, Operand(1));
cmp(scratch, Operand(0x7ffffffe));
b(lt, &done);
sub(ip, result, Operand(1));
cmp(ip, Operand(0x7ffffffe));
b(lt, done);
}
vmov(scratch_low, scratch_high, double_input);
Ubfx(scratch, scratch_high,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Load scratch with exponent - 1. This is faster than loading
// with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
// If the exponent is greater than or equal to 84, the 32 least significant
// bits of the integer value are zeros (52 mantissa bits plus at least 32
// trailing zero bits), so the result modulo 2^32 is 0.
// Compare exponent with 84 (compare exponent - 1 with 83).
cmp(scratch, Operand(83));
b(ge, &out_of_range);
// If we reach this code, 31 <= exponent <= 83.
// So, we don't have to handle cases where 0 <= exponent <= 20 for
// which we would need to shift right the high part of the mantissa.
// Scratch contains exponent - 1.
// Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
rsb(scratch, scratch, Operand(51), SetCC);
b(ls, &only_low);
// 21 <= exponent <= 51, shift scratch_low and scratch_high
// to generate the result.
mov(scratch_low, Operand(scratch_low, LSR, scratch));
// Scratch contains: 52 - exponent.
// We need: exponent - 20.
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
rsb(scratch, scratch, Operand(32));
Ubfx(result, scratch_high,
0, HeapNumber::kMantissaBitsInTopWord);
// Set the implicit 1 before the mantissa part in scratch_high.
orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
orr(result, scratch_low, Operand(result, LSL, scratch));
b(&negate);
bind(&out_of_range);
mov(result, Operand::Zero());
b(&done);
bind(&only_low);
// 52 <= exponent <= 83, shift only scratch_low.
// On entry, scratch contains: 52 - exponent.
rsb(scratch, scratch, Operand::Zero());
mov(result, Operand(scratch_low, LSL, scratch));
bind(&negate);
// If input was positive, scratch_high ASR 31 equals 0 and
// scratch_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
// scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
// New result = (result eor 0xffffffff) + 1 = 0 - result.
eor(result, result, Operand(scratch_high, ASR, 31));
add(result, result, Operand(scratch_high, LSR, 31));
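// [Editor's sketch, illustrative only.] The eor/add pair above is a
// branch-free negation: ASR 31 yields an all-ones mask for negative inputs
// and LSR 31 yields the +1 carry; both are zero for positive inputs.
static int32_t ApplySign(int32_t magnitude, int32_t scratch_high) {
  uint32_t carry = static_cast<uint32_t>(scratch_high) >> 31;  // LSR 31: 0 or 1
  uint32_t mask = carry ? 0xffffffffu : 0u;                    // ASR 31: 0 or ~0
  return static_cast<int32_t>((static_cast<uint32_t>(magnitude) ^ mask) + carry);
}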
void MacroAssembler::TruncateDoubleToI(Register result,
DwVfpRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
// If we fell through then the inline version didn't succeed - call stub instead.
push(lr);
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
DoubleToIStub stub(sp, result, 0, true, true);
CallStub(&stub);
add(sp, sp, Operand(kDoubleSize));
pop(lr);
bind(&done);
}
void MacroAssembler::TruncateHeapNumberToI(Register result,
Register object) {
Label done;
LowDwVfpRegister double_scratch = kScratchDoubleReg;
ASSERT(!result.is(object));
vldr(double_scratch,
MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
TryInlineTruncateDoubleToI(result, double_scratch, &done);
// If we fell through then the inline version didn't succeed - call stub instead.
push(lr);
DoubleToIStub stub(object,
result,
HeapNumber::kValueOffset - kHeapObjectTag,
true,
true);
CallStub(&stub);
pop(lr);
bind(&done);
}
void MacroAssembler::TruncateNumberToI(Register object,
Register result,
Register heap_number_map,
Register scratch1,
Label* not_number) {
Label done;
ASSERT(!result.is(object));
UntagAndJumpIfSmi(result, object, &done);
JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
TruncateHeapNumberToI(result, object);
bind(&done);
}
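// [Editor's note.] TruncateNumberToI is the generic entry point; the dispatch
// above reads, in C-like pseudocode:
//   if (IsSmi(object))              result = SmiUntag(object);   // fast path
//   else if (!IsHeapNumber(object)) goto not_number;
//   else                            TruncateHeapNumberToI(result, object);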
@@ -2841,6 +2809,11 @@ void MacroAssembler::Abort(BailoutReason reason) {
RecordComment("Abort message: ");
RecordComment(msg);
}
if (FLAG_trap_on_abort) {
stop(msg);
return;
}
#endif
mov(r0, Operand(p0));
@@ -3824,6 +3797,30 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
}
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
if (reg2.is_valid()) regs |= reg2.bit();
if (reg3.is_valid()) regs |= reg3.bit();
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (regs & candidate.bit()) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
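// [Editor's usage sketch -- hypothetical call site.] The helper returns the
// first allocatable register not aliased by any argument, e.g.:
//   Register tmp = GetRegisterThatIsNotOneOf(r0, r1);  // never r0 or r1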
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -3848,10 +3845,13 @@ bool AreAliased(Register reg1,
#endif
CodePatcher::CodePatcher(byte* address, int instructions)
CodePatcher::CodePatcher(byte* address,
int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
masm_(NULL, address, size_ + Assembler::kGap) {
masm_(NULL, address, size_ + Assembler::kGap),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -3861,7 +3861,9 @@ CodePatcher::CodePatcher(byte* address, int instructions)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
CPU::FlushICache(address_, size_);
if (flush_cache_ == FLUSH) {
CPU::FlushICache(address_, size_);
}
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);

67
deps/v8/src/arm/macro-assembler-arm.h

@@ -62,6 +62,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -491,19 +499,6 @@ class MacroAssembler: public Assembler {
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
// Converts the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
void ConvertNumberToInt32(Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
LowDwVfpRegister double_scratch2,
Label* not_int32);
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
@@ -988,16 +983,35 @@ class MacroAssembler: public Assembler {
Label* done,
Label* exact);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
//
// Only public for the test code in test-code-stubs-arm.cc.
void TryInlineTruncateDoubleToI(Register result,
DwVfpRegister input,
Label* done);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Double_scratch must be between d0 and d15.
// Exits with 'result' holding the answer and all other registers clobbered.
void ECMAToInt32(Register result,
DwVfpRegister double_input,
Register scratch,
Register scratch_high,
Register scratch_low,
LowDwVfpRegister double_scratch);
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Register result, DwVfpRegister double_input);
// Performs a truncating conversion of a heap number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
// must be different registers. Exits with 'result' holding the answer.
void TruncateHeapNumberToI(Register result, Register object);
// Converts the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
// different registers.
void TruncateNumberToI(Register object,
Register result,
Register heap_number_map,
Register scratch1,
Label* not_int32);
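// [Editor's sketch -- a portable C++ restatement of ECMA-262 9.5 ToInt32,
// for reference only; the helpers above implement it in assembly.]
//   int32_t ToInt32Reference(double d) {
//     if (!std::isfinite(d) || d == 0) return 0;          // NaN, +/-Inf, +/-0
//     double m = std::fmod(std::trunc(d), 4294967296.0);  // truncate, mod 2^32
//     if (m < 0) m += 4294967296.0;                       // into [0, 2^32)
//     return m >= 2147483648.0 ? static_cast<int32_t>(m - 4294967296.0)
//                              : static_cast<int32_t>(m); // into [-2^31, 2^31)
//   }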
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
@@ -1097,7 +1111,6 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset_from_fp);
// Jump to a runtime routine.
@@ -1416,7 +1429,14 @@ class MacroAssembler: public Assembler {
// an assertion to fail.
class CodePatcher {
public:
CodePatcher(byte* address, int instructions);
enum FlushICache {
FLUSH,
DONT_FLUSH
};
CodePatcher(byte* address,
int instructions,
FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
// Macro assembler to emit code.
@@ -1436,6 +1456,7 @@ class CodePatcher {
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
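// [Editor's usage sketch -- hypothetical call site.]
//   CodePatcher patcher(address, 1);       // default FLUSH: I-cache flushed
//   patcher.masm()->mov(r0, Operand(0));   // on destruction, after patching
// Pass CodePatcher::DONT_FLUSH when the patched words are data only and an
// instruction cache flush would be wasted work.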

119
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -134,7 +134,6 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
EmitBacktrackConstantPool();
__ bind(&start_label_); // And then continue from here.
}
@@ -872,7 +871,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -938,37 +937,8 @@ void RegExpMacroAssemblerARM::PopRegister(int register_index) {
}
static bool is_valid_memory_offset(int value) {
if (value < 0) value = -value;
return value < (1<<12);
}
void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
__ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
int constant_offset = GetBacktrackConstantPoolEntry();
masm_->label_at_put(label, constant_offset);
// Reading pc-relative is based on the address 8 bytes ahead of
// the current opcode.
unsigned int offset_of_pc_register_read =
masm_->pc_offset() + Assembler::kPcLoadDelta;
int pc_offset_of_constant =
constant_offset - offset_of_pc_register_read;
ASSERT(pc_offset_of_constant < 0);
if (is_valid_memory_offset(pc_offset_of_constant)) {
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ ldr(r0, MemOperand(pc, pc_offset_of_constant));
} else {
// Not a 12-bit offset, so it needs to be loaded from the constant
// pool.
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
__ ldr(r0, MemOperand(pc, r0));
}
}
__ mov_label_offset(r0, label);
Push(r0);
CheckStackLimit();
}
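// [Editor's note.] mov_label_offset materializes the label's offset within
// the code object (label->pos() + Code::kHeaderSize - kHeapObjectTag) into
// r0, even for labels that are not bound yet -- which is what the deleted
// backtrack constant pool machinery used to provide.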
@@ -1055,16 +1025,34 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, scratch);
__ PrepareCallCFunction(3, scratch);
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
// Code* of self.
__ mov(r1, Operand(masm_->CodeObject()));
// r0 becomes return address pointer.
// We need to make room for the return address on the stack.
int stack_alignment = OS::ActivationFrameAlignment();
ASSERT(IsAligned(stack_alignment, kPointerSize));
__ sub(sp, sp, Operand(stack_alignment));
// r0 will point to the return address, placed by DirectCEntry.
__ mov(r0, sp);
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
CallCFunctionUsingStub(stack_guard_check, num_arguments);
__ mov(ip, Operand(stack_guard_check));
DirectCEntryStub stub;
stub.GenerateCall(masm_, ip);
// Drop the return address from the stack.
__ add(sp, sp, Operand(stack_alignment));
ASSERT(stack_alignment != 0);
__ ldr(sp, MemOperand(sp, 0));
__ mov(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -1079,7 +1067,6 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
@@ -1262,53 +1249,6 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
}
void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
__ CheckConstPool(false, false);
Assembler::BlockConstPoolScope block_const_pool(masm_);
backtrack_constant_pool_offset_ = masm_->pc_offset();
for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
__ emit(0);
}
backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
}
int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
while (backtrack_constant_pool_capacity_ > 0) {
int offset = backtrack_constant_pool_offset_;
backtrack_constant_pool_offset_ += kPointerSize;
backtrack_constant_pool_capacity_--;
if (masm_->pc_offset() - offset < 2 * KB) {
return offset;
}
}
Label new_pool_skip;
__ jmp(&new_pool_skip);
EmitBacktrackConstantPool();
__ bind(&new_pool_skip);
int offset = backtrack_constant_pool_offset_;
backtrack_constant_pool_offset_ += kPointerSize;
backtrack_constant_pool_capacity_--;
return offset;
}
void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
ExternalReference function,
int num_arguments) {
// Must pass all arguments in registers. The stub pushes on the stack.
ASSERT(num_arguments <= 4);
__ mov(code_pointer(), Operand(function));
RegExpCEntryStub stub;
__ CallStub(&stub);
if (OS::ActivationFrameAlignment() != 0) {
__ ldr(sp, MemOperand(sp, 0));
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
}
bool RegExpMacroAssemblerARM::CanReadUnaligned() {
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
}
@@ -1351,17 +1291,6 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
}
void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
int stack_alignment = OS::ActivationFrameAlignment();
if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
// Stack is already aligned for call, so decrement by alignment
// to make room for storing the link register.
__ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
__ mov(r0, sp);
__ Call(r5);
__ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
}
#undef __
#endif // V8_INTERPRETED_REGEXP

11
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -160,9 +160,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
void EmitBacktrackConstantPool();
int GetBacktrackConstantPoolEntry();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
@@ -212,14 +209,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
// Calls a C function and cleans up the frame alignment done by
// FrameAlign. The called function *is* allowed to trigger a garbage
// collection, but may not take more than four arguments (no arguments
// passed on the stack), and the first argument will be a pointer to the
// return address.
inline void CallCFunctionUsingStub(ExternalReference function,
int num_arguments);
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;

95
deps/v8/src/arm/simulator-arm.cc

@@ -1686,20 +1686,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingApiCall)(
int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
int32_t arg1);
typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
int32_t arg1);
typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingGetterCall)(
int32_t arg0, int32_t arg1, int32_t arg2);
typedef void (*SimulatorRuntimeProfilingGetterCallNew)(
typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeProfilingGetterCall)(
int32_t arg0, int32_t arg1, int32_t arg2);
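// [Editor's sketch of the new-style callbacks these typedefs model,
// illustrative only.] Callbacks now return void and write their result into
// the ReturnValue slot, so the simulator no longer copies a returned handle
// into r0 after the call:
//   void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
//     info.GetReturnValue().Set(42);  // result via ReturnValue, not a return
//   }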
// Software interrupt instructions are used by the simulator to call into the
@@ -1839,9 +1831,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
} else if (
redirection->type() == ExternalReference::DIRECT_API_CALL ||
redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x",
reinterpret_cast<void*>(external), arg0);
@@ -1851,22 +1841,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
v8::Handle<v8::Value> result = target(arg0);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, reinterpret_cast<int32_t>(*result));
} else {
SimulatorRuntimeDirectApiCallNew target =
reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
target(arg0);
}
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
target(arg0);
} else if (
redirection->type() == ExternalReference::PROFILING_API_CALL ||
redirection->type() == ExternalReference::PROFILING_API_CALL_NEW) {
redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1);
@@ -1876,22 +1855,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
v8::Handle<v8::Value> result = target(arg0, arg1);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, reinterpret_cast<int32_t>(*result));
} else {
SimulatorRuntimeProfilingApiCallNew target =
reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
target(arg0, arg1);
}
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
target(arg0, arg1);
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1);
@@ -1901,22 +1869,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
v8::Handle<v8::Value> result = target(arg0, arg1);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, reinterpret_cast<int32_t>(*result));
} else {
SimulatorRuntimeDirectGetterCallNew target =
reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
target(arg0, arg1);
}
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
target(arg0, arg1);
} else if (
redirection->type() == ExternalReference::PROFILING_GETTER_CALL ||
redirection->type() == ExternalReference::PROFILING_GETTER_CALL_NEW) {
redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1, arg2);
@@ -1926,20 +1883,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
v8::Handle<v8::Value> result = target(arg0, arg1, arg2);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, reinterpret_cast<int32_t>(*result));
} else {
SimulatorRuntimeProfilingGetterCallNew target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCallNew>(
external);
target(arg0, arg1, arg2);
}
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
external);
target(arg0, arg1, arg2);
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);

678
deps/v8/src/arm/stub-cache-arm.cc

@@ -785,6 +785,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -793,10 +798,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(scratch);
__ push(receiver);
__ push(holder);
__ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
__ push(scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch);
}
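// [Editor's note.] The interceptor frame shrinks from 6 entries to 4 (name,
// interceptor info, receiver, holder); the STATIC_ASSERTs above pin that
// layout to the StubCache::kInterceptorArgs* indices so this stub, its
// callers and the runtime stay in sync.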
@@ -811,7 +812,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
__ mov(r0, Operand(6));
__ mov(r0, Operand(StubCache::kInterceptorArgsLength));
__ mov(r1, Operand(ref));
CEntryStub stub(1);
@@ -903,23 +904,13 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
bool returns_handle =
!CallbackTable::ReturnsVoid(masm->isolate(), function_address);
ApiFunction fun(function_address);
ExternalReference::Type type =
returns_handle ?
ExternalReference::DIRECT_API_CALL :
ExternalReference::DIRECT_API_CALL_NEW;
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref = ExternalReference(&fun,
type,
masm->isolate());
Address thunk_address = returns_handle
? FUNCTION_ADDR(&InvokeInvocationCallback)
: FUNCTION_ADDR(&InvokeFunctionCallback);
ExternalReference::Type thunk_type =
returns_handle ?
ExternalReference::PROFILING_API_CALL :
ExternalReference::PROFILING_API_CALL_NEW;
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
masm->isolate());
@@ -930,11 +921,39 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
thunk_ref,
r1,
kStackUnwindSpace,
returns_handle,
kFastApiCallArguments + 1);
}
// Generate call to api function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
Register receiver,
Register scratch,
int argc,
Register* values) {
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
const int stack_space = kFastApiCallArguments + argc + 1;
// Assign stack space for the call arguments.
__ sub(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
__ str(receiver, MemOperand(sp, 0));
// Write receiver to stack frame.
int index = stack_space - 1;
__ str(receiver, MemOperand(sp, index * kPointerSize));
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
ASSERT(!receiver.is(values[i]));
ASSERT(!scratch.is(values[i]));
__ str(values[i], MemOperand(sp, --index * kPointerSize));
}
GenerateFastApiDirectCall(masm, optimization, argc);
}
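// [Editor's note on the frame assembled above, in words from sp.] The holder
// is seeded at sp[0], GenerateFastApiDirectCall fills in the remaining fast
// API slots, the argc extra arguments sit directly below the receiver, and
// the receiver occupies the topmost slot, sp[stack_space - 1].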
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -1092,7 +1111,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
6);
StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -1150,21 +1169,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
// Convert and store int passed in register ival to IEEE 754 single precision
// floating point value at memory location (dst + 4 * wordoffset).
// If VFP3 is available, use it for conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register scratch1) {
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
}
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1318,7 +1322,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
Handle<ExecutableAccessorInfo> callback) {
Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1405,11 +1409,27 @@ void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
}
void BaseLoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
__ push(receiver());
__ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
@@ -1419,13 +1439,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
}
__ Push(reg, scratch3());
__ push(scratch3());
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ mov(scratch4(), scratch3());
__ Push(scratch3(), scratch4());
__ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch4(), name());
__ Push(scratch4(), reg, name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
@@ -1439,23 +1459,14 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kStackUnwindSpace = kFastApiCallArguments + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
bool returns_handle =
!CallbackTable::ReturnsVoid(isolate(), getter_address);
ApiFunction fun(getter_address);
ExternalReference::Type type =
returns_handle ?
ExternalReference::DIRECT_GETTER_CALL :
ExternalReference::DIRECT_GETTER_CALL_NEW;
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
Address thunk_address = returns_handle
? FUNCTION_ADDR(&InvokeAccessorGetter)
: FUNCTION_ADDR(&InvokeAccessorGetterCallback);
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
ExternalReference::Type thunk_type =
returns_handle ?
ExternalReference::PROFILING_GETTER_CALL :
ExternalReference::PROFILING_GETTER_CALL_NEW;
ExternalReference::PROFILING_GETTER_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
isolate());
@@ -1464,8 +1475,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
r2,
kStackUnwindSpace,
returns_handle,
5);
6);
}
@@ -1553,7 +1563,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
isolate());
__ TailCallExternalReference(ref, 6, 1);
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -2811,6 +2821,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
Label success;
HandlerFrontend(object, receiver(), holder, name, &success);
__ bind(&success);
Register values[] = { value() };
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 1, values);
// Return the generated code.
return GetCode(kind(), Code::CALLBACKS, name);
}
#undef __
#define __ ACCESS_MASM(masm)
@@ -2894,47 +2922,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
Handle<Code> StoreStubCompiler::CompileStoreGlobal(
Handle<GlobalObject> object,
Handle<PropertyCell> cell,
Handle<Name> name) {
Label miss;
// Check that the map of the global has not changed.
__ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
__ cmp(scratch1(), Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
__ mov(scratch1(), Operand(cell));
__ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
__ ldr(scratch3(), FieldMemOperand(scratch1(), Cell::kValueOffset));
__ cmp(scratch3(), scratch2());
__ b(eq, &miss);
// Store the value in the cell.
__ str(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
Counters* counters = isolate()->counters();
__ IncrementCounter(
counters->named_store_global_inline(), 1, scratch1(), scratch2());
__ Ret();
// Handle store cache miss.
__ bind(&miss);
__ IncrementCounter(
counters->named_store_global_inline_miss(), 1, scratch1(), scratch2());
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
return GetICCode(kind(), Code::NORMAL, name);
}
Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
Handle<JSObject> object,
Handle<JSObject> last,
@@ -3190,509 +3177,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
DwVfpRegister double_scratch0,
LowDwVfpRegister double_scratch1,
Label* fail) {
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
// range.
__ JumpIfSmi(key, &key_ok);
__ CheckMap(key,
scratch0,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
__ b(ne, fail);
__ TrySmiTag(key, scratch0, fail);
__ bind(&key_ok);
}
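// [Editor's sketch -- the key check above in C-like form, illustrative only;
// the stub itself uses TryDoubleToInt32Exact and TrySmiTag.]
static bool KeyQualifiesAsSmi(double key_value, int32_t* out) {
  if (!(key_value >= kMinInt && key_value <= kMaxInt)) return false;  // NaN too
  int32_t truncated = static_cast<int32_t>(key_value);
  if (static_cast<double>(truncated) != key_value) return false;  // e.g. 5.5
  *out = truncated;  // TrySmiTag additionally rejects values outside smi range
  return true;
}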
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, check_heap_number, miss_force_generic;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the index is in range
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(key, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &miss_force_generic);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r3: external array.
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
__ UntagAndJumpIfNotSmi(r5, value, &slow);
} else {
__ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
}
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r5: value (integer).
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
// Clamp the value to [0..255].
__ Usat(r5, 8, Operand(r5));
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
__ SmiUntag(r4, key);
StoreIntAsFloat(masm, r3, r4, r5, r7);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ vmov(s2, r5);
__ vcvt_f64_s32(d0, s2);
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
__ vstr(d0, r3, 0);
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
// r3: external array.
__ bind(&check_heap_number);
__ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we cannot
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 1));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 2));
__ vstr(d0, r5, 0);
} else {
// Hoisted load. vldr requires offset to be a multiple of 4 so we
// cannot include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ ECMAToInt32(r5, d0, r6, r7, r9, d1);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(
masm->isolate()->counters()->keyed_load_external_array_slow(),
1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register scratch = r4;
Register elements_reg = r3;
Register length_reg = r5;
Register scratch2 = r6;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
// Check that the key is within bounds.
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis.
__ cmp(key_reg, scratch);
if (is_js_array && IsGrowStoreMode(store_mode)) {
__ b(hs, &grow);
} else {
__ b(hs, &miss_force_generic);
}
// Make sure elements is a fast element array, not 'cow'.
__ CheckMap(elements_reg,
scratch,
Heap::kFixedArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
if (IsFastSmiElementsKind(elements_kind)) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
__ RecordWrite(elements_reg, // Object.
scratch, // Address.
receiver_reg, // Value.
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
// value_reg (r0) is preserved.
// Done.
__ Ret();
__ bind(&miss_force_generic);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
__ bind(&transition_elements_kind);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
if (is_js_array && IsGrowStoreMode(store_mode)) {
// Grow the array by a single element if possible.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime. Flags already set by previous compare.
__ b(ne, &miss_force_generic);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ ldr(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
__ b(ne, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
__ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
}
// Store the element at index zero.
__ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ Ret();
__ bind(&check_capacity);
// Check for COW elements; in general they are not handled by this stub.
__ CheckMap(elements_reg,
scratch,
Heap::kFixedCOWArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ cmp(length_reg, scratch);
__ b(hs, &slow);
// Grow the array and finish the store.
__ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
}
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
bool is_js_array,
KeyedAccessStoreMode store_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch (elements backing store)
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register elements_reg = r3;
Register scratch1 = r4;
Register scratch2 = r5;
Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
// Check that the key is within bounds.
if (is_js_array) {
__ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch1);
if (IsGrowStoreMode(store_mode)) {
__ b(hs, &grow);
} else {
__ b(hs, &miss_force_generic);
}
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
scratch1, d0, &transition_elements_kind);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
__ bind(&miss_force_generic);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
__ bind(&transition_elements_kind);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
if (is_js_array && IsGrowStoreMode(store_mode)) {
// Grow the array by a single element if possible.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime. Flags already set by previous compare.
__ b(ne, &miss_force_generic);
// Transition on values that can't be stored in a FixedDoubleArray.
Label value_is_smi;
__ JumpIfSmi(value_reg, &value_is_smi);
__ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
__ b(ne, &transition_elements_kind);
__ bind(&value_is_smi);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ ldr(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
__ b(ne, &check_capacity);
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
// Initialize the new FixedDoubleArray.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch1,
Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ str(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
__ mov(scratch1, elements_reg);
__ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
scratch2, d0, &transition_elements_kind);
__ mov(scratch1, Operand(kHoleNanLower32));
__ mov(scratch2, Operand(kHoleNanUpper32));
for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
int offset = FixedDoubleArray::OffsetOfElementAt(i);
__ str(scratch1, FieldMemOperand(elements_reg, offset));
__ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
}
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ Ret();
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
__ ldr(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
__ cmp(length_reg, scratch1);
__ b(hs, &slow);
// Grow the array and finish the store.
__ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
}
}
#undef __
} } // namespace v8::internal

11
deps/v8/src/assembler.cc

@@ -43,7 +43,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic.h"
#include "isolate.h"
#include "isolate-inl.h"
#include "jsregexp.h"
#include "lazy-instance.h"
#include "platform.h"
@@ -119,7 +119,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
if (buffer == NULL) {
@@ -798,7 +798,7 @@ void RelocInfo::Print(Isolate* isolate, FILE* out) {
target_object()->ShortPrint(out);
PrintF(out, ")");
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder;
ExternalReferenceEncoder ref_encoder(isolate);
PrintF(out, " (%s) (%p)",
ref_encoder.NameOfAddress(*target_reference_address()),
*target_reference_address());
@@ -891,7 +891,7 @@ void ExternalReference::SetUp() {
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
math_exp_data_mutex = OS::CreateMutex();
math_exp_data_mutex = new Mutex();
}
@@ -899,7 +899,7 @@ void ExternalReference::InitializeMathExpData() {
// Early return?
if (math_exp_data_initialized) return;
math_exp_data_mutex->Lock();
LockGuard<Mutex> lock_guard(math_exp_data_mutex);
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
@@ -935,7 +935,6 @@ void ExternalReference::InitializeMathExpData() {
math_exp_data_initialized = true;
}
math_exp_data_mutex->Unlock();
}
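// [Editor's note.] LockGuard<Mutex> is RAII: the explicit Unlock() removed
// above becomes unnecessary because the mutex is released when lock_guard
// goes out of scope, on every path out of the function.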

34
deps/v8/src/assembler.h

@@ -196,7 +196,6 @@ class Label BASE_EMBEDDED {
}
friend class Assembler;
friend class RegexpAssembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
};
@@ -425,7 +424,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Object** call_object_address());
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(ObjectVisitor* v);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Patch the code with some other code.
void PatchCode(byte* instructions, int instruction_count);
@@ -644,38 +643,21 @@ class ExternalReference BASE_EMBEDDED {
BUILTIN_FP_INT_CALL,
// Direct call to API function callback.
// Handle<Value> f(v8::Arguments&)
// void f(v8::FunctionCallbackInfo&)
DIRECT_API_CALL,
// Call to invocation callback via InvokeInvocationCallback.
// Handle<Value> f(v8::Arguments&, v8::InvocationCallback)
PROFILING_API_CALL,
// Direct call to API function callback.
// void f(v8::Arguments&)
DIRECT_API_CALL_NEW,
// Call to function callback via InvokeFunctionCallback.
// void f(v8::Arguments&, v8::FunctionCallback)
PROFILING_API_CALL_NEW,
// void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
PROFILING_API_CALL,
// Direct call to accessor getter callback.
// Handle<Value> f(Local<String> property, AccessorInfo& info)
// void f(Local<String> property, PropertyCallbackInfo& info)
DIRECT_GETTER_CALL,
// Call to accessor getter callback via InvokeAccessorGetter.
// Handle<Value> f(Local<String> property, AccessorInfo& info,
// AccessorGetter getter)
PROFILING_GETTER_CALL,
// Direct call to accessor getter callback.
// void f(Local<String> property, AccessorInfo& info)
DIRECT_GETTER_CALL_NEW,
// Call to accessor getter callback via InvokeAccessorGetterCallback.
// void f(Local<String> property, AccessorInfo& info,
// void f(Local<String> property, PropertyCallbackInfo& info,
// AccessorGetterCallback callback)
PROFILING_GETTER_CALL_NEW
PROFILING_GETTER_CALL
};
static void SetUp();
@@ -708,7 +690,7 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(const SCTableReference& table_ref);
// Isolate::Current() as an external reference.
// Isolate as an external reference.
static ExternalReference isolate_address(Isolate* isolate);
// One-of-a-kind references. These references are not part of a general

38
deps/v8/src/ast.cc

@@ -599,7 +599,7 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!HEAP->InNewSpace(*candidate)) {
if (!lookup->isolate()->heap()->InNewSpace(*candidate)) {
target_ = candidate;
return true;
}
@@ -646,8 +646,15 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->value()->IsString());
Handle<String> name = Handle<String>::cast(key->value());
check_type_ = oracle->GetCallCheckType(this);
receiver_types_.Clear();
oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
if (check_type_ == RECEIVER_MAP_CHECK) {
oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
is_monomorphic_ = is_monomorphic_ && receiver_types_.length() > 0;
} else {
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
@@ -657,17 +664,8 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
}
}
#endif
check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
Handle<Map> map;
if (receiver_types_.length() > 0) {
ASSERT(check_type_ == RECEIVER_MAP_CHECK);
map = receiver_types_.at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
map = Handle<Map>(holder_->map());
}
Handle<Map> map = receiver_types_.first();
is_monomorphic_ = ComputeTarget(map, name);
}
}
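// [Editor's note.] check_type_ is now computed before the receiver types are
// gathered, so the two cases split cleanly: RECEIVER_MAP_CHECK asks the
// oracle for maps, while primitive checks synthesize the single holder map up
// front, letting the monomorphic path simply take receiver_types_.first().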
@@ -860,12 +858,13 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect
// parses to look as correct ones which is likely if the input and
// output formats are alike.
class RegExpUnparser: public RegExpVisitor {
class RegExpUnparser V8_FINAL : public RegExpVisitor {
public:
explicit RegExpUnparser(Zone* zone);
void VisitCharacterRange(CharacterRange that);
SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
void* data) V8_OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
@@ -963,12 +962,12 @@ void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
if (that->elements()->length() == 1) {
that->elements()->at(0).data.u_atom->Accept(this, data);
that->elements()->at(0).tree()->Accept(this, data);
} else {
stream()->Add("(!");
for (int i = 0; i < that->elements()->length(); i++) {
stream()->Add(" ");
that->elements()->at(i).data.u_atom->Accept(this, data);
that->elements()->at(i).tree()->Accept(this, data);
}
stream()->Add(")");
}
@@ -1084,7 +1083,7 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontOptimize); \
set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
}
@@ -1096,7 +1095,7 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontOptimize); \
set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
@@ -1182,7 +1181,6 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
Handle<String> Literal::ToString() {
if (value_->IsString()) return Handle<String>::cast(value_);
Factory* factory = Isolate::Current()->factory();
ASSERT(value_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
@@ -1194,7 +1192,7 @@ Handle<String> Literal::ToString() {
} else {
str = DoubleToCString(value_->Number(), buffer);
}
return factory->NewStringFromAscii(CStrVector(str));
return isolate_->factory()->NewStringFromAscii(CStrVector(str));
}

518
deps/v8/src/ast.h

File diff suppressed because it is too large

163
deps/v8/src/bootstrapper.cc

@@ -45,10 +45,6 @@
#include "extensions/statistics-extension.h"
#include "code-stubs.h"
#if defined(V8_I18N_SUPPORT)
#include "extensions/i18n/i18n-extension.h"
#endif
namespace v8 {
namespace internal {
@@ -98,7 +94,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
void Bootstrapper::Initialize(bool create_heap_objects) {
extensions_cache_.Initialize(create_heap_objects);
extensions_cache_.Initialize(isolate_, create_heap_objects);
}
@@ -106,9 +102,6 @@ void Bootstrapper::InitializeOncePerProcess() {
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
#if defined(V8_I18N_SUPPORT)
v8_i18n::Extension::Register();
#endif
}
@@ -147,7 +140,7 @@ void Bootstrapper::TearDown() {
delete_these_arrays_on_tear_down_ = NULL;
}
extensions_cache_.Initialize(false); // Yes, symmetrical
extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -491,7 +484,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// prototype, otherwise the missing initial_array_prototype will cause
// assertions during startup.
native_context()->set_initial_array_prototype(*prototype);
SetPrototype(object_fun, prototype);
Accessors::FunctionSetPrototype(object_fun, prototype);
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -1064,6 +1057,54 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_json_object(*json_object);
}
{ // -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
InstallFunction(
global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSizeWithInternalFields,
isolate->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_array_buffer_fun(*array_buffer_fun);
}
{ // -- T y p e d A r r a y s
Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
EXTERNAL_BYTE_ELEMENTS);
native_context()->set_int8_array_fun(*int8_fun);
Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
native_context()->set_uint8_array_fun(*uint8_fun);
Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
EXTERNAL_SHORT_ELEMENTS);
native_context()->set_int16_array_fun(*int16_fun);
Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
native_context()->set_uint16_array_fun(*uint16_fun);
Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
EXTERNAL_INT_ELEMENTS);
native_context()->set_int32_array_fun(*int32_fun);
Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
EXTERNAL_UNSIGNED_INT_ELEMENTS);
native_context()->set_uint32_array_fun(*uint32_fun);
Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
EXTERNAL_FLOAT_ELEMENTS);
native_context()->set_float_array_fun(*float_fun);
Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
EXTERNAL_DOUBLE_ELEMENTS);
native_context()->set_double_array_fun(*double_fun);
Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
EXTERNAL_PIXEL_ELEMENTS);
native_context()->set_uint8c_array_fun(*uint8c_fun);
Handle<JSFunction> data_view_fun =
InstallFunction(
global, "DataView", JS_DATA_VIEW_TYPE,
JSDataView::kSizeWithInternalFields,
isolate->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_data_view_fun(*data_view_fun);
}
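
With the two blocks above, ArrayBuffer, the typed-array constructors and DataView are now installed unconditionally in InitializeGlobal; the flag-guarded copies are deleted from InitializeExperimentalGlobal further down. A hedged embedder-side sketch against the 3.21-era API (error handling omitted):

v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
// No --harmony_typed_arrays flag is needed any more:
v8::Local<v8::Script> script = v8::Script::Compile(
    v8::String::New("new Uint8Array(new ArrayBuffer(8)).byteLength"));
v8::Local<v8::Value> result = script->Run();  // returns 8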
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
@@ -1095,12 +1136,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_string(),
factory->undefined_value(), DONT_ENUM,
Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
Object::FORCE_TAGGED, FORCE_FIELD));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_string(),
factory->undefined_value(), DONT_ENUM,
Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
Object::FORCE_TAGGED, FORCE_FIELD));
#ifdef DEBUG
LookupResult lookup(isolate);
@@ -1268,13 +1309,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
native_context()->set_embedder_data(*embedder_data);
{
// Initialize the random seed slot.
Handle<ByteArray> zeroed_byte_array(
factory->NewByteArray(kRandomStateSize));
native_context()->set_random_seed(*zeroed_byte_array);
memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
}
// Allocate the random seed slot.
Handle<ByteArray> random_seed = factory->NewByteArray(kRandomStateSize);
native_context()->set_random_seed(*random_seed);
}
@@ -1331,56 +1368,6 @@ void Genesis::InitializeExperimentalGlobal() {
}
}
if (FLAG_harmony_array_buffer) {
// -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
InstallFunction(
global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSizeWithInternalFields,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_array_buffer_fun(*array_buffer_fun);
}
if (FLAG_harmony_typed_arrays) {
// -- T y p e d A r r a y s
Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
EXTERNAL_BYTE_ELEMENTS);
native_context()->set_int8_array_fun(*int8_fun);
Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
native_context()->set_uint8_array_fun(*uint8_fun);
Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
EXTERNAL_SHORT_ELEMENTS);
native_context()->set_int16_array_fun(*int16_fun);
Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
native_context()->set_uint16_array_fun(*uint16_fun);
Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
EXTERNAL_INT_ELEMENTS);
native_context()->set_int32_array_fun(*int32_fun);
Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
EXTERNAL_UNSIGNED_INT_ELEMENTS);
native_context()->set_uint32_array_fun(*uint32_fun);
Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
EXTERNAL_FLOAT_ELEMENTS);
native_context()->set_float_array_fun(*float_fun);
Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
EXTERNAL_DOUBLE_ELEMENTS);
native_context()->set_double_array_fun(*double_fun);
Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
EXTERNAL_PIXEL_ELEMENTS);
native_context()->set_uint8c_array_fun(*uint8c_fun);
Handle<JSFunction> data_view_fun =
InstallFunction(
global, "DataView", JS_DATA_VIEW_TYPE,
JSDataView::kSizeWithInternalFields,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_data_view_fun(*data_view_fun);
}
if (FLAG_harmony_generators) {
// Create generator meta-objects and install them on the builtins object.
Handle<JSObject> builtins(native_context()->builtins());
@@ -1554,7 +1541,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
: top_context->global_object(),
isolate);
bool has_pending_exception;
Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
Execution::Call(isolate, fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
return true;
}
@@ -1632,7 +1619,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
true, true);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(array_function, prototype);
Accessors::FunctionSetPrototype(array_function, prototype);
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
@@ -1730,7 +1717,7 @@ bool Genesis::InstallNatives() {
Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(script_fun, prototype);
Accessors::FunctionSetPrototype(script_fun, prototype);
native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -1886,7 +1873,7 @@ bool Genesis::InstallNatives() {
Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(opaque_reference_fun, prototype);
Accessors::FunctionSetPrototype(opaque_reference_fun, prototype);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -2060,16 +2047,6 @@ bool Genesis::InstallExperimentalNatives() {
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_array_buffer &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native arraybuffer.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_typed_arrays &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native typedarray.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_generators &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native generator.js") == 0) {
@@ -2307,12 +2284,6 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
InstallExtension(isolate, "v8/statistics", &extension_states);
}
#if defined(V8_I18N_SUPPORT)
if (FLAG_enable_i18n) {
InstallExtension(isolate, "v8/i18n", &extension_states);
}
#endif
if (extensions == NULL) return true;
// Install required extensions
int count = v8::ImplementationUtilities::GetNameCount(extensions);
@@ -2600,8 +2571,8 @@ Genesis::Genesis(Isolate* isolate,
: isolate_(isolate),
active_(isolate->bootstrapper()) {
result_ = Handle<Context>::null();
// If V8 isn't running and cannot be initialized, just return.
if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
// If V8 cannot be initialized, just return.
if (!V8::Initialize(NULL)) return;
// Before creating the roots we must save the context and restore it
// on all function exits.
@@ -2616,7 +2587,7 @@ Genesis::Genesis(Isolate* isolate,
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
if (isolate->initialized_from_snapshot()) {
native_context_ = Snapshot::NewContextFromSnapshot();
native_context_ = Snapshot::NewContextFromSnapshot(isolate);
} else {
native_context_ = Handle<Context>();
}
@@ -2659,6 +2630,14 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
// Initially seed the per-context random number generator
// using the per-isolate random number generator.
uint32_t* state = reinterpret_cast<uint32_t*>(
native_context()->random_seed()->GetDataStartAddress());
do {
isolate->random_number_generator()->NextBytes(state, kRandomStateSize);
} while (state[0] == 0 || state[1] == 0);
result_ = native_context();
}
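
The seeding loop added above retries until both 32-bit words of the per-context seed are non-zero, because an all-zero state is a fixed point for shift-based generators. The same rejection idea as a minimal standalone sketch (std::mt19937 stands in for the per-isolate RandomNumberGenerator):

#include <cstdint>
#include <random>

void SeedContextState(std::mt19937& isolate_rng, uint32_t state[2]) {
  do {
    state[0] = static_cast<uint32_t>(isolate_rng());
    state[1] = static_cast<uint32_t>(isolate_rng());
  } while (state[0] == 0 || state[1] == 0);  // zero state would stay zero
}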

4
deps/v8/src/bootstrapper.h

@@ -44,8 +44,8 @@ class SourceCodeCache BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
void Initialize(bool create_heap_objects) {
cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
void Initialize(Isolate* isolate, bool create_heap_objects) {
cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
}
void Iterate(ObjectVisitor* v) {

51
deps/v8/src/builtins.cc

@@ -132,7 +132,6 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
MUST_USE_RESULT static MaybeObject* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
ASSERT(isolate == Isolate::Current()); \
args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
} \
@@ -304,11 +303,11 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
} else {
entry_size = kDoubleSize;
}
ASSERT(elms->map() != HEAP->fixed_cow_array_map());
ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
ASSERT(!HEAP->lo_space()->Contains(elms));
ASSERT(!heap->lo_space()->Contains(elms));
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
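The two STATIC_ASSERTs are what make left-trimming legal: map and length are the first two words of a FixedArray, so the header can be rewritten entry_size bytes further into the object and the vacated prefix turned into filler. A rough sketch of that trick, using real-looking helper names but none of the required write-barrier care:

Map* map = elms->map();
int new_length = elms->length() - 1;
Address old_start = elms->address();
Address new_start = old_start + entry_size;
FixedArrayBase* trimmed =
    FixedArrayBase::cast(HeapObject::FromAddress(new_start));
trimmed->set_map_no_write_barrier(map);             // word 0 of the new header
trimmed->set_length(new_length);                    // word 1 of the new header
heap->CreateFillerObjectAt(old_start, entry_size);  // hide the old prefix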
@@ -448,7 +447,8 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
argv[i] = args.at<Object>(i + 1);
}
bool pending_exception;
Handle<Object> result = Execution::Call(function,
Handle<Object> result = Execution::Call(isolate,
function,
args.receiver(),
argc,
argv.start(),
@@ -594,7 +594,7 @@ BUILTIN(ArrayPop) {
if (accessor->HasElement(array, array, new_length, elms_obj)) {
maybe_result = accessor->Get(array, array, new_length, elms_obj);
} else {
maybe_result = array->GetPrototype()->GetElement(len - 1);
maybe_result = array->GetPrototype()->GetElement(isolate, len - 1);
}
if (maybe_result->IsFailure()) return maybe_result;
MaybeObject* maybe_failure =
@@ -1253,8 +1253,8 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
if (!raw_call_data->IsUndefined()) {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
v8::InvocationCallback callback =
v8::ToCData<v8::InvocationCallback>(callback_obj);
v8::FunctionCallback callback =
v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
Object* result;
@@ -1322,8 +1322,8 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
ASSERT(!handler->IsUndefined());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();
v8::InvocationCallback callback =
v8::ToCData<v8::InvocationCallback>(callback_obj);
v8::FunctionCallback callback =
v8::ToCData<v8::FunctionCallback>(callback_obj);
// Get the data for the call and perform the callback.
Object* result;
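
Both hunks swap the deprecated v8::InvocationCallback for v8::FunctionCallback, the new-style API in which a callback returns void and reports its result through the callback info. A hedged embedder-side example of the new signature:

void Answer(const v8::FunctionCallbackInfo<v8::Value>& info) {
  // The old-style InvocationCallback returned a Handle<Value> instead.
  info.GetReturnValue().Set(v8::Integer::New(42));
}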
@@ -1461,6 +1461,16 @@ static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
}
static void Generate_StoreIC_PreMonomorphic(MacroAssembler* masm) {
StoreIC::GeneratePreMonomorphic(masm);
}
static void Generate_StoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
StoreIC::GeneratePreMonomorphic(masm);
}
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@@ -1546,6 +1556,16 @@ static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
}
static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
KeyedStoreIC::GeneratePreMonomorphic(masm);
}
static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
KeyedStoreIC::GeneratePreMonomorphic(masm);
}
static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
KeyedStoreIC::GenerateNonStrictArguments(masm);
}
@@ -1717,9 +1737,8 @@ void Builtins::InitBuiltinFunctionTable() {
}
void Builtins::SetUp(bool create_heap_objects) {
void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
ASSERT(!initialized_);
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
// Create a scope for the handles in the builtins.
@@ -1813,6 +1832,16 @@ const char* Builtins::Lookup(byte* pc) {
}
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
Handle<Code> Builtins::name() { \
Code** code_address = \

28
deps/v8/src/builtins.h

@@ -87,8 +87,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(InstallRecompiledCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@@ -103,7 +101,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
V(ConcurrentRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -122,7 +120,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED, \
V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -144,7 +142,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Slow, LOAD_IC, GENERIC, \
V(LoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
@@ -162,6 +160,8 @@ enum BuiltinExtraArguments {
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
@@ -174,6 +174,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
@@ -185,11 +187,15 @@ enum BuiltinExtraArguments {
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
kStrictMode) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
kStrictMode) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
@@ -211,6 +217,10 @@ enum BuiltinExtraArguments {
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -288,7 +298,7 @@ class Builtins {
// Generate all builtin code objects. Should be called once during
// isolate initialization.
void SetUp(bool create_heap_objects);
void SetUp(Isolate* isolate, bool create_heap_objects);
void TearDown();
// Garbage collection support.
@@ -370,8 +380,7 @@ class Builtins {
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_InRecompileQueue(MacroAssembler* masm);
static void Generate_InstallRecompiledCode(MacroAssembler* masm);
static void Generate_ParallelRecompile(MacroAssembler* masm);
static void Generate_ConcurrentRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
@@ -395,6 +404,9 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \

28
deps/v8/src/checks.cc

@@ -31,33 +31,19 @@
#include "platform.h"
// TODO(isolates): is it necessary to lift this?
static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::AllowHandleDereference allow_deref;
i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
fatal_error_handler_nesting_depth++;
// First time we try to print an error message
if (fatal_error_handler_nesting_depth < 2) {
i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
va_list arguments;
va_start(arguments, format);
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
i::OS::DumpBacktrace();
}
// First two times we may try to print a stack dump.
if (fatal_error_handler_nesting_depth < 3) {
if (i::FLAG_stack_trace_on_abort) {
// Call this one twice on double fault
i::Isolate::Current()->PrintStack(stderr);
}
}
i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
va_list arguments;
va_start(arguments, format);
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
i::OS::DumpBacktrace();
i::OS::Abort();
}

17
deps/v8/src/checks.h

@@ -31,6 +31,7 @@
#include <string.h>
#include "../include/v8stdint.h"
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -195,6 +196,20 @@ inline void CheckEqualsHelper(const char* file,
}
inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
int64_t expected,
const char* value_source,
int64_t value) {
if (expected == value) {
V8_Fatal(file, line,
"CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
expected_source, value_source, expected, value);
}
}
inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
@@ -232,7 +247,7 @@ inline void CheckNonEqualsHelper(const char* file,
// Use C++11 static_assert if possible, which gives error
// messages that are easier to understand on first sight.
#if __cplusplus >= 201103L
#if V8_HAS_CXX11_STATIC_ASSERT
#define STATIC_CHECK(test) static_assert(test, #test)
#else
// This is inspired by the static assertion facility in boost. This
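
STATIC_CHECK now keys off the V8_HAS_CXX11_STATIC_ASSERT feature macro from the new v8config.h rather than testing __cplusplus directly, which also covers compilers that provide static_assert without advertising full C++11. An illustration of what the C++11 branch buys:

STATIC_CHECK(sizeof(int32_t) == 4);
// With static_assert the compiler reports the failing expression verbatim:
//   error: static assertion failed: "sizeof(int32_t) == 4"
// The boost-style fallback still fails to compile on violation, but through a
// much less readable incomplete-template diagnostic.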

62
deps/v8/src/circular-queue-inl.h

@@ -33,30 +33,60 @@
namespace v8 {
namespace internal {
template<typename T, unsigned L>
SamplingCircularQueue<T, L>::SamplingCircularQueue()
: enqueue_pos_(buffer_),
dequeue_pos_(buffer_) {
}
template<typename T, unsigned L>
SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
}
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::Peek() {
MemoryBarrier();
if (Acquire_Load(&dequeue_pos_->marker) == kFull) {
return &dequeue_pos_->record;
}
return NULL;
}
template<typename T, unsigned L>
void SamplingCircularQueue<T, L>::Remove() {
Release_Store(&dequeue_pos_->marker, kEmpty);
dequeue_pos_ = Next(dequeue_pos_);
}
void* SamplingCircularQueue::Enqueue() {
if (producer_pos_->enqueue_pos == producer_pos_->next_chunk_pos) {
if (producer_pos_->enqueue_pos == buffer_ + buffer_size_) {
producer_pos_->next_chunk_pos = buffer_;
producer_pos_->enqueue_pos = buffer_;
}
Acquire_Store(producer_pos_->next_chunk_pos, kEnqueueStarted);
// Skip marker.
producer_pos_->enqueue_pos += 1;
producer_pos_->next_chunk_pos += chunk_size_;
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::StartEnqueue() {
MemoryBarrier();
if (Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
return &enqueue_pos_->record;
}
void* result = producer_pos_->enqueue_pos;
producer_pos_->enqueue_pos += record_size_;
return result;
return NULL;
}
void SamplingCircularQueue::WrapPositionIfNeeded(
SamplingCircularQueue::Cell** pos) {
if (*pos == buffer_ + buffer_size_) *pos = buffer_;
template<typename T, unsigned L>
void SamplingCircularQueue<T, L>::FinishEnqueue() {
Release_Store(&enqueue_pos_->marker, kFull);
enqueue_pos_ = Next(enqueue_pos_);
}
template<typename T, unsigned L>
typename SamplingCircularQueue<T, L>::Entry* SamplingCircularQueue<T, L>::Next(
Entry* entry) {
Entry* next = entry + 1;
if (next == &buffer_[L]) return buffer_;
return next;
}
} } // namespace v8::internal
#endif // V8_CIRCULAR_QUEUE_INL_H_

125
deps/v8/src/circular-queue.cc

@@ -1,125 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "circular-queue-inl.h"
namespace v8 {
namespace internal {
SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes,
size_t desired_chunk_size_in_bytes,
unsigned buffer_size_in_chunks)
: record_size_(record_size_in_bytes / sizeof(Cell)),
chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
record_size_in_bytes + sizeof(Cell)),
chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
buffer_size_(chunk_size_ * buffer_size_in_chunks),
buffer_(NewArray<Cell>(buffer_size_)) {
ASSERT(record_size_ * sizeof(Cell) == record_size_in_bytes);
ASSERT(chunk_size_ * sizeof(Cell) == chunk_size_in_bytes_);
ASSERT(buffer_size_in_chunks > 2);
// Mark all chunks as clear.
for (size_t i = 0; i < buffer_size_; i += chunk_size_) {
buffer_[i] = kClear;
}
// Layout producer and consumer position pointers each on their own
// cache lines to avoid cache lines thrashing due to simultaneous
// updates of positions by different processor cores.
const int positions_size =
RoundUp(1, kProcessorCacheLineSize) +
RoundUp(static_cast<int>(sizeof(ProducerPosition)),
kProcessorCacheLineSize) +
RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
kProcessorCacheLineSize);
positions_ = NewArray<byte>(positions_size);
producer_pos_ = reinterpret_cast<ProducerPosition*>(
RoundUp(positions_, kProcessorCacheLineSize));
producer_pos_->next_chunk_pos = buffer_;
producer_pos_->enqueue_pos = buffer_;
consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
positions_ + positions_size);
consumer_pos_->dequeue_chunk_pos = buffer_;
// The distance ensures that producer and consumer never step on
// each other's chunks and helps eviction of produced data from
// the CPU cache (having that chunk size is bigger than the cache.)
const size_t producer_consumer_distance = (2 * chunk_size_);
consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance;
consumer_pos_->dequeue_pos = NULL;
}
SamplingCircularQueue::~SamplingCircularQueue() {
DeleteArray(positions_);
DeleteArray(buffer_);
}
void* SamplingCircularQueue::StartDequeue() {
if (consumer_pos_->dequeue_pos != NULL) {
return consumer_pos_->dequeue_pos;
} else {
if (Acquire_Load(consumer_pos_->dequeue_chunk_poll_pos) != kClear) {
// Skip marker.
consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos + 1;
consumer_pos_->dequeue_end_pos =
consumer_pos_->dequeue_chunk_pos + chunk_size_;
return consumer_pos_->dequeue_pos;
} else {
return NULL;
}
}
}
void SamplingCircularQueue::FinishDequeue() {
consumer_pos_->dequeue_pos += record_size_;
if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
// Move to next chunk.
consumer_pos_->dequeue_pos = NULL;
*consumer_pos_->dequeue_chunk_pos = kClear;
consumer_pos_->dequeue_chunk_pos += chunk_size_;
WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
}
void SamplingCircularQueue::FlushResidualRecords() {
// Eliminate producer / consumer distance.
consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
}
} } // namespace v8::internal

72
deps/v8/src/circular-queue.h

@@ -28,6 +28,8 @@
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -35,67 +37,49 @@ namespace internal {
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
// previous unread records are overwritten. The queue is designed with
// StartEnqueue will return NULL. The queue is designed to avoid
// cache-line thrashing by preventing simultaneous reads and writes to
// adjacent memory locations.
//
// IMPORTANT: as a producer never checks for chunks cleanness, it is
// possible that it can catch up and overwrite a chunk that a consumer
// is currently reading, resulting in a corrupt record being read.
template<typename T, unsigned Length>
class SamplingCircularQueue {
public:
// Executed on the application thread.
SamplingCircularQueue(size_t record_size_in_bytes,
size_t desired_chunk_size_in_bytes,
unsigned buffer_size_in_chunks);
SamplingCircularQueue();
~SamplingCircularQueue();
// Enqueue returns a pointer to a memory location for storing the next
// record.
INLINE(void* Enqueue());
// StartEnqueue returns a pointer to a memory location for storing the next
// record or NULL if all entries are full at the moment.
T* StartEnqueue();
// Notifies the queue that the producer has completed writing data into the
// memory returned by StartEnqueue, so it can be passed to the consumer.
void FinishEnqueue();
// Executed on the consumer (analyzer) thread.
// StartDequeue returns a pointer to a memory location for retrieving
// the next record. After the record had been read by a consumer,
// FinishDequeue must be called. Until that moment, subsequent calls
// to StartDequeue will return the same pointer.
void* StartDequeue();
void FinishDequeue();
// Due to a presence of slipping between the producer and the consumer,
// the queue must be notified whether producing has been finished in order
// to process remaining records from the buffer.
void FlushResidualRecords();
typedef AtomicWord Cell;
// Retrieves, but does not remove, the head of this queue, returning NULL
// if this queue is empty. After the record has been read by a consumer,
// Remove must be called.
T* Peek();
void Remove();
private:
// Reserved values for the chunk marker (first Cell in each chunk).
// Reserved values for the entry marker.
enum {
kClear, // Marks clean (processed) chunks.
kEnqueueStarted // Marks chunks where enqueue started.
kEmpty, // Marks clean (processed) entries.
kFull // Marks entries already filled by the producer but not yet
// completely processed by the consumer.
};
struct ProducerPosition {
Cell* next_chunk_pos;
Cell* enqueue_pos;
};
struct ConsumerPosition {
Cell* dequeue_chunk_pos;
Cell* dequeue_chunk_poll_pos;
Cell* dequeue_pos;
Cell* dequeue_end_pos;
struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry {
Entry() : marker(kEmpty) {}
T record;
Atomic32 marker;
};
INLINE(void WrapPositionIfNeeded(Cell** pos));
Entry* Next(Entry* entry);
const size_t record_size_;
const size_t chunk_size_in_bytes_;
const size_t chunk_size_;
const size_t buffer_size_;
Cell* buffer_;
byte* positions_;
ProducerPosition* producer_pos_;
ConsumerPosition* consumer_pos_;
Entry buffer_[Length];
V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* enqueue_pos_;
V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* dequeue_pos_;
DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
};
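
The rewrite replaces the chunked queue (which silently overwrote unread records on overflow) with a fixed-length template of cache-line-aligned entries and a four-call contract: the producer pairs StartEnqueue/FinishEnqueue and now drops a sample when StartEnqueue returns NULL; the consumer pairs Peek/Remove. A minimal sketch of the same contract, using std::atomic in place of V8's atomicops:

#include <atomic>

template <typename T, unsigned Length>
class SpscSamplingQueue {
 public:
  T* StartEnqueue() {  // producer: NULL means the queue is full, drop sample
    Entry* e = &buffer_[enqueue_pos_];
    return e->full.load(std::memory_order_acquire) ? nullptr : &e->record;
  }
  void FinishEnqueue() {  // publish the record to the consumer
    buffer_[enqueue_pos_].full.store(true, std::memory_order_release);
    enqueue_pos_ = (enqueue_pos_ + 1) % Length;
  }
  T* Peek() {  // consumer: NULL means empty
    Entry* e = &buffer_[dequeue_pos_];
    return e->full.load(std::memory_order_acquire) ? &e->record : nullptr;
  }
  void Remove() {  // hand the slot back to the producer
    buffer_[dequeue_pos_].full.store(false, std::memory_order_release);
    dequeue_pos_ = (dequeue_pos_ + 1) % Length;
  }
 private:
  struct Entry {
    T record;
    std::atomic<bool> full{false};
  };
  Entry buffer_[Length];
  unsigned enqueue_pos_ = 0;  // touched only by the producer
  unsigned dequeue_pos_ = 0;  // touched only by the consumer
};

Keeping the two positions (and each entry) on separate cache lines, as the real class does with V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE), is what prevents the producer and consumer from thrashing each other's caches.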

309
deps/v8/src/code-stubs-hydrogen.cc

@@ -112,6 +112,13 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
HValue* code_object);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
void BuildInstallFromOptimizedCodeMap(HValue* js_function,
HValue* shared_info,
HValue* native_context);
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -210,8 +217,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
explicit CodeStubGraphBuilder(Stub* stub)
: CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
explicit CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
: CodeStubGraphBuilderBase(isolate, stub) {}
protected:
virtual HValue* BuildCodeStub() {
@@ -278,8 +285,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
template <class Stub>
static Handle<Code> DoGenerateCode(Stub* stub) {
Isolate* isolate = Isolate::Current();
static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
CodeStub::Major major_key =
static_cast<HydrogenCodeStub*>(stub)->MajorKey();
CodeStubInterfaceDescriptor* descriptor =
@@ -295,7 +301,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
ASSERT(descriptor->stack_parameter_count_ == NULL);
return stub->GenerateLightweightMissCode(isolate);
}
CodeStubGraphBuilder<Stub> builder(stub);
CodeStubGraphBuilder<Stub> builder(isolate, stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen();
}
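
This hunk shows the pattern applied throughout the upgrade (see also the Execution::Call and Builtins::SetUp hunks above): the hidden thread-local lookup through Isolate::Current() becomes an explicit Isolate* parameter supplied by the caller. Reduced to a toy, with Widget as a stand-in type:

Widget MakeWidget() {                  // before: hidden thread-local lookup
  Isolate* isolate = Isolate::Current();
  return Widget(isolate);
}

Widget MakeWidget(Isolate* isolate) {  // after: the dependency is explicit
  return Widget(isolate);
}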
@@ -327,8 +333,8 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
}
Handle<Code> ToNumberStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -352,7 +358,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate, NULL);
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
@@ -394,8 +400,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
}
Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -441,8 +447,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> FastCloneShallowObjectStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -487,23 +493,23 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
}
Handle<Code> CreateAllocationSiteStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> CreateAllocationSiteStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
false, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -513,12 +519,12 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
Handle<Code> LoadFieldStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -528,19 +534,19 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
Handle<Code> KeyedLoadFieldStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2), NULL,
GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
@@ -548,8 +554,8 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
}
Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> KeyedStoreFastElementStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -567,8 +573,8 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
}
Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> TransitionElementsKindStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
@@ -702,8 +708,8 @@ HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
}
Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -717,8 +723,9 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
}
Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode(
Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -731,8 +738,8 @@ HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
}
Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -744,8 +751,9 @@ HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
}
Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode(
Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -757,8 +765,9 @@ HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
}
Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode(
Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -770,8 +779,9 @@ HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
}
Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode(
Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -796,8 +806,8 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
}
Handle<Code> CompareNilICStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -815,8 +825,8 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
}
Handle<Code> ToBooleanStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> ToBooleanStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -864,8 +874,8 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
}
Handle<Code> StoreGlobalStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> StoreGlobalStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -878,8 +888,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
Add<HDeoptimize>("Deopt due to --trace-elements-transitions",
Deoptimizer::EAGER);
Add<HDeoptimize>("Tracing elements transitions", Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
@@ -888,19 +897,209 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
casted_stub()->to_kind(),
casted_stub()->is_jsarray());
BuildUncheckedMonomorphicElementAccess(object, key, value, NULL,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
true, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
BuildUncheckedMonomorphicElementAccess(object, key, value,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
true, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
}
return value;
}
Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
return DoGenerateCode(this);
Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
HValue* js_function,
HValue* native_context,
HValue* code_object) {
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized(),
context());
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
// discarded on major GC anyway.
Add<HStoreCodeEntry>(js_function, code_object);
// Now link a function into a list of optimized functions.
HValue* optimized_functions_list = Add<HLoadNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
optimized_functions_list);
// This store is the only one that should have a write barrier.
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
}
void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
HValue* shared_info) {
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
graph()->GetConstantUndefined());
HValue* code_object = Add<HLoadNamedField>(shared_info,
HObjectAccess::ForCodeOffset());
Add<HStoreCodeEntry>(js_function, code_object);
}
void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* js_function,
HValue* shared_info,
HValue* native_context) {
Counters* counters = isolate()->counters();
IfBuilder is_optimized(this);
HInstruction* optimized_map = Add<HLoadNamedField>(shared_info,
HObjectAccess::ForOptimizedCodeMap());
HValue* null_constant = Add<HConstant>(0);
is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
is_optimized.Then();
{
BuildInstallCode(js_function, shared_info);
}
is_optimized.Else();
{
AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
// optimized_map points to fixed array of 3-element entries
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
Label install_optimized;
HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFirstContextSlot());
IfBuilder already_in(this);
already_in.If<HCompareObjectEqAndBranch>(native_context,
first_context_slot);
already_in.Then();
{
HValue* code_object = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFirstCodeSlot());
BuildInstallOptimizedCode(js_function, native_context, code_object);
}
already_in.Else();
{
HValue* shared_function_entry_length =
Add<HConstant>(SharedFunctionInfo::kEntryLength);
LoopBuilder loop_builder(this,
context(),
LoopBuilder::kPostDecrement,
shared_function_entry_length);
HValue* array_length = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFixedArrayLength());
HValue* key = loop_builder.BeginBody(array_length,
graph()->GetConstant0(),
Token::GT);
{
// Iterate through the rest of the map backwards.
// Do not double-check the first entry.
HValue* second_entry_index =
Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
IfBuilder restore_check(this);
restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
Token::EQ);
restore_check.Then();
{
// Store the unoptimized code
BuildInstallCode(js_function, shared_info);
loop_builder.Break();
}
restore_check.Else();
{
HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key,
shared_function_entry_length));
HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
IfBuilder done_check(this);
done_check.If<HCompareObjectEqAndBranch>(native_context,
keyed_lookup);
done_check.Then();
{
// Hit: fetch the optimized code.
HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(),
keyed_minus, graph()->GetConstant1()));
HValue* code_object = Add<HLoadKeyed>(optimized_map,
keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
BuildInstallOptimizedCode(js_function, native_context, code_object);
// Fall out of the loop
loop_builder.Break();
}
done_check.Else();
done_check.End();
}
restore_check.End();
}
loop_builder.EndBody();
}
already_in.End();
}
is_optimized.End();
}
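
The graph built above searches the optimized code map exactly as the comment describes: the first entry is checked up front, then a backwards walk proceeds in steps of kEntryLength. The same lookup in plain C++ over a hypothetical flattened map (3-field entries of native context, optimized code, literals):

struct CodeMapEntry {
  Object* context;
  Object* code;
  Object* literals;
};

Object* LookupOptimized(CodeMapEntry* entries, int count,
                        Object* native_context) {
  for (int i = count - 1; i >= 0; --i) {  // newest entries first
    if (entries[i].context == native_context) return entries[i].code;
  }
  return NULL;  // miss: install the shared (unoptimized) code instead
}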
template<>
HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Counters* counters = isolate()->counters();
Factory* factory = isolate()->factory();
HInstruction* empty_fixed_array =
Add<HConstant>(factory->empty_fixed_array());
HValue* shared_info = GetParameter(0);
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
AddIncrementCounter(counters->fast_new_closure_total(), context());
int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->is_generator());
// Compute the function map in the current native context and set that
// as the map of the allocated object.
HInstruction* native_context = BuildGetNativeContext();
HInstruction* map_slot_value = Add<HLoadNamedField>(native_context,
HObjectAccess::ForContextSlot(map_index));
Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
// Initialize the rest of the function.
Add<HStoreNamedField>(js_function, HObjectAccess::ForPropertiesPointer(),
empty_fixed_array);
Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
empty_fixed_array);
Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
empty_fixed_array);
Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
graph()->GetConstantHole());
Add<HStoreNamedField>(js_function,
HObjectAccess::ForSharedFunctionInfoPointer(),
shared_info);
Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
shared_info);
Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
context());
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
// But first check if there is an optimized version for our context.
if (FLAG_cache_optimized_code) {
BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
} else {
BuildInstallCode(js_function, shared_info);
}
return js_function;
}
Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}

34
deps/v8/src/code-stubs.cc

@@ -46,7 +46,7 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
deoptimization_handler_(NULL),
miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()),
miss_handler_(),
has_miss_handler_(false) { }
@@ -93,8 +93,7 @@ Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
}
Handle<Code> PlatformCodeStub::GenerateCode() {
Isolate* isolate = Isolate::Current();
Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
Factory* factory = isolate->factory();
// Generate the new code.
@@ -137,14 +136,14 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
if (UseSpecialCache()
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
return Handle<Code>(code);
}
{
HandleScope scope(isolate);
Handle<Code> new_object = GenerateCode();
Handle<Code> new_object = GenerateCode(isolate);
new_object->set_major_key(MajorKey());
FinishCode(new_object);
RecordCodeGeneration(*new_object, isolate);
@@ -596,19 +595,9 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
store_mode_);
}
break;
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
store_mode_);
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -618,7 +607,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
@@ -742,8 +731,9 @@ void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer) {
FunctionEntryHook entry_hook = Isolate::Current()->function_entry_hook();
intptr_t stack_pointer,
Isolate* isolate) {
FunctionEntryHook entry_hook = isolate->function_entry_hook();
ASSERT(entry_hook != NULL);
entry_hook(function, stack_pointer);
}
@@ -769,6 +759,12 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
}
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
FastNewClosureStub stub(STRICT_MODE, false);
InstallDescriptor(isolate, &stub);
}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: argument_count_(ANY) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);

136
deps/v8/src/code-stubs.h

@@ -102,7 +102,6 @@ namespace internal {
V(GetProperty) \
V(SetProperty) \
V(InvokeBuiltin) \
V(RegExpCEntry) \
V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
@@ -159,14 +158,14 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
bool CompilingCallsToThisStubIsGCSafe(Isolate* isolate) {
bool is_pregenerated = IsPregenerated();
bool is_pregenerated = IsPregenerated(isolate);
Code* code = NULL;
CHECK(!is_pregenerated || FindCodeInCache(&code, isolate));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
virtual bool IsPregenerated() { return false; }
virtual bool IsPregenerated(Isolate* isolate) { return false; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void GenerateFPStubs(Isolate* isolate);
@@ -205,7 +204,7 @@ class CodeStub BASE_EMBEDDED {
static bool CanUseFPRegisters();
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode() = 0;
virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
// Returns whether the code generated for this stub needs to be allocated as
@@ -263,7 +262,7 @@ class CodeStub BASE_EMBEDDED {
class PlatformCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@@ -353,7 +352,7 @@ class HydrogenCodeStub : public CodeStub {
CodeStubInterfaceDescriptor* descriptor) = 0;
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode() = 0;
virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
virtual int NotMissMinorKey() = 0;
@@ -449,35 +448,11 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
class StackCheckStub : public PlatformCodeStub {
public:
StackCheckStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return StackCheck; }
int MinorKey() { return 0; }
};
class InterruptStub : public PlatformCodeStub {
public:
InterruptStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return Interrupt; }
int MinorKey() { return 0; }
};
class ToNumberStub: public HydrogenCodeStub {
public:
ToNumberStub() { }
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -489,20 +464,29 @@ class ToNumberStub: public HydrogenCodeStub {
};
class FastNewClosureStub : public PlatformCodeStub {
class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
: language_mode_(language_mode),
is_generator_(is_generator) { }
void Generate(MacroAssembler* masm);
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
static void InstallDescriptors(Isolate* isolate);
LanguageMode language_mode() const { return language_mode_; }
bool is_generator() const { return is_generator_; }
private:
class StrictModeBits: public BitField<bool, 0, 1> {};
class IsGeneratorBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return FastNewClosure; }
int MinorKey() {
int NotMissMinorKey() {
return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
IsGeneratorBits::encode(is_generator_);
}
@@ -554,7 +538,7 @@ class StoreGlobalStub : public HydrogenCodeStub {
IsConstantBits::encode(is_constant);
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -636,7 +620,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
return LAST_ELEMENTS_KIND;
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -676,7 +660,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
int length() const { return length_; }
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -696,9 +680,9 @@ class CreateAllocationSiteStub : public HydrogenCodeStub {
public:
explicit CreateAllocationSiteStub() { }
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual bool IsPregenerated() { return true; }
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
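
V8_OVERRIDE comes from the v8config.h header added in this upgrade; the idea (sketched here, not the verbatim definition) is a feature-gated alias for the C++11 keyword:

#if V8_HAS_CXX11_OVERRIDE
# define V8_OVERRIDE override  // compiler verifies a base method is overridden
#else
# define V8_OVERRIDE           // harmlessly empty on pre-C++11 compilers
#endif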
@@ -752,6 +736,13 @@ class InstanceofStub: public PlatformCodeStub {
};
enum AllocationSiteOverrideMode {
DONT_OVERRIDE,
DISABLE_ALLOCATION_SITES,
LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
};
class ArrayConstructorStub: public PlatformCodeStub {
public:
enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
@@ -761,6 +752,9 @@ class ArrayConstructorStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
private:
void GenerateDispatchToArrayStub(MacroAssembler* masm,
AllocationSiteOverrideMode mode);
virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
virtual int MinorKey() { return argument_count_; }
@@ -913,7 +907,7 @@ class LoadFieldStub: public HandlerStub {
Initialize(Code::LOAD_IC, inobject, index, representation);
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -982,7 +976,7 @@ class KeyedLoadFieldStub: public LoadFieldStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
private:
virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
@@ -1253,7 +1247,7 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual Code::ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
@@ -1321,7 +1315,7 @@ class CEntryStub : public PlatformCodeStub {
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
virtual bool IsPregenerated();
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
private:
@@ -1333,6 +1327,7 @@ class CEntryStub : public PlatformCodeStub {
bool always_allocate_scope);
// Number of pointers/values returned.
Isolate* isolate_;
const int result_size_;
SaveFPRegsMode save_doubles_;
@@ -1704,11 +1699,13 @@ class DoubleToIStub : public PlatformCodeStub {
DoubleToIStub(Register source,
Register destination,
int offset,
bool is_truncating) : bit_field_(0) {
bool is_truncating,
bool skip_fastpath = false) : bit_field_(0) {
bit_field_ = SourceRegisterBits::encode(source.code_) |
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating);
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath);
}
Register source() {
@@ -1725,12 +1722,18 @@ class DoubleToIStub : public PlatformCodeStub {
return IsTruncatingBits::decode(bit_field_);
}
bool skip_fastpath() {
return SkipFastPathBits::decode(bit_field_);
}
int offset() {
return OffsetBits::decode(bit_field_);
}
void Generate(MacroAssembler* masm);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
@ -1743,6 +1746,8 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
class OffsetBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
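The new skip_fastpath flag is packed into the same bit_field_ word by appending SkipFastPathBits after OffsetBits, so the stub's MinorKey (and hence its code-cache key) still fits in one integer. For readers unfamiliar with V8's BitField template, here is a minimal stand-in that reproduces the encode/decode arithmetic with the layout above; it is an illustration, not V8's actual header.

#include <cassert>
#include <cstdint>

// Minimal stand-in for v8::internal::BitField<T, shift, size>.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

// Same layout as above with kBitsPerRegisterNumber == 6: two register
// numbers, the truncation bit, a 3-bit offset, then the new flag.
typedef BitField<int, 0, 6> SourceRegisterBits;
typedef BitField<int, 6, 6> DestinationRegisterBits;
typedef BitField<bool, 12, 1> IsTruncatingBits;
typedef BitField<int, 13, 3> OffsetBits;
typedef BitField<bool, 16, 1> SkipFastPathBits;

int main() {
  uint32_t bit_field = SourceRegisterBits::encode(3) |
                       DestinationRegisterBits::encode(5) |
                       IsTruncatingBits::encode(true) |
                       OffsetBits::encode(4) |
                       SkipFastPathBits::encode(true);
  assert(SkipFastPathBits::decode(bit_field));
  assert(OffsetBits::decode(bit_field) == 4);
  return 0;
}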
@ -1768,7 +1773,7 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
return ElementsKindBits::decode(bit_field_);
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -1808,7 +1813,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
return StoreModeBits::decode(bit_field_);
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -1843,7 +1848,7 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
return ToKindBits::decode(bit_field_);
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -1868,13 +1873,6 @@ enum ContextCheckMode {
};
enum AllocationSiteOverrideMode {
DONT_OVERRIDE,
DISABLE_ALLOCATION_SITES,
LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
};
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
@ -1882,7 +1880,8 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
ASSERT(!(FLAG_track_allocation_sites &&
override_mode == DISABLE_ALLOCATION_SITES) ||
AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
AllocationSiteOverrideModeBits::encode(override_mode) |
@ -1901,7 +1900,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
return ContextCheckModeBits::decode(bit_field_);
}
virtual bool IsPregenerated() {
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE {
// We only pre-generate stubs that verify correct context
return context_mode() == CONTEXT_CHECK_REQUIRED;
}
@ -1939,7 +1938,7 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -1961,7 +1960,7 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -1983,7 +1982,7 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -2002,7 +2001,7 @@ class InternalArrayConstructorStubBase : public HydrogenCodeStub {
kind_ = kind;
}
virtual bool IsPregenerated() { return true; }
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
@ -2026,7 +2025,7 @@ class InternalArrayNoArgumentConstructorStub : public
explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -2045,7 +2044,7 @@ class InternalArraySingleArgumentConstructorStub : public
explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -2064,7 +2063,7 @@ class InternalArrayNArgumentsConstructorStub : public
explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -2153,7 +2152,7 @@ class ToBooleanStub: public HydrogenCodeStub {
bool UpdateStatus(Handle<Object> object);
Types GetTypes() { return types_; }
virtual Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
@ -2213,7 +2212,7 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
bool is_jsarray() const { return is_jsarray_; }
KeyedAccessStoreMode store_mode() const { return store_mode_; }
Handle<Code> GenerateCode();
virtual Handle<Code> GenerateCode(Isolate* isolate);
void InitializeInterfaceDescriptor(
Isolate* isolate,
@ -2266,7 +2265,7 @@ class StubFailureTrampolineStub : public PlatformCodeStub {
explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
: fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
virtual bool IsPregenerated() { return true; }
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
@ -2301,7 +2300,8 @@ class ProfileEntryHookStub : public PlatformCodeStub {
private:
static void EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer);
intptr_t stack_pointer,
Isolate* isolate);
Major MajorKey() { return ProfileEntryHook; }
int MinorKey() { return 0; }
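A pattern repeated throughout this header: a virtual such as IsPregenerated() gains an Isolate* parameter, and the override is simultaneously annotated with V8_OVERRIDE (which v8config.h, also in this commit, expands to the C++11 override specifier where supported). The annotation turns a missed signature update into a compile error instead of a silently non-overriding overload. A hedged sketch of the failure mode it guards against, with illustrative class names:

// Sketch only: CodeStub/MyStub here are illustrations, and `override`
// is what V8_OVERRIDE expands to on C++11 compilers.
class Isolate;

class CodeStub {
 public:
  virtual ~CodeStub() {}
  // New signature: the isolate is threaded through explicitly.
  virtual bool IsPregenerated(Isolate*) { return false; }
};

class MyStub : public CodeStub {
 public:
  // With `override`, accidentally keeping the old parameterless
  // signature here would be a compile error; without it, the stale
  // method would merely be an overload that virtual calls never reach.
  virtual bool IsPregenerated(Isolate*) override { return true; }
};

int main() {
  MyStub stub;
  CodeStub* base = &stub;
  return base->IsPregenerated(nullptr) ? 0 : 1;  // dispatches to MyStub
}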

17
deps/v8/src/codegen.cc

@ -89,12 +89,12 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter().PrintProgram(info->function()));
PrettyPrinter(info->isolate()).PrintProgram(info->function()));
}
if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
AstPrinter().PrintProgram(info->function()));
AstPrinter(info->isolate()).PrintProgram(info->function()));
}
#endif // DEBUG
}
@ -114,11 +114,9 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
false, is_crankshafted);
if (!code.is_null()) {
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
code->set_prologue_offset(info->prologue_offset());
}
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
code->set_prologue_offset(info->prologue_offset());
return code;
}
@ -126,7 +124,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
AllowDeferredHandleDereference allow_deference_for_print_code;
bool print_code = Isolate::Current()->bootstrapper()->IsActive()
bool print_code = info->isolate()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code ||
(info->IsStub() && FLAG_print_code_stubs) ||
@ -173,9 +171,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
}
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
bool CodeGenerator::ShouldGenerateLog(Isolate* isolate, Expression* type) {
ASSERT(type != NULL);
Isolate* isolate = Isolate::Current();
if (!isolate->logger()->is_logging() &&
!isolate->cpu_profiler()->is_profiling()) {
return false;
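The ShouldGenerateLog() change is representative of the whole commit: rather than fetching the current isolate from thread-local storage via Isolate::Current(), callers now pass it down explicitly. A minimal sketch of the two styles, using stand-in types rather than V8's real classes:

#include <cstdio>

// Stand-ins, not V8's real classes.
struct Isolate {
  bool logging;
  static Isolate* Current();  // models the thread-local accessor
};

static thread_local Isolate current_isolate = {true};
Isolate* Isolate::Current() { return &current_isolate; }

// Old style: hidden dependency, one TLS read on every call.
static bool ShouldLogImplicit() { return Isolate::Current()->logging; }

// New style, as in ShouldGenerateLog(Isolate*, Expression*): the caller,
// which typically already holds the isolate, passes it down.
static bool ShouldLogExplicit(Isolate* isolate) { return isolate->logging; }

int main() {
  Isolate* isolate = Isolate::Current();  // one lookup at the boundary
  std::printf("%d %d\n", ShouldLogImplicit(), ShouldLogExplicit(isolate));
  return 0;
}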

186
deps/v8/src/compiler.cc

@ -58,7 +58,8 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
osr_ast_id_(BailoutId::None()) {
osr_ast_id_(BailoutId::None()),
osr_pc_offset_(0) {
Initialize(script->GetIsolate(), BASE, zone);
}
@ -68,7 +69,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
: flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()) {
osr_ast_id_(BailoutId::None()),
osr_pc_offset_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@ -80,7 +82,8 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()) {
osr_ast_id_(BailoutId::None()),
osr_pc_offset_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@ -90,7 +93,8 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()) {
osr_ast_id_(BailoutId::None()),
osr_pc_offset_(0) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@ -119,7 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
mode_ = STUB;
return;
}
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
mode_ = isolate->use_crankshaft() ? mode : NONOPT;
abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
@ -226,18 +230,12 @@ bool CompilationInfo::ShouldSelfOptimize() {
return FLAG_self_optimization &&
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->flags()->Contains(kDontOptimize) &&
!function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
void CompilationInfo::AbortOptimization() {
Handle<Code> code(shared_info()->code());
SetCode(code);
}
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
@ -248,7 +246,7 @@ void CompilationInfo::AbortOptimization() {
// break points has actually been set.
static bool IsDebuggerActive(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
return V8::UseCrankshaft() ?
return isolate->use_crankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
#else
@ -266,10 +264,9 @@ void OptimizingCompiler::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
double ms_creategraph =
static_cast<double>(time_taken_to_create_graph_) / 1000;
double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
@ -317,14 +314,13 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
ASSERT(V8::UseCrankshaft());
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
// We should never arrive here if there is no code object on the
// shared function object.
Handle<Code> code(info()->shared_info()->code());
ASSERT(code->kind() == Code::FUNCTION);
ASSERT(info()->shared_info()->code()->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
@ -334,7 +330,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
if (AlwaysFullCompiler(isolate())) {
info()->SetCode(code);
info()->AbortOptimization();
return SetLastStatus(BAILED_OUT);
}
@ -362,16 +358,16 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
if (!info()->osr_ast_id().IsNone() &&
if (info()->is_osr() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
info()->set_bailout_reason(kTooManyParametersLocals);
return AbortOptimization();
}
// Take --hydrogen-filter into account.
if (!info()->closure()->PassesHydrogenFilter()) {
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
info()->AbortOptimization();
return SetLastStatus(BAILED_OUT);
}
// Recompile the unoptimized version of the code if the current version
@ -380,9 +376,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// performance of the hydrogen-based compiler.
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
int64_t start_ticks = 0;
ElapsedTimer timer;
if (FLAG_hydrogen_stats) {
start_ticks = OS::Ticks();
timer.Start();
}
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
@ -401,8 +397,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
if (FLAG_hydrogen_stats) {
int64_t ticks = OS::Ticks() - start_ticks;
isolate()->GetHStatistics()->IncrementFullCodeGen(ticks);
isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
}
}
@ -411,7 +406,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// optimizable marker in the code object and optimize anyway. This
// is safe as long as the unoptimized code has deoptimization
// support.
ASSERT(FLAG_always_opt || code->optimizable());
ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable());
ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
@ -503,12 +498,14 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
info()->SetCode(optimized_code);
}
RecordOptimizationStats();
// Add to the weak list of optimized code objects.
info()->context()->native_context()->AddOptimizedCode(*info()->code());
return SetLastStatus(SUCCEEDED);
}
static bool GenerateCode(CompilationInfo* info) {
bool is_optimizing = V8::UseCrankshaft() &&
bool is_optimizing = info->isolate()->use_crankshaft() &&
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
@ -728,7 +725,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
script->set_data(script_data.is_null() ? HEAP->undefined_value()
script->set_data(script_data.is_null() ? isolate->heap()->undefined_value()
: *script_data);
// Compile the function and add it to the cache.
@ -745,8 +742,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
compilation_cache->PutScript(source, context, result);
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
if (result->ic_age() != isolate->heap()->global_ic_age()) {
result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@ -808,8 +805,8 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
}
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
if (result->ic_age() != isolate->heap()->global_ic_age()) {
result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@ -843,18 +840,18 @@ static bool InstallFullCode(CompilationInfo* info) {
// Check the function has compiled code.
ASSERT(shared->is_compiled());
shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
shared->set_dont_optimize_reason(lit->dont_optimize_reason());
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
if (V8::UseCrankshaft() &&
if (info->isolate()->use_crankshaft() &&
!function.is_null() &&
!shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt &&
!Isolate::Current()->DebuggerHasBreakPoints()) {
!info->isolate()->DebuggerHasBreakPoints()) {
CompilationInfoWithZone optimized(function);
optimized.SetOptimizing(BailoutId::None());
return Compiler::CompileLazy(&optimized);
@ -884,9 +881,10 @@ static void InstallCodeCommon(CompilationInfo* info) {
static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
if (FLAG_cache_optimized_code &&
info->osr_ast_id().IsNone() &&
code->kind() == Code::OPTIMIZED_FUNCTION) {
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
// Cache non-OSR optimized code.
if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
@ -898,9 +896,10 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
if (FLAG_cache_optimized_code &&
info->osr_ast_id().IsNone() &&
info->IsOptimizing()) {
if (!info->IsOptimizing()) return false; // Nothing to look up.
// Lookup non-OSR optimized code.
if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<JSFunction> function = info->closure();
ASSERT(!function.is_null());
@ -956,12 +955,15 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
InstallCodeCommon(info);
if (info->IsOptimizing()) {
// Optimized code successfully created.
Handle<Code> code = info->code();
ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
// TODO(titzer): Only replace the code if it was not an OSR compile.
info->closure()->ReplaceCode(*code);
InsertCodeIntoOptimizedCodeMap(info);
return true;
} else {
} else if (!info->is_osr()) {
// Compilation failed. Replace with full code if not OSR compile.
return InstallFullCode(info);
}
}
@ -972,38 +974,55 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
void Compiler::RecompileParallel(Handle<JSFunction> closure) {
ASSERT(closure->IsMarkedForParallelRecompilation());
bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
uint32_t osr_pc_offset) {
bool compiling_for_osr = (osr_pc_offset != 0);
Isolate* isolate = closure->GetIsolate();
// Here we prepare compile data for the parallel recompilation thread, but
// Here we prepare compile data for the concurrent recompilation thread, but
// this still happens synchronously and interrupts execution.
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
closure->PrintName();
PrintF(" on next run.\n");
}
return;
return false;
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
Handle<SharedFunctionInfo> shared = info->shared_info();
if (compiling_for_osr) {
BailoutId osr_ast_id =
shared->code()->TranslatePcOffsetToAstId(osr_pc_offset);
ASSERT(!osr_ast_id.IsNone());
info->SetOptimizing(osr_ast_id);
info->set_osr_pc_offset(osr_pc_offset);
if (FLAG_trace_osr) {
PrintF("[COSR - attempt to queue ");
closure->PrintName();
PrintF(" at AST id %d]\n", osr_ast_id.ToInt());
}
} else {
info->SetOptimizing(BailoutId::None());
}
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
info->SetOptimizing(BailoutId::None());
{
CompilationHandleScope handle_scope(*info);
if (InstallCodeFromOptimizedCodeMap(*info)) {
return;
if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(*info)) {
return true;
}
if (Parser::Parse(*info)) {
@ -1020,6 +1039,8 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
info.Detach();
shared->code()->set_profiler_ticks(0);
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
ASSERT(!isolate->has_pending_exception());
return true;
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
@ -1028,38 +1049,26 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
}
if (shared->code()->back_edges_patched_for_osr()) {
// At this point we either put the function on recompilation queue or
// aborted optimization. In either case we want to continue executing
// the unoptimized code without running into OSR. If the unoptimized
// code has been patched for OSR, unpatch it.
InterruptStub interrupt_stub;
Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
Handle<Code> replacement_code =
isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertInterruptCode(shared->code(),
*interrupt_code,
*replacement_code);
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return false;
}
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
Handle<Code> Compiler::InstallOptimizedCode(
OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
info->AbortOptimization();
InstallFullCode(*info);
if (FLAG_trace_parallel_recompilation) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** aborting optimization for ");
info->closure()->PrintName();
PrintF(" as it has been disabled.\n");
}
ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
return;
ASSERT(!info->closure()->IsInRecompileQueue());
return Handle<Code>::null();
}
Isolate* isolate = info->isolate();
@ -1093,19 +1102,21 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
info->closure()->context()->native_context()) == -1) {
InsertCodeIntoOptimizedCodeMap(*info);
}
if (FLAG_trace_parallel_recompilation) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Optimized code for ");
info->closure()->PrintName();
PrintF(" installed.\n");
}
} else {
info->SetCode(Handle<Code>(info->shared_info()->code()));
info->AbortOptimization();
InstallFullCode(*info);
}
// Optimized code is finally replacing unoptimized code. Reset the latter's
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
ASSERT(!info->closure()->IsInRecompileQueue());
return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
: Handle<Code>::null();
}
@ -1193,7 +1204,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
function_info->set_ast_node_count(lit->ast_node_count());
function_info->set_is_function(lit->is_function());
function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
function_info->set_is_generator(lit->is_generator());
@ -1216,6 +1227,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
return;
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
int column_num =
GetScriptColumnNumber(script, shared->start_position()) + 1;
USE(line_num);
if (script->name()->IsString()) {
PROFILE(info->isolate(),
@ -1224,7 +1237,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*shared,
info,
String::cast(script->name()),
line_num));
line_num,
column_num));
} else {
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
@ -1232,7 +1246,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*shared,
info,
info->isolate()->heap()->empty_string(),
line_num));
line_num,
column_num));
}
}
@ -1247,7 +1262,7 @@ CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
: name_(name), info_(info), zone_(info->isolate()) {
if (FLAG_hydrogen_stats) {
info_zone_start_allocation_size_ = info->zone()->allocation_size();
start_ticks_ = OS::Ticks();
timer_.Start();
}
}
@ -1256,8 +1271,7 @@ CompilationPhase::~CompilationPhase() {
if (FLAG_hydrogen_stats) {
unsigned size = zone()->allocation_size();
size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
int64_t ticks = OS::Ticks() - start_ticks_;
isolate()->GetHStatistics()->SaveTiming(name_, ticks, size);
isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
}
}
@ -1265,9 +1279,11 @@ CompilationPhase::~CompilationPhase() {
bool CompilationPhase::ShouldProduceTraceOutput() const {
// Trace if the appropriate trace flag is set and the phase name's first
// character is in the FLAG_trace_phase command line parameter.
bool tracing_on = info()->IsStub() ?
FLAG_trace_hydrogen_stubs :
FLAG_trace_hydrogen;
AllowHandleDereference allow_deref;
bool tracing_on = info()->IsStub()
? FLAG_trace_hydrogen_stubs
: (FLAG_trace_hydrogen &&
info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
return (tracing_on &&
OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
}

54
deps/v8/src/compiler.h

@ -60,11 +60,11 @@ class CompilationInfo {
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
virtual ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
Isolate* isolate() const {
return isolate_;
}
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_ast_id_.IsNone(); }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@ -235,9 +235,10 @@ class CompilationInfo {
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
// Disable all optimization attempts of this info for the rest of the
// current compilation pipeline.
void AbortOptimization();
// Reset code to the unoptimized version when optimization is aborted.
void AbortOptimization() {
SetCode(handle(shared_info()->code()));
}
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
@ -307,6 +308,14 @@ class CompilationInfo {
return abort_due_to_dependency_;
}
void set_osr_pc_offset(uint32_t pc_offset) {
osr_pc_offset_ = pc_offset;
}
bool HasSameOsrEntry(Handle<JSFunction> function, uint32_t pc_offset) {
return osr_pc_offset_ == pc_offset && function.is_identical_to(closure_);
}
protected:
CompilationInfo(Handle<Script> script,
Zone* zone);
@ -334,7 +343,7 @@ class CompilationInfo {
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
ASSERT(isolate()->use_crankshaft());
mode_ = mode;
}
@ -401,6 +410,9 @@ class CompilationInfo {
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
// The pc_offset corresponding to osr_ast_id_ in unoptimized code.
// We can look this up in the back edge table, but cache it for quick access.
uint32_t osr_pc_offset_;
// Flag whether compilation needs to be aborted due to dependency change.
bool abort_due_to_dependency_;
@ -500,9 +512,6 @@ class OptimizingCompiler: public ZoneObject {
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
time_taken_to_create_graph_(0),
time_taken_to_optimize_(0),
time_taken_to_codegen_(0),
last_status_(FAILED) { }
enum Status {
@ -528,9 +537,9 @@ class OptimizingCompiler: public ZoneObject {
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
int64_t time_taken_to_optimize_;
int64_t time_taken_to_codegen_;
TimeDelta time_taken_to_create_graph_;
TimeDelta time_taken_to_optimize_;
TimeDelta time_taken_to_codegen_;
Status last_status_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
@ -540,18 +549,20 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
Timer(OptimizingCompiler* compiler, int64_t* location)
Timer(OptimizingCompiler* compiler, TimeDelta* location)
: compiler_(compiler),
start_(OS::Ticks()),
location_(location) { }
location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
}
~Timer() {
*location_ += (OS::Ticks() - start_);
*location_ += timer_.Elapsed();
}
OptimizingCompiler* compiler_;
int64_t start_;
int64_t* location_;
ElapsedTimer timer_;
TimeDelta* location_;
};
};
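Pipeline timing here switches from raw OS::Ticks() arithmetic on int64_t microsecond counts to the typed ElapsedTimer/TimeDelta pair from the new platform/time.h. The Timer struct above is a scope guard that accumulates elapsed wall time into a TimeDelta it does not own. A minimal sketch of the same pattern using std::chrono, assuming the V8 types behave like steady-clock equivalents:

#include <cassert>
#include <chrono>

typedef std::chrono::steady_clock::duration TimeDelta;

// Scope guard accumulating elapsed time into an externally owned
// counter, like OptimizingCompiler::Timer above.
class ScopedTimer {
 public:
  explicit ScopedTimer(TimeDelta* location)
      : location_(location), start_(std::chrono::steady_clock::now()) {
    assert(location_ != nullptr);
  }
  ~ScopedTimer() {
    *location_ += std::chrono::steady_clock::now() - start_;
  }
 private:
  TimeDelta* location_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  TimeDelta time_taken_to_optimize = TimeDelta::zero();
  { ScopedTimer timer(&time_taken_to_optimize); /* ...do the work... */ }
  return time_taken_to_optimize >= TimeDelta::zero() ? 0 : 1;
}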
@ -600,7 +611,8 @@ class Compiler : public AllStatic {
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
static void RecompileParallel(Handle<JSFunction> function);
static bool RecompileConcurrent(Handle<JSFunction> function,
uint32_t osr_pc_offset = 0);
// Compile a shared function info object (the function is possibly lazily
// compiled).
@ -613,7 +625,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
static void InstallOptimizedCode(OptimizingCompiler* info);
static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
@ -643,7 +655,7 @@ class CompilationPhase BASE_EMBEDDED {
CompilationInfo* info_;
Zone zone_;
unsigned info_zone_start_allocation_size_;
int64_t start_ticks_;
ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};

43
deps/v8/src/contexts.cc

@ -74,7 +74,7 @@ Context* Context::native_context() {
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the native context.
ASSERT(Isolate::Current()->bootstrapper()->IsActive());
ASSERT(this->GetIsolate()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsNativeContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
@ -319,14 +319,48 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
}
void Context::SetOptimizedFunctionsListHead(Object* head) {
ASSERT(IsNativeContext());
set(OPTIMIZED_FUNCTIONS_LIST, head);
}
Object* Context::OptimizedFunctionsListHead() {
ASSERT(IsNativeContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
void Context::ClearOptimizedFunctions() {
set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
void Context::AddOptimizedCode(Code* code) {
ASSERT(IsNativeContext());
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(code->next_code_link()->IsUndefined());
code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
set(OPTIMIZED_CODE_LIST, code);
}
void Context::SetOptimizedCodeListHead(Object* head) {
ASSERT(IsNativeContext());
set(OPTIMIZED_CODE_LIST, head);
}
Object* Context::OptimizedCodeListHead() {
ASSERT(IsNativeContext());
return get(OPTIMIZED_CODE_LIST);
}
void Context::SetDeoptimizedCodeListHead(Object* head) {
ASSERT(IsNativeContext());
set(DEOPTIMIZED_CODE_LIST, head);
}
Object* Context::DeoptimizedCodeListHead() {
ASSERT(IsNativeContext());
return get(DEOPTIMIZED_CODE_LIST);
}
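AddOptimizedCode above threads Code objects into a singly linked list whose head sits in a native-context slot and whose links live inside the Code objects themselves (next_code_link), so insertion is O(1), allocation-free, and the GC can treat the links weakly. A sketch of the intrusive push, with plain pointers standing in for tagged heap slots (names are illustrative, not V8's):

#include <cassert>

// Plain pointers stand in for tagged heap slots.
struct Code {
  Code* next_code_link;  // the link lives inside the object itself
  Code() : next_code_link(nullptr) {}
};

struct NativeContext {
  Code* optimized_code_list;  // models the OPTIMIZED_CODE_LIST head slot
  NativeContext() : optimized_code_list(nullptr) {}

  // Mirrors Context::AddOptimizedCode: push-front, O(1), no allocation.
  void AddOptimizedCode(Code* code) {
    assert(code->next_code_link == nullptr);  // not on any list yet
    code->next_code_link = optimized_code_list;
    optimized_code_list = code;
  }
};

int main() {
  NativeContext context;
  Code a, b;
  context.AddOptimizedCode(&a);
  context.AddOptimizedCode(&b);  // b becomes the new head, linking to a
  return (context.optimized_code_list == &b &&
          b.next_code_link == &a) ? 0 : 1;
}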
@ -352,10 +386,9 @@ bool Context::IsBootstrappingOrValidParentContext(
}
bool Context::IsBootstrappingOrGlobalObject(Object* object) {
bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
Isolate* isolate = Isolate::Current();
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
isolate->bootstrapper()->IsActive() ||
object->IsGlobalObject();

22
deps/v8/src/contexts.h

@ -337,8 +337,10 @@ class Context: public FixedArray {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
MAP_CACHE_INDEX, // Weak.
NEXT_CONTEXT_LINK, // Weak.
OPTIMIZED_CODE_LIST, // Weak.
DEOPTIMIZED_CODE_LIST, // Weak.
MAP_CACHE_INDEX, // Weak.
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
@ -370,7 +372,7 @@ class Context: public FixedArray {
GlobalObject* global_object() {
Object* result = get(GLOBAL_OBJECT_INDEX);
ASSERT(IsBootstrappingOrGlobalObject(result));
ASSERT(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global_object(GlobalObject* object) {
@ -428,11 +430,19 @@ class Context: public FixedArray {
// Mark the native context with out of memory.
inline void mark_out_of_memory();
// A native context hold a list of all functions which have been optimized.
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
void SetOptimizedFunctionsListHead(Object* head);
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
// The native context also stores a list of all optimized code and a
// list of all deoptimized code, which are needed by the deoptimizer.
void AddOptimizedCode(Code* code);
void SetOptimizedCodeListHead(Object* head);
Object* OptimizedCodeListHead();
void SetDeoptimizedCodeListHead(Object* head);
Object* DeoptimizedCodeListHead();
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
@ -508,7 +518,7 @@ class Context: public FixedArray {
#ifdef DEBUG
// Bootstrapping-aware type checks.
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object);
#endif
STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);

10
deps/v8/src/counters.cc

@ -41,7 +41,7 @@ StatsTable::StatsTable()
int* StatsCounter::FindLocationInStatsTable() const {
return Isolate::Current()->stats_table()->FindLocation(name_);
return isolate_->stats_table()->FindLocation(name_);
}
@ -60,8 +60,7 @@ void* Histogram::CreateHistogram() const {
// Start the timer.
void HistogramTimer::Start() {
if (Enabled()) {
stop_time_ = 0;
start_time_ = OS::Ticks();
timer_.Start();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::START, name()));
@ -72,10 +71,9 @@ void HistogramTimer::Start() {
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (Enabled()) {
stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
AddSample(milliseconds);
AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
timer_.Stop();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::END, name()));

14
deps/v8/src/counters.h

@ -116,8 +116,8 @@ class StatsTable {
class StatsCounter {
public:
StatsCounter() { }
explicit StatsCounter(const char* name)
: name_(name), ptr_(NULL), lookup_done_(false) { }
explicit StatsCounter(Isolate* isolate, const char* name)
: isolate_(isolate), name_(name), ptr_(NULL), lookup_done_(false) { }
// Sets the counter to a specific value.
void Set(int value) {
@ -175,6 +175,7 @@ class StatsCounter {
private:
int* FindLocationInStatsTable() const;
Isolate* isolate_;
const char* name_;
int* ptr_;
bool lookup_done_;
@ -245,9 +246,7 @@ class HistogramTimer : public Histogram {
int max,
int num_buckets,
Isolate* isolate)
: Histogram(name, min, max, num_buckets, isolate),
start_time_(0),
stop_time_(0) { }
: Histogram(name, min, max, num_buckets, isolate) {}
// Start the timer.
void Start();
@ -257,12 +256,11 @@ class HistogramTimer : public Histogram {
// Returns true if the timer is running.
bool Running() {
return Enabled() && (start_time_ != 0) && (stop_time_ == 0);
return Enabled() && timer_.IsStarted();
}
private:
int64_t start_time_;
int64_t stop_time_;
ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.

21
deps/v8/src/cpu-profiler-inl.h

@ -67,13 +67,30 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
TickSample* CpuProfiler::StartTickSample() {
if (is_profiling_) return processor_->StartTickSample();
return NULL;
}
void CpuProfiler::FinishTickSample() {
processor_->FinishTickSample();
}
TickSample* ProfilerEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
if (address == NULL) return NULL;
TickSampleEventRecord* evt =
new(ticks_buffer_.Enqueue()) TickSampleEventRecord(last_code_event_id_);
new(address) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;
}
void ProfilerEventsProcessor::FinishTickSample() {
ticks_buffer_.FinishEnqueue();
}
} } // namespace v8::internal
#endif // V8_CPU_PROFILER_INL_H_

137
deps/v8/src/cpu-profiler.cc

@ -40,18 +40,18 @@
namespace v8 {
namespace internal {
static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ProfilerEventsProcessor::ProfilerEventsProcessor(
ProfileGenerator* generator,
Sampler* sampler,
TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
sampler_(sampler),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
period_(period),
last_code_event_id_(0), last_processed_code_event_id_(0) {
}
@ -103,54 +103,54 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
return false;
}
bool ProfilerEventsProcessor::ProcessTicks() {
while (true) {
while (!ticks_from_vm_buffer_.IsEmpty()
&& ticks_from_vm_buffer_.Peek()->order ==
last_processed_code_event_id_) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
}
const TickSampleEventRecord* rec =
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
// Make a local copy of tick sample record to ensure that it won't
// be modified as we are processing it. This is possible as the
// sampler writes w/o any sync to the queue, so if the processor
// will get far behind, a record may be modified right under its
// feet.
TickSampleEventRecord record = *rec;
if (record.order != last_processed_code_event_id_) return true;
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
if (record.sample.frames_count < 0
|| record.sample.frames_count > TickSample::kMaxFramesCount)
record.sample.frames_count = 0;
ProfilerEventsProcessor::SampleProcessingResult
ProfilerEventsProcessor::ProcessOneSample() {
if (!ticks_from_vm_buffer_.IsEmpty()
&& ticks_from_vm_buffer_.Peek()->order ==
last_processed_code_event_id_) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
ticks_buffer_.FinishDequeue();
return OneSampleProcessed;
}
const TickSampleEventRecord* record = ticks_buffer_.Peek();
if (record == NULL) {
if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
return FoundSampleForNextCodeEvent;
}
if (record->order != last_processed_code_event_id_) {
return FoundSampleForNextCodeEvent;
}
generator_->RecordTickSample(record->sample);
ticks_buffer_.Remove();
return OneSampleProcessed;
}
void ProfilerEventsProcessor::Run() {
while (running_) {
// Process ticks until we have any.
if (ProcessTicks()) {
// All ticks of the current last_processed_code_event_id_ are processed,
// proceed to the next code event.
ProcessCodeEvent();
}
YieldCPU();
ElapsedTimer timer;
timer.Start();
// Keep processing existing events until we need to do next sample.
do {
if (FoundSampleForNextCodeEvent == ProcessOneSample()) {
// All ticks of the current last_processed_code_event_id_ are
// processed, proceed to the next code event.
ProcessCodeEvent();
}
} while (!timer.HasExpired(period_));
// Schedule next sample. sampler_ is NULL in tests.
if (sampler_) sampler_->DoSample();
}
// Process remaining tick events.
ticks_buffer_.FlushResidualRecords();
do {
ProcessTicks();
SampleProcessingResult result;
do {
result = ProcessOneSample();
} while (result == OneSampleProcessed);
} while (ProcessCodeEvent());
}
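The rewritten Run() above inverts control: instead of the OS sampler firing asynchronously while the processor thread spins with YieldCPU(), the processor drains queued samples and code events for one sampling period and then explicitly requests the next sample via sampler_->DoSample(). The shape of such a loop, sketched with std::chrono stand-ins for ElapsedTimer and TimeDelta:

#include <atomic>
#include <chrono>

// Shape of the rewritten Run() loop: drain work for one period, then
// trigger the next sample. DrainFn/SampleFn are stand-ins for
// ProcessOneSample()/ProcessCodeEvent() and sampler_->DoSample().
template <typename DrainFn, typename SampleFn>
void RunSamplingLoop(std::atomic<bool>& running,
                     std::chrono::microseconds period,
                     DrainFn drain, SampleFn do_sample) {
  while (running.load(std::memory_order_relaxed)) {
    auto deadline = std::chrono::steady_clock::now() + period;
    do {
      drain();  // keep processing existing events...
    } while (std::chrono::steady_clock::now() < deadline);
    do_sample();  // ...then schedule the next sample
  }
}

int main() {
  std::atomic<bool> running(true);
  int samples = 0;
  RunSamplingLoop(running, std::chrono::microseconds(100),
                  [] { /* drain: no-op in this demo */ },
                  [&] { if (++samples == 3) running = false; });
  return samples == 3 ? 0 : 1;
}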
@ -166,12 +166,6 @@ CpuProfile* CpuProfiler::GetProfile(int index) {
}
TickSample* CpuProfiler::TickSampleEvent() {
if (is_profiling_) return processor_->TickSampleEvent();
return NULL;
}
void CpuProfiler::DeleteAllProfiles() {
if (is_profiling_) StopProcessor();
ResetProfiles();
@ -253,6 +247,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
ASSERT(Script::cast(shared->script()));
Script* script = Script::cast(shared->script());
rec->entry->set_script_id(script->id()->value());
rec->entry->set_bailout_reason(
GetBailoutReason(shared->DisableOptimizationReason()));
}
rec->size = code->ExecutableSize();
rec->shared = shared->address();
@ -283,6 +279,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->entry->set_script_id(script->id()->value());
rec->size = code->ExecutableSize();
rec->shared = shared->address();
rec->entry->set_bailout_reason(
GetBailoutReason(shared->DisableOptimizationReason()));
processor_->Enqueue(evt_rec);
}
@ -373,11 +371,12 @@ void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
profiles_(new CpuProfilesCollection()),
sampling_interval_(TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(new CpuProfilesCollection(isolate->heap())),
next_profile_uid_(1),
generator_(NULL),
processor_(NULL),
need_to_stop_sampler_(false),
is_profiling_(false) {
}
@ -387,11 +386,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
sampling_interval_(TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
next_profile_uid_(1),
generator_(test_generator),
processor_(test_processor),
need_to_stop_sampler_(false),
is_profiling_(false) {
}
@ -402,9 +402,15 @@ CpuProfiler::~CpuProfiler() {
}
void CpuProfiler::set_sampling_interval(TimeDelta value) {
ASSERT(!is_profiling_);
sampling_interval_ = value;
}
void CpuProfiler::ResetProfiles() {
delete profiles_;
profiles_ = new CpuProfilesCollection();
profiles_ = new CpuProfilesCollection(isolate()->heap());
}
@ -425,12 +431,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
saved_logging_nesting_ = logger->logging_nesting_;
logger->logging_nesting_ = 0;
saved_is_logging_ = logger->is_logging_;
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
Sampler* sampler = logger->sampler();
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
is_profiling_ = true;
processor_->StartSynchronously();
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
if (!FLAG_prof_browser_mode) {
@ -440,12 +447,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->LogAccessorCallbacks();
LogBuiltins();
// Enable stack sampling.
Sampler* sampler = logger->sampler();
sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
processor_->StartSynchronously();
}
}
@ -477,18 +481,15 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
is_profiling_ = false;
processor_->StopSynchronously();
delete processor_;
delete generator_;
processor_ = NULL;
generator_ = NULL;
logger->logging_nesting_ = saved_logging_nesting_;
sampler->SetHasProcessingThread(false);
sampler->DecreaseProfilingDepth();
logger->is_logging_ = saved_is_logging_;
}

37
deps/v8/src/cpu-profiler.h

@ -31,6 +31,7 @@
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "platform/time.h"
#include "sampler.h"
#include "unbound-queue.h"
@ -114,10 +115,6 @@ class TickSampleEventRecord {
unsigned order;
TickSample sample;
static TickSampleEventRecord* cast(void* value) {
return reinterpret_cast<TickSampleEventRecord*>(value);
}
};
@ -140,7 +137,9 @@ class CodeEventsContainer {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
explicit ProfilerEventsProcessor(ProfileGenerator* generator);
ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
TimeDelta period);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@ -156,17 +155,31 @@ class ProfilerEventsProcessor : public Thread {
// queue (because the structure is of fixed width, but usually not all
// stack frame entries are filled.) This method returns a pointer to the
// next record of the buffer.
INLINE(TickSample* TickSampleEvent());
inline TickSample* StartTickSample();
inline void FinishTickSample();
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
bool ProcessTicks();
enum SampleProcessingResult {
OneSampleProcessed,
FoundSampleForNextCodeEvent,
NoSamplesInQueue
};
SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
Sampler* sampler_;
bool running_;
// Sampling period in microseconds.
const TimeDelta period_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
static const size_t kTickSampleBufferSize = 1 * MB;
static const size_t kTickSampleQueueLength =
kTickSampleBufferSize / sizeof(TickSampleEventRecord);
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned last_code_event_id_;
unsigned last_processed_code_event_id_;
@ -195,6 +208,7 @@ class CpuProfiler : public CodeEventListener {
virtual ~CpuProfiler();
void set_sampling_interval(TimeDelta value);
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
@ -205,7 +219,8 @@ class CpuProfiler : public CodeEventListener {
void DeleteProfile(CpuProfile* profile);
// Invoked from stack sampler (thread or signal handler.)
TickSample* TickSampleEvent();
inline TickSample* StartTickSample();
inline void FinishTickSample();
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
@ -251,12 +266,12 @@ class CpuProfiler : public CodeEventListener {
void LogBuiltins();
Isolate* isolate_;
TimeDelta sampling_interval_;
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
bool need_to_stop_sampler_;
bool saved_is_logging_;
bool is_profiling_;
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);

466
deps/v8/src/cpu.cc

@ -0,0 +1,466 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "cpu.h"
#if V8_CC_MSVC
#include <intrin.h> // __cpuid()
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#endif
#include <algorithm>
#include <cctype>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "checks.h"
#if V8_OS_WIN
#include "win32-headers.h"
#endif
namespace v8 {
namespace internal {
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Define __cpuid() for non-MSVC compilers.
#if !V8_CC_MSVC
static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#if defined(__i386__) && defined(__pic__)
// Make sure to preserve ebx, which contains the pointer
// to the GOT in case we're generating PIC.
__asm__ volatile (
"mov %%ebx, %%edi\n\t"
"cpuid\n\t"
"xchg %%edi, %%ebx\n\t"
: "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type)
);
#else
__asm__ volatile (
"cpuid \n\t"
: "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type)
);
#endif // defined(__i386__) && defined(__pic__)
}
#endif // !V8_CC_MSVC
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
#if V8_HOST_ARCH_ARM
// See <uapi/asm/hwcap.h> kernel header.
/*
* HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
*/
#define HWCAP_SWP (1 << 0)
#define HWCAP_HALF (1 << 1)
#define HWCAP_THUMB (1 << 2)
#define HWCAP_26BIT (1 << 3) /* Play it safe */
#define HWCAP_FAST_MULT (1 << 4)
#define HWCAP_FPA (1 << 5)
#define HWCAP_VFP (1 << 6)
#define HWCAP_EDSP (1 << 7)
#define HWCAP_JAVA (1 << 8)
#define HWCAP_IWMMXT (1 << 9)
#define HWCAP_CRUNCH (1 << 10)
#define HWCAP_THUMBEE (1 << 11)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
#define HWCAP_TLS (1 << 15)
#define HWCAP_VFPv4 (1 << 16)
#define HWCAP_IDIVA (1 << 17)
#define HWCAP_IDIVT (1 << 18)
#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
#define AT_HWCAP 16
// Read the ELF HWCAP flags by parsing /proc/self/auxv.
static uint32_t ReadELFHWCaps() {
uint32_t result = 0;
FILE* fp = fopen("/proc/self/auxv", "r");
if (fp != NULL) {
struct { uint32_t tag; uint32_t value; } entry;
for (;;) {
size_t n = fread(&entry, sizeof(entry), 1, fp);
if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
break;
}
if (entry.tag == AT_HWCAP) {
result = entry.value;
break;
}
}
fclose(fp);
}
return result;
}
#endif // V8_HOST_ARCH_ARM
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo V8_FINAL BASE_EMBEDDED {
public:
CPUInfo() : datalen_(0) {
// Get the size of the cpuinfo file by reading it until the end. This is
// required because files under /proc do not always return a valid size
// when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
static const char PATHNAME[] = "/proc/cpuinfo";
FILE* fp = fopen(PATHNAME, "r");
if (fp != NULL) {
for (;;) {
char buffer[256];
size_t n = fread(buffer, 1, sizeof(buffer), fp);
if (n == 0) {
break;
}
datalen_ += n;
}
fclose(fp);
}
// Read the contents of the cpuinfo file.
data_ = new char[datalen_ + 1];
fp = fopen(PATHNAME, "r");
if (fp != NULL) {
for (size_t offset = 0; offset < datalen_; ) {
size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
if (n == 0) {
break;
}
offset += n;
}
fclose(fp);
}
// Zero-terminate the data.
data_[datalen_] = '\0';
}
~CPUInfo() {
delete[] data_;
}
// Extract the content of the first occurrence of a given field in
// the content of the cpuinfo file and return it as a heap-allocated
// string that must be freed by the caller using delete[].
// Return NULL if not found.
char* ExtractField(const char* field) const {
ASSERT(field != NULL);
// Look for the first field occurrence, and ensure it starts the line.
size_t fieldlen = strlen(field);
char* p = data_;
for (;;) {
p = strstr(p, field);
if (p == NULL) {
return NULL;
}
if (p == data_ || p[-1] == '\n') {
break;
}
p += fieldlen;
}
// Skip to the first colon followed by a space.
p = strchr(p + fieldlen, ':');
if (p == NULL || !isspace(p[1])) {
return NULL;
}
p += 2;
// Find the end of the line.
char* q = strchr(p, '\n');
if (q == NULL) {
q = data_ + datalen_;
}
// Copy the line into a heap-allocated buffer.
size_t len = q - p;
char* result = new char[len + 1];
if (result != NULL) {
memcpy(result, p, len);
result[len] = '\0';
}
return result;
}
private:
char* data_;
size_t datalen_;
};
// Checks that a space-separated list of items contains one given 'item'.
static bool HasListItem(const char* list, const char* item) {
ssize_t item_len = strlen(item);
const char* p = list;
if (p != NULL) {
while (*p != '\0') {
// Skip whitespace.
while (isspace(*p)) ++p;
// Find end of current list item.
const char* q = p;
while (*q != '\0' && !isspace(*q)) ++q;
if (item_len == q - p && memcmp(p, item, item_len) == 0) {
return true;
}
// Skip to next item.
p = q;
}
}
return false;
}
#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
CPU::CPU() : stepping_(0),
model_(0),
ext_model_(0),
family_(0),
ext_family_(0),
type_(0),
implementer_(0),
architecture_(0),
part_(0),
has_fpu_(false),
has_cmov_(false),
has_sahf_(false),
has_mmx_(false),
has_sse_(false),
has_sse2_(false),
has_sse3_(false),
has_ssse3_(false),
has_sse41_(false),
has_sse42_(false),
has_idiva_(false),
has_neon_(false),
has_thumbee_(false),
has_vfp_(false),
has_vfp3_(false),
has_vfp3_d32_(false) {
memcpy(vendor_, "Unknown", 8);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
int cpu_info[4];
// __cpuid with an InfoType argument of 0 returns the number of
// valid Ids in CPUInfo[0] and the CPU identification string in
// the other three array elements. The CPU identification string is
// not in linear order. The code below arranges the information
// in a human readable form. The human readable order is CPUInfo[1] |
// CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
// before using memcpy to copy these three array elements to vendor_.
__cpuid(cpu_info, 0);
unsigned num_ids = cpu_info[0];
std::swap(cpu_info[2], cpu_info[3]);
memcpy(vendor_, cpu_info + 1, 12);
vendor_[12] = '\0';
// Interpret CPU feature information.
if (num_ids > 0) {
__cpuid(cpu_info, 1);
stepping_ = cpu_info[0] & 0xf;
model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
family_ = (cpu_info[0] >> 8) & 0xf;
type_ = (cpu_info[0] >> 12) & 0x3;
ext_model_ = (cpu_info[0] >> 16) & 0xf;
ext_family_ = (cpu_info[0] >> 20) & 0xff;
has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
has_sse_ = (cpu_info[3] & 0x02000000) != 0;
has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
}
// Query extended IDs.
__cpuid(cpu_info, 0x80000000);
unsigned num_ext_ids = cpu_info[0];
// Interpret extended CPU feature information.
if (num_ext_ids > 0x80000000) {
__cpuid(cpu_info, 0x80000001);
// SAHF is always available in compat/legacy mode,
// but must be probed in long mode.
#if V8_HOST_ARCH_IA32
has_sahf_ = true;
#else
has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
#endif
}
#elif V8_HOST_ARCH_ARM
CPUInfo cpu_info;
// Extract the implementer from the "CPU implementer" field.
char* implementer = cpu_info.ExtractField("CPU implementer");
if (implementer != NULL) {
char* end;
implementer_ = strtol(implementer, &end, 0);
if (end == implementer) {
implementer_ = 0;
}
delete[] implementer;
}
// Extract part number from the "CPU part" field.
char* part = cpu_info.ExtractField("CPU part");
if (part != NULL) {
char* end;
part_ = strtol(part, &end, 0);
if (end == part) {
part_ = 0;
}
delete[] part;
}
// Extract architecture from the "CPU architecture" field.
// The list is well-known, unlike the output of
// the 'Processor' field, which can vary greatly.
// See the definition of the 'proc_arch' array in
// $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
// the same file.
char* architecture = cpu_info.ExtractField("CPU architecture");
if (architecture != NULL) {
char* end;
architecture_ = strtol(architecture, &end, 10);
if (end == architecture) {
architecture_ = 0;
}
delete[] architecture;
// Unfortunately, it seems that certain ARMv6-based CPUs
// report an incorrect architecture number of 7!
//
// See http://code.google.com/p/android/issues/detail?id=10812
//
// We try to correct this by looking at the 'elf_format'
// field reported by the 'Processor' field, which is of the
// form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
// an ARMv6 one. For example, the Raspberry Pi is a popular
// ARMv6 device that reports architecture 7.
if (architecture_ == 7) {
char* processor = cpu_info.ExtractField("Processor");
if (HasListItem(processor, "(v6l)")) {
architecture_ = 6;
}
delete[] processor;
}
}
// Try to extract the list of CPU features from ELF hwcaps.
uint32_t hwcaps = ReadELFHWCaps();
if (hwcaps != 0) {
has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
has_neon_ = (hwcaps & HWCAP_NEON) != 0;
has_thumbee_ = (hwcaps & HWCAP_THUMBEE) != 0;
has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
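// Reading of the expression below (editorial note): VFPv3 without the
// explicit D16 flag is assumed to provide 32 D registers, and the
// VFPD32 flag forces d32 support even when VFPv3D16 is also set.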
has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
(hwcaps & HWCAP_VFPD32) != 0));
} else {
// Try to fall back to the "Features" CPUInfo field.
char* features = cpu_info.ExtractField("Features");
has_idiva_ = HasListItem(features, "idiva");
has_neon_ = HasListItem(features, "neon");
has_thumbee_ = HasListItem(features, "thumbee");
has_vfp_ = HasListItem(features, "vfp");
if (HasListItem(features, "vfpv3")) {
has_vfp3_ = true;
has_vfp3_d32_ = true;
} else if (HasListItem(features, "vfpv3d16")) {
has_vfp3_ = true;
}
delete[] features;
}
// Some old kernels will report vfp, not vfpv3. Here we make an attempt
// to detect vfpv3 by checking for vfp *and* neon, since neon is only
// available on architectures with vfpv3. Checking neon on its own is
// not enough as it is possible to have neon without vfp.
if (has_vfp_ && has_neon_) {
has_vfp3_ = true;
}
// VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (architecture_ < 7 && has_vfp3_) {
architecture_ = 7;
}
// ARMv7 implies ThumbEE.
if (architecture_ >= 7) {
has_thumbee_ = true;
}
// The earliest architecture with ThumbEE is ARMv6T2.
if (has_thumbee_ && architecture_ < 6) {
architecture_ = 6;
}
// We don't support any FPUs other than VFP.
has_fpu_ = has_vfp_;
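// Worked example of the fixups above (editorial note, hypothetical device):
// an old kernel reporting "CPU architecture: 6" with Features "vfp neon"
// sets has_vfp3_ (vfp + neon), which raises architecture_ to 7, which in
// turn implies has_thumbee_; has_fpu_ then mirrors has_vfp_.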
#elif V8_HOST_ARCH_MIPS
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to MIPS (early 2010), no similar
// facility is universally available on the MIPS architectures,
// so it's up to individual OSes to provide it.
CPUInfo cpu_info;
char* cpu_model = cpu_info.ExtractField("cpu model");
has_fpu_ = HasListItem(cpu_model, "FPU");
delete[] cpu_model;
#endif
}
// static
int CPU::NumberOfProcessorsOnline() {
#if V8_OS_WIN
SYSTEM_INFO info;
GetSystemInfo(&info);
return info.dwNumberOfProcessors;
#else
return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
#endif
}
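// Minimal usage sketch (editorial note, not part of this file):
//
//   CPU cpu;
//   if (cpu.has_sse2()) { /* safe to emit SSE2 instructions */ }
//   int cores = CPU::NumberOfProcessorsOnline();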
} } // namespace v8::internal

91
deps/v8/src/cpu.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2006-2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -44,14 +44,64 @@ namespace internal {
// ----------------------------------------------------------------------------
// CPU
//
// This class has static methods for the architecture specific functions. Add
// methods here to cope with differences between the supported architectures.
// Query information about the processor.
//
// For each architecture the file cpu_<arch>.cc contains the implementation of
// these functions.
// This class also has static methods for the architecture specific functions.
// Add methods here to cope with differences between the supported
// architectures. For each architecture the file cpu_<arch>.cc contains the
// implementation of these static functions.
class CPU : public AllStatic {
class CPU V8_FINAL BASE_EMBEDDED {
public:
CPU();
// x86 CPUID information
const char* vendor() const { return vendor_; }
int stepping() const { return stepping_; }
int model() const { return model_; }
int ext_model() const { return ext_model_; }
int family() const { return family_; }
int ext_family() const { return ext_family_; }
int type() const { return type_; }
// arm implementer/part information
int implementer() const { return implementer_; }
static const int ARM = 0x41;
static const int QUALCOMM = 0x51;
int architecture() const { return architecture_; }
int part() const { return part_; }
static const int ARM_CORTEX_A5 = 0xc05;
static const int ARM_CORTEX_A7 = 0xc07;
static const int ARM_CORTEX_A8 = 0xc08;
static const int ARM_CORTEX_A9 = 0xc09;
static const int ARM_CORTEX_A12 = 0xc0c;
static const int ARM_CORTEX_A15 = 0xc0f;
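// Illustrative use of the identification constants above (editorial note):
//
//   CPU cpu;
//   bool is_cortex_a9 = cpu.implementer() == CPU::ARM &&
//                       cpu.part() == CPU::ARM_CORTEX_A9;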
// General features
bool has_fpu() const { return has_fpu_; }
// x86 features
bool has_cmov() const { return has_cmov_; }
bool has_sahf() const { return has_sahf_; }
bool has_mmx() const { return has_mmx_; }
bool has_sse() const { return has_sse_; }
bool has_sse2() const { return has_sse2_; }
bool has_sse3() const { return has_sse3_; }
bool has_ssse3() const { return has_ssse3_; }
bool has_sse41() const { return has_sse41_; }
bool has_sse42() const { return has_sse42_; }
// arm features
bool has_idiva() const { return has_idiva_; }
bool has_neon() const { return has_neon_; }
bool has_thumbee() const { return has_thumbee_; }
bool has_vfp() const { return has_vfp_; }
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
// Returns the number of processors online.
static int NumberOfProcessorsOnline();
// Initializes the cpu architecture support. Called once at VM startup.
static void SetUp();
@ -60,8 +110,33 @@ class CPU : public AllStatic {
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
// Try to activate a system level debugger.
static void DebugBreak();
private:
char vendor_[13];
int stepping_;
int model_;
int ext_model_;
int family_;
int ext_family_;
int type_;
int implementer_;
int architecture_;
int part_;
bool has_fpu_;
bool has_cmov_;
bool has_sahf_;
bool has_mmx_;
bool has_sse_;
bool has_sse2_;
bool has_sse3_;
bool has_ssse3_;
bool has_sse41_;
bool has_sse42_;
bool has_idiva_;
bool has_neon_;
bool has_thumbee_;
bool has_vfp_;
bool has_vfp3_;
bool has_vfp3_d32_;
};
} } // namespace v8::internal

22
deps/v8/src/d8-debug.cc

@ -29,8 +29,9 @@
#include "d8.h"
#include "d8-debug.h"
#include "platform.h"
#include "debug-agent.h"
#include "platform.h"
#include "platform/socket.h"
namespace v8 {
@ -171,21 +172,14 @@ void RunRemoteDebugger(Isolate* isolate, int port) {
void RemoteDebugger::Run() {
bool ok;
// Make sure that socket support is initialized.
ok = i::Socket::SetUp();
if (!ok) {
printf("Unable to initialize socket support %d\n", i::Socket::LastError());
return;
}
// Connect to the debugger agent.
conn_ = i::OS::CreateSocket();
conn_ = new i::Socket;
static const int kPortStrSize = 6;
char port_str[kPortStrSize];
i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
ok = conn_->Connect("localhost", port_str);
if (!ok) {
printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
printf("Unable to connect to debug agent %d\n", i::Socket::GetLastError());
return;
}
@ -201,7 +195,7 @@ void RemoteDebugger::Run() {
// Process events received from debugged VM and from the keyboard.
bool terminate = false;
while (!terminate) {
event_available_->Wait();
event_available_.Wait();
RemoteDebuggerEvent* event = GetEvent();
switch (event->type()) {
case RemoteDebuggerEvent::kMessage:
@ -248,7 +242,7 @@ void RemoteDebugger::ConnectionClosed() {
void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
i::ScopedLock lock(event_access_);
i::LockGuard<i::Mutex> lock_guard(&event_access_);
if (head_ == NULL) {
ASSERT(tail_ == NULL);
head_ = event;
@ -258,12 +252,12 @@ void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
tail_->set_next(event);
tail_ = event;
}
event_available_->Signal();
event_available_.Signal();
}
RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
i::ScopedLock lock(event_access_);
i::LockGuard<i::Mutex> lock_guard(&event_access_);
ASSERT(head_ != NULL);
RemoteDebuggerEvent* result = head_;
head_ = head_->next();

7
deps/v8/src/d8-debug.h

@ -53,8 +53,7 @@ class RemoteDebugger {
explicit RemoteDebugger(Isolate* isolate, int port)
: isolate_(isolate),
port_(port),
event_access_(i::OS::CreateMutex()),
event_available_(i::OS::CreateSemaphore(0)),
event_available_(0),
head_(NULL), tail_(NULL) {}
void Run();
@ -84,8 +83,8 @@ class RemoteDebugger {
// Linked list of events from debugged V8 and from keyboard input. Access to
// the list is guarded by a mutex and a semaphore signals new items in the
// list.
i::Mutex* event_access_;
i::Semaphore* event_available_;
i::Mutex event_access_;
i::Semaphore event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
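// Sketch of the locking pattern this patch adopts (editorial note, assuming
// the members above): synchronization objects become value members and locks
// are scoped RAII guards instead of heap-allocated OS handles:
//
//   i::LockGuard<i::Mutex> lock_guard(&event_access_);  // locks here
//   // ... critical section ...
//   // unlocked automatically when lock_guard goes out of scope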

41
deps/v8/src/d8.cc

@ -157,7 +157,7 @@ CounterMap* Shell::counter_map_;
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
i::Mutex Shell::context_mutex_;
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
@ -271,10 +271,10 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive.
for (int i = 0; i < data_->realm_count_; ++i)
data_->realms_[i].Dispose(data_->isolate_);
data_->realms_[i].Dispose();
delete[] data_->realms_;
if (!data_->realm_shared_.IsEmpty())
data_->realm_shared_.Dispose(data_->isolate_);
data_->realm_shared_.Dispose();
}
@ -361,7 +361,7 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw("Invalid realm index");
return;
}
data->realms_[index].Dispose(isolate);
data->realms_[index].Dispose();
data->realms_[index].Clear();
}
@ -420,7 +420,7 @@ void Shell::RealmSharedSet(Local<String> property,
const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose(isolate);
if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose();
data->realm_shared_.Reset(isolate, value);
}
@ -766,13 +766,14 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug();
i::Debug* debug = reinterpret_cast<i::Isolate*>(isolate)->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
utility_context->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(HEAP->undefined_value());
debug->debug_context()->set_security_token(
reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
// Run the d8 shell utility script in the utility context
@ -925,7 +926,7 @@ void Shell::InitializeDebugger(Isolate* isolate) {
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
i::ScopedLock lock(context_mutex_);
i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
@ -935,7 +936,7 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Context::Scope scope(context);
#ifndef V8_SHARED
i::Factory* factory = i::Isolate::Current()->factory();
i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
factory->NewFixedArray(js_args.argc());
@ -1011,7 +1012,6 @@ void Shell::OnExit() {
"-------------+\n");
delete [] counters;
}
delete context_mutex_;
delete counters_file_;
delete counter_map_;
#endif // V8_SHARED
@ -1221,10 +1221,6 @@ void ShellThread::Run() {
SourceGroup::~SourceGroup() {
#ifndef V8_SHARED
delete next_semaphore_;
next_semaphore_ = NULL;
delete done_semaphore_;
done_semaphore_ = NULL;
delete thread_;
thread_ = NULL;
#endif // V8_SHARED
@ -1285,7 +1281,7 @@ i::Thread::Options SourceGroup::GetThreadOptions() {
void SourceGroup::ExecuteInThread() {
Isolate* isolate = Isolate::New();
do {
if (next_semaphore_ != NULL) next_semaphore_->Wait();
next_semaphore_.Wait();
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
@ -1305,7 +1301,7 @@ void SourceGroup::ExecuteInThread() {
V8::IdleNotification(kLongIdlePauseInMs);
}
}
if (done_semaphore_ != NULL) done_semaphore_->Signal();
done_semaphore_.Signal();
} while (!Shell::options.last_run);
isolate->Dispose();
}
@ -1316,7 +1312,7 @@ void SourceGroup::StartExecuteInThread() {
thread_ = new IsolateThread(this);
thread_->Start();
}
next_semaphore_->Signal();
next_semaphore_.Signal();
}
@ -1325,7 +1321,7 @@ void SourceGroup::WaitForThread() {
if (Shell::options.last_run) {
thread_->Join();
} else {
done_semaphore_->Wait();
done_semaphore_.Wait();
}
}
#endif // V8_SHARED
@ -1556,11 +1552,10 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifdef V8_SHARED
static void SetStandaloneFlagsViaCommandLine() {
int fake_argc = 3;
char **fake_argv = new char*[3];
int fake_argc = 2;
char **fake_argv = new char*[2];
fake_argv[0] = NULL;
fake_argv[1] = strdup("--harmony-typed-arrays");
fake_argv[2] = strdup("--trace-hydrogen-file=hydrogen.cfg");
fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg");
v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
free(fake_argv[1]);
delete[] fake_argv;
@ -1649,8 +1644,6 @@ int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU();
#ifndef V8_SHARED
i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;
i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
#else
SetStandaloneFlagsViaCommandLine();

14
deps/v8/src/d8.h

@ -140,8 +140,8 @@ class SourceGroup {
public:
SourceGroup() :
#ifndef V8_SHARED
next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
next_semaphore_(0),
done_semaphore_(0),
thread_(NULL),
#endif // V8_SHARED
argv_(NULL),
@ -180,8 +180,8 @@ class SourceGroup {
static i::Thread::Options GetThreadOptions();
void ExecuteInThread();
i::Semaphore* next_semaphore_;
i::Semaphore* done_semaphore_;
i::Semaphore next_semaphore_;
i::Semaphore done_semaphore_;
i::Thread* thread_;
#endif // V8_SHARED
@ -390,7 +390,7 @@ class Shell : public i::AllStatic {
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
static i::Mutex* context_mutex_;
static i::Mutex context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
@ -400,8 +400,8 @@ class Shell : public i::AllStatic {
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
static Handle<FunctionTemplate> CreateArrayBufferTemplate(FunctionCallback);
static Handle<FunctionTemplate> CreateArrayTemplate(FunctionCallback);
static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
Handle<Object> buffer,
int32_t size);

111
deps/v8/src/debug-agent.cc

@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "v8.h"
#include "debug.h"
#include "debug-agent.h"
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "platform/socket.h"
namespace v8 {
namespace internal {
@ -38,16 +38,36 @@ namespace internal {
// Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
Isolate* isolate = reinterpret_cast<Isolate*>(message.GetIsolate());
DebuggerAgent* agent = isolate->debugger_agent_instance();
ASSERT(agent != NULL);
agent->DebuggerMessage(message);
}
DebuggerAgent::DebuggerAgent(Isolate* isolate, const char* name, int port)
: Thread(name),
isolate_(isolate),
name_(StrDup(name)),
port_(port),
server_(new Socket),
terminate_(false),
session_(NULL),
terminate_now_(0),
listening_(0) {
ASSERT(isolate_->debugger_agent_instance() == NULL);
isolate_->set_debugger_agent_instance(this);
}
DebuggerAgent::~DebuggerAgent() {
isolate_->set_debugger_agent_instance(NULL);
delete server_;
}
// Debugger agent main thread.
void DebuggerAgent::Run() {
const int kOneSecondInMicros = 1000000;
// Allow this socket to reuse port even if still in TIME_WAIT.
server_->SetReuseAddress(true);
@ -60,16 +80,20 @@ void DebuggerAgent::Run() {
// would be that the port is already in use, so this avoids a busy loop and
// makes the agent take over the port when it becomes free.
if (!bound) {
const TimeDelta kTimeout = TimeDelta::FromSeconds(1);
PrintF("Failed to open socket on port %d, "
"waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
terminate_now_->Wait(kOneSecondInMicros);
"waiting %d ms before retrying\n", port_,
static_cast<int>(kTimeout.InMilliseconds()));
if (!terminate_now_.WaitFor(kTimeout)) {
if (terminate_) return;
}
}
}
// Accept connections on the bound port.
while (!terminate_) {
bool ok = server_->Listen(1);
listening_->Signal();
listening_.Signal();
if (ok) {
// Accept the new connection.
Socket* client = server_->Accept();
@ -89,7 +113,7 @@ void DebuggerAgent::Shutdown() {
// Signal termination and make the server exit either its listen call or its
// binding loop. This makes sure that no new sessions can be established.
terminate_now_->Signal();
terminate_now_.Signal();
server_->Shutdown();
Join();
@ -99,19 +123,21 @@ void DebuggerAgent::Shutdown() {
void DebuggerAgent::WaitUntilListening() {
listening_->Wait();
listening_.Wait();
}
static const char* kCreateSessionMessage =
"Remote debugging session already active\r\n";
void DebuggerAgent::CreateSession(Socket* client) {
ScopedLock with(session_access_);
LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
int len = StrLength(kCreateSessionMessage);
int res = client->Send(kCreateSessionMessage, len);
delete client;
USE(res);
return;
}
@ -123,7 +149,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
void DebuggerAgent::CloseSession() {
ScopedLock with(session_access_);
LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Terminate the session.
if (session_ != NULL) {
@ -136,7 +162,7 @@ void DebuggerAgent::CloseSession() {
void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
ScopedLock with(session_access_);
LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Forward the message handling to the session.
if (session_ != NULL) {
@ -154,7 +180,7 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
}
// Terminate the session.
ScopedLock with(session_access_);
LockGuard<RecursiveMutex> session_access_guard(&session_access_);
ASSERT(session == session_);
if (session == session_) {
session_->Shutdown();
@ -226,7 +252,7 @@ void DebuggerAgentSession::Shutdown() {
const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(Socket* conn) {
int received;
// Read header.
@ -243,7 +269,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
prev_c = c;
received = conn->Receive(&c, 1);
if (received == 0) {
PrintF("Error %d\n", Socket::LastError());
PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
@ -305,7 +331,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
char* buffer = NewArray<char>(content_length + 1);
received = ReceiveAll(conn, buffer, content_length);
if (received < content_length) {
PrintF("Error %d\n", Socket::LastError());
PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
buffer[content_length] = '\0';
@ -314,7 +340,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
}
bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
bool DebuggerAgentUtil::SendConnectMessage(Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
@ -360,7 +386,7 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
}
bool DebuggerAgentUtil::SendMessage(const Socket* conn,
bool DebuggerAgentUtil::SendMessage(Socket* conn,
const Vector<uint16_t> message) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@ -375,14 +401,17 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
// Send the header.
int len;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"%s: %d\r\n", kContentLength, utf8_len);
conn->Send(buffer, len);
int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"%s: %d\r\n", kContentLength, utf8_len);
if (conn->Send(buffer, len) < len) {
return false;
}
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
conn->Send(buffer, len);
if (conn->Send(buffer, len) < len) {
return false;
}
// Send message body as UTF-8.
int buffer_position = 0; // Current buffer position.
@ -402,13 +431,19 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
const int kEncodedSurrogateLength =
unibrow::Utf16::kUtf8BytesToCodeASurrogate;
ASSERT(buffer_position >= kEncodedSurrogateLength);
conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
len = buffer_position - kEncodedSurrogateLength;
if (conn->Send(buffer, len) < len) {
return false;
}
for (int i = 0; i < kEncodedSurrogateLength; i++) {
buffer[i] = buffer[buffer_position + i];
}
buffer_position = kEncodedSurrogateLength;
} else {
conn->Send(buffer, buffer_position);
len = buffer_position;
if (conn->Send(buffer, len) < len) {
return false;
}
buffer_position = 0;
}
}
@ -419,7 +454,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
bool DebuggerAgentUtil::SendMessage(const Socket* conn,
bool DebuggerAgentUtil::SendMessage(Socket* conn,
const v8::Handle<v8::String> request) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@ -428,24 +463,30 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
v8::String::Utf8Value utf8_request(request);
// Send the header.
int len;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Content-Length: %d\r\n", utf8_request.length());
conn->Send(buffer, len);
int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Content-Length: %d\r\n", utf8_request.length());
if (conn->Send(buffer, len) < len) {
return false;
}
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
conn->Send(buffer, len);
if (conn->Send(buffer, len) < len) {
return false;
}
// Send message body as UTF-8.
conn->Send(*utf8_request, utf8_request.length());
len = utf8_request.length();
if (conn->Send(*utf8_request, len) < len) {
return false;
}
return true;
}
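// Editorial note on the checks above: Socket::Send() can report a short
// write, so any return value smaller than the requested length is treated
// as failure; otherwise the Content-Length framing would desynchronize.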
// Receive the full buffer before returning unless an error occurs.
int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
int DebuggerAgentUtil::ReceiveAll(Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);

36
deps/v8/src/debug-agent.h

@ -37,27 +37,15 @@ namespace internal {
// Forward declarations.
class DebuggerAgentSession;
class Socket;
// Debugger agent which starts a socket listener on the debugger port and
// handles connection from a remote debugger.
class DebuggerAgent: public Thread {
public:
DebuggerAgent(const char* name, int port)
: Thread(name),
isolate_(Isolate::Current()),
name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
terminate_now_(OS::CreateSemaphore(0)),
listening_(OS::CreateSemaphore(0)) {
ASSERT(isolate_->debugger_agent_instance() == NULL);
isolate_->set_debugger_agent_instance(this);
}
~DebuggerAgent() {
isolate_->set_debugger_agent_instance(NULL);
delete server_;
}
DebuggerAgent(Isolate* isolate, const char* name, int port);
~DebuggerAgent();
void Shutdown();
void WaitUntilListening();
@ -76,10 +64,10 @@ class DebuggerAgent: public Thread {
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
bool terminate_; // Termination flag.
Mutex* session_access_; // Mutex guarging access to session_.
RecursiveMutex session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination.
Semaphore* listening_;
Semaphore terminate_now_; // Semaphore to signal termination.
Semaphore listening_;
friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
@ -116,13 +104,11 @@ class DebuggerAgentUtil {
public:
static const char* const kContentLength;
static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
static bool SendConnectMessage(const Socket* conn,
const char* embedding_host);
static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
static bool SendMessage(const Socket* conn,
const v8::Handle<v8::String> message);
static int ReceiveAll(const Socket* conn, char* data, int len);
static SmartArrayPointer<char> ReceiveMessage(Socket* conn);
static bool SendConnectMessage(Socket* conn, const char* embedding_host);
static bool SendMessage(Socket* conn, const Vector<uint16_t> message);
static bool SendMessage(Socket* conn, const v8::Handle<v8::String> message);
static int ReceiveAll(Socket* conn, char* data, int len);
};
} } // namespace v8::internal

9
deps/v8/src/debug-debugger.js

@ -957,12 +957,17 @@ function ExecutionState(break_id) {
this.selected_frame = 0;
}
ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
opt_callframe) {
var action = Debug.StepAction.StepIn;
if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
var count = opt_count ? %ToNumber(opt_count) : 1;
var callFrameId = 0;
if (!IS_UNDEFINED(opt_callframe)) {
callFrameId = opt_callframe.details_.frameId();
}
return %PrepareStep(this.break_id, action, count);
return %PrepareStep(this.break_id, action, count, callFrameId);
};
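// Illustrative call (editorial note, hypothetical objects): step over one
// statement in a specific call frame:
//   execState.prepareStep(Debug.StepAction.StepNext, 1, callframe);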
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,

179
deps/v8/src/debug.cc

@ -86,8 +86,9 @@ static void PrintLn(v8::Local<v8::Value> value) {
}
static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
Isolate* isolate = Isolate::Current();
static Handle<Code> ComputeCallDebugPrepareStepIn(Isolate* isolate,
int argc,
Code::Kind kind) {
return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
}
@ -403,11 +404,11 @@ void BreakLocationIterator::ClearDebugBreak() {
bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
if (RelocInfo::IsConstructCall(rmode())) {
if (RelocInfo::IsConstructCall(original_rmode())) {
return true;
} else if (RelocInfo::IsCodeTarget(rmode())) {
HandleScope scope(debug_info_->GetIsolate());
Address target = rinfo()->target_address();
Address target = original_rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
if (target_code->kind() == Code::STUB) {
return target_code->major_key() == CodeStub::CallFunction;
@ -433,7 +434,7 @@ void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn(
target_code->arguments_count(), target_code->kind());
isolate, target_code->arguments_count(), target_code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@ -633,7 +634,7 @@ const int Debug::kFrameDropperFrameSize = 4;
void ScriptCache::Add(Handle<Script> script) {
GlobalHandles* global_handles = Isolate::Current()->global_handles();
GlobalHandles* global_handles = isolate_->global_handles();
// Create an entry in the hash map for the script.
int id = script->id()->value();
HashMap::Entry* entry =
@ -655,7 +656,7 @@ void ScriptCache::Add(Handle<Script> script) {
Handle<FixedArray> ScriptCache::GetScripts() {
Factory* factory = Isolate::Current()->factory();
Factory* factory = isolate_->factory();
Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
@ -670,7 +671,7 @@ Handle<FixedArray> ScriptCache::GetScripts() {
void ScriptCache::ProcessCollectedScripts() {
Debugger* debugger = Isolate::Current()->debugger();
Debugger* debugger = isolate_->debugger();
for (int i = 0; i < collected_scripts_.length(); i++) {
debugger->OnScriptCollected(collected_scripts_[i]);
}
@ -679,7 +680,7 @@ void ScriptCache::ProcessCollectedScripts() {
void ScriptCache::Clear() {
GlobalHandles* global_handles = Isolate::Current()->global_handles();
GlobalHandles* global_handles = isolate_->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
@ -708,7 +709,7 @@ void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
obj->Dispose(isolate);
obj->Dispose();
}
@ -750,7 +751,7 @@ void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
GlobalHandles* global_handles = Isolate::Current()->global_handles();
GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
// Globalize the request debug info object and make it weak.
debug_info_ = Handle<DebugInfo>::cast(
(global_handles->Create(debug_info)));
@ -761,13 +762,12 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
DebugInfoListNode::~DebugInfoListNode() {
Isolate::Current()->global_handles()->Destroy(
debug_info_->GetIsolate()->global_handles()->Destroy(
reinterpret_cast<Object**>(debug_info_.location()));
}
bool Debug::CompileDebuggerScript(int index) {
Isolate* isolate = Isolate::Current();
bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@ -824,7 +824,7 @@ bool Debug::CompileDebuggerScript(int index) {
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
MessageHandler::ReportMessage(isolate, NULL, message);
isolate->clear_pending_exception();
}
return false;
@ -852,7 +852,7 @@ bool Debug::Load() {
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
DisableBreak disable(true);
DisableBreak disable(isolate_, true);
PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
@ -886,12 +886,12 @@ bool Debug::Load() {
// Compile the JavaScript for the debugger in the debugger context.
debugger->set_compiling_natives(true);
bool caught_exception =
!CompileDebuggerScript(Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(Natives::GetIndex("debug"));
!CompileDebuggerScript(isolate_, Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(isolate_, Natives::GetIndex("debug"));
if (FLAG_enable_liveedit) {
caught_exception = caught_exception ||
!CompileDebuggerScript(Natives::GetIndex("liveedit"));
!CompileDebuggerScript(isolate_, Natives::GetIndex("liveedit"));
}
debugger->set_compiling_natives(false);
@ -958,7 +958,7 @@ Object* Debug::Break(Arguments args) {
}
// Enter the debugger.
EnterDebugger debugger;
EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return heap->undefined_value();
}
@ -1017,7 +1017,7 @@ Object* Debug::Break(Arguments args) {
// Clear queue
thread_local_.queued_step_count_ = 0;
PrepareStep(StepNext, step_count);
PrepareStep(StepNext, step_count, StackFrame::NO_ID);
} else {
// Notify the debug event listeners.
isolate_->debugger()->OnDebugBreak(break_points_hit, false);
@ -1055,7 +1055,7 @@ Object* Debug::Break(Arguments args) {
ClearStepping();
// Set up for the remaining steps.
PrepareStep(step_action, step_count);
PrepareStep(step_action, step_count, StackFrame::NO_ID);
}
if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
@ -1376,7 +1376,9 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
void Debug::PrepareStep(StepAction step_action, int step_count) {
void Debug::PrepareStep(StepAction step_action,
int step_count,
StackFrame::Id frame_id) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
@ -1402,6 +1404,9 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// If there is no JavaScript stack don't do anything.
return;
}
if (frame_id != StackFrame::NO_ID) {
id = frame_id;
}
JavaScriptFrameIterator frames_it(isolate_, id);
JavaScriptFrame* frame = frames_it.frame();
@ -1649,7 +1654,7 @@ bool Debug::IsBreakStub(Code* code) {
// Find the builtin to use for invoking the debug break
Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
Isolate* isolate = Isolate::Current();
Isolate* isolate = code->GetIsolate();
// Find the builtin debug break function matching the calling convention
// used by the call site.
@ -1704,7 +1709,7 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
BreakPositionAlignment position_alignment) {
Isolate* isolate = Isolate::Current();
Isolate* isolate = shared->GetIsolate();
Heap* heap = isolate->heap();
if (!HasDebugInfo(shared)) {
return Handle<Object>(heap->undefined_value(), isolate);
@ -1883,7 +1888,7 @@ static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
// Use compile lazy which will end up compiling the full code in the
// configuration configured above.
bool result = Compiler::CompileLazy(&info);
ASSERT(result != Isolate::Current()->has_pending_exception());
ASSERT(result != info.isolate()->has_pending_exception());
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
@ -2047,7 +2052,7 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
if (FLAG_parallel_recompilation) {
if (FLAG_concurrent_recompilation) {
isolate_->optimizing_compiler_thread()->Flush();
}
@ -2105,10 +2110,9 @@ void Debug::PrepareForBreakPoints() {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
} else if (kind == Code::BUILTIN &&
(function->IsMarkedForInstallingRecompiledCode() ||
function->IsInRecompileQueue() ||
(function->IsInRecompileQueue() ||
function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForParallelRecompilation())) {
function->IsMarkedForConcurrentRecompilation())) {
// Abort in-flight compilation.
Code* shared_code = function->shared()->code();
if (shared_code->kind() == Code::FUNCTION &&
@ -2537,7 +2541,7 @@ void Debug::CreateScriptCache() {
"Debug::CreateScriptCache");
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
script_cache_ = new ScriptCache(isolate_);
// Scan heap for Script objects.
int count = 0;
@ -2612,24 +2616,18 @@ Debugger::Debugger(Isolate* isolate)
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
dispatch_handler_access_(OS::CreateMutex()),
debug_message_dispatch_handler_(NULL),
message_dispatch_helper_thread_(NULL),
host_dispatch_micros_(100 * 1000),
host_dispatch_period_(TimeDelta::FromMilliseconds(100)),
agent_(NULL),
command_queue_(isolate->logger(), kQueueInitialSize),
command_received_(OS::CreateSemaphore(0)),
command_received_(0),
event_command_queue_(isolate->logger(), kQueueInitialSize),
isolate_(isolate) {
}
Debugger::~Debugger() {
delete dispatch_handler_access_;
dispatch_handler_access_ = 0;
delete command_received_;
command_received_ = 0;
}
Debugger::~Debugger() {}
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@ -2760,7 +2758,7 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
}
// Enter the debugger.
EnterDebugger debugger;
EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Clear all current stepping setup.
@ -2826,7 +2824,7 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
if (!EventActive(v8::BeforeCompile)) return;
// Enter the debugger.
EnterDebugger debugger;
EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the event data object.
@ -2863,7 +2861,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool in_debugger = debug->InDebugger();
// Enter the debugger.
EnterDebugger debugger;
EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// If debugging there might be script break points registered for this
@ -2891,7 +2889,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool caught_exception;
Handle<Object> argv[] = { wrapper };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
Isolate::Current()->js_builtins_object(),
isolate_->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
@ -2926,7 +2924,7 @@ void Debugger::OnScriptCollected(int id) {
if (!Debugger::EventActive(v8::ScriptCollected)) return;
// Enter the debugger.
EnterDebugger debugger;
EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the script collected state object.
@ -3043,7 +3041,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<Context> Debugger::GetDebugContext() {
never_unload_debugger_ = true;
EnterDebugger debugger;
EnterDebugger debugger(isolate_);
return isolate_->debug()->debug_context();
}
@ -3152,14 +3150,14 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// Wait for new command in the queue.
if (Debugger::host_dispatch_handler_) {
// In case there is a host dispatch - do periodic dispatches.
if (!command_received_->Wait(host_dispatch_micros_)) {
if (!command_received_.WaitFor(host_dispatch_period_)) {
// Timeout expired, do the dispatch.
Debugger::host_dispatch_handler_();
continue;
}
} else {
// In case there is no host dispatch - just wait.
command_received_->Wait();
command_received_.Wait();
}
// Get the command from the queue.
@ -3272,7 +3270,7 @@ void Debugger::SetEventListener(Handle<Object> callback,
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
ScopedLock with(debugger_access_);
LockGuard<RecursiveMutex> with(debugger_access_);
message_handler_ = handler;
ListenersChanged();
@ -3301,15 +3299,15 @@ void Debugger::ListenersChanged() {
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period) {
TimeDelta period) {
host_dispatch_handler_ = handler;
host_dispatch_micros_ = period * 1000;
host_dispatch_period_ = period;
}
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
ScopedLock with(dispatch_handler_access_);
LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
@ -3322,7 +3320,7 @@ void Debugger::SetDebugMessageDispatchHandler(
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
ScopedLock with(debugger_access_);
LockGuard<RecursiveMutex> with(debugger_access_);
if (message_handler_ != NULL) {
message_handler_(message);
@ -3343,7 +3341,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
client_data);
isolate_->logger()->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
command_received_->Signal();
command_received_.Signal();
// Set the debug command break flag to have the command processed.
if (!isolate_->debug()->InDebugger()) {
@ -3352,7 +3350,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
MessageDispatchHelperThread* dispatch_thread;
{
ScopedLock with(dispatch_handler_access_);
LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
dispatch_thread = message_dispatch_helper_thread_;
}
@ -3381,7 +3379,7 @@ void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
bool Debugger::IsDebuggerActive() {
ScopedLock with(debugger_access_);
LockGuard<RecursiveMutex> with(debugger_access_);
return message_handler_ != NULL ||
!event_listener_.is_null() ||
@ -3396,7 +3394,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Debugger::never_unload_debugger_ = true;
// Enter the debugger.
EnterDebugger debugger;
EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return isolate_->factory()->undefined_value();
}
@ -3410,6 +3408,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
isolate_,
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
isolate_),
@ -3427,7 +3426,6 @@ static void StubMessageHandler2(const v8::Debug::Message& message) {
bool Debugger::StartAgent(const char* name, int port,
bool wait_for_connection) {
ASSERT(Isolate::Current() == isolate_);
if (wait_for_connection) {
// Suspend V8 if it is already running or set V8 to suspend whenever
// it starts.
@ -3439,20 +3437,15 @@ bool Debugger::StartAgent(const char* name, int port,
v8::Debug::DebugBreak();
}
if (Socket::SetUp()) {
if (agent_ == NULL) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();
}
return true;
if (agent_ == NULL) {
agent_ = new DebuggerAgent(isolate_, name, port);
agent_->Start();
}
return false;
return true;
}
void Debugger::StopAgent() {
ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL) {
agent_->Shutdown();
agent_->Join();
@ -3463,7 +3456,6 @@ void Debugger::StopAgent() {
void Debugger::WaitForAgent() {
ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL)
agent_->WaitUntilListening();
}
@ -3472,7 +3464,7 @@ void Debugger::WaitForAgent() {
void Debugger::CallMessageDispatchHandler() {
v8::Debug::DebugMessageDispatchHandler handler;
{
ScopedLock with(dispatch_handler_access_);
LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
handler = Debugger::debug_message_dispatch_handler_;
}
if (handler != NULL) {
@ -3481,8 +3473,8 @@ void Debugger::CallMessageDispatchHandler() {
}
EnterDebugger::EnterDebugger()
: isolate_(Isolate::Current()),
EnterDebugger::EnterDebugger(Isolate* isolate)
: isolate_(isolate),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
@ -3517,7 +3509,6 @@ EnterDebugger::EnterDebugger()
EnterDebugger::~EnterDebugger() {
ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
@ -3632,6 +3623,11 @@ v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
}
v8::Isolate* MessageImpl::GetIsolate() const {
return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
}
v8::Handle<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
@ -3662,7 +3658,7 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
Isolate* isolate = Isolate::Current();
Isolate* isolate = event_data_->GetIsolate();
v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
// Isolate::context() may be NULL when a "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
@ -3703,7 +3699,7 @@ v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
return GetDebugEventContext(Isolate::Current());
return GetDebugEventContext(exec_state_->GetIsolate());
}
@ -3793,24 +3789,17 @@ void CommandMessageQueue::Expand() {
LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
: logger_(logger), queue_(size) {
lock_ = OS::CreateMutex();
}
LockingCommandMessageQueue::~LockingCommandMessageQueue() {
delete lock_;
}
: logger_(logger), queue_(size) {}
bool LockingCommandMessageQueue::IsEmpty() const {
ScopedLock sl(lock_);
LockGuard<Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
CommandMessage LockingCommandMessageQueue::Get() {
ScopedLock sl(lock_);
LockGuard<Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
logger_->DebugEvent("Get", result.text());
return result;
@ -3818,48 +3807,42 @@ CommandMessage LockingCommandMessageQueue::Get() {
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
ScopedLock sl(lock_);
LockGuard<Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
void LockingCommandMessageQueue::Clear() {
ScopedLock sl(lock_);
LockGuard<Mutex> lock_guard(&mutex_);
queue_.Clear();
}
MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
: Thread("v8:MsgDispHelpr"),
isolate_(isolate), sem_(OS::CreateSemaphore(0)),
mutex_(OS::CreateMutex()), already_signalled_(false) {
}
MessageDispatchHelperThread::~MessageDispatchHelperThread() {
delete mutex_;
delete sem_;
isolate_(isolate), sem_(0),
already_signalled_(false) {
}
void MessageDispatchHelperThread::Schedule() {
{
ScopedLock lock(mutex_);
LockGuard<Mutex> lock_guard(&mutex_);
if (already_signalled_) {
return;
}
already_signalled_ = true;
}
sem_->Signal();
sem_.Signal();
}
void MessageDispatchHelperThread::Run() {
while (true) {
sem_->Wait();
sem_.Wait();
{
ScopedLock lock(mutex_);
LockGuard<Mutex> lock_guard(&mutex_);
already_signalled_ = false;
}
{

38
deps/v8/src/debug.h

@ -174,7 +174,8 @@ class BreakLocationIterator {
// the cache is the script id.
class ScriptCache : private HashMap {
public:
ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
explicit ScriptCache(Isolate* isolate)
: HashMap(ScriptMatch), isolate_(isolate), collected_scripts_(10) {}
virtual ~ScriptCache() { Clear(); }
// Add script to the cache.
@ -203,6 +204,7 @@ class ScriptCache : private HashMap {
v8::Persistent<v8::Value>* obj,
void* data);
Isolate* isolate_;
// List used during GC to temporarily store id's of collected scripts.
List<int> collected_scripts_;
};
@ -259,7 +261,9 @@ class Debug {
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
void PrepareStep(StepAction step_action, int step_count);
void PrepareStep(StepAction step_action,
int step_count,
StackFrame::Id frame_id);
void ClearStepping();
void ClearStepOut();
bool IsStepping() { return thread_local_.step_count_ > 0; }
@ -532,7 +536,7 @@ class Debug {
explicit Debug(Isolate* isolate);
~Debug();
static bool CompileDebuggerScript(int index);
static bool CompileDebuggerScript(Isolate* isolate, int index);
void ClearOneShot();
void ActivateStepIn(StackFrame* frame);
void ClearStepIn();
@ -664,6 +668,7 @@ class MessageImpl: public v8::Debug::Message {
virtual v8::Handle<v8::String> GetJSON() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Debug::ClientData* GetClientData() const;
virtual v8::Isolate* GetIsolate() const;
private:
MessageImpl(bool is_event,
@ -762,7 +767,6 @@ class MessageDispatchHelperThread;
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
LockingCommandMessageQueue(Logger* logger, int size);
~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
@ -770,7 +774,7 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
private:
Logger* logger_;
CommandMessageQueue queue_;
Mutex* lock_;
mutable Mutex mutex_;
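// Editorial note: mutex_ is mutable so that the const method IsEmpty()
// can still acquire the lock.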
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
@ -821,7 +825,7 @@ class Debugger {
void SetEventListener(Handle<Object> callback, Handle<Object> data);
void SetMessageHandler(v8::Debug::MessageHandler2 handler);
void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period);
TimeDelta period);
void SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler,
bool provide_locker);
@ -863,7 +867,7 @@ class Debugger {
friend void ForceUnloadDebugger(); // In test-debug.cc
inline bool EventActive(v8::DebugEvent event) {
ScopedLock with(debugger_access_);
LockGuard<RecursiveMutex> lock_guard(debugger_access_);
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
@ -918,7 +922,7 @@ class Debugger {
Handle<Object> event_data);
void ListenersChanged();
Mutex* debugger_access_; // Mutex guarding debugger variables.
RecursiveMutex* debugger_access_; // Mutex guarding debugger variables.
Handle<Object> event_listener_; // Global handle to listener.
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
@ -929,16 +933,16 @@ class Debugger {
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;
Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
Mutex dispatch_handler_access_; // Mutex guarding dispatch handler.
v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
MessageDispatchHelperThread* message_dispatch_helper_thread_;
int host_dispatch_micros_;
TimeDelta host_dispatch_period_;
DebuggerAgent* agent_;
static const int kQueueInitialSize = 4;
LockingCommandMessageQueue command_queue_;
Semaphore* command_received_; // Signaled for each command received.
Semaphore command_received_; // Signaled for each command received.
LockingCommandMessageQueue event_command_queue_;
Isolate* isolate_;
@ -956,7 +960,7 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
EnterDebugger();
explicit EnterDebugger(Isolate* isolate);
~EnterDebugger();
// Check whether the debugger could be entered.
@ -983,12 +987,12 @@ class EnterDebugger BASE_EMBEDDED {
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
explicit DisableBreak(Isolate* isolate, bool disable_break)
: isolate_(isolate) {
prev_disable_break_ = isolate_->debug()->disable_break();
isolate_->debug()->set_disable_break(disable_break);
}
~DisableBreak() {
ASSERT(Isolate::Current() == isolate_);
isolate_->debug()->set_disable_break(prev_disable_break_);
}
@ -1047,7 +1051,7 @@ class Debug_Address {
class MessageDispatchHelperThread: public Thread {
public:
explicit MessageDispatchHelperThread(Isolate* isolate);
~MessageDispatchHelperThread();
~MessageDispatchHelperThread() {}
void Schedule();
@ -1055,8 +1059,8 @@ class MessageDispatchHelperThread: public Thread {
void Run();
Isolate* isolate_;
Semaphore* const sem_;
Mutex* const mutex_;
Semaphore sem_;
Mutex mutex_;
bool already_signalled_;
DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);

710
deps/v8/src/deoptimizer.cc

@ -56,11 +56,10 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
current_(NULL),
#ifdef ENABLE_DEBUGGER_SUPPORT
deoptimized_frame_info_(NULL),
#endif
deoptimizing_code_list_(NULL) {
current_(NULL) {
for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
deopt_entry_code_entries_[i] = -1;
deopt_entry_code_[i] = AllocateCodeChunk(allocator);
@ -73,14 +72,6 @@ DeoptimizerData::~DeoptimizerData() {
allocator_->Free(deopt_entry_code_[i]);
deopt_entry_code_[i] = NULL;
}
DeoptimizingCodeListNode* current = deoptimizing_code_list_;
while (current != NULL) {
DeoptimizingCodeListNode* prev = current;
current = current->next();
delete prev;
}
deoptimizing_code_list_ = NULL;
}
@ -93,33 +84,19 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
Code* DeoptimizerData::FindDeoptimizingCode(Address addr) {
for (DeoptimizingCodeListNode* node = deoptimizing_code_list_;
node != NULL;
node = node->next()) {
if (node->code()->contains(addr)) return *node->code();
}
return NULL;
}
void DeoptimizerData::RemoveDeoptimizingCode(Code* code) {
for (DeoptimizingCodeListNode *prev = NULL, *cur = deoptimizing_code_list_;
cur != NULL;
prev = cur, cur = cur->next()) {
if (*cur->code() == code) {
if (prev == NULL) {
deoptimizing_code_list_ = cur->next();
} else {
prev->set_next(cur->next());
}
delete cur;
return;
Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_->IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
Context* native_context = function_->context()->native_context();
Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
if (code->contains(addr)) return code;
element = code->next_code_link();
}
}
// Deoptimizing code is removed through weak callback. Each object is expected
// to be removed once and only once.
UNREACHABLE();
return NULL;
}
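// Editorial reading of the walk above: each native context threads its
// deoptimized Code objects through next_code_link(), terminated by
// undefined; the match is the code object whose instruction range
// contains addr.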
@ -289,27 +266,42 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
Isolate* isolate = context->GetIsolate();
Zone zone(isolate);
DisallowHeapAllocation no_allocation;
ASSERT(context->IsNativeContext());
visitor->EnterContext(context);
// Create a snapshot of the optimized functions list. This is needed because
// visitors might remove more than one link from the list at once.
ZoneList<JSFunction*> snapshot(1, &zone);
// Visit the list of optimized functions, removing elements that
// no longer refer to optimized code.
JSFunction* prev = NULL;
Object* element = context->OptimizedFunctionsListHead();
while (!element->IsUndefined()) {
JSFunction* element_function = JSFunction::cast(element);
snapshot.Add(element_function, &zone);
element = element_function->next_function_link();
}
// Run through the snapshot of optimized functions and visit them.
for (int i = 0; i < snapshot.length(); ++i) {
visitor->VisitFunction(snapshot.at(i));
JSFunction* function = JSFunction::cast(element);
Object* next = function->next_function_link();
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
(visitor->VisitFunction(function),
function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
// The function no longer refers to optimized code, or the visitor
// changed the code it refers to so that it is no longer optimized code.
// Remove the function from this list.
if (prev != NULL) {
prev->set_next_function_link(next);
} else {
context->SetOptimizedFunctionsListHead(next);
}
// The visitor should not alter the link directly.
ASSERT(function->next_function_link() == next);
// Set the next function link to undefined to indicate it is no longer
// in the optimized functions list.
function->set_next_function_link(context->GetHeap()->undefined_value());
} else {
// The visitor should not alter the link directly.
ASSERT(function->next_function_link() == next);
// Preserve this element.
prev = function;
}
element = next;
}
visitor->LeaveContext(context);
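The rewritten visitor prunes the context's singly linked list of optimized functions in place instead of snapshotting it into a ZoneList first. A minimal standalone sketch of the same unlink-while-iterating pattern, using a hypothetical node type rather than V8's JSFunction links:

#include <cstddef>

struct Node {
  bool stale;
  Node* next;
};

// Walk the list once, rewiring links around stale nodes. prev tracks the
// last node that was kept, mirroring the prev/element walk above.
Node* RemoveStale(Node* head) {
  Node* prev = NULL;
  for (Node* cur = head; cur != NULL; ) {
    Node* next = cur->next;
    if (cur->stale) {
      if (prev == NULL) {
        head = next;        // unlinking the head moves the list head forward
      } else {
        prev->next = next;  // bypass the stale node
      }
      cur->next = NULL;     // detach, like set_next_function_link(undefined)
    } else {
      prev = cur;           // keep this node; it becomes the new prev
    }
    cur = next;
  }
  return head;
}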
@ -321,7 +313,7 @@ void Deoptimizer::VisitAllOptimizedFunctions(
OptimizedFunctionVisitor* visitor) {
DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
// Run through the list of all native contexts.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
@ -330,217 +322,161 @@ void Deoptimizer::VisitAllOptimizedFunctions(
}
// Removes the functions selected by the given filter from the optimized
// function list of the given context and adds their code to the list of
// code objects to be deoptimized.
static void SelectCodeToDeoptimize(Context* context,
OptimizedFunctionFilter* filter,
ZoneList<Code*>* codes,
Zone* zone,
Object* undefined) {
// Unlink functions referring to code marked for deoptimization, then move
// marked code from the optimized code list to the deoptimized code list,
// and patch code for lazy deopt.
void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
DisallowHeapAllocation no_allocation;
Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
Object* remainder_head = undefined;
Object* remainder_tail = undefined;
// TODO(titzer): rewrite to not modify unselected functions.
while (current != undefined) {
JSFunction* function = JSFunction::cast(current);
current = function->next_function_link();
if (filter->TakeFunction(function)) {
// Extract this function from the context's list and remember the code.
// A "closure" that unlinks optimized code that is going to be
// deoptimized from the functions that refer to it.
class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
public:
virtual void EnterContext(Context* context) { } // Don't care.
virtual void LeaveContext(Context* context) { } // Don't care.
virtual void VisitFunction(JSFunction* function) {
Code* code = function->code();
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
if (code->marked_for_deoptimization()) {
ASSERT(codes->Contains(code));
} else {
code->set_marked_for_deoptimization(true);
codes->Add(code, zone);
}
if (!code->marked_for_deoptimization()) return;
// Unlink this function and evict from optimized code map.
SharedFunctionInfo* shared = function->shared();
// Replace the function's code with the shared code.
function->set_code(shared->code());
// Evict the code from the optimized code map.
shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
// Remove the function from the optimized functions list.
function->set_next_function_link(undefined);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
PrintF("[deoptimizer unlinked: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
} else {
// Don't select this function; link it back into the list.
if (remainder_head == undefined) {
remainder_head = function;
} else {
JSFunction::cast(remainder_tail)->set_next_function_link(function);
}
remainder_tail = function;
}
}
if (remainder_tail != undefined) {
JSFunction::cast(remainder_tail)->set_next_function_link(undefined);
}
context->set(Context::OPTIMIZED_FUNCTIONS_LIST, remainder_head);
}
};
// Unlink all functions that refer to marked code.
SelectedCodeUnlinker unlinker;
VisitAllOptimizedFunctionsForContext(context, &unlinker);
class DeoptimizeAllFilter : public OptimizedFunctionFilter {
public:
virtual bool TakeFunction(JSFunction* function) {
return true;
}
};
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
Isolate* isolate = context->GetHeap()->isolate();
Zone zone(isolate);
ZoneList<Code*> codes(10, &zone);
// Walk over all optimized code objects in this native context.
Code* prev = NULL;
Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
Object* next = code->next_code_link();
if (code->marked_for_deoptimization()) {
// Put the code into the list for later patching.
codes.Add(code, &zone);
if (prev != NULL) {
// Skip this code in the optimized code list.
prev->set_next_code_link(next);
} else {
// There was no previous node, the next node is the new head.
context->SetOptimizedCodeListHead(next);
}
class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
public:
explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {}
virtual bool TakeFunction(JSFunction* function) {
return function->code() == code_;
// Move the code to the _deoptimized_ code list.
code->set_next_code_link(context->DeoptimizedCodeListHead());
context->SetDeoptimizedCodeListHead(code);
} else {
// Not marked; preserve this element.
prev = code;
}
element = next;
}
private:
Code* code_;
};
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
// Now patch all the codes for deoptimization.
for (int i = 0; i < codes.length(); i++) {
// It is finally time to die, code object.
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
public:
virtual bool TakeFunction(JSFunction* function) {
return function->code()->marked_for_deoptimization();
// We might be in the middle of incremental marking with compaction.
// Tell the collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
}
};
}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
if (FLAG_trace_deopt) {
PrintF("[deoptimize all contexts]\n");
PrintF("[deoptimize all code in all contexts]\n");
}
DeoptimizeAllFilter filter;
DeoptimizeAllFunctionsWith(isolate, &filter);
}
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
DisallowHeapAllocation no_allocation;
DeoptimizeAllFilter filter;
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
ASSERT(proto->IsJSGlobalObject());
DeoptimizeAllFunctionsForContext(
GlobalObject::cast(proto)->native_context(), &filter);
} else if (object->IsGlobalObject()) {
DeoptimizeAllFunctionsForContext(
GlobalObject::cast(object)->native_context(), &filter);
// For all contexts, mark all code, then deoptimize.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
Context* native_context = Context::cast(context);
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context->get(Context::NEXT_CONTEXT_LINK);
}
}
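With the list changes above, forced deoptimization becomes a two-phase mark-then-sweep operation. A hedged usage sketch, assuming a Code* and an Isolate* are in hand (both calls appear elsewhere in this patch):

// Phase 1: mark the code object(s) that must go away.
code->set_marked_for_deoptimization(true);
// Phase 2: unlink referring functions, move the marked code onto the
// per-context deoptimized code list, and patch it for lazy deopt.
Deoptimizer::DeoptimizeMarkedCode(isolate);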
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Code* code = function->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
DeoptimizeWithMatchingCodeFilter filter(code);
DeoptimizeAllFunctionsForContext(
function->context()->native_context(), &filter);
}
void Deoptimizer::DeoptimizeAllFunctionsForContext(
Context* context, OptimizedFunctionFilter* filter) {
ASSERT(context->IsNativeContext());
Isolate* isolate = context->GetIsolate();
Object* undefined = isolate->heap()->undefined_value();
Zone zone(isolate);
ZoneList<Code*> codes(4, &zone);
SelectCodeToDeoptimize(context, filter, &codes, &zone, undefined);
for (int i = 0; i < codes.length(); i++) {
DeoptimizeCode(isolate, codes.at(i));
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
if (FLAG_trace_deopt) {
PrintF("[deoptimize marked code in all contexts]\n");
}
}
void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
OptimizedFunctionFilter* filter) {
DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
// For all contexts, deoptimize code already marked.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
DeoptimizeAllFunctionsForContext(Context::cast(context), filter);
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
Context* native_context = Context::cast(context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context->get(Context::NEXT_CONTEXT_LINK);
}
}
void Deoptimizer::DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes) {
if (codes->length() == 0) return; // Nothing to do.
// Mark the code; any functions referring to this code will be selected.
for (int i = 0; i < codes->length(); i++) {
ASSERT(!codes->at(i)->marked_for_deoptimization());
codes->at(i)->set_marked_for_deoptimization(true);
}
// For all contexts, remove optimized functions that refer to the selected
// code from the optimized function lists.
Object* undefined = isolate->heap()->undefined_value();
Zone zone(isolate);
Object* list = isolate->heap()->native_contexts_list();
DeoptimizeMarkedCodeFilter filter;
while (!list->IsUndefined()) {
Context* context = Context::cast(list);
// Note that selecting code unlinks the functions that refer to it.
SelectCodeToDeoptimize(context, &filter, codes, &zone, undefined);
list = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
if (FLAG_trace_deopt) {
PrintF("[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
reinterpret_cast<intptr_t>(object));
}
// Now deoptimize all the code.
for (int i = 0; i < codes->length(); i++) {
DeoptimizeCode(isolate, codes->at(i));
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
ASSERT(proto->IsJSGlobalObject());
Context* native_context = GlobalObject::cast(proto)->native_context();
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
} else if (object->IsGlobalObject()) {
Context* native_context = GlobalObject::cast(object)->native_context();
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
}
}
void Deoptimizer::DeoptimizeCode(Isolate* isolate, Code* code) {
HandleScope scope(isolate);
DisallowHeapAllocation nha;
// Do platform-specific patching of the optimized code.
PatchCodeForDeoptimization(isolate, code);
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
void Deoptimizer::MarkAllCodeForContext(Context* context) {
Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
code->set_marked_for_deoptimization(true);
element = code->next_code_link();
}
}
void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* parameter) {
DeoptimizingCodeListNode* node =
reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
DeoptimizerData* data =
reinterpret_cast<Isolate*>(isolate)->deoptimizer_data();
data->RemoveDeoptimizingCode(*node->code());
#ifdef DEBUG
for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
current != NULL;
current = current->next()) {
ASSERT(current != node);
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Code* code = function->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
// refer to that code. The code cannot be shared across native contexts,
// so we only need to search one.
code->set_marked_for_deoptimization(true);
DeoptimizeMarkedCodeForContext(function->context()->native_context());
}
#endif
}
@ -559,8 +495,6 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
return (frame_type == StackFrame::STUB)
? FLAG_trace_stub_failures
: FLAG_trace_deopt;
case OSR:
return FLAG_trace_osr;
}
UNREACHABLE();
return false;
@ -573,7 +507,6 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
case SOFT: return "soft";
case LAZY: return "lazy";
case DEBUGGER: return "debugger";
case OSR: return "OSR";
}
UNREACHABLE();
return NULL;
@ -627,6 +560,14 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
}
}
compiled_code_ = FindOptimizedCode(function, optimized_code);
#if DEBUG
ASSERT(compiled_code_ != NULL);
if (type == EAGER || type == SOFT || type == LAZY) {
ASSERT(compiled_code_->kind() != Code::FUNCTION);
}
#endif
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
@ -647,21 +588,11 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
case Deoptimizer::SOFT:
case Deoptimizer::EAGER:
case Deoptimizer::LAZY: {
Code* compiled_code =
isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
Code* compiled_code = FindDeoptimizingCode(from_);
return (compiled_code == NULL)
? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
case Deoptimizer::OSR: {
// The function has already been optimized and we're transitioning
// from the unoptimized shared version to the optimized one in the
// function. The return address (from_) points to unoptimized code.
Code* compiled_code = function->code();
ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!compiled_code->contains(from_));
return compiled_code;
}
case Deoptimizer::DEBUGGER:
ASSERT(optimized_code->contains(from_));
return optimized_code;
@ -765,11 +696,18 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
DeoptimizingCodeListNode* node =
isolate->deoptimizer_data()->deoptimizing_code_list_;
while (node != NULL) {
length++;
node = node->next();
// Count all entries in the deoptimizing code list of every context.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
Context* native_context = Context::cast(context);
Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
length++;
element = code->next_code_link();
}
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
return length;
}
@ -778,18 +716,14 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
if (bailout_type_ == OSR) {
DoComputeOsrOutputFrame();
return;
}
// Print some helpful diagnostic information.
int64_t start = OS::Ticks();
if (FLAG_log_timer_events &&
compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
if (trace_) {
timer.Start();
PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
@ -870,7 +804,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Print some helpful diagnostic information.
if (trace_) {
double ms = static_cast<double>(OS::Ticks() - start) / 1000;
double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
@ -1696,13 +1630,25 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArray::cast(*elements));
object->set_elements(FixedArrayBase::cast(*elements));
for (int i = 0; i < length - 3; ++i) {
Handle<Object> value = MaterializeNextValue();
object->FastPropertyAtPut(i, *value);
}
break;
}
case JS_ARRAY_TYPE: {
Handle<JSArray> object =
isolate_->factory()->NewJSArray(0, map->elements_kind());
materialized_objects_->Add(object);
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
Handle<Object> length = MaterializeNextValue();
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArrayBase::cast(*elements));
object->set_length(*length);
break;
}
default:
PrintF("[couldn't handle instance type %d]\n", map->instance_type());
UNREACHABLE();
@ -2391,252 +2337,69 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset) {
disasm::NameConverter converter;
FrameDescription* output = output_[0];
// The input values are all part of the unoptimized frame so they
// are all tagged pointers.
uintptr_t input_value = input_->GetFrameSlot(*input_offset);
Object* input_object = reinterpret_cast<Object*>(input_value);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
case Translation::BEGIN:
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
UNREACHABLE(); // Malformed input.
return false;
case Translation::REGISTER: {
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
input_value,
*input_offset);
}
output->SetRegister(output_reg, input_value);
break;
}
case Translation::INT32_REGISTER: {
int32_t int32_value = 0;
if (!input_object->ToInt32(&int32_value)) return false;
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
PrintF(" %s <- %d (int32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
int32_value,
*input_offset);
}
output->SetRegister(output_reg, int32_value);
break;
}
case Translation::UINT32_REGISTER: {
uint32_t uint32_value = 0;
if (!input_object->ToUint32(&uint32_value)) return false;
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
uint32_value,
*input_offset);
}
output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
break;
}
case Translation::DOUBLE_REGISTER: {
// Abort OSR if we don't have a number.
if (!input_object->IsNumber()) return false;
int output_reg = iterator->Next();
double double_value = input_object->Number();
if (FLAG_trace_osr) {
PrintF(" %s <- %g (double) ; [sp + %d]\n",
DoubleRegister::AllocationIndexToString(output_reg),
double_value,
*input_offset);
}
output->SetDoubleRegister(output_reg, double_value);
break;
}
case Translation::STACK_SLOT: {
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
*input_offset);
reinterpret_cast<Object*>(input_value)->ShortPrint();
PrintF("\n");
}
output->SetFrameSlot(output_offset, input_value);
break;
}
case Translation::INT32_STACK_SLOT: {
int32_t int32_value = 0;
if (!input_object->ToInt32(&int32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
output_offset,
int32_value,
*input_offset);
}
output->SetFrameSlot(output_offset, int32_value);
break;
}
case Translation::UINT32_STACK_SLOT: {
uint32_t uint32_value = 0;
if (!input_object->ToUint32(&uint32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
output_offset,
uint32_value,
*input_offset);
}
output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
break;
}
case Translation::DOUBLE_STACK_SLOT: {
static const int kLowerOffset = 0 * kPointerSize;
static const int kUpperOffset = 1 * kPointerSize;
// Abort OSR if we don't have a number.
if (!input_object->IsNumber()) return false;
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
double double_value = input_object->Number();
uint64_t int_value = BitCast<uint64_t, double>(double_value);
int32_t lower = static_cast<int32_t>(int_value);
int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
output_offset + kUpperOffset,
upper,
double_value,
*input_offset);
PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
output_offset + kLowerOffset,
lower,
double_value,
*input_offset);
}
output->SetFrameSlot(output_offset + kLowerOffset, lower);
output->SetFrameSlot(output_offset + kUpperOffset, upper);
break;
}
case Translation::LITERAL: {
// Just ignore non-materialized literals.
iterator->Next();
break;
}
case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_OBJECT:
case Translation::CAPTURED_OBJECT: {
// Optimized code assumes that the argument object has not been
// materialized and so bypasses it when doing arguments access.
// We should have bailed out before starting the frame
// translation.
UNREACHABLE();
return false;
}
}
*input_offset -= kPointerSize;
return true;
}
void Deoptimizer::PatchInterruptCode(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* replacement_code =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code) {
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
PatchInterruptCodeAt(unoptimized_code,
ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()));
PatchInterruptCodeAt(unoptimized,
back_edges.pc(),
interrupt_code,
replacement_code);
}
}
unoptimized_code->set_back_edges_patched_for_osr(true);
#ifdef DEBUG
Deoptimizer::VerifyInterruptCode(
unoptimized_code, interrupt_code, replacement_code, loop_nesting_level);
#endif // DEBUG
unoptimized->set_back_edges_patched_for_osr(true);
ASSERT(Deoptimizer::VerifyInterruptCode(
isolate, unoptimized, loop_nesting_level));
}
void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code) {
void Deoptimizer::RevertInterruptCode(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* interrupt_code =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
// Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized_code->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
ASSERT(unoptimized->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
RevertInterruptCodeAt(unoptimized_code,
back_edges.pc(),
interrupt_code,
replacement_code);
ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()));
RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
}
}
unoptimized_code->set_back_edges_patched_for_osr(false);
unoptimized_code->set_allow_osr_at_loop_nesting_level(0);
#ifdef DEBUG
unoptimized->set_back_edges_patched_for_osr(false);
unoptimized->set_allow_osr_at_loop_nesting_level(0);
// Assert that none of the back edges are patched anymore.
Deoptimizer::VerifyInterruptCode(
unoptimized_code, interrupt_code, replacement_code, -1);
#endif // DEBUG
ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
}
#ifdef DEBUG
void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code,
bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
Code* unoptimized,
int loop_nesting_level) {
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
DisallowHeapAllocation no_gc;
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
uint32_t loop_depth = back_edges.loop_depth();
@ -2644,11 +2407,11 @@ void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
InterruptCodeIsPatched(unoptimized_code,
back_edges.pc(),
interrupt_code,
replacement_code));
GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()) != NOT_PATCHED);
}
return true;
}
#endif // DEBUG
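Taken together, the slimmed-down entry points give back-edge patching a symmetric lifecycle. A sketch of how a caller might drive it, assuming the new two-argument signatures introduced by this patch:

// Arm every back edge at the allowed loop depth to jump to OSR code.
Deoptimizer::PatchInterruptCode(isolate, unoptimized_code);
// ... a hot back edge fires and on-stack replacement is taken ...
// Afterwards, restore the ordinary interrupt checks.
Deoptimizer::RevertInterruptCode(isolate, unoptimized_code);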
@ -2659,12 +2422,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// into account so we have to avoid double counting them (-2).
unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
if (bailout_type_ == OSR) {
// TODO(kasperl): It would be nice if we could verify that the
// size matches the stack height we can compute based on the
// environment at the OSR entry. The code for that is built into
// the DoComputeOsrOutputFrame function for now.
} else if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
@ -3103,22 +2861,6 @@ const char* Translation::StringFor(Opcode opcode) {
#endif
DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
GlobalHandles* global_handles = code->GetIsolate()->global_handles();
// Globalize the code object and make it weak.
code_ = Handle<Code>::cast(global_handles->Create(code));
global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
this,
Deoptimizer::HandleWeakDeoptimizedCode);
}
DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
GlobalHandles* global_handles = code_->GetIsolate()->global_handles();
global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
}
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.

128
deps/v8/src/deoptimizer.h

@ -58,7 +58,6 @@ static inline double read_double_value(Address p) {
class FrameDescription;
class TranslationIterator;
class DeoptimizingCodeListNode;
class DeoptimizedFrameInfo;
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
@ -121,29 +120,22 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
class OptimizedFunctionFilter BASE_EMBEDDED {
public:
virtual ~OptimizedFunctionFilter() {}
virtual bool TakeFunction(JSFunction* function) = 0;
};
class Deoptimizer;
class Deoptimizer : public Malloced {
public:
enum BailoutType {
EAGER,
LAZY,
SOFT,
OSR,
// This last bailout type is not really a bailout, but used by the
// debugger to deoptimize stack frames to allow inspection.
DEBUGGER
};
enum InterruptPatchState {
NOT_PATCHED,
PATCHED_FOR_OSR
};
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@ -203,68 +195,54 @@ class Deoptimizer : public Malloced {
// execution returns.
static void DeoptimizeFunction(JSFunction* function);
// Iterate over all the functions which share the same code object
// and make them use the unoptimized version.
static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
// Deoptimize all functions in the heap.
// Deoptimize all code in the given isolate.
static void DeoptimizeAll(Isolate* isolate);
// Deoptimize code associated with the given global object.
static void DeoptimizeGlobalObject(JSObject* object);
static void DeoptimizeAllFunctionsWith(Isolate* isolate,
OptimizedFunctionFilter* filter);
static void DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes);
static void DeoptimizeAllFunctionsForContext(
Context* context, OptimizedFunctionFilter* filter);
static void VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor);
// Deoptimizes all optimized code that has been previously marked
// (via code->set_marked_for_deoptimization) and unlinks all functions that
// refer to that code.
static void DeoptimizeMarkedCode(Isolate* isolate);
static void VisitAllOptimizedFunctions(Isolate* isolate,
OptimizedFunctionVisitor* visitor);
// Visit all the known optimized functions in a given isolate.
static void VisitAllOptimizedFunctions(
Isolate* isolate, OptimizedFunctionVisitor* visitor);
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
// Patch all interrupts with the allowed loop depth in the unoptimized code
// to unconditionally call replacement_code.
static void PatchInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code);
static void PatchInterruptCode(Isolate* isolate,
Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code);
// Change all interrupts patched in the unoptimized code
// back to normal interrupts.
static void RevertInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code);
static void RevertInterruptCode(Isolate* isolate,
Code* unoptimized_code);
// Change a patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code);
Code* interrupt_code);
#ifdef DEBUG
static bool InterruptCodeIsPatched(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code);
static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
Code* unoptimized_code,
Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
static void VerifyInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code,
static bool VerifyInterruptCode(Isolate* isolate,
Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
@ -377,7 +355,6 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
void DoComputeOsrOutputFrame();
void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index);
@ -403,13 +380,6 @@ class Deoptimizer : public Malloced {
unsigned output_offset,
DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);
// Translate a command for OSR. Updates the input offset to be used for
// the next command. Returns false if translation of the command failed
// (e.g., a number conversion failed) and may or may not have updated the
// input offset.
bool DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset);
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@ -443,17 +413,24 @@ class Deoptimizer : public Malloced {
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
// Weak handle callback for deoptimizing code objects.
static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* data);
// Marks all the code in the given context for deoptimization.
static void MarkAllCodeForContext(Context* native_context);
// Deoptimize the given code and add to appropriate deoptimization lists.
static void DeoptimizeCode(Isolate* isolate, Code* code);
// Visit all the known optimized functions in a given context.
static void VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor);
// Deoptimizes all code marked in the given context.
static void DeoptimizeMarkedCodeForContext(Context* native_context);
// Patch the given code so that it will deoptimize itself.
static void PatchCodeForDeoptimization(Isolate* isolate, Code* code);
// Searches the list of known deoptimizing code for a Code object
// containing the given address (which is supposedly faster than
// searching all code objects).
Code* FindDeoptimizingCode(Address addr);
// Fill the input from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
// deoptimizations the input frame is filled in generated code.
@ -515,7 +492,6 @@ class Deoptimizer : public Malloced {
static const int table_entry_size_;
friend class FrameDescription;
friend class DeoptimizingCodeListNode;
friend class DeoptimizedFrameInfo;
};
@ -689,24 +665,16 @@ class DeoptimizerData {
void Iterate(ObjectVisitor* v);
#endif
Code* FindDeoptimizingCode(Address addr);
void RemoveDeoptimizingCode(Code* code);
private:
MemoryAllocator* allocator_;
int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
#endif
// List of deoptimized code that still has references from active stack
// frames. These code objects are needed by the deoptimizer when deoptimizing
// a frame for which the code object for the function has been
// changed from the code present when deoptimizing was done.
DeoptimizingCodeListNode* deoptimizing_code_list_;
Deoptimizer* current_;
friend class Deoptimizer;
@ -824,26 +792,6 @@ class Translation BASE_EMBEDDED {
};
// Linked list holding deoptimizing code objects. The deoptimizing code objects
// are kept as weak handles until they are no longer activated on the stack.
class DeoptimizingCodeListNode : public Malloced {
public:
explicit DeoptimizingCodeListNode(Code* code);
~DeoptimizingCodeListNode();
DeoptimizingCodeListNode* next() const { return next_; }
void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
Handle<Code> code() const { return code_; }
private:
// Global (weak) handle to the deoptimizing code object.
Handle<Code> code_;
// Next pointer for linked list.
DeoptimizingCodeListNode* next_;
};
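With this class deleted, deoptimized code is no longer tracked through malloced nodes holding weak global handles; the link now lives in the Code object itself, one list head per native context. A sketch of walking the replacement list, using only accessors that appear in this patch:

Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined()) {
  Code* code = Code::cast(element);
  // The next pointer is intrusive: it is stored on the Code object,
  // so no separate node allocation or weak callback is needed.
  element = code->next_code_link();
}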
class SlotRef BASE_EMBEDDED {
public:
enum SlotRepresentation {

6
deps/v8/src/disassembler.cc

@ -71,7 +71,7 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
const char* name = Isolate::Current()->builtins()->Lookup(pc);
const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
if (name != NULL) {
OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
return v8_buffer_.start();
@ -117,8 +117,8 @@ static int DecodeIt(Isolate* isolate,
byte* end) {
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
Heap* heap = HEAP;
ExternalReferenceEncoder ref_encoder(isolate);
Heap* heap = isolate->heap();
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;

2
deps/v8/src/effects.h

@ -55,7 +55,7 @@ struct Effect {
Modality modality;
Bounds bounds;
Effect() {}
Effect() : modality(DEFINITE) {}
Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
// The unknown effect.

12
deps/v8/src/elements.cc

@ -154,7 +154,8 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
ASSERT(to_base->map() !=
from_base->GetIsolate()->heap()->fixed_cow_array_map());
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@ -492,7 +493,6 @@ static void TraceTopFrame(Isolate* isolate) {
}
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
Isolate* isolate = Isolate::Current();
Code* apply_builtin = isolate->builtins()->builtin(
Builtins::kFunctionApply);
if (raw_frame->unchecked_code() == apply_builtin) {
@ -581,14 +581,8 @@ class ElementsAccessorBase : public ElementsAccessor {
// When objects are first allocated, their elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
Heap* heap = holder->GetHeap();
if (map == heap->one_pointer_filler_map() ||
map == heap->two_pointer_filler_map() ||
map == heap->free_space_map()) {
return;
}
if (fixed_array_base->IsFiller()) return;
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();

119
deps/v8/src/execution.cc

@ -148,7 +148,8 @@ static Handle<Object> Invoke(bool is_construct,
}
Handle<Object> Execution::Call(Handle<Object> callable,
Handle<Object> Execution::Call(Isolate* isolate,
Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@ -157,7 +158,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
*pending_exception = false;
if (!callable->IsJSFunction()) {
callable = TryGetFunctionDelegate(callable, pending_exception);
callable = TryGetFunctionDelegate(isolate, callable, pending_exception);
if (*pending_exception) return callable;
}
Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
@ -174,7 +175,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
receiver = Handle<Object>(global, func->GetIsolate());
}
} else {
receiver = ToObject(receiver, pending_exception);
receiver = ToObject(isolate, receiver, pending_exception);
}
if (*pending_exception) return callable;
}
@ -234,9 +235,9 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
}
Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate,
Handle<Object> object) {
ASSERT(!object->IsJSFunction());
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
// If you return a function from here, it will be called when an
@ -261,10 +262,10 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
}
Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
Handle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
Isolate* isolate = Isolate::Current();
// If object is a function proxy, get its handler. Iterate if necessary.
Object* fun = *object;
@ -292,9 +293,9 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
}
Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate,
Handle<Object> object) {
ASSERT(!object->IsJSFunction());
Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@ -319,10 +320,10 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
Handle<Object> Execution::TryGetConstructorDelegate(
Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@ -458,6 +459,22 @@ void StackGuard::RequestGC() {
}
bool StackGuard::IsInstallCodeRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & INSTALL_CODE) != 0;
}
void StackGuard::RequestInstallCode() {
ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= INSTALL_CODE;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
isolate_->heap()->SetStackLimits();
}
}
bool StackGuard::IsFullDeopt() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
@ -596,54 +613,60 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
do { \
Isolate* isolate = Isolate::Current(); \
Handle<Object> argv[] = args; \
ASSERT(has_pending_exception != NULL); \
return Call(isolate->name##_fun(), \
return Call(isolate, \
isolate->name##_fun(), \
isolate->js_builtins_object(), \
ARRAY_SIZE(argv), argv, \
has_pending_exception); \
} while (false)
Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::ToNumber(
Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_number, { obj }, exc);
}
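For reference, a hand-expanded sketch of what the macro turns one of these wrappers into (illustrative only; to_number_fun follows the isolate accessor naming implied by isolate->name##_fun()):

Handle<Object> Execution::ToNumber(
    Isolate* isolate, Handle<Object> obj, bool* exc) {
  Handle<Object> argv[] = { obj };
  ASSERT(exc != NULL);
  return Call(isolate,
              isolate->to_number_fun(),
              isolate->js_builtins_object(),
              ARRAY_SIZE(argv), argv,
              exc);
}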
Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::ToString(
Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_string, { obj }, exc);
}
Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::ToDetailString(
Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
}
Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::ToObject(
Isolate* isolate, Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
RETURN_NATIVE_CALL(to_object, { obj }, exc);
}
Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::ToInteger(
Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_integer, { obj }, exc);
}
Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::ToUint32(
Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
}
Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::ToInt32(
Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_int32, { obj }, exc);
}
Handle<Object> Execution::NewDate(double time, bool* exc) {
Isolate* isolate = Isolate::Current();
Handle<Object> Execution::NewDate(Isolate* isolate, double time, bool* exc) {
Handle<Object> time_obj = isolate->factory()->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}
@ -698,15 +721,18 @@ Handle<JSFunction> Execution::InstantiateFunction(
Handle<FunctionTemplateInfo> data,
bool* exc) {
Isolate* isolate = data->GetIsolate();
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
Object* elm =
isolate->native_context()->function_cache()->
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
if (!data->do_not_cache()) {
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
Object* elm =
isolate->native_context()->function_cache()->
GetElementNoExceptionThrown(isolate, serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
}
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
Handle<Object> result = Call(isolate->instantiate_fun(),
Handle<Object> result = Call(isolate,
isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@ -738,7 +764,8 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
return Handle<JSObject>(JSObject::cast(result));
} else {
Handle<Object> args[] = { data };
Handle<Object> result = Call(isolate->instantiate_fun(),
Handle<Object> result = Call(isolate,
isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@ -749,12 +776,13 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
}
void Execution::ConfigureInstance(Handle<Object> instance,
void Execution::ConfigureInstance(Isolate* isolate,
Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
Isolate* isolate = Isolate::Current();
Handle<Object> args[] = { instance, instance_template };
Execution::Call(isolate->configure_instance_fun(),
Execution::Call(isolate,
isolate->configure_instance_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@ -782,9 +810,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
}
static Object* RuntimePreempt() {
Isolate* isolate = Isolate::Current();
static Object* RuntimePreempt(Isolate* isolate) {
// Clear the preempt request flag.
isolate->stack_guard()->Continue(PREEMPT);
@ -813,9 +839,7 @@ static Object* RuntimePreempt() {
#ifdef ENABLE_DEBUGGER_SUPPORT
Object* Execution::DebugBreakHelper() {
Isolate* isolate = Isolate::Current();
Object* Execution::DebugBreakHelper(Isolate* isolate) {
// Just continue if breaks are disabled.
if (isolate->debug()->disable_break()) {
return isolate->heap()->undefined_value();
@ -861,15 +885,15 @@ Object* Execution::DebugBreakHelper() {
// Clear the debug break request flag.
isolate->stack_guard()->Continue(DEBUGBREAK);
ProcessDebugMessages(debug_command_only);
ProcessDebugMessages(isolate, debug_command_only);
// Return to continue execution.
return isolate->heap()->undefined_value();
}
void Execution::ProcessDebugMessages(bool debug_command_only) {
Isolate* isolate = Isolate::Current();
void Execution::ProcessDebugMessages(Isolate* isolate,
bool debug_command_only) {
// Clear the debug command request flag.
isolate->stack_guard()->Continue(DEBUGCOMMAND);
@ -880,7 +904,7 @@ void Execution::ProcessDebugMessages(bool debug_command_only) {
HandleScope scope(isolate);
// Enter the debugger. Just continue if we fail to enter the debugger.
EnterDebugger debugger;
EnterDebugger debugger(isolate);
if (debugger.FailedToEnter()) {
return;
}
@ -908,13 +932,12 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
isolate->counters()->stack_interrupts()->Increment();
isolate->counters()->runtime_profiler_ticks()->Increment();
isolate->runtime_profiler()->OptimizeNow();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();
DebugBreakHelper(isolate);
}
#endif
if (stack_guard->IsPreempted()) RuntimePreempt();
if (stack_guard->IsPreempted()) RuntimePreempt(isolate);
if (stack_guard->IsTerminateExecution()) {
stack_guard->Continue(TERMINATE);
return isolate->TerminateExecution();
@ -927,6 +950,12 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
if (stack_guard->IsInstallCodeRequest()) {
ASSERT(FLAG_concurrent_recompilation);
stack_guard->Continue(INSTALL_CODE);
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
isolate->runtime_profiler()->OptimizeNow();
return isolate->heap()->undefined_value();
}
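The new INSTALL_CODE interrupt follows the same request/poll pattern as the other stack-guard flags: the requesting side sets the flag and clamps the stack limits, and the next interrupt check lands in the handler above. A hedged sketch of the producer side, using RequestInstallCode as added in this patch:

// When concurrent recompilation has optimized code ready to install:
isolate->stack_guard()->RequestInstallCode();
// Running JS soon trips the lowered stack limit; HandleStackGuardInterrupt
// sees INSTALL_CODE and calls
// isolate->optimizing_compiler_thread()->InstallOptimizedFunctions().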

51
deps/v8/src/execution.h

@ -42,7 +42,8 @@ enum InterruptFlag {
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
GC_REQUEST = 1 << 5,
FULL_DEOPT = 1 << 6
FULL_DEOPT = 1 << 6,
INSTALL_CODE = 1 << 7
};
@ -62,7 +63,8 @@ class Execution : public AllStatic {
// and the function called is not in strict mode, receiver is converted to
// an object.
//
static Handle<Object> Call(Handle<Object> callable,
static Handle<Object> Call(Isolate* isolate,
Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@ -92,28 +94,36 @@ class Execution : public AllStatic {
bool* caught_exception);
// ECMA-262 9.3
static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
static Handle<Object> ToNumber(
Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.4
static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
static Handle<Object> ToInteger(
Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.5
static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
static Handle<Object> ToInt32(
Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.6
static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
static Handle<Object> ToUint32(
Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
static Handle<Object> ToString(Handle<Object> obj, bool* exc);
static Handle<Object> ToString(
Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
static Handle<Object> ToDetailString(
Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.9
static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
static Handle<Object> ToObject(
Isolate* isolate, Handle<Object> obj, bool* exc);
// Create a new date object from 'time'.
static Handle<Object> NewDate(double time, bool* exc);
static Handle<Object> NewDate(
Isolate* isolate, double time, bool* exc);
// Create a new regular expression object from 'pattern' and 'flags'.
static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern,
@ -128,7 +138,8 @@ class Execution : public AllStatic {
Handle<FunctionTemplateInfo> data, bool* exc);
static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
bool* exc);
static void ConfigureInstance(Handle<Object> instance,
static void ConfigureInstance(Isolate* isolate,
Handle<Object> instance,
Handle<Object> data,
bool* exc);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
@ -136,8 +147,8 @@ class Execution : public AllStatic {
Handle<Object> pos,
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* DebugBreakHelper();
static void ProcessDebugMessages(bool debug_command_only);
static Object* DebugBreakHelper(Isolate* isolate);
static void ProcessDebugMessages(Isolate* isolate, bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual
@ -147,14 +158,18 @@ class Execution : public AllStatic {
// Get a function delegate (or undefined) for the given non-function
// object. Used to support calling objects as functions.
static Handle<Object> GetFunctionDelegate(Handle<Object> object);
static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
static Handle<Object> GetFunctionDelegate(Isolate* isolate,
Handle<Object> object);
static Handle<Object> TryGetFunctionDelegate(Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception);
// Get a function delegate (or undefined) for the given non-function
// object. Used to support calling objects as constructors.
static Handle<Object> GetConstructorDelegate(Handle<Object> object);
static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
static Handle<Object> GetConstructorDelegate(Isolate* isolate,
Handle<Object> object);
static Handle<Object> TryGetConstructorDelegate(Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception);
};
@ -199,6 +214,8 @@ class StackGuard {
#endif
bool IsGCRequest();
void RequestGC();
bool IsInstallCodeRequest();
void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
void Continue(InterruptFlag after_what);

6
deps/v8/src/extensions/externalize-string-extension.cc

@ -103,7 +103,8 @@ void ExternalizeStringExtension::Externalize(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
if (result && !string->IsInternalizedString()) {
HEAP->external_string_table()->AddString(*string);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
} else {
@ -113,7 +114,8 @@ void ExternalizeStringExtension::Externalize(
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsInternalizedString()) {
HEAP->external_string_table()->AddString(*string);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
}

5
deps/v8/src/extensions/gc-extension.cc

@ -39,10 +39,11 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
if (args[0]->BooleanValue()) {
HEAP->CollectGarbage(NEW_SPACE, "gc extension");
isolate->heap()->CollectGarbage(NEW_SPACE, "gc extension");
} else {
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
}
}
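The same mechanical substitution applies anywhere the old HEAP macro appeared: derive the internal isolate from the callback arguments and go through its heap. A minimal sketch, assuming a v8::FunctionCallbackInfo named args is in scope:

// Before: HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");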

333
deps/v8/src/extensions/i18n/break-iterator.cc

@ -1,333 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "break-iterator.h"
#include <string.h>
#include "i18n-utils.h"
#include "unicode/brkiter.h"
#include "unicode/locid.h"
#include "unicode/rbbi.h"
namespace v8_i18n {
static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object>,
v8::Handle<v8::Value>);
static icu::BreakIterator* InitializeBreakIterator(v8::Handle<v8::String>,
v8::Handle<v8::Object>,
v8::Handle<v8::Object>);
static icu::BreakIterator* CreateICUBreakIterator(const icu::Locale&,
v8::Handle<v8::Object>);
static void SetResolvedSettings(const icu::Locale&,
icu::BreakIterator*,
v8::Handle<v8::Object>);
icu::BreakIterator* BreakIterator::UnpackBreakIterator(
v8::Handle<v8::Object> obj) {
v8::HandleScope handle_scope;
// v8::ObjectTemplate doesn't have a HasInstance method, so we can't check
// if obj is an instance of the BreakIterator class. We'll check for a
// property that has to be in the object. The same applies to other
// services, like Collator and DateTimeFormat.
if (obj->HasOwnProperty(v8::String::New("breakIterator"))) {
return static_cast<icu::BreakIterator*>(
obj->GetAlignedPointerFromInternalField(0));
}
return NULL;
}
void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
v8::Persistent<v8::Object>* object,
void* param) {
// First delete the hidden C++ object.
// Unpacking should never return NULL here. That would only happen if
// this method is used as the weak callback for persistent handles not
// pointing to a break iterator.
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
delete UnpackBreakIterator(handle);
delete static_cast<icu::UnicodeString*>(
handle->GetAlignedPointerFromInternalField(1));
// Then dispose of the persistent handle to JS object.
object->Dispose(isolate);
}
// Throws a JavaScript exception.
static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
// Returns undefined, and schedules an exception to be thrown.
return v8::ThrowException(v8::Exception::Error(
v8::String::New("BreakIterator method called on an object "
"that is not a BreakIterator.")));
}
// Deletes the old value and sets the adopted text in the corresponding
// JavaScript object.
icu::UnicodeString* ResetAdoptedText(
v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
// Get the previous value from the internal field.
icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
obj->GetAlignedPointerFromInternalField(1));
delete text;
// Assign new value to the internal pointer.
v8::String::Value text_value(value);
text = new icu::UnicodeString(
reinterpret_cast<const UChar*>(*text_value), text_value.length());
obj->SetAlignedPointerInInternalField(1, text);
// Return new unicode string pointer.
return text;
}
void BreakIterator::JSInternalBreakIteratorAdoptText(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New(
"Internal error. Iterator and text have to be specified.")));
return;
}
icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
if (!break_iterator) {
ThrowUnexpectedObjectError();
return;
}
break_iterator->setText(*ResetAdoptedText(args[0]->ToObject(), args[1]));
}
void BreakIterator::JSInternalBreakIteratorFirst(
const v8::FunctionCallbackInfo<v8::Value>& args) {
icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
if (!break_iterator) {
ThrowUnexpectedObjectError();
return;
}
args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->first()));
}
void BreakIterator::JSInternalBreakIteratorNext(
const v8::FunctionCallbackInfo<v8::Value>& args) {
icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
if (!break_iterator) {
ThrowUnexpectedObjectError();
return;
}
args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->next()));
}
void BreakIterator::JSInternalBreakIteratorCurrent(
const v8::FunctionCallbackInfo<v8::Value>& args) {
icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
if (!break_iterator) {
ThrowUnexpectedObjectError();
return;
}
args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->current()));
}
void BreakIterator::JSInternalBreakIteratorBreakType(
const v8::FunctionCallbackInfo<v8::Value>& args) {
icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
if (!break_iterator) {
ThrowUnexpectedObjectError();
return;
}
// TODO(cira): Remove cast once ICU fixes base BreakIterator class.
icu::RuleBasedBreakIterator* rule_based_iterator =
static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
int32_t status = rule_based_iterator->getRuleStatus();
// Keep return values in sync with JavaScript BreakType enum.
v8::Handle<v8::String> result;
if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
result = v8::String::New("none");
} else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
result = v8::String::New("number");
} else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
result = v8::String::New("letter");
} else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
result = v8::String::New("kana");
} else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
result = v8::String::New("ideo");
} else {
result = v8::String::New("unknown");
}
args.GetReturnValue().Set(result);
}
void BreakIterator::JSCreateBreakIterator(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsObject() ||
!args[2]->IsObject()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("Internal error, wrong parameters.")));
return;
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::ObjectTemplate> break_iterator_template =
Utils::GetTemplate2(isolate);
// Create an empty object wrapper.
v8::Local<v8::Object> local_object = break_iterator_template->NewInstance();
// But the handle shouldn't be empty.
// That can happen if there was a stack overflow when creating the object.
if (local_object.IsEmpty()) {
args.GetReturnValue().Set(local_object);
return;
}
// Set break iterator as internal field of the resulting JS object.
icu::BreakIterator* break_iterator = InitializeBreakIterator(
args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
if (!break_iterator) {
v8::ThrowException(v8::Exception::Error(v8::String::New(
"Internal error. Couldn't create ICU break iterator.")));
return;
} else {
local_object->SetAlignedPointerInInternalField(0, break_iterator);
// Make sure that the pointer to adopted text is NULL.
local_object->SetAlignedPointerInInternalField(1, NULL);
v8::TryCatch try_catch;
local_object->Set(v8::String::New("breakIterator"),
v8::String::New("valid"));
if (try_catch.HasCaught()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("Internal error, couldn't set property.")));
return;
}
}
v8::Persistent<v8::Object> wrapper(isolate, local_object);
// Make object handle weak so we can delete iterator once GC kicks in.
wrapper.MakeWeak<void>(NULL, &DeleteBreakIterator);
args.GetReturnValue().Set(wrapper);
wrapper.ClearAndLeak();
}
static icu::BreakIterator* InitializeBreakIterator(
v8::Handle<v8::String> locale,
v8::Handle<v8::Object> options,
v8::Handle<v8::Object> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale;
char icu_result[ULOC_FULLNAME_CAPACITY];
int icu_length = 0;
v8::String::AsciiValue bcp47_locale(locale);
if (bcp47_locale.length() != 0) {
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
return NULL;
}
icu_locale = icu::Locale(icu_result);
}
icu::BreakIterator* break_iterator =
CreateICUBreakIterator(icu_locale, options);
if (!break_iterator) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
break_iterator = CreateICUBreakIterator(no_extension_locale, options);
// Set resolved settings (locale).
SetResolvedSettings(no_extension_locale, break_iterator, resolved);
} else {
SetResolvedSettings(icu_locale, break_iterator, resolved);
}
return break_iterator;
}
static icu::BreakIterator* CreateICUBreakIterator(
const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
UErrorCode status = U_ZERO_ERROR;
icu::BreakIterator* break_iterator = NULL;
icu::UnicodeString type;
if (!Utils::ExtractStringSetting(options, "type", &type)) {
// Type had to be in the options. This would be an internal error.
return NULL;
}
if (type == UNICODE_STRING_SIMPLE("character")) {
break_iterator =
icu::BreakIterator::createCharacterInstance(icu_locale, status);
} else if (type == UNICODE_STRING_SIMPLE("sentence")) {
break_iterator =
icu::BreakIterator::createSentenceInstance(icu_locale, status);
} else if (type == UNICODE_STRING_SIMPLE("line")) {
break_iterator =
icu::BreakIterator::createLineInstance(icu_locale, status);
} else {
// Default is the word iterator.
break_iterator =
icu::BreakIterator::createWordInstance(icu_locale, status);
}
if (U_FAILURE(status)) {
delete break_iterator;
return NULL;
}
return break_iterator;
}
static void SetResolvedSettings(const icu::Locale& icu_locale,
icu::BreakIterator* break_iterator,
v8::Handle<v8::Object> resolved) {
UErrorCode status = U_ZERO_ERROR;
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
resolved->Set(v8::String::New("locale"), v8::String::New(result));
} else {
// This would never happen, since we got the locale from ICU.
resolved->Set(v8::String::New("locale"), v8::String::New("und"));
}
}
} // namespace v8_i18n
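An orientation note on the wrapper layout used above (added commentary, not part of the original file), sketched as comments:

// Wrapper object produced by JSCreateBreakIterator:
//   internal field 0 -> icu::BreakIterator* (the native iterator)
//   internal field 1 -> icu::UnicodeString* (the adopted text; NULL until set)
//   property 'breakIterator' === 'valid' is the duck-typing marker that
//   UnpackBreakIterator checks, since v8::ObjectTemplate has no HasInstance.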

85
deps/v8/src/extensions/i18n/break-iterator.h

@@ -1,85 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
#define V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
#include "unicode/uversion.h"
#include "v8.h"
namespace U_ICU_NAMESPACE {
class BreakIterator;
class UnicodeString;
}
namespace v8_i18n {
class BreakIterator {
public:
static void JSCreateBreakIterator(
const v8::FunctionCallbackInfo<v8::Value>& args);
// Helper methods for various bindings.
// Unpacks iterator object from corresponding JavaScript object.
static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
// Release memory we allocated for the BreakIterator once the JS object that
// holds the pointer gets garbage collected.
static void DeleteBreakIterator(v8::Isolate* isolate,
v8::Persistent<v8::Object>* object,
void* param);
// Assigns new text to the iterator.
static void JSInternalBreakIteratorAdoptText(
const v8::FunctionCallbackInfo<v8::Value>& args);
// Moves iterator to the beginning of the string and returns new position.
static void JSInternalBreakIteratorFirst(
const v8::FunctionCallbackInfo<v8::Value>& args);
// Moves iterator to the next position and returns it.
static void JSInternalBreakIteratorNext(
const v8::FunctionCallbackInfo<v8::Value>& args);
// Returns the iterator's current position.
static void JSInternalBreakIteratorCurrent(
const v8::FunctionCallbackInfo<v8::Value>& args);
// Returns the break type at the current position.
// This call is only meaningful for word break iterators; others report 'none'.
static void JSInternalBreakIteratorBreakType(
const v8::FunctionCallbackInfo<v8::Value>& args);
private:
BreakIterator() {}
};
} // namespace v8_i18n
#endif // V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_

197
deps/v8/src/extensions/i18n/break-iterator.js

@@ -1,197 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ECMAScript 402 API implementation is broken into separate files for
// each service. The build system combines them together into one
// Intl namespace.
/**
* Initializes the given object so it's a valid BreakIterator instance.
* Useful for subclassing.
*/
function initializeBreakIterator(iterator, locales, options) {
native function NativeJSCreateBreakIterator();
if (iterator.hasOwnProperty('__initializedIntlObject')) {
throw new TypeError('Trying to re-initialize v8BreakIterator object.');
}
if (options === undefined) {
options = {};
}
var getOption = getGetOption(options, 'breakiterator');
var internalOptions = {};
defineWEProperty(internalOptions, 'type', getOption(
'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
var locale = resolveLocale('breakiterator', locales, options);
var resolved = Object.defineProperties({}, {
requestedLocale: {value: locale.locale, writable: true},
type: {value: internalOptions.type, writable: true},
locale: {writable: true}
});
var internalIterator = NativeJSCreateBreakIterator(locale.locale,
internalOptions,
resolved);
Object.defineProperty(iterator, 'iterator', {value: internalIterator});
Object.defineProperty(iterator, 'resolved', {value: resolved});
Object.defineProperty(iterator, '__initializedIntlObject',
{value: 'breakiterator'});
return iterator;
}
/**
* Constructs Intl.v8BreakIterator object given optional locales and options
* parameters.
*
* @constructor
*/
%SetProperty(Intl, 'v8BreakIterator', function() {
var locales = arguments[0];
var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
return new Intl.v8BreakIterator(locales, options);
}
return initializeBreakIterator(toObject(this), locales, options);
},
ATTRIBUTES.DONT_ENUM
);
/**
* BreakIterator resolvedOptions method.
*/
%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
if (!this || typeof this !== 'object' ||
this.__initializedIntlObject !== 'breakiterator') {
throw new TypeError('resolvedOptions method called on a non-object or ' +
'on an object that is not Intl.v8BreakIterator.');
}
var segmenter = this;
var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
segmenter.resolved.locale);
return {
locale: locale,
type: segmenter.resolved.type
};
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
'resolvedOptions');
%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
/**
* Returns the subset of the given locale list for which this service
* has a matching (possibly fallback) locale. Locales appear in the same
* order in the returned list as in the input list.
* The options argument is optional.
*/
%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
return supportedLocalesOf('breakiterator', locales, arguments[1]);
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
/**
* Adopts text to segment using the iterator. Old text, if present,
* gets discarded.
*/
function adoptText(iterator, text) {
native function NativeJSBreakIteratorAdoptText();
NativeJSBreakIteratorAdoptText(iterator.iterator, String(text));
}
/**
* Returns index of the first break in the string and moves current pointer.
*/
function first(iterator) {
native function NativeJSBreakIteratorFirst();
return NativeJSBreakIteratorFirst(iterator.iterator);
}
/**
* Returns the index of the next break and moves the pointer.
*/
function next(iterator) {
native function NativeJSBreakIteratorNext();
return NativeJSBreakIteratorNext(iterator.iterator);
}
/**
* Returns index of the current break.
*/
function current(iterator) {
native function NativeJSBreakIteratorCurrent();
return NativeJSBreakIteratorCurrent(iterator.iterator);
}
/**
* Returns type of the current break.
*/
function breakType(iterator) {
native function NativeJSBreakIteratorBreakType();
return NativeJSBreakIteratorBreakType(iterator.iterator);
}
addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
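A minimal usage sketch of the API wired up above (illustrative; boundary indices and break types depend on ICU's rules for the resolved locale):

var it = new Intl.v8BreakIterator(['en'], {type: 'word'});
it.adoptText('Jack and Jill');
var start = it.first();  // 0: reset to the beginning of the text
var end = it.next();     // 4: first boundary, after 'Jack'
it.breakType();          // typically 'letter' for a word segment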

209
deps/v8/src/extensions/i18n/collator.js

@@ -1,209 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ECMAScript 402 API implementation is broken into separate files for
// each service. The build system combines them together into one
// Intl namespace.
/**
* Initializes the given object so it's a valid Collator instance.
* Useful for subclassing.
*/
function initializeCollator(collator, locales, options) {
if (collator.hasOwnProperty('__initializedIntlObject')) {
throw new TypeError('Trying to re-initialize Collator object.');
}
if (options === undefined) {
options = {};
}
var getOption = getGetOption(options, 'collator');
var internalOptions = {};
defineWEProperty(internalOptions, 'usage', getOption(
'usage', 'string', ['sort', 'search'], 'sort'));
var sensitivity = getOption('sensitivity', 'string',
['base', 'accent', 'case', 'variant']);
if (sensitivity === undefined && internalOptions.usage === 'sort') {
sensitivity = 'variant';
}
defineWEProperty(internalOptions, 'sensitivity', sensitivity);
defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
'ignorePunctuation', 'boolean', undefined, false));
var locale = resolveLocale('collator', locales, options);
// ICU can't take kb, kc... parameters through localeID, so we need to pass
// them as options.
// One exception is -co- which has to be part of the extension, but only for
// usage: sort, and its value can't be 'standard' or 'search'.
var extensionMap = parseExtension(locale.extension);
setOptions(
options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
var collation = 'default';
var extension = '';
if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
extension = '-u-co-' + extensionMap.co;
// ICU can't tell us what the collation is, so save user's input.
collation = extensionMap.co;
}
} else if (internalOptions.usage === 'search') {
extension = '-u-co-search';
}
defineWEProperty(internalOptions, 'collation', collation);
var requestedLocale = locale.locale + extension;
// We define all properties C++ code may produce, to prevent security
// problems. If a malicious user decides to redefine Object.prototype.locale
// we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
// Object.defineProperties will either succeed defining or throw an error.
var resolved = Object.defineProperties({}, {
caseFirst: {writable: true},
collation: {value: internalOptions.collation, writable: true},
ignorePunctuation: {writable: true},
locale: {writable: true},
numeric: {writable: true},
requestedLocale: {value: requestedLocale, writable: true},
sensitivity: {writable: true},
strength: {writable: true},
usage: {value: internalOptions.usage, writable: true}
});
var internalCollator = %CreateCollator(requestedLocale,
internalOptions,
resolved);
// Writable, configurable and enumerable are set to false by default.
Object.defineProperty(collator, 'collator', {value: internalCollator});
Object.defineProperty(collator, '__initializedIntlObject',
{value: 'collator'});
Object.defineProperty(collator, 'resolved', {value: resolved});
return collator;
}
/**
* Constructs Intl.Collator object given optional locales and options
* parameters.
*
* @constructor
*/
%SetProperty(Intl, 'Collator', function() {
var locales = arguments[0];
var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
return new Intl.Collator(locales, options);
}
return initializeCollator(toObject(this), locales, options);
},
ATTRIBUTES.DONT_ENUM
);
/**
* Collator resolvedOptions method.
*/
%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
if (!this || typeof this !== 'object' ||
this.__initializedIntlObject !== 'collator') {
throw new TypeError('resolvedOptions method called on a non-object ' +
'or on an object that is not Intl.Collator.');
}
var coll = this;
var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
coll.resolved.locale);
return {
locale: locale,
usage: coll.resolved.usage,
sensitivity: coll.resolved.sensitivity,
ignorePunctuation: coll.resolved.ignorePunctuation,
numeric: coll.resolved.numeric,
caseFirst: coll.resolved.caseFirst,
collation: coll.resolved.collation
};
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
/**
* Returns the subset of the given locale list for which this service
* has a matching (possibly fallback) locale. Locales appear in the same
* order in the returned list as in the input list.
* The options argument is optional.
*/
%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
return supportedLocalesOf('collator', locales, arguments[1]);
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
%SetNativeFlag(Intl.Collator.supportedLocalesOf);
/**
* When the compare method is called with two arguments x and y, it returns a
* Number other than NaN that represents the result of a locale-sensitive
* String comparison of x with y.
* The result is intended to order String values in the sort order specified
* by the effective locale and collation options computed during construction
* of this Collator object, and will be negative, zero, or positive, depending
* on whether x comes before y in the sort order, the Strings are equal under
* the sort order, or x comes after y in the sort order, respectively.
*/
function compare(collator, x, y) {
return %InternalCompare(collator.collator, String(x), String(y));
}
addBoundMethod(Intl.Collator, 'compare', compare, 2);
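A short usage sketch of the Collator binding defined above (illustrative outputs):

var coll = new Intl.Collator(['de'], {sensitivity: 'base'});
coll.compare('ä', 'a');              // 0: equal under base sensitivity
['b', 'a', 'c'].sort(coll.compare);  // ['a', 'b', 'c']
coll.resolvedOptions().usage;        // 'sort' (the default usage)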

474
deps/v8/src/extensions/i18n/date-format.js

@@ -1,474 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ECMAScript 402 API implementation is broken into separate files for
// each service. The build system combines them together into one
// Intl namespace.
/**
* Returns an LDML skeleton string that matches the options object.
*/
function toLDMLString(options) {
var getOption = getGetOption(options, 'dateformat');
var ldmlString = '';
var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
ldmlString += appendToLDMLString(
option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
option = getOption('era', 'string', ['narrow', 'short', 'long']);
ldmlString += appendToLDMLString(
option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
option = getOption('year', 'string', ['2-digit', 'numeric']);
ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
option = getOption('month', 'string',
['2-digit', 'numeric', 'narrow', 'short', 'long']);
ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
option = getOption('day', 'string', ['2-digit', 'numeric']);
ldmlString += appendToLDMLString(
option, {'2-digit': 'dd', 'numeric': 'd'});
var hr12 = getOption('hour12', 'boolean');
option = getOption('hour', 'string', ['2-digit', 'numeric']);
if (hr12 === undefined) {
ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
} else if (hr12 === true) {
ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
} else {
ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
}
option = getOption('minute', 'string', ['2-digit', 'numeric']);
ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
option = getOption('second', 'string', ['2-digit', 'numeric']);
ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
option = getOption('timeZoneName', 'string', ['short', 'long']);
ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
return ldmlString;
}
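// Illustrative example (added commentary): an options bag such as
// {year: 'numeric', month: 'short', day: '2-digit'} maps to the LDML
// skeleton 'yMMMdd', which ICU later expands into a concrete,
// locale-appropriate pattern such as 'MMM dd, y'.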
/**
* Returns either the LDML equivalent of the given option or an empty string.
*/
function appendToLDMLString(option, pairs) {
if (option !== undefined) {
return pairs[option];
} else {
return '';
}
}
/**
* Returns an options object reconstructed from the LDML pattern string.
*/
function fromLDMLString(ldmlString) {
// First remove ''-quoted literal text, so strings like 'Uhr' are dropped.
ldmlString = ldmlString.replace(QUOTED_STRING_RE, '');
var options = {};
var match = ldmlString.match(/E{3,5}/g);
options = appendToDateTimeObject(
options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
match = ldmlString.match(/G{3,5}/g);
options = appendToDateTimeObject(
options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
match = ldmlString.match(/y{1,2}/g);
options = appendToDateTimeObject(
options, 'year', match, {y: 'numeric', yy: '2-digit'});
match = ldmlString.match(/M{1,5}/g);
options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
// Sometimes we get L instead of M for month - standalone name.
match = ldmlString.match(/L{1,5}/g);
options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
match = ldmlString.match(/d{1,2}/g);
options = appendToDateTimeObject(
options, 'day', match, {d: 'numeric', dd: '2-digit'});
match = ldmlString.match(/h{1,2}/g);
if (match !== null) {
options['hour12'] = true;
}
options = appendToDateTimeObject(
options, 'hour', match, {h: 'numeric', hh: '2-digit'});
match = ldmlString.match(/H{1,2}/g);
if (match !== null) {
options['hour12'] = false;
}
options = appendToDateTimeObject(
options, 'hour', match, {H: 'numeric', HH: '2-digit'});
match = ldmlString.match(/m{1,2}/g);
options = appendToDateTimeObject(
options, 'minute', match, {m: 'numeric', mm: '2-digit'});
match = ldmlString.match(/s{1,2}/g);
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
match = ldmlString.match(/v{1,2}/g);
options = appendToDateTimeObject(
options, 'timeZoneName', match, {v: 'short', vv: 'long'});
return options;
}
function appendToDateTimeObject(options, option, match, pairs) {
if (match === null) {
if (!options.hasOwnProperty(option)) {
defineWEProperty(options, option, undefined);
}
return options;
}
var property = match[0];
defineWEProperty(options, option, pairs[property]);
return options;
}
/**
* Returns options with at least default values in it.
*/
function toDateTimeOptions(options, required, defaults) {
if (options === undefined) {
options = null;
} else {
options = toObject(options);
}
// Per ECMA-402 ToDateTimeOptions, work on a fresh object inheriting from
// the caller's options so the defaults below don't mutate the original.
options = Object.create(options);
var needsDefault = true;
if ((required === 'date' || required === 'any') &&
(options.weekday !== undefined || options.year !== undefined ||
options.month !== undefined || options.day !== undefined)) {
needsDefault = false;
}
if ((required === 'time' || required === 'any') &&
(options.hour !== undefined || options.minute !== undefined ||
options.second !== undefined)) {
needsDefault = false;
}
if (needsDefault && (defaults === 'date' || defaults === 'all')) {
Object.defineProperty(options, 'year', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
Object.defineProperty(options, 'month', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
Object.defineProperty(options, 'day', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
}
if (needsDefault && (defaults === 'time' || defaults === 'all')) {
Object.defineProperty(options, 'hour', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
Object.defineProperty(options, 'minute', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
Object.defineProperty(options, 'second', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
}
return options;
}
/**
* Initializes the given object so it's a valid DateTimeFormat instance.
* Useful for subclassing.
*/
function initializeDateTimeFormat(dateFormat, locales, options) {
if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
throw new TypeError('Trying to re-initialize DateTimeFormat object.');
}
if (options === undefined) {
options = {};
}
var locale = resolveLocale('dateformat', locales, options);
options = toDateTimeOptions(options, 'any', 'date');
var getOption = getGetOption(options, 'dateformat');
// We implement only the 'best fit' algorithm, but still need to check
// that the formatMatcher value is in range.
var matcher = getOption('formatMatcher', 'string',
['basic', 'best fit'], 'best fit');
// Build LDML string for the skeleton that we pass to the formatter.
var ldmlString = toLDMLString(options);
// Filter out supported extension keys so we know what to put in resolved
// section later on.
// We need to pass calendar and number system to the method.
var tz = canonicalizeTimeZoneID(options.timeZone);
// ICU prefers options to be passed using -u- extension key/values, so
// we need to build that.
var internalOptions = {};
var extensionMap = parseExtension(locale.extension);
var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
var resolved = Object.defineProperties({}, {
calendar: {writable: true},
day: {writable: true},
era: {writable: true},
hour12: {writable: true},
hour: {writable: true},
locale: {writable: true},
minute: {writable: true},
month: {writable: true},
numberingSystem: {writable: true},
pattern: {writable: true},
requestedLocale: {value: requestedLocale, writable: true},
second: {writable: true},
timeZone: {writable: true},
timeZoneName: {writable: true},
tz: {value: tz, writable: true},
weekday: {writable: true},
year: {writable: true}
});
var formatter = %CreateDateTimeFormat(
requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
if (tz !== undefined && tz !== resolved.timeZone) {
throw new RangeError('Unsupported time zone specified ' + tz);
}
Object.defineProperty(dateFormat, 'formatter', {value: formatter});
Object.defineProperty(dateFormat, 'resolved', {value: resolved});
Object.defineProperty(dateFormat, '__initializedIntlObject',
{value: 'dateformat'});
return dateFormat;
}
/**
* Constructs Intl.DateTimeFormat object given optional locales and options
* parameters.
*
* @constructor
*/
%SetProperty(Intl, 'DateTimeFormat', function() {
var locales = arguments[0];
var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
return new Intl.DateTimeFormat(locales, options);
}
return initializeDateTimeFormat(toObject(this), locales, options);
},
ATTRIBUTES.DONT_ENUM
);
/**
* DateTimeFormat resolvedOptions method.
*/
%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
if (!this || typeof this !== 'object' ||
this.__initializedIntlObject !== 'dateformat') {
throw new TypeError('resolvedOptions method called on a non-object or ' +
'on an object that is not Intl.DateTimeFormat.');
}
var format = this;
var fromPattern = fromLDMLString(format.resolved.pattern);
var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
if (userCalendar === undefined) {
// Use ICU name if we don't have a match. It shouldn't happen, but
// it would be too strict to throw for this.
userCalendar = format.resolved.calendar;
}
var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
format.resolved.locale);
var result = {
locale: locale,
numberingSystem: format.resolved.numberingSystem,
calendar: userCalendar,
timeZone: format.resolved.timeZone
};
addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
addWECPropertyIfDefined(result, 'era', fromPattern.era);
addWECPropertyIfDefined(result, 'year', fromPattern.year);
addWECPropertyIfDefined(result, 'month', fromPattern.month);
addWECPropertyIfDefined(result, 'day', fromPattern.day);
addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
addWECPropertyIfDefined(result, 'second', fromPattern.second);
return result;
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
'resolvedOptions');
%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
/**
* Returns the subset of the given locale list for which this service
* has a matching (possibly fallback) locale. Locales appear in the same
* order in the returned list as in the input list.
* The options argument is optional.
*/
%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
return supportedLocalesOf('dateformat', locales, arguments[1]);
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
/**
* Returns a String value representing the result of calling ToNumber(date)
* according to the effective locale and the formatting options of this
* DateTimeFormat.
*/
function formatDate(formatter, dateValue) {
var dateMs;
if (dateValue === undefined) {
dateMs = Date.now();
} else {
dateMs = Number(dateValue);
}
if (!isFinite(dateMs)) {
throw new RangeError('Provided date is not in valid range.');
}
return %InternalDateFormat(formatter.formatter, new Date(dateMs));
}
/**
* Returns a Date object produced by parsing ToString(value) according to the
* effective locale and the formatting options of this DateTimeFormat.
* Returns undefined if the date string cannot be parsed.
*/
function parseDate(formatter, value) {
return %InternalDateParse(formatter.formatter, String(value));
}
// 0 because the date argument is optional.
addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
/**
* Returns canonical Area/Location name, or throws an exception if the zone
* name is not a valid IANA name.
*/
function canonicalizeTimeZoneID(tzID) {
// Skip undefined zones.
if (tzID === undefined) {
return tzID;
}
// Special case handling (UTC, GMT).
var upperID = tzID.toUpperCase();
if (upperID === 'UTC' || upperID === 'GMT' ||
upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
return 'UTC';
}
// We expect only _ and / beside ASCII letters.
// All inputs should conform to Area/Location from now on.
var match = TIMEZONE_NAME_CHECK_RE.exec(tzID);
if (match === null) {
throw new RangeError('Expected Area/Location for time zone, got ' + tzID);
}
var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
var i = 3;
while (match[i] !== undefined && i < match.length) {
result = result + '_' + toTitleCaseWord(match[i]);
i++;
}
return result;
}
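A usage sketch of the DateTimeFormat pipeline above (illustrative; the formatted output depends on the resolved locale data):

var df = new Intl.DateTimeFormat('en-US', {
  year: 'numeric', month: 'short', day: '2-digit',
  timeZone: 'America/Los_Angeles'
});
df.format(Date.now());          // e.g. 'Sep 20, 2013'
df.resolvedOptions().timeZone;  // 'America/Los_Angeles'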

168
deps/v8/src/extensions/i18n/globals.js

@@ -1,168 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* List of available services.
*/
var AVAILABLE_SERVICES = ['collator',
'numberformat',
'dateformat',
'breakiterator'];
/**
* Caches available locales for each service.
*/
var AVAILABLE_LOCALES = {
'collator': undefined,
'numberformat': undefined,
'dateformat': undefined,
'breakiterator': undefined
};
/**
* Caches default ICU locale.
*/
var DEFAULT_ICU_LOCALE = undefined;
/**
* Unicode extension regular expression.
*/
var UNICODE_EXTENSION_RE = new RegExp('-u(-[a-z0-9]{2,8})+', 'g');
/**
* Matches any singleton extension sequence, not just the Unicode -u- one.
*/
var ANY_EXTENSION_RE = new RegExp('-[a-z0-9]{1}-.*', 'g');
/**
* Matches ''-quoted text (an opening quote, non-quote characters, a closing
* quote) so it can be removed.
*/
var QUOTED_STRING_RE = new RegExp("'[^']+'", 'g');
/**
* Matches valid service name.
*/
var SERVICE_RE =
new RegExp('^(collator|numberformat|dateformat|breakiterator)$');
/**
* Validates a language tag against bcp47 spec.
* Actual value is assigned on first run.
*/
var LANGUAGE_TAG_RE = undefined;
/**
* Helps find duplicate variants in the language tag.
*/
var LANGUAGE_VARIANT_RE = undefined;
/**
* Helps find duplicate singletons in the language tag.
*/
var LANGUAGE_SINGLETON_RE = undefined;
/**
* Matches valid IANA time zone names.
*/
var TIMEZONE_NAME_CHECK_RE =
new RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
/**
* Maps ICU calendar names into LDML type.
*/
var ICU_CALENDAR_MAP = {
'gregorian': 'gregory',
'japanese': 'japanese',
'buddhist': 'buddhist',
'roc': 'roc',
'persian': 'persian',
'islamic-civil': 'islamicc',
'islamic': 'islamic',
'hebrew': 'hebrew',
'chinese': 'chinese',
'indian': 'indian',
'coptic': 'coptic',
'ethiopic': 'ethiopic',
'ethiopic-amete-alem': 'ethioaa'
};
/**
* Map of Unicode extensions to option properties, and their values and types,
* for a collator.
*/
var COLLATOR_KEY_MAP = {
'kn': {'property': 'numeric', 'type': 'boolean'},
'kf': {'property': 'caseFirst', 'type': 'string',
'values': ['false', 'lower', 'upper']}
};
/**
* Map of Unicode extensions to option properties, and their values and types,
* for a number format.
*/
var NUMBER_FORMAT_KEY_MAP = {
'nu': {'property': undefined, 'type': 'string'}
};
/**
* Map of Unicode extensions to option properties, and their values and types,
* for a date/time format.
*/
var DATETIME_FORMAT_KEY_MAP = {
'ca': {'property': undefined, 'type': 'string'},
'nu': {'property': undefined, 'type': 'string'}
};
/**
* Allowed -u-co- values. List taken from:
* http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
*/
var ALLOWED_CO_VALUES = [
'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
];
/**
* Object attributes (configurable, writable, enumerable).
* To combine attributes, OR them.
* Values/names are copied from v8/include/v8.h:PropertyAttribute
*/
var ATTRIBUTES = {
'NONE': 0,
'READ_ONLY': 1,
'DONT_ENUM': 2,
'DONT_DELETE': 4
};
/**
* Error message for when function object is created with new and it's not
* a constructor.
*/
var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
'Function object that\'s not a constructor was created with new';
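// Illustrative (added commentary): 'de-u-co-phonebk'.match(UNICODE_EXTENSION_RE)
// yields ['-u-co-phonebk']; parseExtension() then turns that extension into the
// map {co: 'phonebk'}, which is how ALLOWED_CO_VALUES is consulted.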

77
deps/v8/src/extensions/i18n/i18n-extension.cc

@@ -1,77 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "i18n-extension.h"
#include "break-iterator.h"
#include "natives.h"
using v8::internal::I18NNatives;
namespace v8_i18n {
Extension::Extension()
: v8::Extension("v8/i18n",
reinterpret_cast<const char*>(
I18NNatives::GetScriptsSource().start()),
0,
0,
I18NNatives::GetScriptsSource().length()) {}
v8::Handle<v8::FunctionTemplate> Extension::GetNativeFunction(
v8::Handle<v8::String> name) {
// Break iterator.
if (name->Equals(v8::String::New("NativeJSCreateBreakIterator"))) {
return v8::FunctionTemplate::New(BreakIterator::JSCreateBreakIterator);
} else if (name->Equals(v8::String::New("NativeJSBreakIteratorAdoptText"))) {
return v8::FunctionTemplate::New(
BreakIterator::JSInternalBreakIteratorAdoptText);
} else if (name->Equals(v8::String::New("NativeJSBreakIteratorFirst"))) {
return v8::FunctionTemplate::New(
BreakIterator::JSInternalBreakIteratorFirst);
} else if (name->Equals(v8::String::New("NativeJSBreakIteratorNext"))) {
return v8::FunctionTemplate::New(
BreakIterator::JSInternalBreakIteratorNext);
} else if (name->Equals(v8::String::New("NativeJSBreakIteratorCurrent"))) {
return v8::FunctionTemplate::New(
BreakIterator::JSInternalBreakIteratorCurrent);
} else if (name->Equals(v8::String::New("NativeJSBreakIteratorBreakType"))) {
return v8::FunctionTemplate::New(
BreakIterator::JSInternalBreakIteratorBreakType);
}
return v8::Handle<v8::FunctionTemplate>();
}
void Extension::Register() {
static Extension i18n_extension;
static v8::DeclareExtension extension_declaration(&i18n_extension);
}
} // namespace v8_i18n

177
deps/v8/src/extensions/i18n/i18n-utils.cc

@@ -1,177 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "i18n-utils.h"
#include <string.h>
#include "unicode/unistr.h"
namespace v8_i18n {
// static
void Utils::StrNCopy(char* dest, int length, const char* src) {
if (!dest || !src) return;
strncpy(dest, src, length);
dest[length - 1] = '\0';
}
// static
bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
icu::UnicodeString* output) {
v8::String::Utf8Value utf8_value(input);
if (*utf8_value == NULL) return false;
output->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
return true;
}
// static
bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
icu::UnicodeString* result) {
if (!setting || !result) return false;
v8::HandleScope handle_scope;
v8::TryCatch try_catch;
v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
if (try_catch.HasCaught()) {
return false;
}
// No need to check if |value| is empty because it's taken care of
// by TryCatch above.
if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
return V8StringToUnicodeString(value, result);
}
return false;
}
// static
bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
int32_t* result) {
if (!setting || !result) return false;
v8::HandleScope handle_scope;
v8::TryCatch try_catch;
v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
if (try_catch.HasCaught()) {
return false;
}
// No need to check if |value| is empty because it's taken care of
// by TryCatch above.
if (!value->IsUndefined() && !value->IsNull() && value->IsNumber()) {
*result = static_cast<int32_t>(value->Int32Value());
return true;
}
return false;
}
// static
bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
bool* result) {
if (!setting || !result) return false;
v8::HandleScope handle_scope;
v8::TryCatch try_catch;
v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
if (try_catch.HasCaught()) {
return false;
}
// No need to check if |value| is empty because it's taken care of
// by TryCatch above.
if (!value->IsUndefined() && !value->IsNull() && value->IsBoolean()) {
*result = static_cast<bool>(value->BooleanValue());
return true;
}
return false;
}
// static
void Utils::AsciiToUChar(const char* source,
int32_t source_length,
UChar* target,
int32_t target_length) {
int32_t length =
source_length < target_length ? source_length : target_length;
if (length <= 0) {
return;
}
for (int32_t i = 0; i < length - 1; ++i) {
target[i] = static_cast<UChar>(source[i]);
}
target[length - 1] = 0x0u;
}
static v8::Local<v8::ObjectTemplate> ToLocal(i::Handle<i::Object> handle) {
return v8::Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>::cast(handle));
}
template<int internal_fields, i::EternalHandles::SingletonHandle field>
static v8::Local<v8::ObjectTemplate> GetEternal(v8::Isolate* external) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external);
if (isolate->eternal_handles()->Exists(field)) {
return ToLocal(isolate->eternal_handles()->GetSingleton(field));
}
v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
raw_template->SetInternalFieldCount(internal_fields);
return ToLocal(
isolate->eternal_handles()->CreateSingleton(
isolate,
*v8::Utils::OpenHandle(*raw_template),
field));
}
// static
v8::Local<v8::ObjectTemplate> Utils::GetTemplate(v8::Isolate* isolate) {
return GetEternal<1, i::EternalHandles::I18N_TEMPLATE_ONE>(isolate);
}
// static
v8::Local<v8::ObjectTemplate> Utils::GetTemplate2(v8::Isolate* isolate) {
return GetEternal<2, i::EternalHandles::I18N_TEMPLATE_TWO>(isolate);
}
} // namespace v8_i18n

91
deps/v8/src/extensions/i18n/i18n-utils.h

@@ -1,91 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_EXTENSIONS_I18N_SRC_UTILS_H_
#define V8_EXTENSIONS_I18N_SRC_UTILS_H_
#include "unicode/uversion.h"
#include "v8.h"
namespace U_ICU_NAMESPACE {
class UnicodeString;
}
namespace v8_i18n {
class Utils {
public:
// Safe string copy. Null terminates the destination. Copies at most
// (length - 1) bytes.
// We can't use snprintf since it's not supported on all relevant platforms.
// We can't use OS::SNPrintF, it's only for internal code.
static void StrNCopy(char* dest, int length, const char* src);
// Converts v8::String into UnicodeString. Returns false if input
// can't be converted into utf8.
static bool V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
icu::UnicodeString* output);
// Extract a String setting named in |settings| and set it to |result|.
// Return true if it's specified. Otherwise, return false.
static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
icu::UnicodeString* result);
// Extract an Integer setting named in |settings| and set it to |result|.
// Return true if it's specified. Otherwise, return false.
static bool ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
int32_t* result);
// Extract a Boolean setting named in |settings| and set it to |result|.
// Return true if it's specified. Otherwise, return false.
static bool ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
bool* result);
// Converts ASCII array into UChar array.
// Target is always \0 terminated.
static void AsciiToUChar(const char* source,
int32_t source_length,
UChar* target,
int32_t target_length);
// Creates an ObjectTemplate with one internal field.
static v8::Local<v8::ObjectTemplate> GetTemplate(v8::Isolate* isolate);
// Creates an ObjectTemplate with two internal fields.
static v8::Local<v8::ObjectTemplate> GetTemplate2(v8::Isolate* isolate);
private:
Utils() {}
};
} // namespace v8_i18n
#endif  // V8_EXTENSIONS_I18N_SRC_UTILS_H_

536
deps/v8/src/extensions/i18n/i18n-utils.js

@@ -1,536 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ECMAScript 402 API implementation is broken into separate files for
// each service. The build system combines them into one
// Intl namespace.
/**
* Adds a bound method to the prototype of the given object.
*/
function addBoundMethod(obj, methodName, implementation, length) {
function getter() {
if (!this || typeof this !== 'object' ||
this.__initializedIntlObject === undefined) {
throw new TypeError('Method ' + methodName + ' called on a ' +
'non-object or on a wrong type of object.');
}
var internalName = '__bound' + methodName + '__';
if (this[internalName] === undefined) {
var that = this;
var boundMethod;
if (length === undefined || length === 2) {
boundMethod = function(x, y) {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
return implementation(that, x, y);
}
} else if (length === 1) {
boundMethod = function(x) {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
return implementation(that, x);
}
} else {
boundMethod = function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
// DateTimeFormat.format needs to be a 0-argument method, but it can still
// receive an optional dateValue param. If one was provided, pass it
// along.
if (arguments.length > 0) {
return implementation(that, arguments[0]);
} else {
return implementation(that);
}
}
}
%FunctionSetName(boundMethod, internalName);
%FunctionRemovePrototype(boundMethod);
%SetNativeFlag(boundMethod);
this[internalName] = boundMethod;
}
return this[internalName];
}
%FunctionSetName(getter, methodName);
%FunctionRemovePrototype(getter);
%SetNativeFlag(getter);
Object.defineProperty(obj.prototype, methodName, {
get: getter,
enumerable: false,
configurable: true
});
}
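// For example (illustrative names, not from this file): after
//
//   addBoundMethod(Intl.Collator, 'compare', doCompare, 2);
//
// reading collator.compare yields a two-argument function bound to that
// collator instance; the getter caches it, so
// collator.compare === collator.compare holds.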
/**
* Returns the intersection of the requested locales and the locales the
* service supports. The locales parameter is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
if (service.match(SERVICE_RE) === null) {
throw new Error('Internal error, wrong service type: ' + service);
}
// Provide defaults if matcher was not specified.
if (options === undefined) {
options = {};
} else {
options = toObject(options);
}
var matcher = options.localeMatcher;
if (matcher !== undefined) {
matcher = String(matcher);
if (matcher !== 'lookup' && matcher !== 'best fit') {
throw new RangeError('Illegal value for localeMatcher: ' + matcher);
}
} else {
matcher = 'best fit';
}
var requestedLocales = initializeLocaleList(locales);
// Cache these; they never change for a given service.
if (AVAILABLE_LOCALES[service] === undefined) {
AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
}
// Use either best fit or lookup algorithm to match locales.
if (matcher === 'best fit') {
return initializeLocaleList(bestFitSupportedLocalesOf(
requestedLocales, AVAILABLE_LOCALES[service]));
}
return initializeLocaleList(lookupSupportedLocalesOf(
requestedLocales, AVAILABLE_LOCALES[service]));
}
/**
* Returns the subset of the provided BCP 47 language priority list for which
* this service has a matching locale when using the BCP 47 Lookup algorithm.
* Locales appear in the same order in the returned list as in the input list.
*/
function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
var matchedLocales = [];
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove -u- extension.
var locale = requestedLocales[i].replace(UNICODE_EXTENSION_RE, '');
do {
if (availableLocales[locale] !== undefined) {
// Push the requested locale, not the resolved one.
matchedLocales.push(requestedLocales[i]);
break;
}
// Truncate the locale if possible; if not, break.
var pos = locale.lastIndexOf('-');
if (pos === -1) {
break;
}
locale = locale.substring(0, pos);
} while (true);
}
return matchedLocales;
}
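// A worked illustration (hypothetical availableLocales, not from this file):
// the truncation loop matches 'de-CH' by falling back to 'de', and the
// requested tag, not the resolved one, is returned:
//
//   lookupSupportedLocalesOf(['de-CH', 'pt'], {'de': null, 'fr': null});
//   // => ['de-CH']  ('pt' has nothing to truncate to)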
/**
* Returns the subset of the provided BCP 47 language priority list for which
* this service has a matching locale when using the implementation-dependent
* best-fit algorithm.
* Locales appear in the same order in the returned list as in the input list.
*/
function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
// TODO(cira): implement better best fit algorithm.
return lookupSupportedLocalesOf(requestedLocales, availableLocales);
}
/**
* Returns a getOption function that extracts a property value from the given
* options object. If the property is missing, it returns defaultValue. If the
* value is out of range for that property, it throws a RangeError.
*/
function getGetOption(options, caller) {
if (options === undefined) {
throw new Error('Internal ' + caller + ' error. ' +
'Default options are missing.');
}
var getOption = function getOption(property, type, values, defaultValue) {
if (options[property] !== undefined) {
var value = options[property];
switch (type) {
case 'boolean':
value = Boolean(value);
break;
case 'string':
value = String(value);
break;
case 'number':
value = Number(value);
break;
default:
throw new Error('Internal error. Wrong value type.');
}
if (values !== undefined && values.indexOf(value) === -1) {
throw new RangeError('Value ' + value + ' out of range for ' + caller +
' options property ' + property);
}
return value;
}
return defaultValue;
}
return getOption;
}
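// Usage sketch (hypothetical options object): getGetOption returns a closure
// over the caller's options, so repeated lookups share one validation path:
//
//   var getOption = getGetOption({usage: 'sort'}, 'collator');
//   getOption('usage', 'string', ['sort', 'search'], 'sort');  // 'sort'
//   getOption('numeric', 'boolean', undefined, false);  // false (default)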
/**
* Compares a BCP 47 language priority list requestedLocales against the locales
* in availableLocales and determines the best available language to meet the
* request. Two algorithms are available to match the locales: the Lookup
* algorithm described in RFC 4647 section 3.4, and an implementation dependent
* best-fit algorithm. Independent of the locale matching algorithm, options
* specified through Unicode locale extension sequences are negotiated
* separately, taking the caller's relevant extension keys and locale data as
* well as client-provided options into consideration. Returns an object with
* a locale property whose value is the language tag of the selected locale,
* and properties for each key in relevantExtensionKeys providing the selected
* value for that key.
*/
function resolveLocale(service, requestedLocales, options) {
requestedLocales = initializeLocaleList(requestedLocales);
var getOption = getGetOption(options, service);
var matcher = getOption('localeMatcher', 'string',
['lookup', 'best fit'], 'best fit');
var resolved;
if (matcher === 'lookup') {
resolved = lookupMatcher(service, requestedLocales);
} else {
resolved = bestFitMatcher(service, requestedLocales);
}
return resolved;
}
/**
* Returns the best-matched supported locale and extension info using the
* basic lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
if (service.match(SERVICE_RE) === null) {
throw new Error('Internal error, wrong service type: ' + service);
}
// Cache these; they never change for a given service.
if (AVAILABLE_LOCALES[service] === undefined) {
AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
}
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
var locale = requestedLocales[i].replace(ANY_EXTENSION_RE, '');
do {
if (AVAILABLE_LOCALES[service][locale] !== undefined) {
// Return the resolved locale and extension.
var extensionMatch = requestedLocales[i].match(UNICODE_EXTENSION_RE);
var extension = (extensionMatch === null) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
}
// Truncate locale if possible.
var pos = locale.lastIndexOf('-');
if (pos === -1) {
break;
}
locale = locale.substring(0, pos);
} while (true);
}
// Didn't find a match, return default.
if (DEFAULT_ICU_LOCALE === undefined) {
DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
}
return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
}
/**
* Returns the best-matched supported locale and extension info using the
* implementation-dependent best-fit algorithm.
*/
function bestFitMatcher(service, requestedLocales) {
// TODO(cira): implement better best fit algorithm.
return lookupMatcher(service, requestedLocales);
}
/**
* Parses a Unicode extension into a key-value map.
* Returns an empty object if the extension string is invalid.
* We are not concerned with the validity of the values at this point.
*/
function parseExtension(extension) {
var extensionSplit = extension.split('-');
// Assume ['', 'u', ...] input, but don't throw.
if (extensionSplit.length <= 2 ||
extensionSplit[0] !== '' || extensionSplit[1] !== 'u') {
return {};
}
// Key is {2}alphanum, value is {3,8}alphanum.
// Some keys may not have explicit values (booleans).
var extensionMap = {};
var previousKey = undefined;
for (var i = 2; i < extensionSplit.length; ++i) {
var length = extensionSplit[i].length;
var element = extensionSplit[i];
if (length === 2) {
extensionMap[element] = undefined;
previousKey = element;
} else if (length >= 3 && length <= 8 && previousKey !== undefined) {
extensionMap[previousKey] = element;
previousKey = undefined;
} else {
// There is a value that's too long, or that doesn't have a key.
return {};
}
}
return extensionMap;
}
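// Worked examples (illustrative inputs):
//
//   parseExtension('-u-ca-gregory-co-phonebk');
//   // => {ca: 'gregory', co: 'phonebk'}
//   parseExtension('-u-kn');
//   // => {kn: undefined}  (boolean key without a value)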
/**
* Converts parameter to an Object if possible.
*/
function toObject(value) {
if (value === undefined || value === null) {
throw new TypeError('Value cannot be converted to an Object.');
}
return Object(value);
}
/**
* Populates the internalOptions object with boolean key-value pairs from
* extensionMap and options.
* Returns the filtered extension (the number and date format constructors use
* Unicode extensions to pass parameters to ICU).
* It's used for extension-option pairs only, e.g. kn-normalization, but not
* for 'sensitivity', which has no extension equivalent.
* Extensions like nu and ca have no options equivalent, so we place
* undefined in map.property to denote that.
*/
function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
var extension = '';
var updateExtension = function updateExtension(key, value) {
return '-' + key + '-' + String(value);
}
var updateProperty = function updateProperty(property, type, value) {
if (type === 'boolean' && (typeof value === 'string')) {
value = (value === 'true');
}
if (property !== undefined) {
defineWEProperty(outOptions, property, value);
}
}
for (var key in keyValues) {
if (keyValues.hasOwnProperty(key)) {
var value = undefined;
var map = keyValues[key];
if (map.property !== undefined) {
// This may return true if user specifies numeric: 'false', since
// Boolean('nonempty') === true.
value = getOption(map.property, map.type, map.values);
}
if (value !== undefined) {
updateProperty(map.property, map.type, value);
extension += updateExtension(key, value);
continue;
}
// User options didn't have it, check Unicode extension.
// Here we want to convert strings 'true', 'false' into proper Boolean
// values (not a user error).
if (extensionMap.hasOwnProperty(key)) {
value = extensionMap[key];
if (value !== undefined) {
updateProperty(map.property, map.type, value);
extension += updateExtension(key, value);
} else if (map.type === 'boolean') {
// Boolean keys are allowed not to have values in Unicode extension.
// Those default to true.
updateProperty(map.property, map.type, true);
extension += updateExtension(key, true);
}
}
}
}
return extension === '' ? '' : '-u' + extension;
}
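// A worked sketch (illustrative key map; assumes getOption was built via
// getGetOption({numeric: true}, 'collator')):
//
//   var out = {};
//   var ext = setOptions({numeric: true}, {},
//       {kn: {property: 'numeric', type: 'boolean'}}, getOption, out);
//   // out.numeric === true, ext === '-u-kn-true'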
/**
* Converts all own properties to
* configurable: false, writable: false, enumerable: true.
*/
function freezeArray(array) {
array.forEach(function(element, index) {
Object.defineProperty(array, index, {value: element,
configurable: false,
writable: false,
enumerable: true});
});
Object.defineProperty(array, 'length', {value: array.length,
writable: false});
return array;
}
/**
* It's sometimes desirable to keep the user-requested locale instead of the
* ICU-supported one (zh-TW is equivalent to zh-Hant-TW, so we should keep the
* shorter form if that's what the user requested).
* This function returns the user-specified tag if its maximized form matches
* the ICU-resolved locale. If not, we return the ICU result.
*/
function getOptimalLanguageTag(original, resolved) {
// Returns Array<Object>, where each object has maximized and base properties.
// Maximized: zh -> zh-Hans-CN
// Base: zh-CN-u-ca-gregory -> zh-CN
// Take care of grandfathered or simple cases.
if (original === resolved) {
return original;
}
var locales = %GetLanguageTagVariants([original, resolved]);
if (locales[0].maximized !== locales[1].maximized) {
return resolved;
}
// Preserve extensions of resolved locale, but swap base tags with original.
var resolvedBase = new RegExp('^' + locales[1].base);
return resolved.replace(resolvedBase, locales[0].base);
}
/**
* Returns an Object that contains all of the supported locales for a given
* service.
* In addition to the supported locales, we add an xx-ZZ locale for each
* xx-Yyyy-ZZ that is supported. This is required by the spec.
*/
function getAvailableLocalesOf(service) {
var available = %AvailableLocalesOf(service);
for (var i in available) {
if (available.hasOwnProperty(i)) {
var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
if (parts !== null) {
// Build xx-ZZ. We don't care about the actual value,
// as long as it's not undefined.
available[parts[1] + '-' + parts[3]] = null;
}
}
}
return available;
}
/**
* Defines a property and sets writable and enumerable to true.
* Configurable is false by default.
*/
function defineWEProperty(object, property, value) {
Object.defineProperty(object, property,
{value: value, writable: true, enumerable: true});
}
/**
* Adds property to an object if the value is not undefined.
* Sets configurable descriptor to false.
*/
function addWEPropertyIfDefined(object, property, value) {
if (value !== undefined) {
defineWEProperty(object, property, value);
}
}
/**
* Defines a property and sets writable, enumerable and configurable to true.
*/
function defineWECProperty(object, property, value) {
Object.defineProperty(object, property,
{value: value,
writable: true,
enumerable: true,
configurable: true});
}
/**
* Adds property to an object if the value is not undefined.
* Sets all descriptors to true.
*/
function addWECPropertyIfDefined(object, property, value) {
if (value !== undefined) {
defineWECProperty(object, property, value);
}
}
/**
* Returns the titlecased word, e.g. aMeRicA -> America.
*/
function toTitleCaseWord(word) {
return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
}

190
deps/v8/src/extensions/i18n/locale.js

@@ -1,190 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ECMAScript 402 API implementation is broken into separate files for
// each service. The build system combines them into one
// Intl namespace.
/**
* Canonicalizes the language tag, or throws in case the tag is invalid.
*/
function canonicalizeLanguageTag(localeID) {
// typeof null is 'object', so we have to do an extra check.
if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
localeID === null) {
throw new TypeError('Language ID should be string or object.');
}
var localeString = String(localeID);
if (isValidLanguageTag(localeString) === false) {
throw new RangeError('Invalid language tag: ' + localeString);
}
// This call will strip -kn but not -kn-true extensions.
// ICU bug filed: http://bugs.icu-project.org/trac/ticket/9265.
// TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
// upgrade to ICU 4.9.
var tag = %CanonicalizeLanguageTag(localeString);
if (tag === 'invalid-tag') {
throw new RangeError('Invalid language tag: ' + localeString);
}
return tag;
}
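// Illustrative behavior (assuming standard ICU/BCP 47 canonicalization):
//
//   canonicalizeLanguageTag('EN-us');       // 'en-US'
//   canonicalizeLanguageTag('not a tag!');  // throws RangeError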
/**
* Returns an array where all locales are canonicalized and duplicates removed.
* Throws on locales that are not well formed BCP47 tags.
*/
function initializeLocaleList(locales) {
var seen = [];
if (locales === undefined) {
// Constructor is called without arguments.
seen = [];
} else {
// We allow a single string localeID.
if (typeof locales === 'string') {
seen.push(canonicalizeLanguageTag(locales));
return freezeArray(seen);
}
var o = toObject(locales);
// Convert it to uint32 (>>> is shift-right on 32-bit integers).
var len = o.length >>> 0;
for (var k = 0; k < len; k++) {
if (k in o) {
var value = o[k];
var tag = canonicalizeLanguageTag(value);
if (seen.indexOf(tag) === -1) {
seen.push(tag);
}
}
}
}
return freezeArray(seen);
}
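// For example (illustrative):
//
//   initializeLocaleList(['SR', 'sr', 'de-DE']);
//   // => frozen ['sr', 'de-DE']  (canonicalized, duplicates dropped)
//   initializeLocaleList('en-US');
//   // => frozen ['en-US']  (a single string locale is allowed)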
/**
* Validates the language tag. Section 2.2.9 of the BCP 47 spec
* defines a valid tag.
*
* ICU is too permissive and lets invalid tags, like
* hant-cmn-cn, through.
*
* Returns false if the language tag is invalid.
*/
function isValidLanguageTag(locale) {
// Check if it's well-formed, including grandfathered tags.
if (LANGUAGE_TAG_RE.test(locale) === false) {
return false;
}
// Just return if it's an x- form; it's all private use.
if (locale.indexOf('x-') === 0) {
return true;
}
// Check if there are any duplicate variants or singletons (extensions).
// Remove private use section.
locale = locale.split(/-x-/)[0];
// Skip the language part since it can match the variant regex, so we start
// from 1. We do match i-klingon here, but that's ok, since i-klingon-klingon
// is not valid and would fail the LANGUAGE_TAG_RE test.
var variants = [];
var extensions = [];
var parts = locale.split(/-/);
for (var i = 1; i < parts.length; i++) {
var value = parts[i];
if (LANGUAGE_VARIANT_RE.test(value) === true && extensions.length === 0) {
if (variants.indexOf(value) === -1) {
variants.push(value);
} else {
return false;
}
}
if (LANGUAGE_SINGLETON_RE.test(value) === true) {
if (extensions.indexOf(value) === -1) {
extensions.push(value);
} else {
return false;
}
}
}
return true;
}
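// For example (illustrative): duplicates are rejected even when the tag is
// otherwise well-formed:
//
//   isValidLanguageTag('de-DE-1901');       // true
//   isValidLanguageTag('de-DE-1901-1901');  // false (duplicate variant)
//   isValidLanguageTag('en-u-ca-u-nu');     // false (duplicate singleton)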
/**
* Builds a regular expression that validates the language tag
* against the BCP 47 spec.
* Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
* Runs on load and initializes the global REs.
*/
(function() {
var alpha = '[a-zA-Z]';
var digit = '[0-9]';
var alphanum = '(' + alpha + '|' + digit + ')';
var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
'zh-min|zh-min-nan|zh-xiang)';
var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
var grandfathered = '(' + irregular + '|' + regular + ')';
var privateUse = '(x(-' + alphanum + '{1,8})+)';
var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
LANGUAGE_SINGLETON_RE = new RegExp('^' + singleton + '$', 'i');
var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
LANGUAGE_VARIANT_RE = new RegExp('^' + variant + '$', 'i');
var region = '(' + alpha + '{2}|' + digit + '{3})';
var script = '(' + alpha + '{4})';
var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
alpha + '{5,8})';
var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
var languageTag =
'^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
LANGUAGE_TAG_RE = new RegExp(languageTag, 'i');
})();

289
deps/v8/src/extensions/i18n/number-format.js

@@ -1,289 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ECMAScript 402 API implementation is broken into separate files for
// each service. The build system combines them into one
// Intl namespace.
/**
* Verifies that the input is a well-formed ISO 4217 currency code.
* Don't uppercase before testing; that could turn an invalid code into a
* valid one (for example \u00DFP (Eszett+P) becomes SSP).
*/
function isWellFormedCurrencyCode(currency) {
return typeof currency == "string" &&
currency.length == 3 &&
currency.match(/[^A-Za-z]/) == null;
}
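// For example (illustrative): the check is case-preserving on purpose:
//
//   isWellFormedCurrencyCode('USD');  // true
//   isWellFormedCurrencyCode('usd');  // true (uppercased later, not here)
//   isWellFormedCurrencyCode('US$');  // false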
/**
* Returns the valid digit count for a property, or throws a RangeError if
* the value is out of range.
*/
function getNumberOption(options, property, min, max, fallback) {
var value = options[property];
if (value !== undefined) {
value = Number(value);
if (isNaN(value) || value < min || value > max) {
throw new RangeError(property + ' value is out of range.');
}
return Math.floor(value);
}
return fallback;
}
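// For example (illustrative):
//
//   getNumberOption({maximumFractionDigits: 2.9},
//       'maximumFractionDigits', 0, 20, 3);  // 2 (floored)
//   getNumberOption({}, 'maximumFractionDigits', 0, 20, 3);  // 3 (fallback)
//   getNumberOption({maximumFractionDigits: 25},
//       'maximumFractionDigits', 0, 20, 3);  // throws RangeError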
/**
* Initializes the given object so it's a valid NumberFormat instance.
* Useful for subclassing.
*/
function initializeNumberFormat(numberFormat, locales, options) {
if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
throw new TypeError('Trying to re-initialize NumberFormat object.');
}
if (options === undefined) {
options = {};
}
var getOption = getGetOption(options, 'numberformat');
var locale = resolveLocale('numberformat', locales, options);
var internalOptions = {};
defineWEProperty(internalOptions, 'style', getOption(
'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
var currency = getOption('currency', 'string');
if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
throw new RangeError('Invalid currency code: ' + currency);
}
if (internalOptions.style === 'currency' && currency === undefined) {
throw new TypeError('Currency code is required with currency style.');
}
var currencyDisplay = getOption(
'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
if (internalOptions.style === 'currency') {
defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
}
// Digit ranges.
var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
var mnsd = options['minimumSignificantDigits'];
var mxsd = options['maximumSignificantDigits'];
if (mnsd !== undefined || mxsd !== undefined) {
mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
}
// Grouping.
defineWEProperty(internalOptions, 'useGrouping', getOption(
'useGrouping', 'boolean', undefined, true));
// ICU prefers options to be passed using -u- extension key/values for
// number format, so we need to build that.
var extensionMap = parseExtension(locale.extension);
var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
var resolved = Object.defineProperties({}, {
currency: {writable: true},
currencyDisplay: {writable: true},
locale: {writable: true},
maximumFractionDigits: {writable: true},
minimumFractionDigits: {writable: true},
minimumIntegerDigits: {writable: true},
numberingSystem: {writable: true},
requestedLocale: {value: requestedLocale, writable: true},
style: {value: internalOptions.style, writable: true},
useGrouping: {writable: true}
});
if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
}
if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
}
var formatter = %CreateNumberFormat(requestedLocale,
internalOptions,
resolved);
// We can't get information about number or currency style from ICU, so we
// assume the user request was fulfilled.
if (internalOptions.style === 'currency') {
Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
writable: true});
}
Object.defineProperty(numberFormat, 'formatter', {value: formatter});
Object.defineProperty(numberFormat, 'resolved', {value: resolved});
Object.defineProperty(numberFormat, '__initializedIntlObject',
{value: 'numberformat'});
return numberFormat;
}
/**
* Constructs Intl.NumberFormat object given optional locales and options
* parameters.
*
* @constructor
*/
%SetProperty(Intl, 'NumberFormat', function() {
var locales = arguments[0];
var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
return new Intl.NumberFormat(locales, options);
}
return initializeNumberFormat(toObject(this), locales, options);
},
ATTRIBUTES.DONT_ENUM
);
/**
* NumberFormat resolvedOptions method.
*/
%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
if (!this || typeof this !== 'object' ||
this.__initializedIntlObject !== 'numberformat') {
throw new TypeError('resolvedOptions method called on a non-object' +
' or on an object that is not Intl.NumberFormat.');
}
var format = this;
var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
format.resolved.locale);
var result = {
locale: locale,
numberingSystem: format.resolved.numberingSystem,
style: format.resolved.style,
useGrouping: format.resolved.useGrouping,
minimumIntegerDigits: format.resolved.minimumIntegerDigits,
minimumFractionDigits: format.resolved.minimumFractionDigits,
maximumFractionDigits: format.resolved.maximumFractionDigits
};
if (result.style === 'currency') {
defineWECProperty(result, 'currency', format.resolved.currency);
defineWECProperty(result, 'currencyDisplay',
format.resolved.currencyDisplay);
}
if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
defineWECProperty(result, 'minimumSignificantDigits',
format.resolved.minimumSignificantDigits);
}
if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
defineWECProperty(result, 'maximumSignificantDigits',
format.resolved.maximumSignificantDigits);
}
return result;
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
'resolvedOptions');
%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
/**
* Returns the subset of the given locale list for which this service
* has a matching (possibly fallback) locale. Locales appear in the same
* order in the returned list as in the input list.
* The options argument is an optional parameter.
*/
%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
return supportedLocalesOf('numberformat', locales, arguments[1]);
},
ATTRIBUTES.DONT_ENUM
);
%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
/**
* Returns a String value representing the result of calling ToNumber(value)
* according to the effective locale and the formatting options of this
* NumberFormat.
*/
function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
var number = Number(value);
if (number === -0) {
number = 0;
}
return %InternalNumberFormat(formatter.formatter, number);
}
/**
* Returns a Number that represents the string value that was passed in.
*/
function parseNumber(formatter, value) {
return %InternalNumberParse(formatter.formatter, String(value));
}
addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
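// Taken together, this file wires up the standard Intl.NumberFormat surface
// (output strings below are illustrative; exact results depend on the ICU
// data in use):
//
//   var nf = new Intl.NumberFormat('de-DE',
//       {style: 'currency', currency: 'EUR'});
//   nf.format(1234.5);  // e.g. '1.234,50 €'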

220
deps/v8/src/extensions/i18n/overrides.js

@@ -1,220 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ECMAScript 402 API implementation is broken into separate files for
// each service. The build system combines them into one
// Intl namespace.
// Save references to Intl objects and methods we use, for added security.
var savedObjects = {
'collator': Intl.Collator,
'numberformat': Intl.NumberFormat,
'dateformatall': Intl.DateTimeFormat,
'dateformatdate': Intl.DateTimeFormat,
'dateformattime': Intl.DateTimeFormat
};
// Default (created with undefined locales and options parameters) collator,
// number and date format instances. They'll be created as needed.
var defaultObjects = {
'collator': undefined,
'numberformat': undefined,
'dateformatall': undefined,
'dateformatdate': undefined,
'dateformattime': undefined,
};
/**
* Returns a cached or newly created instance of a given service.
* We cache only default instances (those created with no locales or options).
*/
function cachedOrNewService(service, locales, options, defaults) {
var useOptions = (defaults === undefined) ? options : defaults;
if (locales === undefined && options === undefined) {
if (defaultObjects[service] === undefined) {
defaultObjects[service] = new savedObjects[service](locales, useOptions);
}
return defaultObjects[service];
}
return new savedObjects[service](locales, useOptions);
}
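// For example (illustrative): calls with no locales/options reuse one
// default instance; any explicit argument bypasses the cache:
//
//   cachedOrNewService('numberformat', undefined, undefined);  // cached
//   cachedOrNewService('numberformat', 'de');  // fresh instance every call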
/**
* Compares this and that, and returns a value less than 0, equal to 0, or
* greater than 0. Overrides the built-in method.
*/
Object.defineProperty(String.prototype, 'localeCompare', {
value: function(that) {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
if (this === undefined || this === null) {
throw new TypeError('Method invoked on undefined or null value.');
}
var locales = arguments[1];
var options = arguments[2];
var collator = cachedOrNewService('collator', locales, options);
return compare(collator, this, that);
},
writable: true,
configurable: true,
enumerable: false
});
%FunctionSetName(String.prototype.localeCompare, 'localeCompare');
%FunctionRemovePrototype(String.prototype.localeCompare);
%SetNativeFlag(String.prototype.localeCompare);
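// Illustrative effect of the override (results assume ICU collation data):
//
//   'ä'.localeCompare('z', 'de');  // negative: ä sorts before z in German
//   'ä'.localeCompare('z', 'sv');  // positive: ä sorts after z in Swedish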
/**
* Formats a Number object (this) using locale and options values.
* If locale or options are omitted, defaults are used.
*/
Object.defineProperty(Number.prototype, 'toLocaleString', {
value: function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
if (!(this instanceof Number) && typeof(this) !== 'number') {
throw new TypeError('Method invoked on an object that is not Number.');
}
var locales = arguments[0];
var options = arguments[1];
var numberFormat = cachedOrNewService('numberformat', locales, options);
return formatNumber(numberFormat, this);
},
writable: true,
configurable: true,
enumerable: false
});
%FunctionSetName(Number.prototype.toLocaleString, 'toLocaleString');
%FunctionRemovePrototype(Number.prototype.toLocaleString);
%SetNativeFlag(Number.prototype.toLocaleString);
/**
* Returns the formatted date. Throws if the date parameter is not a Date
* object; an invalid (NaN) Date yields the string 'Invalid Date'.
*/
function toLocaleDateTime(date, locales, options, required, defaults, service) {
if (!(date instanceof Date)) {
throw new TypeError('Method invoked on an object that is not Date.');
}
if (isNaN(date)) {
return 'Invalid Date';
}
var internalOptions = toDateTimeOptions(options, required, defaults);
var dateFormat =
cachedOrNewService(service, locales, options, internalOptions);
return formatDate(dateFormat, date);
}
/**
* Formats a Date object (this) using locale and options values.
* If locale or options are omitted, defaults are used - both date and time are
* present in the output.
*/
Object.defineProperty(Date.prototype, 'toLocaleString', {
value: function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
this, locales, options, 'any', 'all', 'dateformatall');
},
writable: true,
configurable: true,
enumerable: false
});
%FunctionSetName(Date.prototype.toLocaleString, 'toLocaleString');
%FunctionRemovePrototype(Date.prototype.toLocaleString);
%SetNativeFlag(Date.prototype.toLocaleString);
/**
* Formats a Date object (this) using locale and options values.
* If locale or options are omitted, defaults are used - only date is present
* in the output.
*/
Object.defineProperty(Date.prototype, 'toLocaleDateString', {
value: function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
this, locales, options, 'date', 'date', 'dateformatdate');
},
writable: true,
configurable: true,
enumerable: false
});
%FunctionSetName(Date.prototype.toLocaleDateString, 'toLocaleDateString');
%FunctionRemovePrototype(Date.prototype.toLocaleDateString);
%SetNativeFlag(Date.prototype.toLocaleDateString);
/**
* Formats a Date object (this) using locale and options values.
* If locale or options are omitted, defaults are used - only time is present
* in the output.
*/
Object.defineProperty(Date.prototype, 'toLocaleTimeString', {
value: function() {
if (%_IsConstructCall()) {
throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
this, locales, options, 'time', 'time', 'dateformattime');
},
writable: true,
configurable: true,
enumerable: false
});
%FunctionSetName(Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
%FunctionRemovePrototype(Date.prototype.toLocaleTimeString);
%SetNativeFlag(Date.prototype.toLocaleTimeString);
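// Illustrative behavior of the three Date overrides (exact strings depend
// on the ICU data and time zone in use):
//
//   var d = new Date(Date.UTC(2013, 8, 17, 12, 30));
//   d.toLocaleString('en-US');      // e.g. '9/17/2013 12:30:00 PM'
//   d.toLocaleDateString('en-US');  // date portion only
//   d.toLocaleTimeString('en-US');  // time portion only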

2
deps/v8/src/extensions/statistics-extension.cc

@@ -60,7 +60,7 @@ static void AddNumber(v8::Local<v8::Object> object,
void StatisticsExtension::GetCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
Heap* heap = isolate->heap();
if (args.Length() > 0) { // GC if first argument evaluates to true.

Some files were not shown because too many files changed in this diff
