
deps: update v8 to 3.17.13

v0.11.0-release
Authored by Trevor Norris, committed by Ben Noordhuis
commit 83261e789e
  1. deps/v8/AUTHORS | 3
  2. deps/v8/ChangeLog | 514
  3. deps/v8/Makefile | 22
  4. deps/v8/Makefile.android | 8
  5. deps/v8/SConstruct | 24
  6. deps/v8/build/android.gypi | 36
  7. deps/v8/build/common.gypi | 82
  8. deps/v8/build/gyp_v8 | 2
  9. deps/v8/build/standalone.gypi | 9
  10. deps/v8/include/v8-profiler.h | 45
  11. deps/v8/include/v8.h | 1469
  12. deps/v8/samples/lineprocessor.cc | 41
  13. deps/v8/samples/process.cc | 63
  14. deps/v8/samples/shell.cc | 78
  15. deps/v8/src/SConscript | 4
  16. deps/v8/src/accessors.cc | 168
  17. deps/v8/src/api.cc | 1665
  18. deps/v8/src/api.h | 12
  19. deps/v8/src/apinatives.js | 2
  20. deps/v8/src/arm/assembler-arm-inl.h | 97
  21. deps/v8/src/arm/assembler-arm.cc | 691
  22. deps/v8/src/arm/assembler-arm.h | 335
  23. deps/v8/src/arm/builtins-arm.cc | 177
  24. deps/v8/src/arm/code-stubs-arm.cc | 2155
  25. deps/v8/src/arm/code-stubs-arm.h | 202
  26. deps/v8/src/arm/codegen-arm.cc | 305
  27. deps/v8/src/arm/codegen-arm.h | 22
  28. deps/v8/src/arm/constants-arm.cc | 8
  29. deps/v8/src/arm/constants-arm.h | 23
  30. deps/v8/src/arm/debug-arm.cc | 2
  31. deps/v8/src/arm/deoptimizer-arm.cc | 500
  32. deps/v8/src/arm/disasm-arm.cc | 89
  33. deps/v8/src/arm/frames-arm.cc | 9
  34. deps/v8/src/arm/frames-arm.h | 30
  35. deps/v8/src/arm/full-codegen-arm.cc | 329
  36. deps/v8/src/arm/ic-arm.cc | 278
  37. deps/v8/src/arm/lithium-arm.cc | 456
  38. deps/v8/src/arm/lithium-arm.h | 369
  39. deps/v8/src/arm/lithium-codegen-arm.cc | 1972
  40. deps/v8/src/arm/lithium-codegen-arm.h | 54
  41. deps/v8/src/arm/lithium-gap-resolver-arm.cc | 9
  42. deps/v8/src/arm/macro-assembler-arm.cc | 778
  43. deps/v8/src/arm/macro-assembler-arm.h | 188
  44. deps/v8/src/arm/regexp-macro-assembler-arm.cc | 59
  45. deps/v8/src/arm/simulator-arm.cc | 272
  46. deps/v8/src/arm/simulator-arm.h | 10
  47. deps/v8/src/arm/stub-cache-arm.cc | 1958
  48. deps/v8/src/array.js | 29
  49. deps/v8/src/assembler.cc | 317
  50. deps/v8/src/assembler.h | 158
  51. deps/v8/src/ast.cc | 74
  52. deps/v8/src/ast.h | 116
  53. deps/v8/src/atomicops.h | 6
  54. deps/v8/src/atomicops_internals_tsan.h | 335
  55. deps/v8/src/bootstrapper.cc | 621
  56. deps/v8/src/bootstrapper.h | 19
  57. deps/v8/src/builtins.cc | 821
  58. deps/v8/src/builtins.h | 114
  59. deps/v8/src/checks.cc | 3
  60. deps/v8/src/code-stubs-hydrogen.cc | 389
  61. deps/v8/src/code-stubs.cc | 322
  62. deps/v8/src/code-stubs.h | 807
  63. deps/v8/src/codegen.cc | 35
  64. deps/v8/src/codegen.h | 28
  65. deps/v8/src/collection.js | 54
  66. deps/v8/src/compilation-cache.cc | 4
  67. deps/v8/src/compiler.cc | 248
  68. deps/v8/src/compiler.h | 116
  69. deps/v8/src/contexts.cc | 40
  70. deps/v8/src/contexts.h | 20
  71. deps/v8/src/conversions-inl.h | 8
  72. deps/v8/src/counters.cc | 7
  73. deps/v8/src/cpu-profiler.cc | 166
  74. deps/v8/src/cpu-profiler.h | 97
  75. deps/v8/src/d8-debug.cc | 32
  76. deps/v8/src/d8-debug.h | 8
  77. deps/v8/src/d8-posix.cc | 2
  78. deps/v8/src/d8-readline.cc | 31
  79. deps/v8/src/d8.cc | 566
  80. deps/v8/src/d8.gyp | 8
  81. deps/v8/src/d8.h | 51
  82. deps/v8/src/d8.js | 675
  83. deps/v8/src/data-flow.h | 55
  84. deps/v8/src/date.js | 2
  85. deps/v8/src/debug-agent.cc | 19
  86. deps/v8/src/debug-debugger.js | 256
  87. deps/v8/src/debug.cc | 148
  88. deps/v8/src/debug.h | 14
  89. deps/v8/src/deoptimizer.cc | 1263
  90. deps/v8/src/deoptimizer.h | 124
  91. deps/v8/src/disassembler.cc | 34
  92. deps/v8/src/disassembler.h | 2
  93. deps/v8/src/elements-kind.cc | 9
  94. deps/v8/src/elements-kind.h | 8
  95. deps/v8/src/elements.cc | 872
  96. deps/v8/src/elements.h | 51
  97. deps/v8/src/execution.cc | 105
  98. deps/v8/src/execution.h | 11
  99. deps/v8/src/extensions/externalize-string-extension.cc | 13
  100. deps/v8/src/extensions/gc-extension.cc | 4

deps/v8/AUTHORS | 3

@@ -34,6 +34,7 @@ Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Jonathan Liu <net147@gmail.com>
Kun Zhang <zhangk@codeaurora.org>
Luis Reis <luis.m.reis@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Mathias Bynens <mathias@qiwi.be>
Matt Hanselman <mjhanselman@gmail.com>
@@ -45,6 +46,7 @@ Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net>
Rajeev R Krithivasan <rkrithiv@codeaurora.org>
Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
@@ -54,6 +56,7 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
Vlad Burlik <vladbph@gmail.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>
Zhongping Wang <kewpie.w.zp@gmail.com>

deps/v8/ChangeLog | 514

@@ -1,3 +1,517 @@
2013-03-19: Version 3.17.13
Turned Flags into a uint32_t typedef (Chromium issue 194749).
Performance and stability improvements on all platforms.
2013-03-18: Version 3.17.12
Unified kMaxArguments with number of bits used to encode it.
(Chromium issue 211741)
Fixed detection of |handle_smi| case in
HOptimizedGraphBuilder::HandlePolymorphicCallNamed.
(Chromium issue 196583)
Performance and stability improvements on all platforms.
2013-03-15: Version 3.17.11
Added a version of the v8::HandleScope constructor with a v8::Isolate
parameter and made AdjustAmountOfExternalAllocatedMemory an instance
method of v8::Isolate.
(issue 2487)
Fixed two register allocator bugs (off-by-one error/failure
propagation). (issue 2576)
Fixed huge heap snapshot when a heavily shared context has many
variables. (Chromium issue 145687)
Performance and stability improvements on all platforms.
2013-03-13: Version 3.17.10
Fixed heap snapshot creation for Harmony collections. (issue 2535)
Fixed register allocation corner case. (Chromium issue 177883)
Performance and stability improvements on all platforms.
2013-03-08: Version 3.17.9
Restored Function()'s expected string representation. (issue 2470)
Enabled deprecations (again). (issue 2487)
Avoid bool to Oddball conversions by being lazy. (issue 2491)
Added %p option to --logfile.
Hardened Function()'s parsing of function literals. (issue 2470)
ES6 symbols: Refine test for getOwnPropertyNames. (issue 2158)
Performance and stability improvements on all platforms.
2013-03-07: Version 3.17.8
Added missing license headers. (Chromium issue 98597)
Inserted missing type cast in JSON.stringify. (issue 2570)
Reverted "Send SIGPROF signals on the profiler event processor thread"
(issue 2571)
Fixed Array.length, String.length and Function.prototype LoadICs on x64.
(issue 2568)
ES6 symbols: filter symbols from for-in loops and Object.keys.
(issue 2158)
Properly handle misses for StoreArrayLengthStub on ia32 and x64
(issue 2566)
Fixed x32 handling of Atomic64. (Chromium issue chromium-os:36866)
Removed "library" variable from standalone.gypi. (Chromium issue 111541)
Fixed HCheckSmiOrInt <-> HBoundsCheck interaction wrt. representations.
(issue 2556)
Enabled zapping of disposed global handles in release mode.
(Chromium issue 176056)
Added workaround for redefinition of __proto__ property. (issue 2565)
ES6 symbols: Allow symbols as property names. (issue 2158)
Performance and stability improvements on all platforms.
2013-03-04: Version 3.17.7
Limited recursion in regexp compilation by a budget.
(Chromium issue 178790)
ES6 symbols: Implemented Symbol intrinsic and basic functionality
(issue 2158)
Performance and stability improvements on all platforms.
2013-02-28: Version 3.17.6
Fixed materialization of arguments objects with unknown values.
(Chromium issue 163530)
Set default number of sweeper threads to at most four.
Performance and stability improvements on all platforms.
2013-02-27: Version 3.17.5
Made __proto__ a foreign callback on Object.prototype.
(issue 621, issue 1949 and issue 2441)
Performance and stability improvements on all platforms.
2013-02-25: Version 3.17.4
Performance and stability improvements on all platforms.
2013-02-21: Version 3.17.3
Performance and stability improvements on all platforms.
2013-02-19: Version 3.17.2
Removed bogus check for TOP register in deoptimizer.
(Chromium issue 176943)
Made the Isolate parameter mandatory for internal HandleScopes.
(issue 2487)
Fixed f.apply() optimization when declared arguments are mutated.
(issue 2539)
Performance and stability improvements on all platforms.
2013-02-14: Version 3.17.1
Performance and stability improvements on all platforms.
2013-02-13: Version 3.17.0
Enabled parallel sweeping.
Don't try to unlink instructions twice during GVN
(Chromium issue 175141)
Fixed code flusher disabling while marking incrementally.
(Chromium issue 173458, 168582)
Don't use TLS for space iterators.
(issue 2531)
Added new GetHeapStatistics API entry and deprecated old one.
Fixed DoubleStackSlot-to-DoubleStackSlot moves on ia32. Unified
platform-independent code.
(Chromium issue 173907)
Added --trace-array-abuse to help find OOB accesses.
Performance and stability improvements on all platforms.
2013-02-06: Version 3.16.14
Performance and stability improvements on all platforms.
2013-02-04: Version 3.16.13
Tagged stubs that rely on instance types as MEGAMORPHIC.
(Chromium issue 173974)
Fixed clearing of dead dependent codes and verifying of weak
embedded maps on full GC. (Chromium issue 172488,172489)
Made the arm port build cleanly with Clang.
Performance and stability improvements on all platforms.
2013-01-31: Version 3.16.12
Performance and stability improvements on all platforms.
2013-01-30: Version 3.16.11
Put making embedded maps in optimized code weak behind a flag.
(Chromium issue 172488,172489)
Performance and stability improvements on all platforms.
2013-01-25: Version 3.16.10
Avoid excessive memory usage during redundant phi elimination.
(issue 2510)
Fixed additional spec violations wrt RegExp.lastIndex.
(issue 2437)
Added Isolate parameter to Persistent class.
(issue 2487)
Performance and stability improvements on all platforms.
2013-01-24: Version 3.16.9
Made embedded maps in optimized code weak.
(issue 2073)
Fixed corner case when JSFunction is evicted from flusher.
(Chromium issue 168801)
Correctly set kCanBeDivByZero flag for HMathFloorOfDiv.
(Chromium issue 171641)
Performance and stability improvements on all platforms.
2013-01-23: Version 3.16.8
Correctly reset lastIndex in a RegExp object.
(Chromium issue 170856)
Added a workaround for Windows compilation problems related to V8EXPORT.
(issue 2507)
tools/run-tests.py: shlex.split() the value of --command-prefix
(Chromium issue 171553)
Fixed pattern detection for replacing shifts by rotation.
(Chromium issue 2499)
Performance and stability improvements on all platforms.
2013-01-21: Version 3.16.7
Removed <(library) usage from v8.gyp.
(Chromium issue 111541)
Fixed out of bounds memory access in TestJSArrayForAllocationSiteInfo.
(Chromium issue 169928)
Performance and stability improvements on all platforms.
2013-01-18: Version 3.16.6
Made the Isolate parameter mandatory in Locker and Unlocker classes.
(issue 2487)
Avoid pointer underflow in CopyCharsUnsigned.
(issue 2493)
Generate shim headers when using system v8.
(Chromium issue 165264)
Fixed arguments materialization for inlined apply().
(issue 2489)
Sync'ed laziness between BuildFunctionInfo and MakeFunctionInfo.
(Chromium issue 147497)
Added sanity check to CodeFlusher::AddCandidate.
(Chromium issue 169209)
Performance and stability improvements on all platforms.
2013-01-15: Version 3.16.5
Removed deprecated functions from V8's external API.
Prepared API for WebKit use of Latin-1.
Fixed V8 issue 2486.
Fixed Chromium issue 169723.
Performance and stability improvements on all platforms.
2013-01-11: Version 3.16.4
Fixed Chromium issues 168545 and 169209.
Performance and stability improvements on all platforms.
2013-01-09: Version 3.16.3
Improved GC performance when moving parts of a FixedArray (issue 2452).
Enabled readline on d8 while building a shared lib (issue 1781).
Fixed missing exception check in typed array constructor
(Chromium issue 168545).
Check for read-only-ness when preparing for array sort (issue 2419).
Performance and stability improvements on all platforms.
2013-01-04: Version 3.16.2
Added Makefile options to build for the Raspberry Pi (armv7=0,
arm_fpu=vfp2).
Performance and stability improvements on all platforms.
2012-12-27: Version 3.16.1
Fixed x64 MathMinMax for negative untagged int32 arguments.
(Chromium issue 164442)
Fixed FloatingPointHelper::CheckSSE2OperandIsInt32.
(issue 2458)
Performance and stability improvements on all platforms.
2012-12-21: Version 3.16.0
V8_Fatal now prints C++ stack trace in debug mode.
Added HTML-based tick processor.
Continued implementation of Object.observe (V8 issue 2409).
Fixed V8 issues 2243, 2340, 2393, 2399, 2457.
Fixed Chromium issues 125308, 165637, 166379, 166553.
Performance and stability improvements on all platforms.
2012-12-10: Version 3.15.11
Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP
flags.
Performance and stability improvements on all platforms.
2012-12-07: Version 3.15.10
Enabled optimisation of functions inside eval. (issue 2315)
Fixed spec violations in methods of Number.prototype. (issue 2443)
Added GCTracer metrics for a scavenger GC for DOM wrappers.
Performance and stability improvements on all platforms.
2012-12-06: Version 3.15.9
Fixed candidate eviction in code flusher.
(Chromium issue 159140)
Iterate through all arguments for side effects in Math.min/max.
(issue 2444)
Fixed spec violations related to regexp.lastIndex
(issue 2437, issue 2438)
Performance and stability improvements on all platforms.
2012-12-04: Version 3.15.8
Enforced stack allocation of TryCatch blocks.
(issue 2166,chromium:152389)
Fixed external exceptions in external try-catch handlers.
(issue 2166)
Activated incremental code flushing by default.
Performance and stability improvements on all platforms.
2012-11-30: Version 3.15.7
Activated code aging by default.
Included more information in --prof log.
Removed eager sweeping for lazy swept spaces. SlowAllocateRaw now tries a
bounded number of times to find a big enough memory slot.
(issue 2194)
Performance and stability improvements on all platforms.
2012-11-26: Version 3.15.6
Ensure double arrays are filled with holes when extended from
variations of empty arrays. (Chromium issue 162085)
Performance and stability improvements on all platforms.
2012-11-23: Version 3.15.5
Fixed JSON.stringify for objects with interceptor handlers.
(Chromium issue 161028)
Fixed corner case in x64 compare stubs. (issue 2416)
Performance and stability improvements on all platforms.
2012-11-16: Version 3.15.4
Fixed Array.prototype.join evaluation order. (issue 2263)
Perform CPU sampling by the CPU sampling thread only if the processing
thread is not running. (issue 2364)
When using an Object as a set in Object.getOwnPropertyNames, null out
the proto. (issue 2410)
Disabled EXTRA_CHECKS in Release build.
Heap explorer: Show representation of strings.
Removed 'type' and 'arguments' properties from Error object.
(issue 2397)
Added atomics implementation for ThreadSanitizer v2.
(Chromium issue 128314)
Fixed LiveEdit crashes when object/array literal is added. (issue 2368)
Performance and stability improvements on all platforms.
2012-11-13: Version 3.15.3
Changed sample shell to send non-JS output (e.g. errors) to stderr
instead of stdout.
Correctly check for stack overflow even when interrupt is pending.
(issue 214)
Collect stack trace on stack overflow. (issue 2394)
Performance and stability improvements on all platforms.
2012-11-12: Version 3.15.2
Function::GetScriptOrigin supplies sourceURL when script name is
not available. (Chromium issue 159413)
Made formatting error message side-effect-free. (issue 2398)
Fixed length check in JSON.stringify. (Chromium issue 160010)
ES6: Added support for Set and Map clear method (issue 2400)
Fixed slack tracking when instance prototype changes.
(Chromium issue 157019)
Fixed disabling of code flusher while marking. (Chromium issue 159140)
Added a test case for object grouping in a scavenger GC (issue 2077)
Support shared library build of Android for v8.
(Chromium issue 158821)
ES6: Added support for size to Set and Map (issue 2395)
Performance and stability improvements on all platforms.
2012-11-06: Version 3.15.1
Put incremental code flushing behind a flag. (Chromium issue 159140)
Performance and stability improvements on all platforms.
2012-10-31: Version 3.15.0
Loosened aligned code target requirement on ARM (issue 2380)
Fixed JSON.parse to treat leading zeros correctly.
(Chromium issue 158185)
Performance and stability improvements on all platforms.
2012-10-22: Version 3.14.5
Killed off the SCons based build.
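
A recurring theme in the entries above (3.16.6 through 3.17.11) is that the public API now takes v8::Isolate explicitly: HandleScope, Persistent, Locker and Unlocker all grew isolate parameters. A minimal sketch of the migrated embedding pattern, modeled on the sample updates later in this commit (assuming the 3.17.x API, where v8::Context::New() still returns a Persistent handle):

    #include <v8.h>

    int main(int argc, char* argv[]) {
      v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
      v8::Isolate* isolate = v8::Isolate::GetCurrent();
      {
        v8::HandleScope handle_scope(isolate);  // The isolate is now mandatory.
        v8::Persistent<v8::Context> context = v8::Context::New();
        context->Enter();
        // ... compile and run scripts in the context ...
        context->Exit();
        context.Dispose(isolate);  // Dispose() now takes the isolate as well.
      }
      v8::V8::Dispose();
      return 0;
    }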

deps/v8/Makefile | 22

@@ -62,6 +62,12 @@ endif
ifeq ($(verifyheap), on)
GYPFLAGS += -Dv8_enable_verify_heap=1
endif
# backtrace=off
ifeq ($(backtrace), off)
GYPFLAGS += -Dv8_enable_backtrace=0
else
GYPFLAGS += -Dv8_enable_backtrace=1
endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
@@ -77,15 +83,17 @@ endif
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1
endif
# liveobjectlist=on
ifeq ($(liveobjectlist), on)
GYPFLAGS += -Dv8_use_liveobjectlist=true
# vfp2=off
ifeq ($(vfp2), off)
GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
else
GYPFLAGS += -Dv8_can_use_vfp2_instructions=true -Darm_fpu=vfpv2
endif
# vfp3=off
ifeq ($(vfp3), off)
GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
else
GYPFLAGS += -Dv8_can_use_vfp3_instructions=true
GYPFLAGS += -Dv8_can_use_vfp3_instructions=true -Darm_fpu=vfpv3
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
@@ -115,6 +123,10 @@ endif
ifeq ($(hardfp), on)
GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
endif
# armv7=false
ifeq ($(armv7), false)
GYPFLAGS += -Darmv7=0
endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
@@ -136,7 +148,7 @@ endif
ARCHES = ia32 x64 arm mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \

deps/v8/Makefile.android | 8

@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_arm
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@@ -50,12 +50,18 @@ ifeq ($(ARCH), android_arm)
DEFINES += arm_neon=0 armv7=1
TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
else
ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
DEFINES += mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android-4.6
else
ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
TOOLCHAIN_ARCH = x86-4.6
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
endif
TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt

deps/v8/SConstruct | 24

@@ -67,16 +67,9 @@ LIBRARY_FLAGS = {
'debuggersupport:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
},
'inspector:on': {
'CPPDEFINES': ['INSPECTOR'],
},
'fasttls:off': {
'CPPDEFINES': ['V8_NO_FAST_TLS'],
},
'liveobjectlist:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
'LIVE_OBJECT_LIST', 'OBJECT_PRINT'],
}
},
'gcc': {
'all': {
@@ -1051,16 +1044,6 @@ SIMPLE_OPTIONS = {
'default': 'on',
'help': 'enable debugging of JavaScript code'
},
'inspector': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable inspector features'
},
'liveobjectlist': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable live object list features in the debugger'
},
'soname': {
'values': ['on', 'off'],
'default': 'off',
@@ -1418,13 +1401,6 @@ def PostprocessOptions(options, os):
options['msvcltcg'] = 'on'
if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
options['mipsabi'] = 'none'
if options['liveobjectlist'] == 'on':
if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
# Print a warning that liveobjectlist will implicitly enable the debugger
print "Warning: forcing debuggersupport on for liveobjectlist"
options['debuggersupport'] = 'on'
options['inspector'] = 'on'
options['objectprint'] = 'on'
def ParseEnvOverrides(arg, imports):

deps/v8/build/android.gypi | 36

@@ -35,9 +35,9 @@
'variables': {
'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
# Switch between different build types, currently only '0' is
# supported.
'android_build_type%': 0,
# This is set when building the Android WebView inside the Android build
# system, using the 'android' gyp backend.
'android_webview_build%': 0,
},
'conditions': [
['android_ndk_root==""', {
@@ -62,10 +62,10 @@
],
# Enable to use the system stlport, otherwise statically
# link the NDK one?
'use_system_stlport%': '<(android_build_type)',
'use_system_stlport%': '<(android_webview_build)',
'android_stlport_library': 'stlport_static',
# Copy it out one scope.
'android_build_type%': '<(android_build_type)',
'android_webview_build%': '<(android_webview_build)',
'OS': 'android',
}, # variables
'target_defaults': {
@@ -122,8 +122,6 @@
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
@@ -143,7 +141,7 @@
'-lm',
],
'conditions': [
['android_build_type==0', {
['android_webview_build==0', {
'ldflags': [
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
@@ -183,6 +181,11 @@
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="mipsel"', {
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['target_arch=="ia32"', {
'ldflags': [
'-L<(android_stlport_libs)/x86',
@@ -199,6 +202,16 @@
'-fno-stack-protector',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
'-U__linux__'
],
'cflags': [
'-fno-stack-protector',
],
}],
],
'target_conditions': [
['_type=="executable"', {
@@ -219,6 +232,13 @@
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],

deps/v8/build/common.gypi | 82

@@ -32,6 +32,7 @@
'use_system_v8%': 0,
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
'v8_compress_startup_data%': 'off',
'v8_target_arch%': '<(target_arch)',
@@ -51,6 +52,13 @@
'v8_can_use_vfp2_instructions%': 'false',
'v8_can_use_vfp3_instructions%': 'false',
# Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
# registers d16-d31 in the generated code, both in the snapshot and for the
# ARM target. Leaving the default value of 'false' will avoid the use of
# these registers in the snapshot and use CPU feature probing when running
# on the target.
'v8_can_use_vfp32dregs%': 'false',
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
@@ -68,10 +76,9 @@
'v8_enable_debugger_support%': 1,
'v8_enable_disassembler%': 0,
'v8_enable_backtrace%': 0,
# Enable extra checks in API functions and other strategic places.
'v8_enable_extra_checks%': 1,
'v8_enable_disassembler%': 0,
'v8_enable_gdbjit%': 0,
@@ -91,7 +98,6 @@
'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
'v8_use_liveobjectlist%': 'false',
'werror%': '-Werror',
# With post mortem support enabled, metadata is embedded into libv8 that
@@ -114,9 +120,6 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
@@ -134,6 +137,11 @@
'V8_TARGET_ARCH_ARM',
],
'conditions': [
['armv7==1', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
}],
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=1',
@@ -144,12 +152,16 @@
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
[ 'v8_can_use_vfp2_instructions=="true"', {
# NEON implies VFP3 and VFP3 implies VFP2.
[ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP2_INSTRUCTIONS',
],
}],
[ 'v8_can_use_vfp3_instructions=="true"', {
# NEON implies VFP3.
[ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
@@ -169,6 +181,11 @@
'USE_EABI_HARDFLOAT=0',
],
}],
[ 'v8_can_use_vfp32dregs=="true"', {
'defines': [
'CAN_USE_VFP32DREGS',
],
}],
],
}], # v8_target_arch=="arm"
['v8_target_arch=="ia32"', {
@@ -181,7 +198,7 @@
'V8_TARGET_ARCH_MIPS',
],
'variables': {
'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
@@ -199,11 +216,12 @@
}],
['mips_arch_variant=="mips32r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="mips32r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
}, {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
}],
@@ -246,14 +264,6 @@
},
'msvs_configuration_platform': 'x64',
}], # v8_target_arch=="x64"
['v8_use_liveobjectlist=="true"', {
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
'INSPECTOR',
'OBJECT_PRINT',
'LIVEOBJECTLIST',
],
}],
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
@@ -306,7 +316,7 @@
}],
['_toolset=="target"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
'm32flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
'clang%': 0,
},
'conditions': [
@@ -330,6 +340,9 @@
], # conditions
'configurations': {
'Debug': {
'variables': {
'v8_enable_extra_checks%': 1,
},
'defines': [
'DEBUG',
'ENABLE_DISASSEMBLER',
@@ -354,10 +367,17 @@
},
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
['OS=="linux" and v8_enable_backtrace==1', {
# Support for backtrace_symbols.
'ldflags': [ '-rdynamic' ],
}],
['OS=="android"', {
'variables': {
'android_full_debug%': 1,
@@ -372,12 +392,32 @@
}],
],
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '0', # -O0
},
}],
],
}, # Debug
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
'cflags!': [
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O3',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [

deps/v8/build/gyp_v8 | 2

@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without

deps/v8/build/standalone.gypi | 9

@@ -29,9 +29,9 @@
{
'variables': {
'library%': 'static_library',
'component%': 'static_library',
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@@ -86,6 +86,9 @@
'Debug': {
'cflags': [ '-g', '-O0' ],
},
'Release': {
# Xcode insists on this empty entry.
},
},
},
'conditions': [
@@ -100,7 +103,7 @@
[ 'OS=="linux"', {
'cflags': [ '-ansi' ],
}],
[ 'visibility=="hidden"', {
[ 'visibility=="hidden" and v8_enable_backtrace==0', {
'cflags': [ '-fvisibility=hidden' ],
}],
[ 'component=="shared_library"', {
@@ -191,7 +194,7 @@
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES', # -Werror
'GCC_VERSION': '4.2',
'GCC_VERSION': 'com.apple.compilers.llvmgcc42',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
# MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min
'MACOSX_DEPLOYMENT_TARGET': '<(mac_deployment_target)',

deps/v8/include/v8-profiler.h | 45

@@ -105,6 +105,9 @@ class V8EXPORT CpuProfileNode {
/** Returns function entry UID. */
unsigned GetCallUid() const;
/** Returns id of the node. The id is unique within the tree */
unsigned GetNodeId() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
@@ -116,9 +119,8 @@
/**
* CpuProfile contains a CPU profile in a form of two call trees:
* - top-down (from main() down to functions that do all the work);
* - bottom-up call graph (in backward direction).
* CpuProfile contains a CPU profile in a form of top-down call tree
* (from main() down to functions that do all the work).
*/
class V8EXPORT CpuProfile {
public:
@@ -128,12 +130,21 @@
/** Returns CPU profile title. */
Handle<String> GetTitle() const;
/** Returns the root node of the bottom up call tree. */
const CpuProfileNode* GetBottomUpRoot() const;
/** Returns the root node of the top down call tree. */
const CpuProfileNode* GetTopDownRoot() const;
/**
* Returns number of samples recorded. The samples are not recorded unless
* |record_samples| parameter of CpuProfiler::StartProfiling is true.
*/
int GetSamplesCount() const;
/**
* Returns the profile node corresponding to the top frame of the sample at
* the given index.
*/
const CpuProfileNode* GetSample(int index) const;
/**
* Deletes the profile and removes it from CpuProfiler's list.
* All pointers to nodes previously returned become invalid.
@@ -183,8 +194,11 @@
* title are silently ignored. While collecting a profile, functions
* from all security contexts are included in it. The token-based
* filtering is only performed when querying for a profile.
*
* |record_samples| parameter controls whether individual samples should
* be recorded in addition to the aggregated tree.
*/
static void StartProfiling(Handle<String> title);
static void StartProfiling(Handle<String> title, bool record_samples = false);
/**
* Stops collecting CPU profile with a given title and returns it.
@@ -406,6 +420,20 @@ class V8EXPORT HeapProfiler {
*/
static const SnapshotObjectId kUnknownObjectId = 0;
/**
* Callback interface for retrieving user friendly names of global objects.
*/
class ObjectNameResolver {
public:
/**
* Returns name to be used in the heap snapshot for given node. Returned
* string must stay alive until snapshot collection is completed.
*/
virtual const char* GetName(Handle<Object> object) = 0;
protected:
virtual ~ObjectNameResolver() {}
};
/**
* Takes a heap snapshot and returns it. Title may be an empty string.
* See HeapSnapshot::Type for types description.
@@ -413,7 +441,8 @@
static const HeapSnapshot* TakeSnapshot(
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull,
ActivityControl* control = NULL);
ActivityControl* control = NULL,
ObjectNameResolver* global_object_name_resolver = NULL);
/**
* Starts tracking of heap objects population statistics. After calling
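
Taken together, the v8-profiler.h changes above add per-sample access to CPU profiles and a resolver hook for naming global objects in heap snapshots. A hedged sketch of how an embedder might use both (DumpProfile and GlobalNameResolver are illustrative names, not part of the API):

    #include <stdio.h>
    #include <v8.h>
    #include <v8-profiler.h>

    // Supplies user friendly names for global objects in a heap snapshot.
    // The returned string must stay alive until collection is completed.
    class GlobalNameResolver : public v8::HeapProfiler::ObjectNameResolver {
     public:
      virtual const char* GetName(v8::Handle<v8::Object> object) {
        return "global-object";  // Illustrative fixed name.
      }
    };

    void DumpProfile(v8::Isolate* isolate, v8::Handle<v8::String> title) {
      v8::HandleScope handle_scope(isolate);
      // Opt in to per-sample data on top of the aggregated top-down tree.
      v8::CpuProfiler::StartProfiling(title, true /* record_samples */);
      // ... run the workload being profiled ...
      const v8::CpuProfile* profile = v8::CpuProfiler::StopProfiling(title);
      for (int i = 0; i < profile->GetSamplesCount(); ++i) {
        // Each sample maps to the node for its topmost stack frame.
        printf("sample %d -> node %u\n", i, profile->GetSample(i)->GetNodeId());
      }
      GlobalNameResolver resolver;
      const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
          v8::String::New("names"), v8::HeapSnapshot::kFull, NULL, &resolver);
      (void)snapshot;  // Serialize or inspect the snapshot here.
    }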

deps/v8/include/v8.h | 1469

File diff suppressed because it is too large

deps/v8/samples/lineprocessor.cc | 41

@@ -98,13 +98,14 @@ enum MainCycleType {
};
const char* ToCString(const v8::String::Utf8Value& value);
void ReportException(v8::TryCatch* handler);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
v8::Handle<v8::String> ReadFile(const char* name);
v8::Handle<v8::String> ReadLine();
v8::Handle<v8::Value> Print(const v8::Arguments& args);
v8::Handle<v8::Value> ReadLine(const v8::Arguments& args);
bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Local<v8::Context> context,
bool report_exceptions);
@@ -132,7 +133,8 @@ void DispatchDebugMessages() {
int RunMain(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::HandleScope handle_scope;
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> script_source(NULL);
v8::Handle<v8::Value> script_name(NULL);
@@ -212,9 +214,10 @@ int RunMain(int argc, char* argv[]) {
v8::Context::Scope context_scope(context);
#ifdef ENABLE_DEBUGGER_SUPPORT
debug_message_context = v8::Persistent<v8::Context>::New(context);
debug_message_context =
v8::Persistent<v8::Context>::New(isolate, context);
v8::Locker locker;
v8::Locker locker(isolate);
if (support_callback) {
v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
@@ -235,7 +238,7 @@ int RunMain(int argc, char* argv[]) {
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions)
ReportException(&try_catch);
ReportException(isolate, &try_catch);
return 1;
}
}
@@ -246,13 +249,14 @@ int RunMain(int argc, char* argv[]) {
script->Run();
if (try_catch.HasCaught()) {
if (report_exceptions)
ReportException(&try_catch);
ReportException(isolate, &try_catch);
return 1;
}
}
if (cycle_type == CycleInCpp) {
bool res = RunCppCycle(script, v8::Context::GetCurrent(),
bool res = RunCppCycle(script,
v8::Context::GetCurrent(),
report_exceptions);
return !res;
} else {
@@ -262,15 +266,16 @@ int RunMain(int argc, char* argv[]) {
}
bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Local<v8::Context> context,
bool report_exceptions) {
v8::Isolate* isolate = context->GetIsolate();
#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Locker lock;
v8::Locker lock(isolate);
#endif // ENABLE_DEBUGGER_SUPPORT
v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
v8::Handle<v8::Value> process_val =
v8::Context::GetCurrent()->Global()->Get(fun_name);
v8::Handle<v8::Value> process_val = context->Global()->Get(fun_name);
// If there is no Process function, or if it is not a function,
// bail out
@@ -285,7 +290,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
while (!feof(stdin)) {
v8::HandleScope handle_scope;
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> input_line = ReadLine();
if (input_line == v8::Undefined()) {
@@ -302,7 +307,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
argc, argv);
if (try_catch.HasCaught()) {
if (report_exceptions)
ReportException(&try_catch);
ReportException(isolate, &try_catch);
return false;
}
}
@@ -349,8 +354,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
}
void ReportException(v8::TryCatch* try_catch) {
v8::HandleScope handle_scope;
void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
v8::HandleScope handle_scope(isolate);
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
v8::Handle<v8::Message> message = try_catch->Message();
@@ -388,7 +393,7 @@ void ReportException(v8::TryCatch* try_catch) {
v8::Handle<v8::Value> Print(const v8::Arguments& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope;
v8::HandleScope handle_scope(args.GetIsolate());
if (first) {
first = false;
} else {
@@ -420,7 +425,7 @@ v8::Handle<v8::String> ReadLine() {
char* res;
{
#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Unlocker unlocker;
v8::Unlocker unlocker(v8::Isolate::GetCurrent());
#endif // ENABLE_DEBUGGER_SUPPORT
res = fgets(buffer, kBufferSize, stdin);
}
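
Note how lineprocessor.cc now constructs v8::Locker and v8::Unlocker with the isolate, matching the 3.16.6 ChangeLog entry that made the parameter mandatory. The locking pattern in isolation (a sketch; assumes the caller owns no other V8 state):

    #include <v8.h>

    void RunLocked(v8::Isolate* isolate) {
      v8::Locker locker(isolate);  // Exclusive access; isolate now required.
      v8::Isolate::Scope isolate_scope(isolate);
      v8::HandleScope handle_scope(isolate);
      // ... call into V8 here ...
      {
        v8::Unlocker unlocker(isolate);  // Release while blocking, e.g. on fgets().
        // ... blocking I/O only; no V8 calls in this scope ...
      }
    }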

deps/v8/samples/process.cc | 63

@@ -79,7 +79,8 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
public:
// Creates a new processor that processes requests by invoking the
// Process function of the JavaScript script given as an argument.
explicit JsHttpRequestProcessor(Handle<String> script) : script_(script) { }
JsHttpRequestProcessor(Isolate* isolate, Handle<String> script)
: isolate_(isolate), script_(script) { }
virtual ~JsHttpRequestProcessor();
virtual bool Initialize(map<string, string>* opts,
@@ -97,8 +98,8 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
// Constructs the template that describes the JavaScript wrapper
// type for requests.
static Handle<ObjectTemplate> MakeRequestTemplate();
static Handle<ObjectTemplate> MakeMapTemplate();
static Handle<ObjectTemplate> MakeRequestTemplate(Isolate* isolate);
static Handle<ObjectTemplate> MakeMapTemplate(Isolate* isolate);
// Callbacks that access the individual fields of request objects.
static Handle<Value> GetPath(Local<String> name, const AccessorInfo& info);
@@ -116,11 +117,14 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
// Utility methods for wrapping C++ objects as JavaScript objects,
// and going back again.
static Handle<Object> WrapMap(map<string, string>* obj);
Handle<Object> WrapMap(map<string, string>* obj);
static map<string, string>* UnwrapMap(Handle<Object> obj);
static Handle<Object> WrapRequest(HttpRequest* obj);
Handle<Object> WrapRequest(HttpRequest* obj);
static HttpRequest* UnwrapRequest(Handle<Object> obj);
Isolate* GetIsolate() { return isolate_; }
Isolate* isolate_;
Handle<String> script_;
Persistent<Context> context_;
Persistent<Function> process_;
@@ -134,12 +138,12 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
static Handle<Value> LogCallback(const Arguments& args) {
if (args.Length() < 1) return v8::Undefined();
HandleScope scope;
if (args.Length() < 1) return Undefined();
HandleScope scope(args.GetIsolate());
Handle<Value> arg = args[0];
String::Utf8Value value(arg);
HttpRequestProcessor::Log(*value);
return v8::Undefined();
return Undefined();
}
@@ -147,7 +151,7 @@ static Handle<Value> LogCallback(const Arguments& args) {
bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
map<string, string>* output) {
// Create a handle scope to hold the temporary references.
HandleScope handle_scope;
HandleScope handle_scope(GetIsolate());
// Create a template for the global object where we set the
// built-in global functions.
@@ -187,7 +191,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Store the function in a Persistent handle, since we also want
// that to remain after this call returns
process_ = Persistent<Function>::New(process_fun);
process_ = Persistent<Function>::New(GetIsolate(), process_fun);
// All done; all went well
return true;
@@ -195,7 +199,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
HandleScope handle_scope;
HandleScope handle_scope(GetIsolate());
// We're just about to compile the script; set up an error handler to
// catch any exceptions the script might throw.
@@ -225,7 +229,7 @@ bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
map<string, string>* output) {
HandleScope handle_scope;
HandleScope handle_scope(GetIsolate());
// Wrap the map object in a JavaScript wrapper
Handle<Object> opts_obj = WrapMap(opts);
@@ -242,7 +246,7 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
bool JsHttpRequestProcessor::Process(HttpRequest* request) {
// Create a handle scope to keep the temporary object references.
HandleScope handle_scope;
HandleScope handle_scope(GetIsolate());
// Enter this processor's context so all the remaining operations
// take place there
@@ -273,8 +277,9 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
// Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles they will be
// automatically reclaimed.
context_.Dispose();
process_.Dispose();
Isolate* isolate = GetIsolate();
context_.Dispose(isolate);
process_.Dispose(isolate);
}
@@ -290,13 +295,13 @@ Persistent<ObjectTemplate> JsHttpRequestProcessor::map_template_;
// JavaScript object.
Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Handle scope for temporary handles.
HandleScope handle_scope;
HandleScope handle_scope(GetIsolate());
// Fetch the template for creating JavaScript map wrappers.
// It only has to be created once, which we do on demand.
if (map_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate();
map_template_ = Persistent<ObjectTemplate>::New(raw_template);
Handle<ObjectTemplate> raw_template = MakeMapTemplate(GetIsolate());
map_template_ = Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ = map_template_;
@@ -373,8 +378,9 @@ Handle<Value> JsHttpRequestProcessor::MapSet(Local<String> name,
}
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate() {
HandleScope handle_scope;
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
Isolate* isolate) {
HandleScope handle_scope(isolate);
Handle<ObjectTemplate> result = ObjectTemplate::New();
result->SetInternalFieldCount(1);
@@ -395,13 +401,14 @@
*/
Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// Handle scope for temporary handles.
HandleScope handle_scope;
HandleScope handle_scope(GetIsolate());
// Fetch the template for creating JavaScript http request wrappers.
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeRequestTemplate();
request_template_ = Persistent<ObjectTemplate>::New(raw_template);
Handle<ObjectTemplate> raw_template = MakeRequestTemplate(GetIsolate());
request_template_ =
Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ = request_template_;
@@ -471,8 +478,9 @@ Handle<Value> JsHttpRequestProcessor::GetUserAgent(Local<String> name,
}
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate() {
HandleScope handle_scope;
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
Isolate* isolate) {
HandleScope handle_scope(isolate);
Handle<ObjectTemplate> result = ObjectTemplate::New();
result->SetInternalFieldCount(1);
@@ -604,13 +612,14 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "No script was specified.\n");
return 1;
}
HandleScope scope;
Isolate* isolate = Isolate::GetCurrent();
HandleScope scope(isolate);
Handle<String> source = ReadFile(file);
if (source.IsEmpty()) {
fprintf(stderr, "Error reading '%s'.\n", file.c_str());
return 1;
}
JsHttpRequestProcessor processor(source);
JsHttpRequestProcessor processor(isolate, source);
map<string, string> output;
if (!processor.Initialize(&options, &output)) {
fprintf(stderr, "Error initializing processor.\n");
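
The wrap/unwrap machinery in process.cc boils down to stashing a raw C++ pointer in an internal field of a template-created object. A condensed sketch of that pattern (WrapPointer and UnwrapPointer are illustrative names; assumes a context is entered):

    #include <v8.h>

    v8::Handle<v8::Object> WrapPointer(v8::Isolate* isolate, void* ptr) {
      v8::HandleScope handle_scope(isolate);
      v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
      templ->SetInternalFieldCount(1);  // Reserve one slot for the pointer.
      v8::Handle<v8::Object> result = templ->NewInstance();
      result->SetInternalField(0, v8::External::New(ptr));
      return handle_scope.Close(result);  // Escape the local handle scope.
    }

    void* UnwrapPointer(v8::Handle<v8::Object> obj) {
      v8::Handle<v8::External> field =
          v8::Handle<v8::External>::Cast(obj->GetInternalField(0));
      return field->Value();
    }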

deps/v8/samples/shell.cc | 78

@@ -47,8 +47,9 @@
v8::Persistent<v8::Context> CreateShellContext();
void RunShell(v8::Handle<v8::Context> context);
int RunMain(int argc, char* argv[]);
bool ExecuteString(v8::Handle<v8::String> source,
int RunMain(v8::Isolate* isolate, int argc, char* argv[]);
bool ExecuteString(v8::Isolate* isolate,
v8::Handle<v8::String> source,
v8::Handle<v8::Value> name,
bool print_result,
bool report_exceptions);
@@ -58,7 +59,7 @@ v8::Handle<v8::Value> Load(const v8::Arguments& args);
v8::Handle<v8::Value> Quit(const v8::Arguments& args);
v8::Handle<v8::Value> Version(const v8::Arguments& args);
v8::Handle<v8::String> ReadFile(const char* name);
void ReportException(v8::TryCatch* handler);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
static bool run_shell;
@@ -66,20 +67,21 @@ static bool run_shell;
int main(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
run_shell = (argc == 1);
int result;
{
v8::HandleScope handle_scope;
v8::HandleScope handle_scope(isolate);
v8::Persistent<v8::Context> context = CreateShellContext();
if (context.IsEmpty()) {
printf("Error creating context\n");
fprintf(stderr, "Error creating context\n");
return 1;
}
context->Enter();
result = RunMain(argc, argv);
result = RunMain(isolate, argc, argv);
if (run_shell) RunShell(context);
context->Exit();
context.Dispose();
context.Dispose(isolate);
}
v8::V8::Dispose();
return result;
@@ -118,7 +120,7 @@ v8::Persistent<v8::Context> CreateShellContext() {
v8::Handle<v8::Value> Print(const v8::Arguments& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope;
v8::HandleScope handle_scope(args.GetIsolate());
if (first) {
first = false;
} else {
@@ -158,7 +160,7 @@ v8::Handle<v8::Value> Read(const v8::Arguments& args) {
// JavaScript file.
v8::Handle<v8::Value> Load(const v8::Arguments& args) {
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope;
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
return v8::ThrowException(v8::String::New("Error loading file"));
@@ -167,7 +169,11 @@ v8::Handle<v8::Value> Load(const v8::Arguments& args) {
if (source.IsEmpty()) {
return v8::ThrowException(v8::String::New("Error loading file"));
}
if (!ExecuteString(source, v8::String::New(*file), false, false)) {
if (!ExecuteString(args.GetIsolate(),
source,
v8::String::New(*file),
false,
false)) {
return v8::ThrowException(v8::String::New("Error executing file"));
}
}
@@ -216,7 +222,7 @@ v8::Handle<v8::String> ReadFile(const char* name) {
// Process remaining command line arguments and execute files
int RunMain(int argc, char* argv[]) {
int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
for (int i = 1; i < argc; i++) {
const char* str = argv[i];
if (strcmp(str, "--shell") == 0) {
@@ -226,21 +232,22 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
// alone JavaScript engines.
continue;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
fprintf(stderr,
"Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
v8::Handle<v8::String> source = v8::String::New(argv[++i]);
if (!ExecuteString(source, file_name, false, true)) return 1;
if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
} else {
// Use all other arguments as names of files to load and run.
v8::Handle<v8::String> file_name = v8::String::New(str);
v8::Handle<v8::String> source = ReadFile(str);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", str);
fprintf(stderr, "Error reading '%s'\n", str);
continue;
}
if (!ExecuteString(source, file_name, false, true)) return 1;
if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
}
}
return 0;
@@ -249,35 +256,40 @@ int RunMain(int argc, char* argv[]) {
// The read-eval-execute loop of the shell.
void RunShell(v8::Handle<v8::Context> context) {
printf("V8 version %s [sample shell]\n", v8::V8::GetVersion());
fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
v8::Local<v8::String> name(v8::String::New("(shell)"));
while (true) {
char buffer[kBufferSize];
printf("> ");
fprintf(stderr, "> ");
char* str = fgets(buffer, kBufferSize, stdin);
if (str == NULL) break;
v8::HandleScope handle_scope;
ExecuteString(v8::String::New(str), name, true, true);
v8::HandleScope handle_scope(context->GetIsolate());
ExecuteString(context->GetIsolate(),
v8::String::New(str),
name,
true,
true);
}
printf("\n");
fprintf(stderr, "\n");
}
// Executes a string within the current v8 context.
bool ExecuteString(v8::Handle<v8::String> source,
bool ExecuteString(v8::Isolate* isolate,
v8::Handle<v8::String> source,
v8::Handle<v8::Value> name,
bool print_result,
bool report_exceptions) {
v8::HandleScope handle_scope;
v8::HandleScope handle_scope(isolate);
v8::TryCatch try_catch;
v8::Handle<v8::Script> script = v8::Script::Compile(source, name);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions)
ReportException(&try_catch);
ReportException(isolate, &try_catch);
return false;
} else {
v8::Handle<v8::Value> result = script->Run();
@@ -285,7 +297,7 @@ bool ExecuteString(v8::Handle<v8::String> source,
assert(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions)
ReportException(&try_catch);
ReportException(isolate, &try_catch);
return false;
} else {
assert(!try_catch.HasCaught());
@@ -302,39 +314,39 @@ bool ExecuteString(v8::Handle<v8::String> source,
}
void ReportException(v8::TryCatch* try_catch) {
v8::HandleScope handle_scope;
void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
v8::HandleScope handle_scope(isolate);
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
v8::Handle<v8::Message> message = try_catch->Message();
if (message.IsEmpty()) {
// V8 didn't provide any extra information about this error; just
// print the exception.
printf("%s\n", exception_string);
fprintf(stderr, "%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptResourceName());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
printf("%s:%i: %s\n", filename_string, linenum, exception_string);
fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
v8::String::Utf8Value sourceline(message->GetSourceLine());
const char* sourceline_string = ToCString(sourceline);
printf("%s\n", sourceline_string);
fprintf(stderr, "%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
int start = message->GetStartColumn();
for (int i = 0; i < start; i++) {
printf(" ");
fprintf(stderr, " ");
}
int end = message->GetEndColumn();
for (int i = start; i < end; i++) {
printf("^");
fprintf(stderr, "^");
}
printf("\n");
fprintf(stderr, "\n");
v8::String::Utf8Value stack_trace(try_catch->StackTrace());
if (stack_trace.length() > 0) {
const char* stack_trace_string = ToCString(stack_trace);
printf("%s\n", stack_trace_string);
fprintf(stderr, "%s\n", stack_trace_string);
}
}
}
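
ExecuteString above keeps its compile-then-run shape; only the isolate is now threaded through and diagnostics go to stderr. Stripped of reporting, the core pattern is (a sketch; assumes a context is entered):

    #include <v8.h>

    bool CompileAndRun(v8::Isolate* isolate, const char* source) {
      v8::HandleScope handle_scope(isolate);
      v8::TryCatch try_catch;  // Catches both compile- and run-time errors.
      v8::Handle<v8::Script> script =
          v8::Script::Compile(v8::String::New(source));
      if (script.IsEmpty()) return false;  // Compilation threw.
      v8::Handle<v8::Value> result = script->Run();
      return !result.IsEmpty();  // An empty handle signals an exception.
    }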

deps/v8/src/SConscript | 4

@@ -84,12 +84,12 @@ SOURCES = {
global-handles.cc
handles.cc
heap-profiler.cc
heap-snapshot-generator.cc
heap.cc
hydrogen-instructions.cc
hydrogen.cc
ic.cc
incremental-marking.cc
inspector.cc
interface.cc
interpreter-irregexp.cc
isolate.cc
@@ -97,7 +97,6 @@ SOURCES = {
lithium-allocator.cc
lithium.cc
liveedit.cc
liveobjectlist.cc
log-utils.cc
log.cc
mark-compact.cc
@@ -328,6 +327,7 @@ debug-debugger.js
EXPERIMENTAL_LIBRARY_FILES = '''
symbol.js
proxy.js
collection.js
'''.split()

deps/v8/src/accessors.cc | 168

@@ -42,8 +42,8 @@ namespace internal {
template <class C>
static C* FindInstanceOf(Object* obj) {
for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype()) {
static C* FindInstanceOf(Isolate* isolate, Object* obj) {
for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
if (Is<C>(cur)) return C::cast(cur);
}
return NULL;
@@ -77,7 +77,7 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
// Traverse the prototype chain until we reach an array.
JSArray* holder = FindInstanceOf<JSArray>(object);
JSArray* holder = FindInstanceOf<JSArray>(Isolate::Current(), object);
return holder == NULL ? Smi::FromInt(0) : holder->length();
}
@@ -103,7 +103,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
// causes an infinite loop.
if (!object->IsJSArray()) {
return object->SetLocalPropertyIgnoreAttributes(
isolate->heap()->length_symbol(), value, NONE);
isolate->heap()->length_string(), value, NONE);
}
value = FlattenNumber(value);
@@ -112,7 +112,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
HandleScope scope(isolate);
// Protect raw pointers.
Handle<JSObject> object_handle(object, isolate);
Handle<JSArray> array_handle(JSArray::cast(object), isolate);
Handle<Object> value_handle(value, isolate);
bool has_exception;
@@ -122,7 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
return array_handle->SetElementsLength(*uint32_v);
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@@ -383,13 +383,14 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = {
MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
Script* raw_script = Script::cast(JSValue::cast(object)->value());
HandleScope scope(raw_script->GetIsolate());
Handle<Script> script(raw_script);
// If this is not a script compiled through eval there is no eval position.
int compilation_type = Smi::cast(script->compilation_type())->value();
if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
return HEAP->undefined_value();
return script->GetHeap()->undefined_value();
}
// Get the function from where eval was called and find the source position
@@ -441,18 +442,19 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
Heap* heap = Isolate::Current()->heap();
JSFunction* function = FindInstanceOf<JSFunction>(object);
if (function == NULL) return heap->undefined_value();
Isolate* isolate = Isolate::Current();
JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return isolate->heap()->undefined_value();
while (!function->should_have_prototype()) {
function = FindInstanceOf<JSFunction>(function->GetPrototype());
function = FindInstanceOf<JSFunction>(isolate, function->GetPrototype());
// There has to be one because we hit the getter.
ASSERT(function != NULL);
}
if (!function->has_prototype()) {
Object* prototype;
{ MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
{ MaybeObject* maybe_prototype
= isolate->heap()->AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
Object* result;
@@ -465,24 +467,46 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
Object* value,
Object* value_raw,
void*) {
Heap* heap = object->GetHeap();
JSFunction* function = FindInstanceOf<JSFunction>(object);
if (function == NULL) return heap->undefined_value();
if (!function->should_have_prototype()) {
Isolate* isolate = object->GetIsolate();
Heap* heap = isolate->heap();
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return heap->undefined_value();
if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
value,
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_string(),
value_raw,
NONE);
}
Object* prototype;
{ MaybeObject* maybe_prototype = function->SetPrototype(value);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
HandleScope scope(isolate);
Handle<JSFunction> function(function_raw, isolate);
Handle<Object> value(value_raw, isolate);
Handle<Object> old_value;
bool is_observed =
FLAG_harmony_observation &&
*function == object &&
function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
else
old_value = isolate->factory()->NewFunctionPrototype(function);
}
Handle<Object> result;
MaybeObject* maybe_result = function->SetPrototype(*value);
if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
ASSERT(function->prototype() == *value);
if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
function, "updated", isolate->factory()->prototype_string(), old_value);
}
ASSERT(function->prototype() == value);
return function;
return *function;
}
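A minimal JS sketch of the notification this adds (assuming the --harmony-observation Object.observe API of this era; the type and name strings are taken from the code above, the record shape is an assumption):

    function f() {}
    Object.observe(f, function(records) {
      // expected: records[0].type === "updated",
      //           records[0].name === "prototype",
      //           records[0].oldValue is the previous prototype
    });
    f.prototype = {};   // enqueues a record unless the value is unchanged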
@ -499,7 +523,8 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
JSFunction* function = FindInstanceOf<JSFunction>(object);
Isolate* isolate = Isolate::Current();
JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return Smi::FromInt(0);
// Check if already compiled.
if (function->shared()->is_compiled()) {
@ -507,7 +532,7 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
}
// If the function isn't compiled yet, the length is not computed correctly
// yet. Compile it now and return the right length.
HandleScope scope;
HandleScope scope(isolate);
Handle<JSFunction> handle(function);
if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
return Smi::FromInt(handle->shared()->length());
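A minimal JS sketch of the behavior preserved here: reading .length may be the first thing that pushes a lazily-compiled function through the compiler.

    function f(a, b, c) { /* body compiled lazily */ }
    f.length;   // compiles f if needed, then yields 3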
@ -529,8 +554,11 @@ const AccessorDescriptor Accessors::FunctionLength = {
MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
JSFunction* holder = FindInstanceOf<JSFunction>(object);
return holder == NULL ? HEAP->undefined_value() : holder->shared()->name();
Isolate* isolate = Isolate::Current();
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
return holder == NULL
? isolate->heap()->undefined_value()
: holder->shared()->name();
}
@ -550,7 +578,8 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
Factory* factory = Isolate::Current()->factory();
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
Vector<SlotRef> args_slots =
SlotRef::ComputeSlotMappingForArguments(
frame,
@ -561,7 +590,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue();
Handle<Object> value = args_slots[i].GetValue(isolate);
array->set(i, *value);
}
arguments->set_elements(*array);
@ -575,7 +604,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
JSFunction* holder = FindInstanceOf<JSFunction>(object);
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
Handle<JSFunction> function(holder, isolate);
@ -601,7 +630,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
// If there is an arguments variable in the stack, we return that.
Handle<ScopeInfo> scope_info(function->shared()->scope_info());
int index = scope_info->StackSlotIndex(
isolate->heap()->arguments_symbol());
isolate->heap()->arguments_string());
if (index >= 0) {
Handle<Object> arguments(frame->GetExpression(index), isolate);
if (!arguments->IsArgumentsMarker()) return *arguments;
@ -649,19 +678,6 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
static MaybeObject* CheckNonStrictCallerOrThrow(
Isolate* isolate,
JSFunction* caller) {
DisableAssertNoAllocation enable_allocation;
if (!caller->shared()->is_classic_mode()) {
return isolate->Throw(
*isolate->factory()->NewTypeError("strict_caller",
HandleVector<Object>(NULL, 0)));
}
return caller;
}
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
@ -712,7 +728,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
AssertNoAllocation no_alloc;
JSFunction* holder = FindInstanceOf<JSFunction>(object);
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
if (holder->shared()->native()) return isolate->heap()->null_value();
Handle<JSFunction> function(holder, isolate);
@ -748,7 +764,14 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
return CheckNonStrictCallerOrThrow(isolate, caller);
// Censor if the caller is not a classic mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
if (!caller->shared()->is_classic_mode()) {
return isolate->heap()->null_value();
}
return caller;
}
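A minimal JS sketch of the semantic change (per the ecmascript.org bug referenced above; "strict_caller" is the TypeError key from the removed helper):

    function sloppy() { return sloppy.caller; }
    function strictCaller() { "use strict"; return sloppy(); }
    strictCaller();   // used to throw TypeError("strict_caller"); now null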
@ -764,22 +787,49 @@ const AccessorDescriptor Accessors::FunctionCaller = {
//
MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
Object* current = receiver->GetPrototype();
static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
Object* receiver) {
Object* current = receiver->GetPrototype(isolate);
while (current->IsJSObject() &&
JSObject::cast(current)->map()->is_hidden_prototype()) {
current = current->GetPrototype();
current = current->GetPrototype(isolate);
}
return current;
}
MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
Object* value,
MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
return GetPrototypeSkipHiddenPrototypes(Isolate::Current(), receiver);
}
MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
Object* value_raw,
void*) {
const bool skip_hidden_prototypes = true;
const bool kSkipHiddenPrototypes = true;
// To be consistent with other Set functions, return the value.
return receiver->SetPrototype(value, skip_hidden_prototypes);
if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
Isolate* isolate = receiver_raw->GetIsolate();
HandleScope scope(isolate);
Handle<JSObject> receiver(receiver_raw);
Handle<Object> value(value_raw, isolate);
Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
isolate);
MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
Handle<Object> hresult;
if (!result->ToHandle(&hresult, isolate)) return result;
Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
isolate);
if (!new_value->SameValue(*old_value)) {
JSObject::EnqueueChangeRecord(receiver, "prototype",
isolate->factory()->proto_string(),
old_value);
}
return *hresult;
}
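A minimal JS sketch of the notification path added here (assuming --harmony-observation; the record name comes from proto_string above, presumably "__proto__"):

    var o = {};
    Object.observe(o, function(records) {
      // expected: records[0].type === "prototype",
      //           records[0].oldValue is the previous (non-hidden) prototype
    });
    o.__proto__ = Array.prototype;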
@ -802,15 +852,15 @@ static v8::Handle<v8::Value> ModuleGetExport(
ASSERT(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
Isolate* isolate = instance->GetIsolate();
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Isolate* isolate = instance->GetIsolate();
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1)));
return v8::Handle<v8::Value>();
}
return v8::Utils::ToLocal(Handle<Object>(value));
return v8::Utils::ToLocal(Handle<Object>(value, isolate));
}
@ -840,7 +890,7 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
int index,
PropertyAttributes attributes) {
Factory* factory = name->GetIsolate()->factory();
Handle<AccessorInfo> info = factory->NewAccessorInfo();
Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(true);
info->set_all_can_write(true);

1665
deps/v8/src/api.cc

File diff suppressed because it is too large

12
deps/v8/src/api.h

@ -177,7 +177,8 @@ class RegisteredExtension {
V(Context, Context) \
V(External, Foreign) \
V(StackTrace, JSArray) \
V(StackFrame, JSObject)
V(StackFrame, JSObject) \
V(DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
class Utils {
@ -201,8 +202,6 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<External> ToLocal(
v8::internal::Handle<v8::internal::Foreign> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@ -225,6 +224,10 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<DeclaredAccessorDescriptor> ToLocal(
v8::internal::Handle<v8::internal::DeclaredAccessorDescriptor> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@ -268,7 +271,6 @@ MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@ -280,6 +282,8 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
#undef MAKE_TO_LOCAL

2
deps/v8/src/apinatives.js

@ -90,7 +90,7 @@ function InstantiateFunction(data, name) {
// internal ToBoolean doesn't handle that!
if (!(typeof parent === 'undefined')) {
var parent_fun = Instantiate(parent);
fun.prototype.__proto__ = parent_fun.prototype;
%SetPrototype(fun.prototype, parent_fun.prototype);
}
ConfigureTemplateInstance(fun, data);
} catch (e) {
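The switch from a __proto__ store to %SetPrototype presumably updates the prototype directly instead of going through the observable __proto__ accessor path changed in accessors.cc above. A sketch only; %SetPrototype is a V8-internal runtime call, reachable from script solely with --allow-natives-syntax:

    var p = fun.prototype;
    p.__proto__ = parent_fun.prototype;        // old: ordinary property write
    %SetPrototype(p, parent_fun.prototype);    // new: direct internal update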

97
deps/v8/src/arm/assembler-arm-inl.h

@ -47,13 +47,54 @@ namespace v8 {
namespace internal {
int Register::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return kMaxNumAllocatableRegisters;
} else {
return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
}
}
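// Presumably: without VFP2 support a double value occupies a pair of core
// registers (see sfpd_lo/sfpd_hi in assembler-arm.h), so
// kGPRsPerNonVFP2Double == 2 GPRs drop out of the allocatable set.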
int DwVfpRegister::NumRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
} else {
return 1;
}
}
int DwVfpRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return NumRegisters() - kNumReservedRegisters;
} else {
return 1;
}
}
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
if (reg.code() > kDoubleRegZero.code()) {
return reg.code() - kNumReservedRegisters;
}
return reg.code();
}
DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
kNumReservedRegisters - 1);
if (index >= kDoubleRegZero.code()) {
return from_code(index + kNumReservedRegisters);
}
return from_code(index);
}
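// Resulting mapping, with d14 (kDoubleRegZero) and d15 (kScratchDoubleReg)
// reserved: d0..d13 map to indices 0..13, d16..d31 to indices 14..29.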
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
@ -66,13 +107,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
@ -85,9 +126,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(target) & ~3));
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@ -138,6 +178,19 @@ Address* RelocInfo::target_reference_address() {
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
ASSERT(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode mode) {
ASSERT(IsRuntimeEntry(rmode_));
if (target_address() != target) set_target_address(target, mode);
}
Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
@ -166,6 +219,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
static const int kNoCodeAgeSequenceLength = 3;
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Memory::Address_at(pc_ + Assembler::kInstrSize *
(kNoCodeAgeSequenceLength - 1)));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Memory::Address_at(pc_ + Assembler::kInstrSize *
(kNoCodeAgeSequenceLength - 1)) =
stub->instruction_start();
}
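// Both accessors address the same slot: the last word of the
// three-instruction (kNoCodeAgeSequenceLength) sequence, i.e. pc_ + 8 on
// ARM, where the stub's entry point is stored.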
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
@ -239,6 +310,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@ -248,7 +321,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
@ -265,6 +338,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@ -273,7 +348,7 @@ void RelocInfo::Visit(Heap* heap) {
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
@ -296,7 +371,7 @@ Operand::Operand(const ExternalReference& f) {
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE;
rmode_ = RelocInfo::NONE32;
}
@ -473,14 +548,12 @@ void Assembler::set_target_pointer_at(Address pc, Address target) {
Address Assembler::target_address_at(Address pc) {
return reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(target_pointer_at(pc)) & ~3);
return target_pointer_at(pc);
}
void Assembler::set_target_address_at(Address pc, Address target) {
set_target_pointer_at(pc, reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(target) & ~3));
set_target_pointer_at(pc, target);
}

691
deps/v8/src/arm/assembler-arm.cc

File diff suppressed because it is too large

335
deps/v8/src/arm/assembler-arm.h

@ -47,6 +47,50 @@
namespace v8 {
namespace internal {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
if (f == VFP2 && !FLAG_enable_vfp2) return false;
if (f == SUDIV && !FLAG_enable_sudiv) return false;
if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
return false;
}
if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
return (supported_ & (1u << f)) != 0;
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
return (found_by_runtime_probing_only_ &
(static_cast<uint64_t>(1) << f)) != 0;
}
static bool IsSafeForSnapshot(CpuFeature f) {
return (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
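// In other words: while the serializer is generating a snapshot, only
// features that were detected without runtime probing count as safe, so
// the snapshot stays portable across CPUs.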
private:
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_only_;
friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@ -71,21 +115,24 @@ namespace internal {
// Core register
struct Register {
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 8;
static const int kMaxNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
static const int kGPRsPerNonVFP2Double = 2;
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < kNumAllocatableRegisters);
ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"r0",
"r1",
@ -165,7 +212,6 @@ const Register sp = { kRegister_sp_Code };
const Register lr = { kRegister_lr_Code };
const Register pc = { kRegister_pc_Code };
// Single word VFP register.
struct SwVfpRegister {
bool is_valid() const { return 0 <= code_ && code_ < 32; }
@ -190,52 +236,36 @@ struct SwVfpRegister {
// Double word VFP register.
struct DwVfpRegister {
static const int kNumRegisters = 16;
static const int kMaxNumRegisters = 32;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, which does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
static const int kNumAllocatableRegisters = kNumRegisters -
static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
kNumReservedRegisters;
inline static int ToAllocationIndex(DwVfpRegister reg);
static DwVfpRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index);
}
// Note: the number of registers can be different at snapshot and run-time.
// Any code included in the snapshot must be able to run with either 16 or
// 32 registers.
inline static int NumRegisters();
inline static int NumAllocatableRegisters();
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"d0",
"d1",
"d2",
"d3",
"d4",
"d5",
"d6",
"d7",
"d8",
"d9",
"d10",
"d11",
"d12",
"d13"
};
return names[index];
}
inline static int ToAllocationIndex(DwVfpRegister reg);
static const char* AllocationIndexToString(int index);
inline static DwVfpRegister FromAllocationIndex(int index);
static DwVfpRegister from_code(int code) {
DwVfpRegister r = { code };
return r;
}
// Supporting d0 to d15, can be later extended to d31.
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is_valid() const {
return 0 <= code_ && code_ < kMaxNumRegisters;
}
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
SwVfpRegister low() const {
ASSERT(code_ < 16);
SwVfpRegister reg;
reg.code_ = code_ * 2;
@ -243,6 +273,7 @@ struct DwVfpRegister {
return reg;
}
SwVfpRegister high() const {
ASSERT(code_ < 16);
SwVfpRegister reg;
reg.code_ = (code_ * 2) + 1;
@ -322,6 +353,25 @@ const DwVfpRegister d12 = { 12 };
const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
const DwVfpRegister d16 = { 16 };
const DwVfpRegister d17 = { 17 };
const DwVfpRegister d18 = { 18 };
const DwVfpRegister d19 = { 19 };
const DwVfpRegister d20 = { 20 };
const DwVfpRegister d21 = { 21 };
const DwVfpRegister d22 = { 22 };
const DwVfpRegister d23 = { 23 };
const DwVfpRegister d24 = { 24 };
const DwVfpRegister d25 = { 25 };
const DwVfpRegister d26 = { 26 };
const DwVfpRegister d27 = { 27 };
const DwVfpRegister d28 = { 28 };
const DwVfpRegister d29 = { 29 };
const DwVfpRegister d30 = { 30 };
const DwVfpRegister d31 = { 31 };
const Register sfpd_lo = { kRegister_r6_Code };
const Register sfpd_hi = { kRegister_r7_Code };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
@ -399,7 +449,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE));
RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(static Operand Zero()) {
return Operand(static_cast<int32_t>(0));
}
@ -498,114 +548,6 @@ class MemOperand BASE_EMBEDDED {
friend class Assembler;
};
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
if (f == VFP2 && !FLAG_enable_vfp2) return false;
if (f == SUDIV && !FLAG_enable_sudiv) return false;
if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
return false;
}
return (supported_ & (1u << f)) != 0;
}
#ifdef DEBUG
// Check whether a feature is currently enabled.
static bool IsEnabled(CpuFeature f) {
ASSERT(initialized_);
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL) {
// When no isolate is available, work as if we're running in
// release mode.
return IsSupported(f);
}
unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
return (enabled & (1u << f)) != 0;
}
#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
// VFP2 and ARMv7 are implied by VFP3.
if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
isolate_ = Isolate::UncheckedCurrent();
old_enabled_ = 0;
if (isolate_ != NULL) {
old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
isolate_->set_enabled_cpu_features(old_enabled_ | mask);
}
}
~Scope() {
ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
if (isolate_ != NULL) {
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
private:
Isolate* isolate_;
unsigned old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
class TryForceFeatureScope BASE_EMBEDDED {
public:
explicit TryForceFeatureScope(CpuFeature f)
: old_supported_(CpuFeatures::supported_) {
if (CanForce()) {
CpuFeatures::supported_ |= (1u << f);
}
}
~TryForceFeatureScope() {
if (CanForce()) {
CpuFeatures::supported_ = old_supported_;
}
}
private:
static bool CanForce() {
// It's only safe to temporarily force support of CPU features
// when there's only a single isolate, which is guaranteed when
// the serializer is enabled.
return Serializer::enabled();
}
const unsigned old_supported_;
};
private:
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@ -629,7 +571,11 @@ extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
struct VmovIndex {
unsigned char index;
};
const VmovIndex VmovIndexLo = { 0 };
const VmovIndex VmovIndexHi = { 1 };
class Assembler : public AssemblerBase {
public:
@ -647,15 +593,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM. This is needed by the full compiler so
// that it can recompile code with debug support and fix the PC.
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
virtual ~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@ -1002,10 +940,7 @@ class Assembler : public AssemblerBase {
LFlag l = Short); // v5 and above
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D15.
// Currently these APIs do not support extended D registers, i.e., D16 to D31.
// However, some simple modifications can allow
// these APIs to support D16 to D31.
// All these APIs support S0 to S31 and D0 to D31.
void vldr(const DwVfpRegister dst,
const Register base,
@ -1065,14 +1000,17 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
double imm,
const Register scratch = no_reg,
const Condition cond = al);
const Register scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
const VmovIndex index,
const Register src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
@ -1134,6 +1072,14 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmla(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmls(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@ -1185,8 +1131,6 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
bool predictable_code_size() const { return predictable_code_size_; }
static bool use_immediate_embedded_pointer_loads(
const Assembler* assembler) {
#ifdef USE_BLX
@ -1282,8 +1226,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
int pc_offset() const { return pc_ - buffer_; }
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@ -1299,8 +1241,11 @@ class Assembler : public AssemblerBase {
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
static bool IsStrRegisterImmediate(Instr instr);
static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
static bool IsAddRegisterImmediate(Instr instr);
@ -1315,6 +1260,7 @@ class Assembler : public AssemblerBase {
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
static bool IsVldrDPcImmediateOffset(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
@ -1325,10 +1271,13 @@ class Assembler : public AssemblerBase {
static bool IsMovW(Instr instr);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
// and the accessed constant.
static const int kMaxDistToPool = 4*KB;
static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
// PC-relative loads, thereby defining a maximum distance between the
// instruction and the accessed constant.
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer; the integer distance therefore acts as
// the limit.
static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
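// With kInstrSize == 4 this works out to 4096 / 4 == 1024 pending entries,
// i.e. one integer-pool load per instruction over the maximum distance.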
// Postpone the generation of the constant pool for the specified number of
// instructions.
@ -1343,8 +1292,6 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos
@ -1370,7 +1317,9 @@ class Assembler : public AssemblerBase {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_reloc_info_ == 0) ||
(pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
(pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
(pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
@ -1386,13 +1335,6 @@ class Assembler : public AssemblerBase {
}
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
byte* buffer_;
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@ -1401,7 +1343,6 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
byte* pc_; // the program counter; moves forward
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@ -1421,13 +1362,6 @@ class Assembler : public AssemblerBase {
static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
// Average distance between a constant pool and the first instruction
// accessing the constant pool. Longer distance should result in less I-cache
// pollution.
// In practice the distance will be smaller since constant pool emission is
// forced after function return and sometimes after unconditional branches.
static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
@ -1452,6 +1386,9 @@ class Assembler : public AssemblerBase {
RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
// number of pending reloc info entries in the buffer
int num_pending_reloc_info_;
// Number of pending reloc info entries included above which also happen to
// be 64-bit.
int num_pending_64_bit_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@ -1488,6 +1425,8 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
UseConstantPoolMode mode = USE_CONSTANT_POOL);
void RecordRelocInfo(double data);
void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
@ -1495,10 +1434,6 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
bool predictable_code_size_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
@ -1512,26 +1447,6 @@ class EnsureSpace BASE_EMBEDDED {
};
class PredictableCodeSizeScope {
public:
explicit PredictableCodeSizeScope(Assembler* assembler)
: asm_(assembler) {
old_value_ = assembler->predictable_code_size();
assembler->set_predictable_code_size(true);
}
~PredictableCodeSizeScope() {
if (!old_value_) {
asm_->set_predictable_code_size(false);
}
}
private:
Assembler* asm_;
bool old_value_;
};
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_H_

177
deps/v8/src/arm/builtins-arm.cc

@ -124,12 +124,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
__ AllocateInNewSpace(size,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
__ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
@ -140,7 +135,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
__ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
__ mov(scratch3, Operand(0, RelocInfo::NONE));
__ mov(scratch3, Operand::Zero());
__ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
if (initial_capacity == 0) {
@ -319,7 +314,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
@ -347,7 +342,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ tst(r2, r2);
__ b(ne, &not_empty_array);
__ Drop(1); // Adjust stack.
__ mov(r0, Operand(0)); // Treat this as a call with argc of zero.
__ mov(r0, Operand::Zero()); // Treat this as a call with argc of zero.
__ b(&empty_array);
__ bind(&not_empty_array);
@ -542,22 +537,55 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r2 : type info cell
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_constructor;
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function");
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
if (FLAG_optimize_constructed_arrays) {
// We should either have undefined in r2 or a valid JSGlobalPropertyCell.
Label okay_here;
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(), masm->isolate());
Handle<Map> global_property_cell_map(
masm->isolate()->heap()->global_property_cell_map());
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &okay_here);
__ ldr(r3, FieldMemOperand(r2, 0));
__ cmp(r3, Operand(global_property_cell_map));
__ Assert(eq, "Expected property cell in register ebx");
__ bind(&okay_here);
}
}
if (FLAG_optimize_constructed_arrays) {
Label not_zero_case, not_one_case;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
ArrayNoArgumentConstructorStub no_argument_stub;
__ TailCallStub(&no_argument_stub);
__ bind(&not_zero_case);
__ cmp(r0, Operand(1));
__ b(gt, &not_one_case);
ArraySingleArgumentConstructorStub single_argument_stub;
__ TailCallStub(&single_argument_stub);
__ bind(&not_one_case);
ArrayNArgumentsConstructorStub n_argument_stub;
__ TailCallStub(&n_argument_stub);
} else {
Label generic_constructor;
// Run the native code for the Array function called as a constructor.
ArrayNativeCode(masm, &generic_constructor);
@ -567,6 +595,7 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
}
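A minimal JS sketch of the argc-based dispatch above (stub names as in this diff; this path is taken only when FLAG_optimize_constructed_arrays is set):

    new Array();          // argc == 0 -> ArrayNoArgumentConstructorStub
    new Array(16);        // argc == 1 -> ArraySingleArgumentConstructorStub
    new Array(1, 2, 3);   // argc  > 1 -> ArrayNArgumentsConstructorStub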
@ -590,7 +619,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the first argument into r0 and get rid of the rest.
Label no_arguments;
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
__ b(eq, &no_arguments);
// First args = sp[(argc - 1) * 4].
__ sub(r0, r0, Operand(1));
@ -619,7 +648,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -----------------------------------
Label gc_required;
__ AllocateInNewSpace(JSValue::kSize,
__ Allocate(JSValue::kSize,
r0, // Result.
r3, // Scratch.
r4, // Scratch.
@ -634,7 +663,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
__ Assert(eq, "Unexpected string wrapper instance size");
__ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
__ cmp(r4, Operand(0, RelocInfo::NONE));
__ cmp(r4, Operand::Zero());
__ Assert(eq, "Unexpected unused properties of string wrapper");
}
__ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
@ -682,7 +711,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into r2, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
__ LoadRoot(argument, Heap::kEmptyStringRootIndex);
__ LoadRoot(argument, Heap::kempty_stringRootIndex);
__ Drop(1);
__ b(&argument_is_string);
@ -712,6 +741,35 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
}
void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve the function.
__ push(r1);
// Push call kind information.
__ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kInstallRecompiledCode, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore call kind information.
__ pop(r5);
// Restore saved function.
__ pop(r1);
// Tear down internal frame.
}
// Do a tail-call of the compiled function.
__ Jump(r2);
}
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@ -1044,9 +1102,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
// Symbols are "objects".
__ CompareInstanceType(r1, r3, SYMBOL_TYPE);
__ b(eq, &exit);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
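A minimal JS sketch of the extra check (assuming the --harmony-symbols semantics of this era, in which symbols counted as spec objects; the Symbol() spelling is the later standard one):

    function C() { return Symbol(); }
    new C();   // now yields the symbol; previously the fresh `this` object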
@ -1097,7 +1159,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5-r7, cp may be clobbered
// Clear the context before we push it when entering the internal frame.
__ mov(cp, Operand(0, RelocInfo::NONE));
__ mov(cp, Operand::Zero());
// Enter an internal frame.
{
@ -1141,6 +1203,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
// No type feedback cell is available
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(), masm->isolate());
__ mov(r2, Operand(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@ -1226,6 +1292,57 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - function object
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(1, 0, r1);
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 1);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
} \
void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification; this is important for compiled
// stubs that tail call the runtime on deopts, passing their parameters in
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
__ CallRuntime(Runtime::kNotifyStubFailure, 0);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
__ add(sp, sp, Operand(kPointerSize)); // Ignore state
__ mov(pc, lr); // Jump to miss handler
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@ -1284,12 +1401,6 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
CpuFeatures::TryForceFeatureScope scope(VFP3);
if (!CPU::SupportsCrankshaft()) {
__ Abort("Unreachable code: Cannot optimize without VFP3 support.");
return;
}
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@ -1322,7 +1433,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{ Label done;
__ cmp(r0, Operand(0));
__ cmp(r0, Operand::Zero());
__ b(ne, &done);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
@ -1343,7 +1454,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// r0: actual number of arguments
// r1: function
Label shift_arguments;
__ mov(r4, Operand(0, RelocInfo::NONE)); // indicate regular JS_FUNCTION
__ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@ -1398,7 +1509,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ mov(r4, Operand(0, RelocInfo::NONE));
__ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
// Use the global receiver object from the called function as the
@ -1420,11 +1531,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
__ mov(r4, Operand(1, RelocInfo::NONE)); // indicate function proxy
__ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(eq, &shift_arguments);
__ bind(&non_function);
__ mov(r4, Operand(2, RelocInfo::NONE)); // indicate non-function
__ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@ -1468,7 +1579,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r4, r4);
__ b(eq, &function);
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r4, Operand(1));
__ b(ne, &non_proxy);
@ -1546,7 +1657,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push current limit and index.
__ bind(&okay);
__ push(r0); // limit
__ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Get the receiver.
@ -1658,7 +1769,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(r1); // add function proxy as last argument
__ add(r0, r0, Operand(1));
__ mov(r2, Operand(0, RelocInfo::NONE));
__ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),

2155
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

202
deps/v8/src/arm/code-stubs-arm.h

@ -36,7 +36,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@ -58,7 +58,7 @@ class TranscendentalCacheStub: public CodeStub {
};
class StoreBufferOverflowStub: public CodeStub {
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@ -66,7 +66,7 @@ class StoreBufferOverflowStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@ -77,7 +77,7 @@ class StoreBufferOverflowStub: public CodeStub {
};
class UnaryOpStub: public CodeStub {
class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@ -119,9 +119,9 @@ class UnaryOpStub: public CodeStub {
void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateHeapNumberStubSub(MacroAssembler* masm);
void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
void GenerateNumberStub(MacroAssembler* masm);
void GenerateNumberStubSub(MacroAssembler* masm);
void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
@ -142,108 +142,6 @@ class UnaryOpStub: public CodeStub {
};
class BinaryOpStub: public CodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
use_vfp2_ = CpuFeatures::IsSupported(VFP2);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
BinaryOpStub(
int key,
BinaryOpIC::TypeInfo operands_type,
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_vfp2_(VFP2Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
Token::Value op_;
OverwriteMode mode_;
bool use_vfp2_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class VFP2Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| VFP2Bits::encode(use_vfp2_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
void Generate(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateSmiSmiOperation(MacroAssembler* masm);
void GenerateFPOperation(MacroAssembler* masm,
bool smi_operands,
Label* not_numbers,
Label* gc_required);
void GenerateSmiCode(MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm,
Register result,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* gc_required);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
};
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@ -274,14 +172,14 @@ class StringHelper : public AllStatic {
int flags);
// Probe the symbol table for a two character string. If the string is
// Probe the string table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the symbol table. If the
// does not guarantee that the string is not in the string table. If the
// string is found, the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain a halfword with low and high bytes equal to the
// initial contents of c1 and c2 respectively.
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@ -321,7 +219,7 @@ enum StringAddFlags {
};
class StringAddStub: public CodeStub {
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@ -344,7 +242,7 @@ class StringAddStub: public CodeStub {
};
class SubStringStub: public CodeStub {
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@ -357,7 +255,7 @@ class SubStringStub: public CodeStub {
class StringCompareStub: public CodeStub {
class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@ -397,7 +295,7 @@ class StringCompareStub: public CodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@ -407,7 +305,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
scratch_(scratch) { }
bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int_;
@ -431,7 +329,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
};
class NumberToStringStub: public CodeStub {
class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@ -457,7 +355,7 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub: public CodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@ -481,7 +379,7 @@ class RecordWriteStub: public CodeStub {
};
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@ -571,12 +469,15 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2);
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
CpuFeatureScope scope(masm, VFP2);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
// Save all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
// TODO(hans): We should probably save d0 too. And maybe use vstm.
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
@ -586,15 +487,18 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2);
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
CpuFeatureScope scope(masm, VFP2);
// Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
// TODO(hans): We should probably restore d0 too. And maybe use vldm.
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
masm->add(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
@ -613,7 +517,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
@ -672,7 +576,7 @@ class RecordWriteStub: public CodeStub {
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
class RegExpCEntryStub: public CodeStub {
class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
@ -691,7 +595,7 @@ class RegExpCEntryStub: public CodeStub {
// keep the code that called into native code pinned in memory. Currently the
// simplest approach is to generate such a stub early enough that it can never
// be moved by the GC.
class DirectCEntryStub: public CodeStub {
class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
@ -724,20 +628,6 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
// Loads objects from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
// floating point registers VFP3 must be supported. If core registers are
// requested when VFP3 is supported d6 and d7 will still be scratched. If
// either r0 or r1 is not a number (not smi and not heap number object) the
// not_number label is jumped to with r0 and r1 intact.
static void LoadOperands(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number);
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@ -748,7 +638,8 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
Label* not_int32);
// Converts the integer (untagged smi) in |int_scratch| to a double, storing
@ -836,7 +727,12 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
private:
// Loads the objects from |object| into floating point registers.
// Depending on |destination| the value ends up either in |dst| or
// in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
// must be supported. If kCoreRegisters are requested and VFP3 is
// supported, |dst| will be scratched. If |object| is neither smi nor
// heap number, |not_number| is jumped to with |object| still intact.
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
@ -850,11 +746,11 @@ class FloatingPointHelper : public AllStatic {
};
class StringDictionaryLookupStub: public CodeStub {
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
@ -863,7 +759,7 @@ class StringDictionaryLookupStub: public CodeStub {
Label* done,
Register receiver,
Register properties,
Handle<String> name,
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
@ -881,14 +777,14 @@ class StringDictionaryLookupStub: public CodeStub {
static const int kTotalProbes = 20;
static const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return StringDictionaryLookup; }
Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
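// The StringDictionary -> NameDictionary and Handle<String> -> Handle<Name>
// changes above track the new Name type, the common base of String and
// Symbol used for property keys. A Handle<String> still converts implicitly
// to the Handle<Name> parameters, so string-keyed callers compile unchanged
// while symbol-keyed properties can flow through the same stub. (My reading
// of the rename, not V8 documentation.)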

305
deps/v8/src/arm/codegen-arm.cc

@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-arm.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@ -49,6 +49,75 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
#define __ masm.
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
return Simulator::current(Isolate::Current())->CallFP(
fast_exp_arm_machine_code, x, 0);
}
#endif
UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(VFP2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
CpuFeatureScope use_vfp(&masm, VFP2);
DwVfpRegister input = d0;
DwVfpRegister result = d1;
DwVfpRegister double_scratch1 = d2;
DwVfpRegister double_scratch2 = d3;
Register temp1 = r4;
Register temp2 = r5;
Register temp3 = r6;
if (masm.use_eabi_hardfloat()) {
// Input value is in d0 anyway, nothing to do.
} else {
__ vmov(input, r0, r1);
}
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(
&masm, input, result, double_scratch1, double_scratch2,
temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
if (masm.use_eabi_hardfloat()) {
__ vmov(d0, result);
} else {
__ vmov(r0, r1, result);
}
__ Ret();
}
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
fast_exp_arm_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
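CreateExpFunction is new here: it assembles a fast exp() into an OS::Allocate'd buffer at startup, flushes the instruction cache, seals the buffer via OS::ProtectCode, and returns a plain function pointer (or a simulator trampoline under USE_SIMULATOR). A hedged usage sketch:

// Hypothetical call site; the real one is in V8's runtime setup.
UnaryMathFunction fast_exp = CreateExpFunction();
double y = fast_exp(1.0);  // ~2.718281..., within the table approximation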
#undef __
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@ -73,8 +142,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
MacroAssembler* masm, AllocationSiteMode mode,
Label* allocation_site_info_found) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@ -83,6 +155,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_site_info_found != NULL);
__ TestJSArrayForAllocationSiteInfo(r2, r4);
__ b(eq, allocation_site_info_found);
}
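// My reading of the new helper: TestJSArrayForAllocationSiteInfo compares
// the map word placed directly behind the array against the
// allocation-site-info map and sets eq on a match, so transitions on
// tracked arrays divert to allocation_site_info_found and the site can
// record the ElementsKind change.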
// Set transitioned map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
@ -97,7 +175,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@ -109,6 +187,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationSiteInfo(r2, r4);
__ b(eq, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
@ -123,27 +206,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
// Align the array conveniently for doubles.
// Store a filler value in the unused memory.
Label aligned, aligned_done;
__ tst(r6, Operand(kDoubleAlignmentMask));
__ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
__ b(eq, &aligned);
// Store at the beginning of the allocated memory and update the base pointer.
__ str(ip, MemOperand(r6, kPointerSize, PostIndex));
__ b(&aligned_done);
__ bind(&aligned);
// Store the filler at the end of the allocated memory.
__ sub(lr, lr, Operand(kPointerSize));
__ str(ip, MemOperand(r6, lr));
__ bind(&aligned_done);
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
@ -192,7 +258,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
r3,
r9,
kLRHasBeenSaved,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@ -211,7 +277,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Normal smi, convert to double and store.
if (vfp2_supported) {
CpuFeatures::Scope scope(VFP2);
CpuFeatureScope scope(masm, VFP2);
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
@ -251,7 +317,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Label* fail) {
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@ -262,6 +328,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationSiteInfo(r2, r4);
__ b(eq, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
@ -397,7 +468,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ CompareRoot(result, Heap::kEmptyStringRootIndex);
__ CompareRoot(result, Heap::kempty_stringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
@ -416,7 +487,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, &external_string);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@ -450,8 +521,188 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value) {
if (FLAG_debug_code) {
__ tst(index, Operand(kSmiTagMask));
__ Check(eq, "Non-smi index");
__ tst(value, Operand(kSmiTagMask));
__ Check(eq, "Non-smi value");
__ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
__ cmp(index, ip);
__ Check(lt, "Index is too large");
__ cmp(index, Operand(Smi::FromInt(0)));
__ Check(ge, "Index is negative");
__ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, "Unexpected string type");
}
__ add(ip,
string,
Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ SmiUntag(value, value);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
if (encoding == String::ONE_BYTE_ENCODING) {
// Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
__ strb(value, MemOperand(ip, index, LSR, 1));
} else {
// No need to untag a smi for two-byte addressing.
__ strh(value, MemOperand(ip, index));
}
}
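Both store forms lean on the 32-bit smi representation (value << 1); a worked example of the index addressing:

// Worked example, assuming 32-bit smis with a one-bit tag:
// logical index 3 arrives as the smi 3 << 1 == 6.
//   one-byte store: byte offset = 6 >> 1 == 3  -> strb with "index, LSR #1"
//   two-byte store: byte offset = 3 * 2 == 6   -> strh uses the smi as-is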
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3) {
ASSERT(!input.is(result));
ASSERT(!input.is(double_scratch1));
ASSERT(!input.is(double_scratch2));
ASSERT(!result.is(double_scratch1));
ASSERT(!result.is(double_scratch2));
ASSERT(!double_scratch1.is(double_scratch2));
ASSERT(!temp1.is(temp2));
ASSERT(!temp1.is(temp3));
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
Label done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ vldr(double_scratch1, ExpConstant(0, temp3));
__ vmov(result, kDoubleRegZero);
__ VFPCompareAndSetFlags(double_scratch1, input);
__ b(ge, &done);
__ vldr(double_scratch2, ExpConstant(1, temp3));
__ VFPCompareAndSetFlags(input, double_scratch2);
__ vldr(result, ExpConstant(2, temp3));
__ b(ge, &done);
__ vldr(double_scratch1, ExpConstant(3, temp3));
__ vldr(result, ExpConstant(4, temp3));
__ vmul(double_scratch1, double_scratch1, input);
__ vadd(double_scratch1, double_scratch1, result);
__ vmov(temp2, temp1, double_scratch1);
__ vsub(double_scratch1, double_scratch1, result);
__ vldr(result, ExpConstant(6, temp3));
__ vldr(double_scratch2, ExpConstant(5, temp3));
__ vmul(double_scratch1, double_scratch1, double_scratch2);
__ vsub(double_scratch1, double_scratch1, input);
__ vsub(result, result, double_scratch1);
__ vmul(input, double_scratch1, double_scratch1);
__ vmul(result, result, input);
__ mov(temp1, Operand(temp2, LSR, 11));
__ vldr(double_scratch2, ExpConstant(7, temp3));
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
__ vldr(double_scratch2, ExpConstant(8, temp3));
__ vadd(result, result, double_scratch2);
__ movw(ip, 0x7ff);
__ and_(temp2, temp2, Operand(ip));
__ add(temp1, temp1, Operand(0x3ff));
__ mov(temp1, Operand(temp1, LSL, 20));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
__ add(temp3, temp3, Operand(kPointerSize));
__ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
__ orr(temp1, temp1, temp2);
__ vmov(input, ip, temp1);
__ vmul(result, result, input);
__ bind(&done);
}
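The 0x7ff mask and the shift by 11 give the scheme away: the input is scaled to units of ln(2)/2048, the low 11 bits index a 2048-entry 2^(i/2048) table read from math_exp_log_table, the high bits plus the 0x3ff bias are packed straight into the result's exponent field (the LSL 20 into the high word), and a short polynomial corrects the residual. A standalone C++ sketch of the same scheme, with the table recomputed inline and an illustrative polynomial, not V8's exact constants:

#include <cmath>
#include <cstdint>
#include <cstring>

// Hedged sketch of the table-driven exp above; constants and the closing
// polynomial are illustrative, not V8's exact values.
static double table_exp(double x) {
  double t = x * (2048.0 / M_LN2);                // x in units of ln(2)/2048
  int64_t n = std::llrint(t);                     // n = 2048*e + i
  double r = (t - double(n)) * (M_LN2 / 2048.0);  // tiny remainder
  double frac = std::exp2(double(n & 0x7ff) / 2048.0);  // the "table" entry
  uint64_t bits;
  std::memcpy(&bits, &frac, sizeof bits);
  bits += uint64_t(n >> 11) << 52;                // fold 2^(n>>11) into exponent
  double pow2;
  std::memcpy(&pow2, &bits, sizeof pow2);
  return pow2 * (1.0 + r + 0.5 * r * r);          // exp(r), short polynomial
}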
#undef __
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
static byte* GetNoCodeAgeSequence(uint32_t* length) {
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found in FUNCTIONS
static bool initialized = false;
static uint32_t sequence[kNoCodeAgeSequenceLength];
byte* byte_sequence = reinterpret_cast<byte*>(sequence);
*length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
if (!initialized) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
initialized = true;
}
return byte_sequence;
}
bool Code::IsYoungSequence(byte* sequence) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
bool result = !memcmp(sequence, young_sequence, young_length);
ASSERT(result ||
Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
return result;
}
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
*age = kNoAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
if (age == kNoAge) {
memcpy(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
}
}
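Spelling out the patcher calls above, an aged prologue reads:

//   add r0, pc, #-8       ; pc reads as "." + 8, so r0 = sequence start
//   ldr pc, [pc, #-4]     ; load the literal one word ahead and jump
//   .word stub->instruction_start()   ; emitted by dd()
// The first word assembles to kCodeAgePatchFirstInstruction (0xe24f0008),
// which is what IsYoungSequence's fallback ASSERT recognizes; presumably
// the age stub uses r0 to locate and mark the owning Code object.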
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

22
deps/v8/src/arm/codegen-arm.h

@ -44,6 +44,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
CodeGenerator() {
InitializeAstVisitor();
}
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@ -68,6 +72,8 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@ -88,6 +94,22 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_

8
deps/v8/src/arm/constants-arm.cc

@ -87,8 +87,8 @@ const char* Registers::Name(int reg) {
}
// Support for VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2"
// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
// Note that "sN:sM" is the same as "dN/2" up to d15.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
@ -97,7 +97,9 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
};

23
deps/v8/src/arm/constants-arm.h

@ -84,16 +84,25 @@ namespace v8 {
namespace internal {
// Constant pool marker.
const int kConstantPoolMarkerMask = 0xffe00000;
const int kConstantPoolMarker = 0x0c000000;
const int kConstantPoolLengthMask = 0x001ffff;
// Use UDF, the permanently undefined instruction.
const int kConstantPoolMarkerMask = 0xfff000f0;
const int kConstantPoolMarker = 0xe7f000f0;
const int kConstantPoolLengthMaxMask = 0xffff;
inline int EncodeConstantPoolLength(int length) {
ASSERT((length & kConstantPoolLengthMaxMask) == length);
return ((length & 0xfff0) << 4) | (length & 0xf);
}
inline int DecodeConstantPoolLength(int instr) {
ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
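The new marker reuses a permanently undefined (UDF) encoding, so a CPU that falls into a constant pool faults instead of executing data; the 16-bit length is split around the fixed 0xf nibble in bits 7:4. A worked round-trip for a 20-entry pool:

//   EncodeConstantPoolLength(20): ((20 & 0xfff0) << 4) | (20 & 0xf) == 0x104
//   emitted word: 0xe7f000f0 | 0x104 == 0xe7f001f4 (still matches the mask)
//   DecodeConstantPoolLength(0xe7f001f4):
//       ((0xe7f001f4 >> 4) & 0xfff0) | (0xe7f001f4 & 0xf) == 0x10 | 0x4 == 20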
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
// VFP support.
const int kNumVFPSingleRegisters = 32;
const int kNumVFPDoubleRegisters = 16;
const int kNumVFPDoubleRegisters = 32;
const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
@ -258,7 +267,8 @@ enum {
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
kOff12Mask = (1 << 12) - 1
kOff12Mask = (1 << 12) - 1,
kOff8Mask = (1 << 8) - 1
};
@ -455,6 +465,9 @@ extern const Instr kMovLrPc;
// ldr rd, [pc, #offset]
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
// vldr dd, [pc, #offset]
extern const Instr kVldrDPCMask;
extern const Instr kVldrDPCPattern;
// blxcc rm
extern const Instr kBlxRegMask;

2
deps/v8/src/arm/debug-arm.cc

@ -161,7 +161,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
__ mov(r0, Operand::Zero()); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);

500
deps/v8/src/arm/deoptimizer-arm.cc

@ -44,11 +44,14 @@ int Deoptimizer::patch_size() {
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
// The optimized code is going to be patched, so we cannot use it
// any more. Play safe and reset the whole cache.
@ -72,17 +75,17 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes =
MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
RelocInfo::NONE);
RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@ -91,8 +94,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
#endif
}
Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
@ -114,7 +115,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
@ -123,24 +123,21 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
// The call of the stack guard check has the following form:
// e1 5d 00 0c cmp sp, <limit>
// 2a 00 00 01 bcs ok
// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
if (FLAG_count_based_interrupts) {
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
} else {
ASSERT_EQ(kBranchBeforeStackCheck,
Memory::int32_at(pc_after - 3 * kInstrSize));
}
// We patch the code to the following form:
// e1 5d 00 0c cmp sp, <limit>
//
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
@ -177,15 +174,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
if (FLAG_count_based_interrupts) {
patcher.masm()->b(+16, pl);
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
} else {
patcher.masm()->b(+4, cs);
ASSERT_EQ(kBranchBeforeStackCheck,
Memory::int32_at(pc_after - 3 * kInstrSize));
}
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
@ -222,7 +213,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
optimized_code_->deoptimization_data());
compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@ -256,7 +247,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@ -348,7 +339,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
optimized_code_->entry() + pc_offset);
compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@ -365,342 +356,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
// Arguments adaptor can not be topmost or bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
uint32_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// A marker value is used in place of the context.
output_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
top_address + output_offset, output_offset, context);
}
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
ASSERT(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
uint32_t pc = reinterpret_cast<uint32_t>(
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc);
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = 8 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::CONSTRUCT);
// Construct stub can not be topmost or bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
uint32_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
// The output frame reflects a JSConstructStubGeneric frame.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
// Constructor function being invoked by the stub.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
top_address + output_offset, output_offset, value);
}
// The newly allocated object was passed as receiver in the artificial
// constructor stub environment created by HEnvironment::CopyForInlining().
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
ASSERT(0 == output_offset);
uint32_t pc = reinterpret_cast<uint32_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
}
void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame) {
JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
// The receiver (and the implicit return value, if any) are expected in
// registers by the LoadIC/StoreIC, so they don't belong to the output stack
// frame. This means that we have to use a height of 0.
unsigned height = 0;
unsigned height_in_bytes = height * kPointerSize;
const char* kind = is_setter_stub_frame ? "setter" : "getter";
if (FLAG_trace_deopt) {
PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
}
// We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type,
// code object, see MacroAssembler::EnterFrame). For a setter stub frame we
// need one additional entry for the implicit return value, see
// StoreStubCompiler::CompileStoreViaSetter.
unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, accessor);
output_frame->SetFrameType(StackFrame::INTERNAL);
// A frame for an accessor stub can not be the topmost or bottommost one.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
unsigned output_offset = output_frame_size;
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's fp\n",
fp_value, output_offset, value);
}
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; context\n",
top_address + output_offset, output_offset, value);
}
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; function (%s sentinel)\n",
top_address + output_offset, output_offset, value, kind);
}
// Get Code object from accessor stub.
output_offset -= kPointerSize;
Builtins::Name name = is_setter_stub_frame ?
Builtins::kStoreIC_Setter_ForDeopt :
Builtins::kLoadIC_Getter_ForDeopt;
Code* accessor_stub = isolate_->builtins()->builtin(name);
value = reinterpret_cast<intptr_t>(accessor_stub);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; code object\n",
top_address + output_offset, output_offset, value);
}
// Skip receiver.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
iterator->Skip(Translation::NumberOfOperandsFor(opcode));
if (is_setter_stub_frame) {
// The implicit return value was part of the artificial setter stub
// environment.
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
ASSERT(0 == output_offset);
Smi* offset = is_setter_stub_frame ?
isolate_->heap()->setter_stub_deopt_pc_offset() :
isolate_->heap()->getter_stub_deopt_pc_offset();
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
}
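// The 342 lines removed here (DoComputeArgumentsAdaptorFrame,
// DoComputeConstructStubFrame and DoComputeAccessorStubFrame) appear to have
// been hoisted into the platform-independent deoptimizer in this release,
// leaving only per-architecture hooks behind, such as
// SetPlatformCompiledStubRegisters() further down in this file.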
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
@ -718,7 +373,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
if (trace_) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@ -782,7 +437,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
@ -805,7 +460,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (is_topmost) {
output_frame->SetRegister(fp.code(), fp_value);
}
if (FLAG_trace_deopt) {
if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@ -823,7 +478,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(cp.code(), value);
if (FLAG_trace_deopt) {
if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@ -836,7 +491,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
@ -888,7 +543,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@ -899,6 +554,28 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->register_param_count_;
if (descriptor->stack_parameter_count_ != NULL) {
params++;
}
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}
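// In ARM terms, the stub-failure frame hands its trampoline:
//   r0 <- parameter count (register params, +1 when a stack count is passed)
//   r1 <- the stub's C++ deoptimization handler, resolved through an
//         ExternalReference so that simulator redirection still applies.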
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@ -908,7 +585,6 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@ -916,23 +592,25 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
// Save all VFP registers before messing with them.
DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
DwVfpRegister last =
DwVfpRegister::FromAllocationIndex(
DwVfpRegister::kNumAllocatableRegisters - 1);
ASSERT(last.code() > first.code());
ASSERT((last.code() - first.code()) ==
(DwVfpRegister::kNumAllocatableRegisters - 1));
#ifdef DEBUG
for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
(DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Save all allocatable VFP registers before messing with them.
ASSERT(kDoubleRegZero.code() == 14);
ASSERT(kScratchDoubleReg.code() == 15);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
// Push registers d0-d13, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d13);
} else {
__ sub(sp, sp, Operand(kDoubleRegsSize));
}
#endif
__ vstm(db_w, sp, first, last);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@ -949,7 +627,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
if (type() == EAGER) {
__ mov(r3, Operand(0));
__ mov(r3, Operand::Zero());
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else if (type() == OSR) {
@ -991,15 +669,18 @@ void Deoptimizer::EntryGenerator::Generate() {
__ str(r2, MemOperand(r1, offset));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Copy VFP registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
}
// Remove the bailout id, eventually return address, and the saved registers
// from the stack.
@ -1019,10 +700,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
@ -1039,27 +723,49 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: r0 = current "FrameDescription** output_",
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: r4 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
__ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
__ add(r1, r0, Operand(r1, LSL, 2));
__ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
__ add(r1, r4, Operand(r1, LSL, 2));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
__ ldr(r2, MemOperand(r0, 0)); // output_[ix]
__ ldr(r2, MemOperand(r4, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
__ cmp(r3, Operand(0));
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
__ add(r0, r0, Operand(kPointerSize));
__ cmp(r0, r1);
__ add(r4, r4, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ cmp(r4, r1);
__ b(lt, &outer_push_loop);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
int src_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
if (i == kDoubleRegZero.code()) continue;
if (i == kScratchDoubleReg.code()) continue;
const DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, r1, src_offset, i < 16 ? al : ne);
src_offset += kDoubleSize;
}
}
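// The "i < 16 ? al : ne" predicate pairs with CheckFor32DRegs above: d0-d15
// reload unconditionally, while each vldr for d16-d31 only executes when
// the CPU actually has them (Z clear). d14 and d15 are skipped because they
// serve as kDoubleRegZero and kScratchDoubleReg, never live across a deopt.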
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));

89
deps/v8/src/arm/disasm-arm.cc

@ -381,7 +381,16 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
} else if (format[1] == 'm') {
reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
if ((instr->TypeValue() == 7) &&
(instr->Bit(24) == 0x0) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(4) == 0x1)) {
// vmov.32 has Vd in a different place.
reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
} else {
reg = instr->VFPDRegValue(precision);
}
if (format[2] == '+') {
int immed8 = instr->Immed8Value();
if (format[0] == 'S') reg += immed8 - 1;
@ -1098,6 +1107,8 @@ int Decoder::DecodeType7(Instruction* instr) {
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
// Dd = vmla(Dn, Dm)
// Dd = vmls(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@ -1113,16 +1124,16 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'Dm");
Format(instr, "vmov'cond.f64 'Dd, 'Dm");
} else {
Format(instr, "vmov.f32'cond 'Sd, 'Sm");
Format(instr, "vmov'cond.f32 'Sd, 'Sm");
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
Format(instr, "vabs.f64'cond 'Dd, 'Dm");
Format(instr, "vabs'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
Format(instr, "vneg.f64'cond 'Dd, 'Dm");
Format(instr, "vneg'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@ -1134,10 +1145,10 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
(instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
} else if (instr->Opc3Value() == 0x0) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'd");
Format(instr, "vmov'cond.f64 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
}
@ -1147,22 +1158,34 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else if (instr->Opc1Value() == 0x3) {
if (instr->SzValue() == 0x1) {
if (instr->Opc3Value() & 0x1) {
Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
}
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
@ -1173,6 +1196,14 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLValue() == 0x0) &&
(instr->VCValue() == 0x1) &&
(instr->Bit(23) == 0x0)) {
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'Dd[0], 'rt");
} else {
Format(instr, "vmov'cond.32 'Dd[1], 'rt");
}
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
@ -1220,9 +1251,9 @@ void Decoder::DecodeVCMP(Instruction* instr) {
if (dp_operation && !raise_exception_for_qnan) {
if (instr->Opc2Value() == 0x4) {
Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
} else if (instr->Opc2Value() == 0x5) {
Format(instr, "vcmp.f64'cond 'Dd, #0.0");
Format(instr, "vcmp'cond.f64 'Dd, #0.0");
} else {
Unknown(instr); // invalid
}
@ -1239,9 +1270,9 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
bool double_to_single = (instr->SzValue() == 1);
if (double_to_single) {
Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
} else {
Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
}
}
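// The pervasive "vmov.f64'cond" -> "vmov'cond.f64" reorderings in this file
// bring the output in line with UAL assembly, where the condition sits
// between the mnemonic and the data-type suffix (objdump prints
// "vaddeq.f64 d0, d1, d2", not "vadd.f64eq ..."), matching the native
// disassembler formatting the register-name comment promises.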
@ -1258,15 +1289,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (dp_operation) {
if (unsigned_integer) {
Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
} else {
Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
}
} else {
if (unsigned_integer) {
Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
} else {
Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
}
}
} else {
@ -1274,15 +1305,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (dp_operation) {
if (unsigned_integer) {
Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
} else {
Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
}
} else {
if (unsigned_integer) {
Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
} else {
Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
}
}
}
@ -1336,7 +1367,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
if (instr->Bits(7, 4) != 0x1) {
if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
Unknown(instr); // Not used by V8.
} else if (instr->HasL()) {
Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
@ -1345,6 +1376,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
case 0x8:
case 0xA:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
@ -1352,6 +1384,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
case 0xC:
case 0xE:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
@ -1360,7 +1393,10 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
break;
case 0x4:
case 0x5:
case 0x9: {
case 0x6:
case 0x7:
case 0x9:
case 0xB: {
bool to_vfp_register = (instr->VLValue() == 0x1);
if (to_vfp_register) {
Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
@ -1388,7 +1424,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
return instruction_bits & kConstantPoolLengthMask;
return DecodeConstantPoolLength(instruction_bits);
} else {
return -1;
}
@ -1410,8 +1446,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
instruction_bits &
kConstantPoolLengthMask);
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {

9
deps/v8/src/arm/frames-arm.cc

@ -29,7 +29,12 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "assembler.h"
#include "assembler-arm.h"
#include "assembler-arm-inl.h"
#include "frames-inl.h"
#include "macro-assembler.h"
#include "macro-assembler-arm.h"
namespace v8 {
namespace internal {
@ -40,6 +45,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

30
deps/v8/src/arm/frames-arm.h

@ -134,20 +134,6 @@ class ExitFrameConstants : public AllStatic {
};
class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
// context and function.
static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = 1 * kPointerSize;
static const int kCallerSPOffset = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
@ -163,14 +149,30 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kLengthOffset = -4 * kPointerSize;
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
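Laid out against the standard frame slots, the construct frame these constants describe is (offsets in kPointerSize units; slot meanings cross-checked against the deoptimizer's construct-stub frame writer):

//   fp + 1 : caller's pc
//   fp + 0 : caller's fp
//   fp - 1 : context
//   fp - 2 : StackFrame::CONSTRUCT marker
//   fp - 3 : code object            (kCodeOffset)
//   fp - 4 : argument count         (kLengthOffset)
//   fp - 5 : constructor function   (kConstructorOffset)
//   fp - 6 : implicit receiver      (kImplicitReceiverOffset)
// Eight slots in all, matching kFrameSize == kFixedFrameSize + 4 words.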
class InternalFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};

329
deps/v8/src/arm/full-codegen-arm.cc

@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@ -138,7 +138,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@ -149,7 +149,7 @@ void FullCodeGenerator::Generate() {
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ cmp(r5, Operand::Zero());
__ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
@ -164,14 +164,19 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
if (locals_count > 0) {
info->set_prologue_offset(masm_->pc_offset());
{
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Load undefined value here, so the value is ready for the loop
// below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
}
// Adjust fp to point to caller's fp.
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
{ Comment cmnt(masm_, "[ Allocate locals");
for (int i = 0; i < locals_count; i++) {
@ -287,7 +292,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
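PredictableCodeSizeScope now takes an explicit byte count. A sketch of the contract (close to, but not a quote of, the real class; the -1 used further down plausibly means "size not checked"):

// Sketch: assert on destruction that exactly expected_size bytes were
// emitted, so patchable sequences keep a fixed length.
class PredictableCodeSizeScope {
 public:
  PredictableCodeSizeScope(MacroAssembler* masm, int expected_size)
      : masm_(masm),
        expected_size_(expected_size),
        start_offset_(masm->pc_offset()) {}
  ~PredictableCodeSizeScope() {
    ASSERT(expected_size_ < 0 ||  // -1: size not statically known
           masm_->pc_offset() - start_offset_ == expected_size_);
  }
 private:
  MacroAssembler* masm_;
  int expected_size_;
  int start_offset_;
};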
@ -342,14 +347,13 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
if (FLAG_count_based_interrupts) {
int weight = 1;
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
@ -361,23 +365,13 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
__ b(pl, &ok);
InterruptStub stub;
__ CallStub(&stub);
} else {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
}
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
RecordBackEdge(stmt->OsrEntryId());
if (FLAG_count_based_interrupts) {
EmitProfilingCounterReset();
}
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
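Net effect of this rewrite: the unconditional stack-limit check on loop back edges is gone and only the count-based interrupt remains. In pseudo-C (names from the surrounding code; the weight formula is an assumption, since the hunk only shows that weighted back edges consult the bound branch target):

// Sketch of the emitted logic, not a verbatim quote.
int weight = 1;
if (FLAG_weighted_back_edges) {
  // Hypothetical scaling: longer loop bodies burn the budget faster.
  weight = Min(kMaxBackEdgeWeight, distance_to_target / kBackEdgeDistanceUnit);
}
profiling_counter -= weight;
if (profiling_counter < 0) {   // the '__ b(pl, &ok)' above skips this path
  CallStub(InterruptStub());   // lets the runtime interrupt or trigger OSR
  profiling_counter = FLAG_interrupt_budget;  // EmitProfilingCounterReset()
}
// RecordBackEdge() happens at compile time: it maps this PC offset to the
// loop's OSR entry id for the deoptimizer, per the comment above.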
@ -439,7 +433,8 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
PredictableCodeSizeScope predictable(masm_);
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@ -680,7 +675,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
ToBooleanStub stub(result_register());
__ CallStub(&stub);
__ CallStub(&stub, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@ -914,34 +909,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
Handle<JSModule> instance = declaration->module()->interface()->Instance();
ASSERT(!instance.is_null());
Variable* variable = declaration->proxy()->var();
ASSERT(variable->location() == Variable::CONTEXT);
ASSERT(variable->interface()->IsFrozen());
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
globals_->Add(variable->name(), zone());
globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(r1, Operand(instance));
// Load instance object.
__ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
__ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
__ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
// Assign it.
__ str(r1, ContextOperand(cp, variable->index()));
Visit(declaration->module());
break;
}
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(cp,
Context::SlotOffset(variable->index()),
r1,
r3,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
// Traverse into body.
Visit(declaration->module());
}
@ -984,6 +978,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
__ CallRuntime(Runtime::kDeclareModules, 1);
// Return value is ignored.
}
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@ -1033,11 +1035,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
__ cmp(r0, Operand::Zero());
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
@ -1162,7 +1164,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
@ -1238,7 +1241,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
EmitStackCheck(stmt, &loop);
EmitBackEdgeBookkeeping(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@ -1391,9 +1394,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == CONST ||
local->mode() == CONST_HARMONY ||
local->mode() == LET) {
if (local->mode() == LET ||
local->mode() == CONST ||
local->mode() == CONST_HARMONY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
@ -1544,7 +1547,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@ -1588,12 +1591,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (flags != ObjectLiteral::kFastElements ||
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
@ -1627,7 +1631,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
@ -1642,8 +1646,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
// Fall through.
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
__ push(r0);
@ -1657,6 +1659,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
__ push(r0);
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetPrototype, 2);
} else {
__ Drop(2);
}
break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@ -1717,7 +1731,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
@ -1728,10 +1744,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
@ -1937,7 +1960,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@ -1989,7 +2012,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
__ cmp(scratch1, Operand(0));
__ cmp(scratch1, Operand::Zero());
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
@ -2021,7 +2044,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
@ -2029,7 +2052,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
// Invalid left-hand sides are rewritten to have a 'throw
// Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@ -2328,7 +2351,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
CallFunctionStub stub(arg_count, flags);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
__ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2374,7 +2397,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
if (proxy != NULL && proxy->var()->is_possibly_eval()) {
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@ -2523,7 +2546,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
}
@ -2678,14 +2701,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmp(r2, ip);
__ b(eq, if_false);
// Look for valueOf symbol in the descriptor array, and indicate false if
// Look for valueOf name in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(r3, r1);
__ cmp(r3, Operand(0));
__ cmp(r3, Operand::Zero());
__ b(eq, &done);
__ LoadInstanceDescriptors(r1, r4);
@ -2703,10 +2726,10 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
// The use of ip to store the valueOf symbol asumes that it is not otherwise
// string "valueOf" the result is false.
// The use of ip to store the valueOf string assumes that it is not otherwise
// used in the loop below.
__ mov(ip, Operand(FACTORY->value_of_symbol()));
__ mov(ip, Operand(FACTORY->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ ldr(r3, MemOperand(r4, 0));
@ -2741,6 +2764,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}
void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r2, SYMBOL_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@ -2941,12 +2986,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
__ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
__ LoadRoot(r0, Heap::kfunction_class_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
__ LoadRoot(r0, Heap::kObject_symbolRootIndex);
__ LoadRoot(r0, Heap::kObject_stringRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
@ -3008,7 +3053,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP2);
CpuFeatureScope scope(masm(), VFP2);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
@ -3016,7 +3061,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand(0, RelocInfo::NONE));
__ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
@ -3129,6 +3174,39 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(r2);
__ pop(r1);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
context()->Plug(r0);
}
void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(r2);
__ pop(r1);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
context()->Plug(r0);
}
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@ -3278,7 +3356,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
__ LoadRoot(result, Heap::kEmptyStringRootIndex);
__ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@ -3587,7 +3665,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length, SetCC);
__ b(ne, &non_trivial_array);
__ LoadRoot(r0, Heap::kEmptyStringRootIndex);
__ LoadRoot(r0, Heap::kempty_stringRootIndex);
__ b(&done);
__ bind(&non_trivial_array);
@ -3599,7 +3677,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ mov(string_length, Operand(0));
__ mov(string_length, Operand::Zero());
__ add(element,
elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
@ -3612,7 +3690,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
__ cmp(array_length, Operand(0));
__ cmp(array_length, Operand::Zero());
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
@ -3621,7 +3699,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
__ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
@ -3650,12 +3728,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// a smi but the other values are, so the result is a smi.
__ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
__ cmp(ip, Operand(0));
__ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
__ tst(scratch2, Operand(0x80000000));
__ b(ne, &bailout);
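The two branches above encode the rule from the comment: no overflow iff the top 33 bits of the 64-bit product are zero. The same predicate in plain C++:

// ip holds the high word (cmp ip, #0), scratch2 the low word
// (tst scratch2, #0x80000000); together the checks amount to:
inline bool ProductIsValidSmi(uint32_t a, uint32_t b) {
  uint64_t product = static_cast<uint64_t>(a) * b;
  return (product >> 31) == 0;  // top 33 bits clear => non-negative smi
}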
@ -3688,10 +3766,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ add(result_pos,
result,
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
__ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ cmp(scratch1, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@ -3707,7 +3785,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
__ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
@ -3717,7 +3797,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
__ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
__ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&one_char_separator_loop_entry);
@ -3737,7 +3817,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
__ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
@ -3758,14 +3840,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ add(string,
separator,
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
__ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
@ -3964,7 +4048,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->UnaryOperationFeedbackId());
context()->Plug(r0);
}
@ -4070,13 +4154,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
__ mov(r1, Operand(Smi::FromInt(count_value)));
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
CallIC(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@ -4191,13 +4278,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->string_symbol())) {
} else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(r0, if_false);
// Check for undetectable objects => false.
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
@ -4205,16 +4292,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->boolean_symbol())) {
} else if (check->Equals(isolate()->heap()->boolean_string())) {
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(eq, if_true);
__ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
check->Equals(isolate()->heap()->null_symbol())) {
check->Equals(isolate()->heap()->null_string())) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->undefined_symbol())) {
} else if (check->Equals(isolate()->heap()->undefined_string())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
@ -4224,19 +4311,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
} else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(r0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
} else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
}
if (FLAG_harmony_symbols) {
__ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
__ b(eq, if_true);
}
// Check for JS objects => true.
__ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ b(lt, if_false);
@ -4295,29 +4386,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cond = eq;
switch (op) {
case Token::EQ_STRICT:
case Token::EQ:
cond = eq;
break;
case Token::LT:
cond = lt;
break;
case Token::GT:
cond = gt;
break;
case Token::LTE:
cond = le;
break;
case Token::GTE:
cond = ge;
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
Condition cond = CompareIC::ComputeCondition(op);
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
@ -4333,11 +4402,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
__ cmp(r0, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
}

278
deps/v8/src/arm/ic-arm.cc

@ -64,7 +64,7 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
Register receiver,
Register elements,
Register t0,
@ -131,7 +131,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
StringDictionaryLookupStub::GeneratePositiveLookup(masm,
NameDictionaryLookupStub::GeneratePositiveLookup(masm,
miss,
&done,
elements,
@ -142,8 +142,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kElementsStartOffset = NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
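The offset arithmetic here (repeated in GenerateDictionaryStore below) leans on NameDictionary's usual entry shape of three pointer-sized slots per entry; sketched, with GeneratePositiveLookup leaving scratch2 at the matched entry:

// Per entry, starting at kElementsStartIndex:
//   slot 0: key (a Name)     <- scratch2 points here after the probe
//   slot 1: value
//   slot 2: details (a smi)  <- hence kDetailsOffset =
//                               kElementsStartOffset + 2 * kPointerSize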
@ -180,7 +180,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
StringDictionaryLookupStub::GeneratePositiveLookup(masm,
NameDictionaryLookupStub::GeneratePositiveLookup(masm,
miss,
&done,
elements,
@ -191,8 +191,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kElementsStartOffset = NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
@ -213,53 +213,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
support_wrappers);
// Cache miss: Jump to runtime.
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@ -350,30 +303,35 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
// Checks whether a key is an array index string or a symbol string.
// Falls through if a key is a symbol.
static void GenerateKeyStringCheck(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
Label* not_symbol) {
Label* not_unique) {
// The key is not a smi.
// Is it a string?
__ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
__ b(ge, not_symbol);
Label unique;
// Is it a name?
__ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
__ b(hi, not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ b(eq, &unique);
// Is the string an array index, with cached numeric value?
__ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
__ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
__ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
__ b(eq, index_string);
// Is the string a symbol?
// Is the string internalized?
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSymbolTag != 0);
__ tst(hash, Operand(kIsSymbolMask));
__ b(eq, not_symbol);
STATIC_ASSERT(kInternalizedTag != 0);
__ tst(hash, Operand(kIsInternalizedMask));
__ b(eq, not_unique);
__ bind(&unique);
}
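Restated as pseudo-C, the classification GenerateKeyNameCheck performs (control flow only; labels as in the code above):

if (instance_type > LAST_UNIQUE_NAME_TYPE) goto not_unique;  // not a name
if (instance_type == LAST_UNIQUE_NAME_TYPE) goto unique;     // a Symbol
if ((hash_field & Name::kContainsCachedArrayIndexMask) == 0)
  goto index_name;                       // string caching an array index
if ((instance_type & kIsInternalizedMask) == 0) goto not_unique;
goto unique;  // an internalized string is a usable unique name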
@ -474,7 +432,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
GenerateNameDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
// r0: elements
// Search the dictionary - put result in register r1.
@ -578,11 +536,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
Label check_number_dictionary, check_string, lookup_monomorphic_cache;
Label index_smi, index_string;
Label check_number_dictionary, check_name, lookup_monomorphic_cache;
Label index_smi, index_name;
// Check that the key is a smi.
__ JumpIfNotSmi(r2, &check_string);
__ JumpIfNotSmi(r2, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@ -629,10 +587,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ mov(r1, r0);
__ jmp(&do_call);
__ bind(&check_string);
GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
__ bind(&check_name);
GenerateKeyNameCheck(masm, r2, r0, r3, &index_name, &slow_call);
// The key is known to be a symbol.
// The key is known to be a unique name.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@ -660,14 +618,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
// - the key is neither smi nor symbol,
// - the key is neither smi nor a unique name,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
__ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
GenerateMiss(masm, argc);
__ bind(&index_string);
__ bind(&index_name);
__ IndexFromHash(r3, r2);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@ -680,10 +638,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- lr : return address
// -----------------------------------
// Check if the name is a string.
// Check if the name is really a name.
Label miss;
__ JumpIfSmi(r2, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
__ IsObjectNameType(r2, r0, &miss);
CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
@ -703,8 +661,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@ -722,7 +681,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
// r1: elements
GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
@ -862,7 +821,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(r0, r2);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
GenerateMiss(masm, MISS);
}
@ -891,7 +850,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
GenerateMiss(masm, MISS);
}
@ -925,7 +884,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@ -938,7 +897,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ Push(r1, r0);
// Perform tail call to the entry.
ExternalReference ref = force_generic
ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
@ -965,7 +924,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label slow, check_string, index_smi, index_string, property_array_property;
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = r0;
@ -974,7 +933,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_string);
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@ -1011,8 +970,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
1, r2, r3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
@ -1026,15 +985,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ b(eq, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
// based on 32 bits of the map pointer and the name hash.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
__ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(r3, r3, Operand(mask));
// Load the key (consisting of map and symbol) from the cache and
// Load the key (consisting of map and unique name) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
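Transliterated into C++, the bucket computation above (same arithmetic, register names dropped):

int bucket =
    ((reinterpret_cast<intptr_t>(map) >> KeyedLookupCache::kMapHashShift) ^
     (hash_field >> Name::kHashShift)) &
    (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);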
@ -1051,13 +1010,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
__ cmp(r2, r5);
__ b(ne, &try_next_entry);
__ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol
__ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
__ cmp(r0, r5);
__ b(eq, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
// Last entry: Load map and move r4 to symbol.
// Last entry: Load map and move r4 to name.
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
__ cmp(r2, r5);
__ b(ne, &slow);
@ -1119,11 +1078,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
__ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
1, r2, r3);
__ IncrementCounter(
isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
__ Ret();
__ bind(&index_string);
__ bind(&index_name);
__ IndexFromHash(r3, key);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@ -1158,7 +1117,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
GenerateMiss(masm, false);
GenerateMiss(masm, MISS);
}
@ -1198,11 +1157,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
GenerateMiss(masm, false);
GenerateMiss(masm, MISS);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@ -1213,7 +1172,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
ExternalReference ref = force_generic
ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@ -1249,7 +1208,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@ -1270,7 +1231,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@ -1379,7 +1342,6 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
receiver,
elements, // Overwritten.
r3, // Scratch regs...
r4,
@ -1407,7 +1369,9 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@ -1419,7 +1383,9 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@ -1433,7 +1399,8 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
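AllocationSiteInfo::GetMode is new here; it decides whether an elements-kind transition should keep allocation-site info up to date. A plausible sketch of the predicate (an assumption, not a quote):

// Sketch: track only when the flag is on and we leave a fast-smi kind for
// a more general one; anything else (e.g. double -> object above) opts out.
AllocationSiteMode GetMode(ElementsKind from, ElementsKind to) {
  if (FLAG_track_allocation_sites &&
      IsFastSmiElementsKind(from) &&
      IsMoreGeneralElementsKindTransition(from, to)) {
    return TRACK_ALLOCATION_SITE;
  }
  return DONT_TRACK_ALLOCATION_SITE;
}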
@ -1577,62 +1544,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
//
// This accepts as a receiver anything JSArray::SetElementsLength accepts
// (currently anything except for external arrays which means anything with
// elements of FixedArray type). Value must be a number, but only smis are
// accepted as the most common case.
Label miss;
Register receiver = r1;
Register value = r0;
Register scratch = r3;
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Check that the object is a JS array.
__ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
__ b(ne, &miss);
// Check that elements are FixedArray.
// We rely on StoreIC_ArrayLength below to deal with all types of
// fast elements (including COW).
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
__ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
__ b(ne, &miss);
// Check that the array has fast properties, otherwise the length
// property might have been redefined.
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
__ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
__ b(eq, &miss);
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ Push(receiver, value);
ExternalReference ref =
ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
GenerateMiss(masm);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@ -1642,7 +1553,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
Counters* counters = masm->isolate()->counters();
@ -1699,36 +1610,15 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
State previous_state = GetState();
State state = TargetState(previous_state, false, x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
if (state == KNOWN_OBJECTS) {
stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
}
rewritten = stub.GetCode();
}
set_target(*rewritten);
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[CompareIC (%s->%s)#%s]\n",
GetStateName(previous_state),
GetStateName(state),
Token::Name(op_));
}
#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
}
bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(cmp_instruction_address);
return Assembler::IsCmpImmediate(instr);
}

456
deps/v8/src/arm/lithium-arm.cc

@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@ -112,8 +112,12 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
if (InputAt(i) == NULL) {
stream->Add("NULL");
} else {
InputAt(i)->PrintTo(stream);
}
}
}
@ -177,6 +181,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
case Token::ROR: return "ror-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@ -285,6 +290,13 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
stream->Add(" + %d", offset());
}
void LCallConstantFunction::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
@ -296,6 +308,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
void LMathExp::PrintDataTo(StringStream* stream) {
value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@ -345,6 +362,17 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
ASSERT(hydrogen()->property_cell()->value()->IsSmi());
ElementsKind kind = static_cast<ElementsKind>(
Smi::cast(hydrogen()->property_cell()->value())->value());
stream->Add(" (%s) ", ElementsKindToString(kind));
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@ -372,20 +400,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
stream->Add("] <- ");
value()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
stream->Add(" + %d]", additional_index());
} else {
stream->Add("]");
}
}
void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
stream->Add(" + %d] <-", additional_index());
} else {
stream->Add("] <- ");
}
value()->PrintTo(stream);
}
@ -599,6 +634,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@ -639,8 +675,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
operand->set_virtual_register(allocator_->GetVirtualRegister());
if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
return NULL;
}
operand->set_virtual_register(vreg);
return operand;
}
@ -664,6 +704,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
}
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@ -894,7 +939,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
if (value->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new(zone()) LGoto(successor->block_id());
@ -949,6 +994,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LInstanceSize(object));
}
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@ -977,6 +1028,15 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
LInnerAllocatedObject* result =
new(zone()) LInnerAllocatedObject(base_object);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
return instr->HasNoUses()
? NULL
@ -985,7 +1045,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
// If there is a non-return use, the context must be allocated in a register.
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->IsReturn()) {
return DefineAsRegister(new(zone()) LContext);
}
}
return NULL;
}
@ -1033,6 +1100,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
} else if (op == kMathExp) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseTempRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
@ -1094,6 +1170,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* constructor = UseFixed(instr->constructor(), r1);
argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
@ -1108,6 +1192,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
LInstruction* LChunkBuilder::DoRor(HRor* instr) {
return DoShift(Token::ROR, instr);
}
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@ -1157,6 +1246,13 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div =
new(zone()) LDivI(value, UseOrConstant(instr->right()));
return AssignEnvironment(DefineSameAsFirst(div));
}
// TODO(1042) The fixed register allocation
// is needed because we call TypeRecordingBinaryOpStub from
// the generated code, which requires registers r0
@ -1213,18 +1309,27 @@ HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
// Only optimize when we have magic numbers for the divisor.
// The standard integer division routine is usually slower than transitioning
// to VFP.
if (divisor->IsConstant() &&
HConstant::cast(divisor)->HasInteger32Value()) {
if (CpuFeatures::IsSupported(SUDIV)) {
// A value with an integer representation does not need to be transformed.
if (divisor->representation().IsInteger32()) {
return divisor;
// A change from an integer32 can be replaced by the integer32 value.
} else if (divisor->IsChange() &&
HChange::cast(divisor)->from().IsInteger32()) {
return HChange::cast(divisor)->value();
}
}
if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
CpuFeatures::IsSupported(SUDIV)) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
}
return NULL;
}
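
For illustration of the "magic number" path above: dividing by a constant can be rewritten as a multiply-high plus a small fix-up, which is why the builder only keeps a constant divisor when HasMagicNumberForDivisor succeeds (or SUDIV hardware division is available). A minimal standalone sketch, assuming a divisor of 3; the constant 0x55555556 is the textbook value ceil(2^32 / 3), not something read out of V8:

#include <stdint.h>

// n / 3 without a divide instruction: multiply by ceil(2^32 / 3), keep the
// high 32 bits of the product, then add the sign bit to round toward zero.
int32_t DivideBy3(int32_t n) {
  int64_t product = static_cast<int64_t>(n) * 0x55555556LL;
  int32_t quotient = static_cast<int32_t>(product >> 32);
  return quotient + (static_cast<uint32_t>(n) >> 31);
}
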
@ -1232,11 +1337,14 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegisterOrConstant(right);
LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
? UseRegister(right)
: UseOrConstant(right);
LOperand* remainder = TempRegister();
ASSERT(right->IsConstant() &&
ASSERT(CpuFeatures::IsSupported(SUDIV) ||
(right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value() &&
HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
@ -1306,8 +1414,28 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() ||
instr->uses().value()->IsSub())) {
HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
if (use->IsAdd() && instr == use->left()) {
// This mul is the lhs of an add. The add and mul will be folded into a
// multiply-add in DoAdd.
return NULL;
}
if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
// This mul is the rhs of an add, where the lhs is not another mul.
// The add and mul will be folded into a multiply-add in DoAdd.
return NULL;
}
if (instr == use->right() && use->IsSub()) {
// This mul is the rhs of a sub. The sub and mul will be folded into a
// multiply-sub in DoSub.
return NULL;
}
}
return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
@ -1318,6 +1446,12 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
if (instr->left()->IsConstant()) {
// If lhs is constant, do reverse subtraction instead.
return DoRSub(instr);
}
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@ -1327,6 +1461,10 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
if (instr->right()->IsMul()) {
return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
}
return DoArithmeticD(Token::SUB, instr);
} else {
return DoArithmeticT(Token::SUB, instr);
@ -1334,6 +1472,44 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
// Note: The lhs of the subtraction becomes the rhs of the
// reverse-subtraction.
LOperand* left = UseRegisterAtStart(instr->right());
LOperand* right = UseOrConstantAtStart(instr->left());
LRSubI* rsb = new(zone()) LRSubI(left, right);
LInstruction* result = DefineAsRegister(rsb);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
}
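
The operand swap in DoRSub maps directly onto ARM's reverse-subtract instruction: RSB computes "operand2 - Rn", so "constant - x" keeps the constant in the immediate field instead of first materializing it in a register. A small sketch of the effect; the assembly in the comments is illustrative, not emitted code:

// Plain subtraction needs the constant loaded first:
//   mov r1, #k
//   sub r0, r1, r0    ; r0 = k - x
// Reverse subtraction does it in one instruction:
//   rsb r0, r0, #k    ; r0 = k - x
static inline int32_t ReverseSub(int32_t x, int32_t k) { return k - x; }
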
LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
LOperand* multiplier_op = UseRegisterAtStart(mul->left());
LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
LOperand* addend_op = UseRegisterAtStart(addend);
return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
multiplicand_op));
}
LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
LOperand* minuend_op = UseRegisterAtStart(minuend);
LOperand* multiplier_op = UseRegisterAtStart(mul->left());
LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
multiplier_op,
multiplicand_op));
}
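
Read together with DoMul and DoSub above: when a double multiply feeds straight into an add or sub, the builder emits nothing for the multiply and lets the consumer become a single multiply-accumulate instruction. What the two lowerings evaluate, written as plain C++ (ARM's vmla/vmls round the intermediate product, so this is an instruction-count win, not a fused-precision change):

double MultiplyAdd(double addend, double multiplier, double multiplicand) {
  return addend + multiplier * multiplicand;   // vmla.f64
}

double MultiplySub(double minuend, double multiplier, double multiplicand) {
  return minuend - multiplier * multiplicand;  // vmls.f64
}
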
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@ -1347,6 +1523,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
if (instr->left()->IsMul()) {
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
}
if (instr->right()->IsMul()) {
ASSERT(!instr->left()->IsMul());
return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
}
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
@ -1412,7 +1597,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@ -1566,6 +1751,27 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegister(instr->index());
LOperand* value = UseRegister(instr->value());
LSeqStringSetChar* result =
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
return NULL;
}
LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
HInductionVariableAnnotation* instr) {
return NULL;
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@ -1573,6 +1779,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
HBoundsCheckBaseIndexInformation* instr) {
UNREACHABLE();
return NULL;
}
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@ -1604,6 +1817,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@ -1628,6 +1842,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@ -1647,6 +1862,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@ -1689,10 +1905,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp1 = TempRegister();
LUnallocated* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(result);
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(Define(result, temp1));
}
@ -1702,6 +1918,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@ -1734,7 +1956,9 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new(zone()) LReturn(UseFixed(instr->value(), r0));
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
return new(zone()) LReturn(UseFixed(instr->value(), r0),
parameter_count);
}
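
LReturn gaining a parameter_count operand reflects that stubs may only learn their argument count at run time. A hedged sketch of the stack adjustment a return performs, assuming the usual layout of one receiver slot below the parameters:

#include <stdint.h>

// Stack pointer after returning: drop the parameters plus the receiver.
// With a dynamic count, the multiply happens in a register at run time.
intptr_t SpAfterReturn(intptr_t sp, intptr_t parameter_count,
                       intptr_t pointer_size) {
  return sp + (parameter_count + 1) * pointer_size;  // +1 for the receiver
}
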
@ -1860,35 +2084,23 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
LLoadKeyed* result = NULL;
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseTempRegister(instr->elements());
} else {
ASSERT(instr->representation().IsTagged());
obj = UseRegisterAtStart(instr->elements());
}
result = new(zone()) LLoadKeyed(obj, key);
} else {
ASSERT(
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
@ -1896,17 +2108,25 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// float->double conversion on non-VFP2 requires an extra scratch
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// after it's no longer needed after the float load.
bool needs_temp =
!CpuFeatures::IsSupported(VFP2) &&
(elements_kind == EXTERNAL_FLOAT_ELEMENTS);
LOperand* external_pointer = needs_temp
? UseTempRegister(instr->elements())
: UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt; make sure it
// has an environment.
return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
AssignEnvironment(load_instr) : load_instr;
bool can_deoptimize = instr->RequiresHoleCheck() ||
(elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
return can_deoptimize ? AssignEnvironment(result) : result;
}
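
The can_deoptimize condition above covers two hazards: reading a hole out of a fast-elements backing store, and an external unsigned int load whose value has no int32 representation. The second hazard in miniature, assuming the optimized code wants the result as a signed 32-bit integer:

#include <stdint.h>

// A uint32 element stays on the fast path only if it fits in int32;
// 0x80000000 and above force the deopt / heap-number path.
bool FitsInt32(uint32_t loaded) {
  return loaded <= 0x7FFFFFFFu;
}
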
@ -1920,43 +2140,32 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
HStoreKeyedFastElement* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
LOperand* key = NULL;
LOperand* val = NULL;
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
val = UseTempRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
object = UseTempRegister(instr->elements());
val = needs_write_barrier ? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
key = needs_write_barrier ? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFastElement(obj, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
}
return new(zone()) LStoreKeyed(object, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
@ -1964,22 +2173,15 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
ASSERT(instr->elements()->representation().IsExternal());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* key = UseRegisterOrConstant(instr->key());
return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
key,
val);
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
}
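
The UseTempRegister choices above hinge on NeedsWriteBarrier: a generational collector must record stores that make an old-space object point into new space, and the barrier sequence needs the key and value to stay live in registers. A deliberately simplified model of such a barrier, with illustrative types rather than V8's own:

#include <unordered_set>

struct Slot { void* value; bool in_old_space; };

struct MiniHeap {
  std::unordered_set<void*> new_space;   // toy new-space membership test
  std::unordered_set<Slot*> remembered;  // old-space slots into new space
  bool InNewSpace(void* p) { return new_space.count(p) != 0; }
};

void StoreWithBarrier(MiniHeap* heap, Slot* slot, void* value) {
  slot->value = value;
  if (slot->in_old_space && heap->InNewSpace(value)) {
    heap->remembered.insert(slot);  // the minor GC rescans these slots
  }
}
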
@ -1998,14 +2200,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
return DefineSameAsFirst(result);
} else if (FLAG_compiled_transitions) {
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, NULL, NULL);
return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), r0);
LOperand* fixed_object_reg = FixedTemp(r2);
@ -2014,11 +2218,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new(zone()) LTransitionElementsKind(object,
new_map_reg,
fixed_object_reg);
return MarkAsCall(DefineFixed(result, r0), instr);
return MarkAsCall(result, instr);
}
}
LInstruction* LChunkBuilder::DoTrapAllocationMemento(
HTrapAllocationMemento* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* temp = TempRegister();
LTrapAllocationMemento* result =
new(zone()) LTrapAllocationMemento(object, temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@ -2085,12 +2299,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
info()->MarkAsDeferredCalling();
LAllocateObject* result =
new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
}
@ -2133,8 +2358,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LParameter* result = new(zone()) LParameter;
if (instr->kind() == HParameter::STACK_PARAMETER) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(new(zone()) LParameter, spill_index);
return DefineAsSpilled(result, spill_index);
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
Register reg = descriptor->register_params_[instr->index()];
return DefineFixed(result, reg);
}
}
@ -2202,7 +2436,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
for (int i = 0; i < instr->values()->length(); ++i) {
for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
@ -2246,8 +2480,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind(),
instr->inlining_kind());
instr->inlining_kind(),
instr->undefined_receiver());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}

369
deps/v8/src/arm/lithium-arm.h

@ -50,6 +50,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@ -67,6 +68,7 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
@ -93,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(DummyUse) \
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
@ -106,6 +109,7 @@ class LCodeGen;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
@ -125,18 +129,19 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedFastDoubleElement) \
V(LoadKeyedFastElement) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
V(MultiplySubD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
@ -150,6 +155,7 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@ -157,10 +163,8 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@ -169,11 +173,13 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@ -185,7 +191,8 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(DateField) \
V(WrapReceiver) \
V(Drop)
V(Drop) \
V(InnerAllocatedObject)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@ -255,6 +262,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
@ -398,6 +410,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
class LDummyUse: public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
};
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
@ -625,6 +646,42 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
// Instruction for computing multiplier * multiplicand + addend.
class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
inputs_[0] = addend;
inputs_[1] = multiplier;
inputs_[2] = multiplicand;
}
LOperand* addend() { return inputs_[0]; }
LOperand* multiplier() { return inputs_[1]; }
LOperand* multiplicand() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
};
// Instruction for computing minuend - multiplier * multiplicand.
class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
public:
LMultiplySubD(LOperand* minuend, LOperand* multiplier,
LOperand* multiplicand) {
inputs_[0] = minuend;
inputs_[1] = multiplier;
inputs_[2] = multiplicand;
}
LOperand* minuend() { return inputs_[0]; }
LOperand* multiplier() { return inputs_[1]; }
LOperand* multiplicand() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
};
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@ -640,7 +697,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@ -665,6 +722,30 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
class LMathExp: public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
LOperand* temp1,
LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
temps_[2] = double_temp;
ExternalReference::InitializeMathExpData();
}
LOperand* value() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
virtual void PrintDataTo(StringStream* stream);
};
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@ -921,6 +1002,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
};
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@ -989,6 +1083,21 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
class LRSubI: public LTemplateInstruction<1, 2, 0> {
public:
LRSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
@ -1142,6 +1251,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
LOperand* index,
LOperand* value) : encoding_(encoding) {
inputs_[0] = string;
inputs_[1] = index;
inputs_[2] = value;
}
String::Encoding encoding() { return encoding_; }
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
private:
String::Encoding encoding_;
};
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@ -1266,14 +1399,24 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
};
class LReturn: public LTemplateInstruction<0, 1, 0> {
class LReturn: public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value) {
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
inputs_[1] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
bool has_constant_parameter_count() {
return parameter_count()->IsConstantOperand();
}
LConstantOperand* constant_parameter_count() {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
LOperand* parameter_count() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@ -1357,58 +1500,26 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
"load-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
bool is_external() const {
return hydrogen()->is_external();
}
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
"load-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1545,6 +1656,22 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
}
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@ -1555,6 +1682,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
@ -1717,6 +1845,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
};
class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
}
LOperand* constructor() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
@ -1788,6 +1933,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
@ -1922,51 +2068,28 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
};
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedFastDoubleElement(LOperand* elements,
LOperand* key,
LOperand* value) {
inputs_[0] = elements;
inputs_[1] = key;
inputs_[2] = value;
}
bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
"store-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1990,37 +2113,15 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key,
LOperand* value) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = value;
}
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
"store-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
LOperand* temp) {
LOperand* fixed_object_temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
temps_[1] = temp;
temps_[1] = fixed_object_temp;
}
LOperand* object() { return inputs_[0]; }
@ -2035,6 +2136,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
inputs_[0] = object;
temps_[0] = temp;
}
LOperand* object() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
"trap-allocation-memento")
};
@ -2134,7 +2253,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@ -2147,8 +2266,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
Handle<JSObject> holder() const { return hydrogen()->holder(); }
ZoneList<Handle<JSObject> >* prototypes() const {
return hydrogen()->prototypes();
}
ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
};
@ -2216,7 +2337,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
public:
LAllocateObject(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@ -2231,6 +2352,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
};
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[1] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* size() { return inputs_[1]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@ -2355,8 +2493,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
LOperand* register_spills_[Register::kNumAllocatableRegisters];
LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
LOperand* double_register_spills_[
DoubleRegister::kMaxNumAllocatableRegisters];
};
@ -2478,6 +2617,10 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
LInstruction* DoRSub(HSub* instr);
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);

1972
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

54
deps/v8/src/arm/lithium-codegen-arm.h

@ -54,6 +54,7 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@ -61,6 +62,7 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@ -76,6 +78,15 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
@ -84,12 +95,12 @@ class LCodeGen BASE_EMBEDDED {
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
DwVfpRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
DwVfpRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
@ -128,10 +139,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
void DoCheckMapCommon(Register map_reg, Handle<Map> map,
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
@ -193,7 +205,6 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@ -267,15 +278,17 @@ class LCodeGen BASE_EMBEDDED {
LOperand* op,
bool is_tagged,
bool is_uint32,
bool arguments_known,
int arguments_index,
int arguments_count);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@ -308,14 +321,11 @@ class LCodeGen BASE_EMBEDDED {
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
DoubleRegister result,
DwVfpRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
void DeoptIfTaggedButNotSmi(LEnvironment* environment,
HValue* value,
LOperand* operand);
LEnvironment* env,
NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@ -355,7 +365,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset);
int* offset,
AllocationSiteMode mode);
// Emit optimized code for integer division.
// Inputs are signed.
@ -369,14 +380,24 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment);
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
address(entry) { }
address(entry),
needs_frame(frame),
is_lazy_deopt(is_lazy) { }
Label label;
Address address;
bool needs_frame;
bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
@ -389,6 +410,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
@ -396,6 +418,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@ -411,6 +434,7 @@ class LCodeGen BASE_EMBEDDED {
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;

9
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@ -171,8 +171,10 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@ -192,8 +194,10 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@ -229,6 +233,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
// ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
@ -267,7 +272,8 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
CpuFeatureScope scope(cgen_->masm(), VFP2);
DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
@ -276,6 +282,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);

778
deps/v8/src/arm/macro-assembler-arm.cc

File diff suppressed because it is too large

188
deps/v8/src/arm/macro-assembler-arm.h

@ -54,20 +54,6 @@ inline Operand SmiUntagOperand(Register object) {
const Register cp = { 8 }; // JavaScript context pointer
const Register kRootRegister = { 10 }; // Roots array pointer.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return the pointer to the allocated already tagged as a heap object.
TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
RESULT_CONTAINS_TOP = 1 << 1,
// Specify that the requested size of the space to allocate is specified in
// words instead of bytes.
SIZE_IN_WORDS = 1 << 2
};
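
The AllocationFlags enum is removed from this header here but still appears in the new Allocate declaration below, so it has moved rather than vanished. Its values are distinct bits precisely so call sites can OR them together, at the cost of a cast because bitwise OR on enums yields an int in C++. A usage sketch restating the enumerators above:

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  TAG_OBJECT = 1 << 0,
  RESULT_CONTAINS_TOP = 1 << 1,
  SIZE_IN_WORDS = 1 << 2
};

// Each flag is its own bit, so requests compose with bitwise OR:
AllocationFlags flags =
    static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS);
bool tag_result = (flags & TAG_OBJECT) != 0;        // true
bool size_in_words = (flags & SIZE_IN_WORDS) != 0;  // true
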
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
@ -178,7 +164,7 @@ class MacroAssembler: public Assembler {
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
void Move(DwVfpRegister dst, DwVfpRegister src);
// Load an object from the root table.
void LoadRoot(Register destination,
@ -322,6 +308,7 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@ -473,11 +460,6 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
// Clear specified FPSCR bits.
void ClearFPSCRBits(const uint32_t bits_to_clear,
const Register scratch,
const Condition cond = al);
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
@ -498,8 +480,7 @@ class MacroAssembler: public Assembler {
void Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch = no_reg,
const Condition cond = al);
const Register scratch = no_reg);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@ -533,6 +514,7 @@ class MacroAssembler: public Assembler {
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@ -596,6 +578,10 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
void IsObjectNameType(Register object,
Register scratch,
Label* fail);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@ -679,19 +665,20 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space. The object_size is specified
// either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the new space is exhausted control continues at the
// gc_required label. The allocated object is returned in result. If
// the flag tag_allocated_object is true the result is tagged as
// a heap object. All registers are clobbered also when control
// continues at the gc_required label.
void AllocateInNewSpace(int object_size,
// Allocate an object in new space or old pointer space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as a heap object.
// All registers are clobbered also when control continues at the gc_required
// label.
void Allocate(int object_size,
Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags);
void AllocateInNewSpace(Register object_size,
Register result,
Register scratch1,
@ -831,14 +818,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail);
Label* fail,
int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
@ -893,12 +880,15 @@ class MacroAssembler: public Assembler {
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string.
// Returns a condition that will be enabled if the object was a string
// and the passed-in condition passed. If the passed-in condition failed
// then flags remain unchanged.
Condition IsObjectStringType(Register obj,
Register type) {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
tst(type, Operand(kIsNotStringMask));
Register type,
Condition cond = al) {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
tst(type, Operand(kIsNotStringMask), cond);
ASSERT_EQ(0, kStringTag);
return eq;
}
@ -944,51 +934,55 @@ class MacroAssembler: public Assembler {
Register scratch1,
SwVfpRegister scratch2);
// Convert the HeapNumber pointed to by source to a 32bits signed integer
// dest. If the HeapNumber does not fit into a 32bits signed integer branch
// to not_int32 label. If VFP3 is available double_scratch is used but not
// scratch2.
void ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
// Check if a double can be exactly represented as a signed 32-bit integer.
// Z flag set to one if true.
void TestDoubleIsInt32(DwVfpRegister double_input,
DwVfpRegister double_scratch);
// Try to convert a double to a signed 32-bit integer.
// Z flag set to one and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch);
// Floors a double and writes the value to the result register.
// Go to exact if the conversion is exact (to be able to test -0),
// fall through calling code if an overflow occurred, else go to done.
void TryInt32Floor(Register result,
DwVfpRegister double_input,
Register input_high,
DwVfpRegister double_scratch,
Label *not_int32);
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// Clears the z flag (ne condition) if an overflow occurs.
// If kCheckForInexactConversion is passed, the z flag is also cleared if the
// conversion was inexact, i.e. if the double value could not be converted
// exactly to a 32-bit integer.
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
Label* done,
Label* exact);
// Performs a truncating conversion of a heap floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void ECMAConvertNumberToInt32(Register source,
Register result,
DwVfpRegister double_input,
Register scratch,
DwVfpRegister double_scratch,
CheckForInexactConversion check
= kDontCheckForInexactConversion);
// Helper for EmitECMATruncate.
// This will truncate a floating-point value outside of the signed 32bit
// integer range to a 32bit signed integer.
// Expects the double value loaded in input_high and input_low.
// Exits with the answer in 'result'.
// Note that this code does not work for values in the 32bit range!
void EmitOutOfInt32RangeTruncate(Register result,
Register input_high,
Register input_low,
Register scratch);
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer and all other registers clobbered.
void EmitECMATruncate(Register result,
void ECMAToInt32VFP(Register result,
DwVfpRegister double_input,
SwVfpRegister single_scratch,
DwVfpRegister double_scratch,
Register scratch,
Register scratch2,
Register scratch3);
Register input_high,
Register input_low);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void ECMAToInt32NoVFP(Register result,
Register scratch,
Register input_high,
Register input_low);
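
Both ECMAToInt32 variants implement the same mapping, ECMA-262 9.5 ToInt32: truncate toward zero, wrap modulo 2^32, and reinterpret into the signed range. A portable reference version of that mapping, using only the standard library:

#include <cmath>
#include <cstdint>

// ECMA-262 9.5 ToInt32: NaN, infinities and zeros map to 0; everything
// else truncates toward zero and wraps into [-2^31, 2^31).
int32_t EcmaToInt32(double d) {
  if (std::isnan(d) || std::isinf(d) || d == 0) return 0;
  double truncated = (d >= 0) ? std::floor(d) : std::ceil(d);
  double m = std::fmod(truncated, 4294967296.0);  // into (-2^32, 2^32)
  if (m < 0) m += 4294967296.0;                   // into [0, 2^32)
  return (m >= 2147483648.0)
             ? static_cast<int32_t>(m - 4294967296.0)
             : static_cast<int32_t>(m);
}
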
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
@ -999,11 +993,18 @@ class MacroAssembler: public Assembler {
Register source,
Register scratch);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
void CallStub(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
@ -1054,9 +1055,9 @@ class MacroAssembler: public Assembler {
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void SetCallCDoubleArguments(DoubleRegister dreg);
void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
void SetCallCDoubleArguments(DwVfpRegister dreg);
void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@ -1072,7 +1073,7 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
void GetCFunctionDoubleResult(const DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
@ -1202,7 +1203,7 @@ class MacroAssembler: public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Jump the register contains a smi.
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
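The test works because of V8's pointer tagging; a small sketch of the predicate the tst/b(eq) pair computes, assuming the 32-bit smi layout:

#include <cstdint>

// On 32-bit V8 a smi is stored as (value << 1) with tag bit 0 clear, while
// heap object pointers carry a set low bit, so one AND decides the jump.
bool is_smi(uint32_t word) {
  const uint32_t kSmiTagMask = 1;    // the tst immediate above
  return (word & kSmiTagMask) == 0;  // tst ...; b(eq, smi_label)
}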
@ -1221,9 +1222,12 @@ class MacroAssembler: public Assembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
// Abort execution if argument is a string, enabled via --debug-code.
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
@ -1285,8 +1289,8 @@ class MacroAssembler: public Assembler {
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
DoubleRegister input_reg,
DoubleRegister temp_double_reg);
DwVfpRegister input_reg,
DwVfpRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
@ -1309,6 +1313,15 @@ class MacroAssembler: public Assembler {
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
// AllocationSiteInfo support. Arrays may have an associated
// AllocationSiteInfo object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, condition flags are set to eq
void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
Register scratch_reg);
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@ -1350,6 +1363,16 @@ class MacroAssembler: public Assembler {
// it. See the implementation for register usage.
void JumpToHandlerEntry();
// Helper for ECMAToInt32VFP and ECMAToInt32NoVFP.
// It is expected that 31 <= exponent <= 83, and scratch is exponent - 1.
void ECMAToInt32Tail(Register result,
Register scratch,
Register input_high,
Register input_low,
Label* out_of_range,
Label* negate,
Label* done);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
@ -1361,9 +1384,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
// Needs access to SafepointRegisterStackIndex for optimized frame
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class OptimizedFrame;
friend class StandardFrame;
};
@ -1392,7 +1415,6 @@ class CodePatcher {
private:
byte* address_; // The address of the code being patched.
int instructions_; // Number of instructions of the expected patch size.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};

59
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -204,7 +204,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, &not_at_start);
// If we did, are we still at the start of the input?
@ -219,7 +219,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@ -261,7 +261,7 @@ void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
for (int i = 0; i < str.length(); i++) {
if (mode_ == ASCII) {
__ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
ASSERT(str[i] <= String::kMaxAsciiCharCode);
ASSERT(str[i] <= String::kMaxOneByteCharCode);
__ cmp(r1, Operand(str[i]));
} else {
__ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
@ -337,8 +337,13 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ b(ne, &fail);
__ sub(r3, r3, Operand('a'));
__ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
__ b(hi, &fail);
__ b(ls, &loop_check); // In range 'a'-'z'.
// Latin-1: Check for values in range [224,254] but not 247.
__ sub(r3, r3, Operand(224 - 'a'));
__ cmp(r3, Operand(254 - 224));
__ b(hi, &fail); // Weren't Latin-1 letters.
__ cmp(r3, Operand(247 - 224)); // Check for 247.
__ b(eq, &fail);
__ bind(&loop_check);
__ cmp(r0, r1);
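The sub/cmp pairs above use the standard unsigned range-check idiom: subtracting the lower bound maps the range onto [0, hi - lo], so a single unsigned comparison tests membership. The same test as an illustrative C++ helper, not V8 code:

#include <cstdint>

// After subtracting the lower bound, one unsigned comparison covers both
// ends of the range, which is what the sub/cmp/b(ls) sequences emit.
bool in_range(uint8_t c, uint8_t lo, uint8_t hi) {
  return static_cast<uint8_t>(c - lo) <= static_cast<uint8_t>(hi - lo);
}
// The case-insensitive letter check above is then roughly:
// in_range(c, 'a', 'z') || (in_range(c, 224, 254) && c != 247)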
@ -385,7 +390,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
}
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
// On success, increment position by length of capture.
__ add(current_input_offset(), current_input_offset(), Operand(r4));
@ -508,7 +513,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ mov(r0, Operand(table));
if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ and_(r1, current_character(), Operand(kTableSize - 1));
__ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
} else {
@ -517,7 +522,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Operand(ByteArray::kHeaderSize - kHeapObjectTag));
}
__ ldrb(r0, MemOperand(r0, r1));
__ cmp(r0, Operand(0));
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_bit_set);
}
@ -530,29 +535,23 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
case 's':
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
// Check range 0x09..0x0d
__ sub(r0, current_character(), Operand('\t'));
__ cmp(r0, Operand('\r' - '\t'));
BranchOrBacktrack(hi, on_no_match);
__ b(ls, &success);
// \u00a0 (NBSP).
__ cmp(r0, Operand(0x00a0 - '\t'));
BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
}
return false;
case 'S':
// Match non-space characters.
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmp(current_character(), Operand(' '));
BranchOrBacktrack(eq, on_no_match);
__ sub(r0, current_character(), Operand('\t'));
__ cmp(r0, Operand('\r' - '\t'));
BranchOrBacktrack(ls, on_no_match);
return true;
}
// The emitted code for generic character classes is good enough.
return false;
case 'd':
// Match ASCII digits ('0'..'9')
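As a plain predicate, the one-byte space class that the 's' and 'S' cases above match can be sketched as follows (hypothetical helper name):

#include <cstdint>

// One-byte \s as emitted above: ' ', the control range '\t'..'\r'
// (0x09..0x0d), and the Latin-1 no-break space 0x00a0.
bool is_one_byte_space(uint16_t c) {
  uint16_t shifted = c - '\t';       // mirrors sub(r0, ..., '\t')
  return c == ' ' ||
         shifted <= '\r' - '\t' ||   // 0x09..0x0d
         shifted == 0x00a0 - '\t';   // NBSP
}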
@ -613,7 +612,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
__ cmp(r0, Operand(0));
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
return true;
}
@ -627,7 +626,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
__ cmp(r0, Operand(0));
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_no_match);
if (mode_ != ASCII) {
__ bind(&done);
@ -675,7 +674,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(r0, Operand(0, RelocInfo::NONE));
__ mov(r0, Operand::Zero());
__ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
// Check if we have space on the stack for registers.
@ -700,7 +699,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &return_r0);
@ -728,7 +727,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ cmp(r1, Operand(0, RelocInfo::NONE));
__ cmp(r1, Operand::Zero());
__ b(ne, &load_char_start_regexp);
__ mov(current_character(), Operand('\n'), LeaveCC, eq);
__ jmp(&start_regexp);
@ -834,7 +833,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Not a zero-length match, restart.
__ b(ne, &load_char_start_regexp);
// Offset from the end is zero if we already reached the end.
__ cmp(current_input_offset(), Operand(0));
__ cmp(current_input_offset(), Operand::Zero());
__ b(eq, &exit_label_);
// Advance current position after a zero-length match.
__ add(current_input_offset(),
@ -873,7 +872,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
SafeCallTarget(&check_preempt_label_);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &return_r0);
@ -900,7 +899,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If it returned NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand::Zero());
__ b(eq, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), r0);
@ -1150,7 +1149,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
bool is_ascii = subject->IsAsciiRepresentationUnderneath();
bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@ -1181,7 +1180,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
}
// String might have changed.
if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).

272
deps/v8/src/arm/simulator-arm.cc

@ -34,6 +34,7 @@
#include "disasm.h"
#include "assembler.h"
#include "codegen.h"
#include "arm/constants-arm.h"
#include "arm/simulator-arm.h"
@ -398,7 +399,7 @@ void ArmDebugger::Debug() {
int32_t words;
if (argc == next_arg) {
words = 10;
} else if (argc == next_arg + 1) {
} else {
if (!GetValue(argv[next_arg], &words)) {
words = 10;
}
@ -764,8 +765,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// All registers are initialized to zero to start with
// even though s_registers_ & d_registers_ share the same
// physical registers in the target.
for (int i = 0; i < num_s_registers; i++) {
vfp_register[i] = 0;
for (int i = 0; i < num_d_registers * 2; i++) {
vfp_registers_[i] = 0;
}
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
@ -900,7 +901,7 @@ double Simulator::get_double_from_register_pair(int reg) {
double dm_val = 0.0;
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
@ -935,13 +936,13 @@ int32_t Simulator::get_pc() const {
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
vfp_register[sreg] = value;
vfp_registers_[sreg] = value;
}
unsigned int Simulator::get_s_register(int sreg) const {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
return vfp_register[sreg];
return vfp_registers_[sreg];
}
@ -949,12 +950,12 @@ template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < num_d_registers);
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_register[0])];
memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
memcpy(&vfp_register[reg_index * register_size], buffer,
register_size * sizeof(vfp_register[0]));
char buffer[register_size * sizeof(vfp_registers_[0])];
memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
memcpy(&vfp_registers_[reg_index * register_size], buffer,
register_size * sizeof(vfp_registers_[0]));
}
@ -962,13 +963,13 @@ template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < num_d_registers);
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_register[0])];
memcpy(buffer, &vfp_register[register_size * reg_index],
register_size * sizeof(vfp_register[0]));
memcpy(&value, buffer, register_size * sizeof(vfp_register[0]));
char buffer[register_size * sizeof(vfp_registers_[0])];
memcpy(buffer, &vfp_registers_[register_size * reg_index],
register_size * sizeof(vfp_registers_[0]));
memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
@ -977,8 +978,8 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
// from r0-r3 or d0 and d1.
void Simulator::GetFpArgs(double* x, double* y) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
*y = vfp_register[1];
*x = vfp_registers_[0];
*y = vfp_registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@ -996,7 +997,7 @@ void Simulator::GetFpArgs(double* x, double* y) {
// from r0 and r1 or d0.
void Simulator::GetFpArgs(double* x) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
*x = vfp_registers_[0];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@ -1012,7 +1013,7 @@ void Simulator::GetFpArgs(double* x) {
// from r0 and r1 or d0 and one integer value.
void Simulator::GetFpArgs(double* x, int32_t* y) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
*x = vfp_registers_[0];
*y = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
@ -1031,10 +1032,10 @@ void Simulator::GetFpArgs(double* x, int32_t* y) {
// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_register[0])];
char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// Copy result to d0.
memcpy(vfp_register, buffer, sizeof(buffer));
memcpy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
@ -1387,7 +1388,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
UNIMPLEMENTED();
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
result = right | left;
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;
}
@ -1459,7 +1467,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
UNIMPLEMENTED();
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
result = right | left;
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;
}
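The newly implemented ROR is the ordinary 32-bit rotate; a compact equivalent of what both copies of this code compute:

#include <cstdint>

// ROR #n for 1 <= n <= 31: bits shifted off the bottom re-enter at the top.
// n == 0 is special-cased above because value << 32 would be undefined.
uint32_t rotate_right(uint32_t value, unsigned n) {
  return (value >> n) | (value << (32 - n));
}
// The carry out is bit 31 of the rotated result, as both branches compute.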
@ -1599,15 +1614,19 @@ void Simulator::HandleVList(Instruction* instr) {
address += 1;
} else {
if (load) {
set_s_register_from_sinteger(
2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
set_s_register_from_sinteger(
2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
} else {
WriteW(reinterpret_cast<int32_t>(address),
get_sinteger_from_s_register(2 * reg), instr);
WriteW(reinterpret_cast<int32_t>(address + 1),
get_sinteger_from_s_register(2 * reg + 1), instr);
int32_t data[] = {
ReadW(reinterpret_cast<int32_t>(address), instr),
ReadW(reinterpret_cast<int32_t>(address + 1), instr)
};
double d;
memcpy(&d, data, 8);
set_d_register_from_double(reg, d);
} else {
int32_t data[2];
double d = get_double_from_d_register(reg);
memcpy(data, &d, 8);
WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
address += 2;
}
@ -1673,18 +1692,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
arg0 = vfp_register[0];
arg1 = vfp_register[1];
arg2 = vfp_register[2];
arg3 = vfp_register[3];
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
arg2 = vfp_registers_[2];
arg3 = vfp_registers_[3];
break;
case ExternalReference::BUILTIN_FP_CALL:
arg0 = vfp_register[0];
arg1 = vfp_register[1];
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
arg0 = vfp_register[0];
arg1 = vfp_register[1];
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
arg2 = get_register(0);
break;
default:
@ -1762,7 +1781,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, (int32_t) *result);
set_register(r0, reinterpret_cast<int32_t>(*result));
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
@ -1779,7 +1798,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, (int32_t) *result);
set_register(r0, reinterpret_cast<int32_t>(*result));
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@ -2764,6 +2783,26 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc1Value() == 0x0)) {
// vmla, vmls
const bool is_vmls = (instr->Opc3Value() & 0x1);
if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
const double dd_val = get_double_from_d_register(vd);
const double dn_val = get_double_from_d_register(vn);
const double dm_val = get_double_from_d_register(vm);
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
set_d_register_from_double(vd, dn_val * dm_val);
if (is_vmls) {
set_d_register_from_double(vd, dd_val - get_double_from_d_register(vd));
} else {
set_d_register_from_double(vd, dd_val + get_double_from_d_register(vd));
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
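The "too high precision" comment in the vmla/vmls block above is about fused multiply-add: VFP vmla rounds the product before accumulating, so simulating it with one fused operation would be wrong. A small demonstration of the difference, with std::fma standing in for the fused form:

#include <cmath>
#include <cstdio>

int main() {
  double a = 1.0 + 1e-16, b = 1.0 - 1e-16, c = -1.0;
  double separate = a * b + c;       // product rounded first, as simulated
  double fused = std::fma(a, b, c);  // one rounding of the exact a*b + c
  // Here a*b rounds to exactly 1.0, so 'separate' is 0 while 'fused'
  // keeps the -1e-32 residue: the two forms are not interchangeable.
  std::printf("%g vs %g\n", separate, fused);
  return 0;
}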
@ -2782,6 +2821,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLValue() == 0x0) &&
(instr->VCValue() == 0x1) &&
(instr->Bit(23) == 0x0)) {
// vmov (ARM core register to scalar)
int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dd_value = get_double_from_d_register(vd);
int32_t data[2];
memcpy(data, &dd_value, 8);
data[instr->Bit(21)] = get_register(instr->RtValue());
memcpy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
@ -3055,15 +3105,15 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (src_precision == kDoublePrecision) {
if (unsigned_integer) {
set_d_register_from_double(dst,
static_cast<double>((uint32_t)val));
set_d_register_from_double(
dst, static_cast<double>(static_cast<uint32_t>(val)));
} else {
set_d_register_from_double(dst, static_cast<double>(val));
}
} else {
if (unsigned_integer) {
set_s_register_from_float(dst,
static_cast<float>((uint32_t)val));
set_s_register_from_float(
dst, static_cast<float>(static_cast<uint32_t>(val)));
} else {
set_s_register_from_float(dst, static_cast<float>(val));
}
@ -3120,31 +3170,32 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
if (instr->Bits(7, 4) != 0x1) {
if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
UNIMPLEMENTED(); // Not used by V8.
} else {
int rt = instr->RtValue();
int rn = instr->RnValue();
int vm = instr->VmValue();
int vm = instr->VFPMRegValue(kDoublePrecision);
if (instr->HasL()) {
int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
set_register(rt, rt_int_value);
set_register(rn, rn_int_value);
int32_t data[2];
double d = get_double_from_d_register(vm);
memcpy(data, &d, 8);
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
int32_t rs_val = get_register(rt);
int32_t rn_val = get_register(rn);
set_s_register_from_sinteger(2*vm, rs_val);
set_s_register_from_sinteger((2*vm+1), rn_val);
int32_t data[] = { get_register(rt), get_register(rn) };
double d;
memcpy(&d, data, 8);
set_d_register_from_double(vm, d);
}
}
break;
case 0x8:
case 0xC: { // Load and store double to memory.
case 0xA:
case 0xC:
case 0xE: { // Load and store double to memory.
int rn = instr->RnValue();
int vd = instr->VdValue();
int vd = instr->VFPDRegValue(kDoublePrecision);
int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
@ -3152,18 +3203,29 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int32_t address = get_register(rn) + 4 * offset;
if (instr->HasL()) {
// Load double from memory: vldr.
set_s_register_from_sinteger(2*vd, ReadW(address, instr));
set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
int32_t data[] = {
ReadW(address, instr),
ReadW(address + 4, instr)
};
double val;
memcpy(&val, data, 8);
set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
WriteW(address, get_sinteger_from_s_register(2*vd), instr);
WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
int32_t data[2];
double val = get_double_from_d_register(vd);
memcpy(data, &val, 8);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
}
break;
}
case 0x4:
case 0x5:
case 0x6:
case 0x7:
case 0x9:
case 0xB:
// Load/store multiple double from memory: vldm/vstm.
HandleVList(instr);
break;
@ -3273,33 +3335,7 @@ void Simulator::Execute() {
}
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
// Set up arguments
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
set_register(r3, va_arg(parameters, int32_t));
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
for (int i = 4; i < argument_count; i++) {
stack_argument[i - 4] = va_arg(parameters, int32_t);
}
va_end(parameters);
set_register(sp, entry_stack);
void Simulator::CallInternal(byte* entry) {
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@ -3353,6 +3389,37 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(r9, r9_val);
set_register(r10, r10_val);
set_register(r11, r11_val);
}
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
// Set up arguments
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
set_register(r3, va_arg(parameters, int32_t));
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
for (int i = 4; i < argument_count; i++) {
stack_argument[i - 4] = va_arg(parameters, int32_t);
}
va_end(parameters);
set_register(sp, entry_stack);
CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@ -3363,6 +3430,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
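Call() aligns the entry stack with entry_stack &= -OS::ActivationFrameAlignment(); the trick works because the alignment is a power of two:

#include <cstdint>

// Activation frame alignments are powers of two, so -alignment is a mask
// with ones above the low bits: ANDing rounds the stack pointer down.
int32_t align_down(int32_t sp, int32_t alignment) {
  return sp & -alignment;
}
// e.g. align_down(0x1007, 16) == 0x1000, matching entry_stack &= -... above.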
double Simulator::CallFP(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
memcpy(buffer, &d0, sizeof(d0));
set_dw_register(0, buffer);
memcpy(buffer, &d1, sizeof(d1));
set_dw_register(2, buffer);
}
CallInternal(entry);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {
return get_double_from_register_pair(0);
}
}
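CallFP's else branch mirrors the soft-float calling convention, where a double travels in a core register pair; in isolation the split looks like this (illustrative helper):

#include <cstdint>
#include <cstring>

// Split an IEEE-754 double into the two 32-bit words that occupy a core
// register pair (r0/r1 or r2/r3); memcpy does the bit-for-bit copy without
// violating strict aliasing, as the buffers in CallFP above do.
void split_double(double d, int32_t out[2]) {
  static_assert(sizeof(double) == 2 * sizeof(int32_t), "register pair");
  std::memcpy(out, &d, sizeof d);
}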
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);

10
deps/v8/src/arm/simulator-arm.h

@ -142,7 +142,9 @@ class Simulator {
num_s_registers = 32,
d0 = 0, d1, d2, d3, d4, d5, d6, d7,
d8, d9, d10, d11, d12, d13, d14, d15,
num_d_registers = 16
d16, d17, d18, d19, d20, d21, d22, d23,
d24, d25, d26, d27, d28, d29, d30, d31,
num_d_registers = 32
};
explicit Simulator(Isolate* isolate);
@ -205,6 +207,8 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@ -356,6 +360,8 @@ class Simulator {
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
void CallInternal(byte* entry);
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
@ -367,7 +373,7 @@ class Simulator {
bool v_flag_;
// VFP architecture state.
unsigned int vfp_register[num_s_registers];
unsigned int vfp_registers_[num_d_registers * 2];
bool n_flag_FPSCR_;
bool z_flag_FPSCR_;
bool c_flag_FPSCR_;

1958
deps/v8/src/arm/stub-cache-arm.cc

File diff suppressed because it is too large

29
deps/v8/src/array.js

@ -413,6 +413,7 @@ function ArrayJoin(separator) {
["Array.prototype.join"]);
}
var length = TO_UINT32(this.length);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@ -422,7 +423,7 @@ function ArrayJoin(separator) {
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
return Join(this, TO_UINT32(this.length), separator, ConvertToString);
return Join(this, length, separator, ConvertToString);
}
@ -441,8 +442,8 @@ function ArrayPop() {
}
n--;
var value = this[n];
this.length = n;
delete this[n];
this.length = n;
return value;
}
@ -581,7 +582,7 @@ function ArrayShift() {
var first = this[0];
if (IS_ARRAY(this)) {
if (IS_ARRAY(this) && !%IsObserved(this)) {
SmartMove(this, 0, 1, len, 0);
} else {
SimpleMove(this, 0, 1, len, 0);
@ -602,7 +603,7 @@ function ArrayUnshift(arg1) { // length == 1
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
if (IS_ARRAY(this)) {
if (IS_ARRAY(this) && !%IsObserved(this)) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
SimpleMove(this, 0, 0, len, num_arguments);
@ -649,6 +650,7 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
if (IS_ARRAY(this) &&
!%IsObserved(this) &&
(end_i > 1000) &&
(%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
@ -705,7 +707,9 @@ function ArraySplice(start, delete_count) {
var use_simple_splice = true;
if (IS_ARRAY(this) && num_additional_args !== del_count) {
if (IS_ARRAY(this) &&
!%IsObserved(this) &&
num_additional_args !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
@ -881,7 +885,7 @@ function ArraySort(comparefn) {
// of a prototype property.
var CopyFromPrototype = function CopyFromPrototype(obj, length) {
var max = 0;
for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
var indices = %GetArrayKeys(proto, length);
if (indices.length > 0) {
if (indices[0] == -1) {
@ -912,7 +916,7 @@ function ArraySort(comparefn) {
// where a prototype of obj has an element. I.e., shadow all prototype
// elements in that range.
var ShadowPrototypeElements = function(obj, from, to) {
for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
var indices = %GetArrayKeys(proto, to);
if (indices.length > 0) {
if (indices[0] == -1) {
@ -982,7 +986,7 @@ function ArraySort(comparefn) {
}
for (i = length - num_holes; i < length; i++) {
// For compatibility with WebKit, do not expose elements in the prototype.
if (i in obj.__proto__) {
if (i in %GetPrototype(obj)) {
obj[i] = void 0;
} else {
delete obj[i];
@ -1549,6 +1553,15 @@ function SetUpArray() {
// exposed to user code.
// Adding only the functions that are actually used.
SetUpLockedPrototype(InternalArray, $Array(), $Array(
"concat", getFunction("concat", ArrayConcat),
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"splice", getFunction("splice", ArraySplice)
));
SetUpLockedPrototype(InternalPackedArray, $Array(), $Array(
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush)

317
deps/v8/src/assembler.cc

@ -91,6 +91,7 @@ namespace internal {
struct DoubleConstant BASE_EMBEDDED {
double min_int;
double one_half;
double minus_one_half;
double minus_zero;
double zero;
double uint8_max_value;
@ -103,18 +104,110 @@ static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
static bool math_exp_data_initialized = false;
static Mutex* math_exp_data_mutex = NULL;
static double* math_exp_constants_array = NULL;
static double* math_exp_log_table_array = NULL;
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
AssemblerBase::AssemblerBase(Isolate* isolate)
AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
: isolate_(isolate),
jit_cookie_(0) {
jit_cookie_(0),
enabled_cpu_features_(0),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
if (isolate->assembler_spare_buffer() != NULL) {
buffer = isolate->assembler_spare_buffer();
isolate->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
own_buffer_ = true;
} else {
// Use externally provided buffer instead.
ASSERT(buffer_size > 0);
own_buffer_ = false;
}
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
pc_ = buffer_;
}
AssemblerBase::~AssemblerBase() {
if (own_buffer_) {
if (isolate() != NULL &&
isolate()->assembler_spare_buffer() == NULL &&
buffer_size_ == kMinimalBufferSize) {
isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
}
}
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
expected_size_(expected_size),
start_offset_(assembler->pc_offset()),
old_value_(assembler->predictable_code_size()) {
assembler_->set_predictable_code_size(true);
}
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
// TODO(svenpanne) Remove the 'if' when everything works.
if (expected_size_ >= 0) {
CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
}
assembler_->set_predictable_code_size(old_value_);
}
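The scope is an RAII guard around code whose byte length must match between the snapshot build and the running VM; a typical use might look like the following (not taken from this diff; 'masm' and the size constant are assumptions):

// Hypothetical usage sketch:
{
  PredictableCodeSizeScope scope(&masm, 3 * Assembler::kInstrSize);
  // ... emit a sequence that must be exactly three instructions long;
  // the destructor CHECKs the emitted size against the expectation.
}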
// -----------------------------------------------------------------------------
// Implementation of CpuFeatureScope
#ifdef DEBUG
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
: assembler_(assembler) {
ASSERT(CpuFeatures::IsSafeForSnapshot(f));
old_enabled_ = assembler_->enabled_cpu_features();
uint64_t mask = static_cast<uint64_t>(1) << f;
// TODO(svenpanne) This special case below doesn't belong here!
#if V8_TARGET_ARCH_ARM
// VFP2 and ARMv7 are implied by VFP3.
if (f == VFP3) {
mask |=
static_cast<uint64_t>(1) << VFP2 |
static_cast<uint64_t>(1) << ARMv7;
}
#endif
assembler_->set_enabled_cpu_features(old_enabled_ | mask);
}
CpuFeatureScope::~CpuFeatureScope() {
assembler_->set_enabled_cpu_features(old_enabled_);
}
#endif
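The scope manipulates a simple per-assembler bitmask; a sketch of the mask arithmetic with the ARM special case (enum values here are placeholders, not V8's):

#include <cstdint>

// Each CpuFeature owns one bit of the mask; enabling VFP3 on ARM also sets
// the bits for the features it implies.
enum CpuFeature { VFP2 = 0, VFP3 = 1, ARMv7 = 2 };

uint64_t enable(uint64_t enabled, CpuFeature f) {
  uint64_t mask = static_cast<uint64_t>(1) << f;
  if (f == VFP3) {
    mask |= static_cast<uint64_t>(1) << VFP2 |
            static_cast<uint64_t>(1) << ARMv7;
  }
  return enabled | mask;
}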
// -----------------------------------------------------------------------------
// Implementation of Label
@ -313,6 +406,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
@ -570,6 +664,15 @@ void RelocIterator::next() {
}
}
}
if (code_age_sequence_ != NULL) {
byte* old_code_age_sequence = code_age_sequence_;
code_age_sequence_ = NULL;
if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
rinfo_.data_ = 0;
rinfo_.pc_ = old_code_age_sequence;
return;
}
}
done_ = true;
}
@ -585,6 +688,12 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
byte* sequence = code->FindCodeAgeSequence();
if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
code_age_sequence_ = sequence;
} else {
code_age_sequence_ = NULL;
}
if (mode_mask_ == 0) pos_ = end_;
next();
}
@ -600,6 +709,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@ -609,11 +719,28 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
// Implementation of RelocInfo
#ifdef DEBUG
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
// deoptimization entries, they would require relocation after code
// generation.
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
RelocInfo::kApplyMask;
RelocIterator it(desc, mode_mask);
return !it.done();
}
#endif
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
case RelocInfo::NONE:
return "no reloc";
case RelocInfo::NONE32:
return "no reloc 32";
case RelocInfo::NONE64:
return "no reloc 64";
case RelocInfo::EMBEDDED_OBJECT:
return "embedded object";
case RelocInfo::CONSTRUCT_CALL:
@ -652,6 +779,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
UNREACHABLE();
#endif
return "debug break slot";
case RelocInfo::CODE_AGE_SEQUENCE:
return "code_age_sequence";
case RelocInfo::NUMBER_OF_MODES:
UNREACHABLE();
return "number_of_modes";
@ -660,7 +789,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
}
void RelocInfo::Print(FILE* out) {
void RelocInfo::Print(Isolate* isolate, FILE* out) {
PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
if (IsComment(rmode_)) {
PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
@ -682,11 +811,11 @@ void RelocInfo::Print(FILE* out) {
}
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
} else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
Isolate::Current()->deoptimizer_data() != NULL) {
} else if (IsRuntimeEntry(rmode_) &&
isolate->deoptimizer_data() != NULL) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
target_address(), Deoptimizer::EAGER);
isolate, target_address(), Deoptimizer::EAGER);
if (id != Deoptimizer::kNotDeoptimizationEntry) {
PrintF(out, " (deoptimization bailout %d)", id);
}
@ -734,11 +863,15 @@ void RelocInfo::Verify() {
case INTERNAL_REFERENCE:
case CONST_POOL:
case DEBUG_BREAK_SLOT:
case NONE:
case NONE32:
case NONE64:
break;
case NUMBER_OF_MODES:
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
break;
}
}
#endif // VERIFY_HEAP
@ -750,12 +883,77 @@ void RelocInfo::Verify() {
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
double_constants.minus_one_half = -0.5;
double_constants.minus_zero = -0.0;
double_constants.uint8_max_value = 255;
double_constants.zero = 0.0;
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
math_exp_data_mutex = OS::CreateMutex();
}
void ExternalReference::InitializeMathExpData() {
// Early return?
if (math_exp_data_initialized) return;
math_exp_data_mutex->Lock();
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
const int kTableSize = 1 << kTableSizeBits;
const double kTableSizeDouble = static_cast<double>(kTableSize);
math_exp_constants_array = new double[9];
// Input values smaller than this always return 0.
math_exp_constants_array[0] = -708.39641853226408;
// Input values larger than this always return +Infinity.
math_exp_constants_array[1] = 709.78271289338397;
math_exp_constants_array[2] = V8_INFINITY;
// The rest is black magic. Do not attempt to understand it. It is
// loosely based on the "expd" function published at:
// http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
const double constant3 = (1 << kTableSizeBits) / log(2.0);
math_exp_constants_array[3] = constant3;
math_exp_constants_array[4] =
static_cast<double>(static_cast<int64_t>(3) << 51);
math_exp_constants_array[5] = 1 / constant3;
math_exp_constants_array[6] = 3.0000000027955394;
math_exp_constants_array[7] = 0.16666666685227835;
math_exp_constants_array[8] = 1;
math_exp_log_table_array = new double[kTableSize];
for (int i = 0; i < kTableSize; i++) {
double value = pow(2, i / kTableSizeDouble);
uint64_t bits = BitCast<uint64_t, double>(value);
bits &= (static_cast<uint64_t>(1) << 52) - 1;
double mantissa = BitCast<double, uint64_t>(bits);
// <just testing>
uint64_t doublebits;
memcpy(&doublebits, &value, sizeof doublebits);
doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
double mantissa2;
memcpy(&mantissa2, &doublebits, sizeof mantissa2);
CHECK_EQ(mantissa, mantissa2);
// </just testing>
math_exp_log_table_array[i] = mantissa;
}
math_exp_data_initialized = true;
}
math_exp_data_mutex->Unlock();
}
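The table stores only mantissa bits; the '<just testing>' block above verifies the BitCast path against the memcpy formulation, which in isolation is:

#include <cstdint>
#include <cstring>

// Keep only the 52 mantissa bits of an IEEE-754 double, clearing the sign
// and exponent fields: the transformation applied to each 2^(i/2048)
// before it is stored in math_exp_log_table_array.
double mantissa_bits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  bits &= (static_cast<uint64_t>(1) << 52) - 1;
  double mantissa;
  std::memcpy(&mantissa, &bits, sizeof mantissa);
  return mantissa;
}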
void ExternalReference::TearDownMathExpData() {
delete[] math_exp_constants_array;
delete[] math_exp_log_table_array;
delete math_exp_data_mutex;
}
@ -874,6 +1072,13 @@ ExternalReference ExternalReference::get_date_field_function(
}
ExternalReference ExternalReference::get_make_code_young_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@ -900,6 +1105,20 @@ ExternalReference ExternalReference::compute_output_frames_function(
}
ExternalReference ExternalReference::log_enter_external_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}
ExternalReference ExternalReference::log_leave_external_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
@ -969,18 +1188,35 @@ ExternalReference ExternalReference::new_space_allocation_limit_address(
}
ExternalReference ExternalReference::handle_scope_level_address() {
return ExternalReference(HandleScope::current_level_address());
ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldPointerSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::handle_scope_next_address() {
return ExternalReference(HandleScope::current_next_address());
ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldPointerSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
}
ExternalReference ExternalReference::handle_scope_limit_address() {
return ExternalReference(HandleScope::current_limit_address());
ExternalReference ExternalReference::handle_scope_next_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_next_address(isolate));
}
ExternalReference ExternalReference::handle_scope_limit_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_limit_address(isolate));
}
@ -1018,6 +1254,12 @@ ExternalReference ExternalReference::address_of_one_half() {
}
ExternalReference ExternalReference::address_of_minus_one_half() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_one_half));
}
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_zero));
@ -1186,12 +1428,45 @@ ExternalReference ExternalReference::math_log_double_function(
}
ExternalReference ExternalReference::math_exp_constants(int constant_index) {
ASSERT(math_exp_data_initialized);
return ExternalReference(
reinterpret_cast<void*>(math_exp_constants_array + constant_index));
}
ExternalReference ExternalReference::math_exp_log_table() {
ASSERT(math_exp_data_initialized);
return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
}
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
}
ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
return ExternalReference(entry);
}
double power_helper(double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return power_double_int(x, y_int); // Returns 1 if exponent is 0.
}
if (y == 0.5) {
return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0.
}
if (y == -0.5) {
return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
}
return power_double_double(x, y);
}
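The 0.5 and -0.5 special cases exist because C library sqrt and ES5 Math.pow disagree at the edges; two of the corner cases, checked directly:

#include <cmath>
#include <cassert>

int main() {
  // sqrt(-Infinity) is NaN, but ES5 Math.pow(-Infinity, 0.5) is +Infinity,
  // hence the explicit isinf branches above.
  assert(std::isnan(std::sqrt(-INFINITY)));
  // Adding 0.0 normalises -0 to +0, so fast_sqrt never sees a negative zero.
  assert(!std::signbit(-0.0 + 0.0));
  return 0;
}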
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
@ -1212,7 +1487,8 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
#ifdef __MINGW64_VERSION_MAJOR
#if defined(__MINGW64_VERSION_MAJOR) && \
(!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
// MinGW64 has a custom implementation for pow. This handles certain
// special cases that are different.
if ((x == 0.0 || isinf(x)) && isfinite(y)) {
@ -1330,6 +1606,10 @@ void PositionsRecorder::RecordPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
}
#endif
LOG_CODE_EVENT(assembler_->isolate(),
CodeLinePosInfoAddPositionEvent(jit_handler_data_,
assembler_->pc_offset(),
pos));
}
@ -1342,6 +1622,11 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
}
#endif
LOG_CODE_EVENT(assembler_->isolate(),
CodeLinePosInfoAddStatementPositionEvent(
jit_handler_data_,
assembler_->pc_offset(),
pos));
}

158
deps/v8/src/assembler.h

@ -56,18 +56,81 @@ struct StatsCounter;
class AssemblerBase: public Malloced {
public:
explicit AssemblerBase(Isolate* isolate);
AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
virtual ~AssemblerBase();
Isolate* isolate() const { return isolate_; }
int jit_cookie() { return jit_cookie_; }
int jit_cookie() const { return jit_cookie_; }
bool emit_debug_code() const { return emit_debug_code_; }
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
bool predictable_code_size() const { return predictable_code_size_; }
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
void set_enabled_cpu_features(uint64_t features) {
enabled_cpu_features_ = features;
}
bool IsEnabled(CpuFeature f) {
return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
static void QuietNaN(HeapObject* nan) { }
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
static const int kMinimalBufferSize = 4*KB;
protected:
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
byte* buffer_;
int buffer_size_;
bool own_buffer_;
// The program counter, which points into the buffer above and moves forward.
byte* pc_;
private:
Isolate* isolate_;
int jit_cookie_;
uint64_t enabled_cpu_features_;
bool emit_debug_code_;
bool predictable_code_size_;
};
// Avoids using instructions that vary in size in unpredictable ways between the
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
~PredictableCodeSizeScope();
private:
AssemblerBase* assembler_;
int expected_size_;
int start_offset_;
bool old_value_;
};
// Enable a specified feature within a scope.
class CpuFeatureScope BASE_EMBEDDED {
public:
#ifdef DEBUG
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f);
~CpuFeatureScope();
private:
AssemblerBase* assembler_;
uint64_t old_enabled_;
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {}
#endif
};
@ -210,7 +273,14 @@ class RelocInfo BASE_EMBEDDED {
// add more as needed
// Pseudo-types
NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
NONE, // never recorded
NONE32, // never recorded 32-bit value
NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = CONST_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
@ -224,7 +294,19 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
RelocInfo(byte* pc, double data64)
: pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) {
}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE &&
mode <= LAST_REAL_RELOC_MODE;
}
static inline bool IsPseudoRelocMode(Mode mode) {
ASSERT(!IsRealRelocMode(mode));
return mode >= FIRST_PSEUDO_RELOC_MODE &&
mode <= LAST_PSEUDO_RELOC_MODE;
}
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
@ -234,6 +316,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsEmbeddedObject(Mode mode) {
return mode == EMBEDDED_OBJECT;
}
static inline bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
}
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@ -262,6 +347,12 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@ -269,6 +360,7 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
double data64() const { return data64_; }
Code* host() const { return host_; }
// Apply a relocation by delta bytes
@ -281,7 +373,7 @@ class RelocInfo BASE_EMBEDDED {
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
INLINE(void set_target_address(Address target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
@ -290,11 +382,16 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Address target_runtime_entry(Assembler* origin));
INLINE(void set_target_runtime_entry(Address target,
WriteBarrierMode mode =
UPDATE_WRITE_BARRIER));
INLINE(JSGlobalPropertyCell* target_cell());
INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@ -344,10 +441,16 @@ class RelocInfo BASE_EMBEDDED {
// debugger.
INLINE(bool IsPatchedDebugBreakSlotSequence());
#ifdef DEBUG
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
static bool RequiresRelocation(const CodeDesc& desc);
#endif
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
void Print(FILE* out);
void Print(Isolate* isolate, FILE* out);
#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void Verify();
@ -366,7 +469,10 @@ class RelocInfo BASE_EMBEDDED {
// comment).
byte* pc_;
Mode rmode_;
union {
intptr_t data_;
double data64_;
};
Code* host_;
// Code and Embedded Object pointers on some platforms are stored split
// across two consecutive 32-bit instructions. Heap management
@ -487,6 +593,7 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@ -546,6 +653,8 @@ class ExternalReference BASE_EMBEDDED {
};
static void SetUp();
static void InitializeMathExpData();
static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(void* original, Type type);
@ -595,10 +704,16 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
@ -634,6 +749,10 @@ class ExternalReference BASE_EMBEDDED {
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
static ExternalReference old_pointer_space_allocation_top_address(
Isolate* isolate);
static ExternalReference old_pointer_space_allocation_limit_address(
Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
@ -641,9 +760,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference power_double_double_function(Isolate* isolate);
static ExternalReference power_double_int_function(Isolate* isolate);
static ExternalReference handle_scope_next_address();
static ExternalReference handle_scope_limit_address();
static ExternalReference handle_scope_level_address();
static ExternalReference handle_scope_next_address(Isolate* isolate);
static ExternalReference handle_scope_limit_address(Isolate* isolate);
static ExternalReference handle_scope_level_address(Isolate* isolate);
static ExternalReference scheduled_exception_address(Isolate* isolate);
static ExternalReference address_of_pending_message_obj(Isolate* isolate);
@ -653,6 +772,7 @@ class ExternalReference BASE_EMBEDDED {
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
static ExternalReference address_of_minus_one_half();
static ExternalReference address_of_minus_zero();
static ExternalReference address_of_zero();
static ExternalReference address_of_uint8_max_value();
@ -665,8 +785,15 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
static ExternalReference math_exp_constants(int constant_index);
static ExternalReference math_exp_log_table();
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
static ExternalReference cpu_features();
Address address() const { return reinterpret_cast<Address>(address_); }
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -760,6 +887,7 @@ class PositionsRecorder BASE_EMBEDDED {
#ifdef ENABLE_GDB_JIT_INTERFACE
gdbjit_lineinfo_ = NULL;
#endif
jit_handler_data_ = NULL;
}
#ifdef ENABLE_GDB_JIT_INTERFACE
@ -779,7 +907,15 @@ class PositionsRecorder BASE_EMBEDDED {
return lineinfo;
}
#endif
void AttachJITHandlerData(void* user_data) {
jit_handler_data_ = user_data;
}
void* DetachJITHandlerData() {
void* old_data = jit_handler_data_;
jit_handler_data_ = NULL;
return old_data;
}
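The attach/detach pair above hands one opaque pointer between a JIT code-event handler and the recorder: attach stores it, detach returns it exactly once and clears the slot. A minimal standalone sketch of that ownership hand-off; all names below are illustrative, not part of the V8 API.

#include <cassert>
#include <cstdio>

// One opaque slot with the attach/detach semantics of the diff above.
class Recorder {
 public:
  Recorder() : user_data_(NULL) {}
  void AttachUserData(void* data) { user_data_ = data; }
  void* DetachUserData() {  // one-shot: returns the pointer and clears it
    void* old_data = user_data_;
    user_data_ = NULL;
    return old_data;
  }
 private:
  void* user_data_;
};

struct LineInfo { int first_line; };

int main() {
  Recorder recorder;
  recorder.AttachUserData(new LineInfo());    // the event handler attaches...
  // ... code generation would run here ...
  LineInfo* info = static_cast<LineInfo*>(recorder.DetachUserData());
  assert(info != NULL);
  assert(recorder.DetachUserData() == NULL);  // ...and detaches exactly once
  delete info;
  return 0;
}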
// Set current position to pos.
void RecordPosition(int pos);
@ -802,6 +938,9 @@ class PositionsRecorder BASE_EMBEDDED {
GDBJITLineInfo* gdbjit_lineinfo_;
#endif
// Currently jit_handler_data_ is used to store JITHandler-specific data
// over the lifetime of a PositionsRecorder.
void* jit_handler_data_;
friend class PreservePositionScope;
DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
@ -866,6 +1005,7 @@ inline int NumberOfBitsSet(uint32_t x) {
bool EvalComparison(Token::Value op, double op1, double op2);
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
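power_helper and power_double_double exist because ECMA-262's Math.pow differs from C's pow in a few corners; most notably, the spec makes pow(+/-1, +/-Infinity) NaN, while C99 defines pow(1, Infinity) as 1. A hedged sketch of that spec-side filtering; it mirrors the spec text, not necessarily V8's exact code path.

#include <cmath>
#include <cstdio>
#include <limits>

double spec_pow(double x, double y) {
  // ES5 15.8.2.13: if y is NaN, or |x| == 1 with y == +/-Infinity -> NaN.
  if (std::isnan(y) || (std::fabs(x) == 1.0 && std::isinf(y)))
    return std::numeric_limits<double>::quiet_NaN();
  return std::pow(x, y);
}

int main() {
  double inf = std::numeric_limits<double>::infinity();
  std::printf("C pow(1, inf)    = %f\n", std::pow(1.0, inf));  // 1.0 in C99
  std::printf("spec_pow(1, inf) = %f\n", spec_pow(1.0, inf));  // NaN per spec
  return 0;
}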

74
deps/v8/src/ast.cc

@ -29,6 +29,7 @@
#include <math.h> // For isfinite.
#include "builtins.h"
#include "code-stubs.h"
#include "conversions.h"
#include "hashmap.h"
#include "parser.h"
@ -96,13 +97,14 @@ VariableProxy::VariableProxy(Isolate* isolate,
position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
ASSERT(name->IsInternalizedString());
}
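The assertion change tracks this release's Symbol to InternalizedString rename: names are canonicalized so that two occurrences of the same name are the same object, and equality becomes a pointer compare. A toy intern table showing why canonicalization buys O(1) name equality; purely illustrative, not V8's implementation.

#include <cassert>
#include <string>
#include <unordered_set>

class InternTable {
 public:
  // Returns a stable pointer; equal strings always yield the same pointer.
  // (unordered_set guarantees element addresses survive rehashing.)
  const std::string* Intern(const std::string& s) {
    return &*table_.insert(s).first;
  }
 private:
  std::unordered_set<std::string> table_;
};

int main() {
  InternTable table;
  const std::string* a = table.Intern("foo");
  const std::string* b = table.Intern(std::string("f") + "oo");
  assert(a == b);  // O(1) identity check replaces an O(n) string compare
  return 0;
}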
void VariableProxy::BindTo(Variable* var) {
ASSERT(var_ == NULL); // must be bound only once
ASSERT(var != NULL); // must bind
ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
@ -180,8 +182,8 @@ ObjectLiteral::Property::Property(Literal* key,
key_ = key;
value_ = value;
Object* k = *key->handle();
if (k->IsSymbol() &&
isolate->heap()->Proto_symbol()->Equals(String::cast(k))) {
if (k->IsInternalizedString() &&
isolate->heap()->proto_string()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@ -411,12 +413,14 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
ArrayLengthStub array_stub(Code::LOAD_IC);
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
StringLengthStub string_stub(Code::LOAD_IC, false);
if (oracle->LoadIsStub(this, &array_stub)) {
is_array_length_ = true;
} else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) {
} else if (oracle->LoadIsStub(this, &string_stub)) {
is_string_length_ = true;
} else if (oracle->LoadIsBuiltin(this,
Builtins::kLoadIC_FunctionPrototype)) {
} else if (oracle->LoadIsStub(this, &proto_stub)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
@ -429,7 +433,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else if (is_monomorphic_) {
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
zone);
} else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
} else if (oracle->LoadIsPolymorphic(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
}
@ -451,7 +455,7 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
} else if (oracle->StoreIsPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
@ -467,7 +471,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(
oracle->StoreMonomorphicReceiverType(id), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
} else if (oracle->StoreIsPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
@ -476,11 +480,12 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
if (info.IsUninitialized()) info = TypeInfo::Unknown();
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsSymbol()) {
compare_type_ = SYMBOL_ONLY;
} else if (info.IsNonSymbol()) {
} else if (info.IsInternalizedString()) {
compare_type_ = NAME_ONLY;
} else if (info.IsNonInternalizedString()) {
compare_type_ = STRING_ONLY;
} else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
@ -600,18 +605,7 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
}
}
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->CompareType(this);
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
} else {
ASSERT(compare_type_ == NONE);
elements_kind_ = oracle->GetCallNewElementsKind(this);
}
}
@ -626,14 +620,6 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
bool AstVisitor::CheckStackOverflow() {
if (stack_overflow_) return true;
StackLimitCheck check(isolate_);
if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true);
}
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));
@ -1021,11 +1007,6 @@ CaseClause::CaseClause(Isolate* isolate,
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
}
#define DONT_INLINE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontInline); \
}
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@ -1052,8 +1033,10 @@ REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ArrayLiteral)
REGULAR_NODE(ObjectLiteral)
REGULAR_NODE(RegExpLiteral)
REGULAR_NODE(FunctionLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(Property)
@ -1070,25 +1053,20 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
// We currently do not optimize any modules. Note in particular, that module
// instance objects associated with ModuleLiterals are allocated during
// scope resolution, and references to them are embedded into the code.
// That code may hence neither be cached nor re-compiled.
// We currently do not optimize any modules.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals.
DONT_INLINE_NODE(FunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
@ -1103,8 +1081,9 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
// optimize them.
add_flag(kDontInline);
} else if (node->function()->intrinsic_type == Runtime::INLINE &&
(node->name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
node->name()->IsEqualTo(CStrVector("_Arguments")))) {
(node->name()->IsOneByteEqualTo(
STATIC_ASCII_VECTOR("_ArgumentsLength")) ||
node->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_Arguments")))) {
// Don't inline the %_ArgumentsLength or %_Arguments because their
// implementation will not work. There is no stack frame to get them
// from.
@ -1114,7 +1093,6 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
#undef REGULAR_NODE
#undef DONT_OPTIMIZE_NODE
#undef DONT_INLINE_NODE
#undef DONT_SELFOPTIMIZE_NODE
#undef DONT_CACHE_NODE

116
deps/v8/src/ast.h

@ -75,6 +75,7 @@ namespace internal {
#define STATEMENT_NODE_LIST(V) \
V(Block) \
V(ModuleStatement) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(IfStatement) \
@ -522,7 +523,7 @@ class ModuleDeclaration: public Declaration {
ModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
: Declaration(proxy, LET, scope),
: Declaration(proxy, MODULE, scope),
module_(module) {
}
@ -645,6 +646,25 @@ class ModuleUrl: public Module {
};
class ModuleStatement: public Statement {
public:
DECLARE_NODE_TYPE(ModuleStatement)
VariableProxy* proxy() const { return proxy_; }
Block* body() const { return body_; }
protected:
ModuleStatement(VariableProxy* proxy, Block* body)
: proxy_(proxy),
body_(body) {
}
private:
VariableProxy* proxy_;
Block* body_;
};
class IterationStatement: public BreakableStatement {
public:
// Type testing & conversion.
@ -948,7 +968,7 @@ class CaseClause: public ZoneObject {
TypeFeedbackId CompareId() { return compare_id_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
bool IsNameCompare() { return compare_type_ == NAME_ONLY; }
bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
@ -960,7 +980,7 @@ class CaseClause: public ZoneObject {
enum CompareTypeFeedback {
NONE,
SMI_ONLY,
SYMBOL_ONLY,
NAME_ONLY,
STRING_ONLY,
OBJECT_ONLY
};
@ -1151,7 +1171,7 @@ class Literal: public Expression {
DECLARE_NODE_TYPE(Literal)
virtual bool IsPropertyName() {
if (handle_->IsSymbol()) {
if (handle_->IsInternalizedString()) {
uint32_t ignored;
return !String::cast(*handle_)->AsArrayIndex(&ignored);
}
@ -1163,8 +1183,8 @@ class Literal: public Expression {
return Handle<String>::cast(handle_);
}
virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); }
virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
virtual bool ToBooleanIsTrue() { return handle_->BooleanValue(); }
virtual bool ToBooleanIsFalse() { return !handle_->BooleanValue(); }
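The switch from ToBoolean()->IsTrue() to BooleanValue() swaps an allocated heap Boolean plus an identity test for a predicate that answers directly in C++. A toy analogue of the new shape; the real BooleanValue also covers undefined, null, strings, and objects.

#include <cassert>

struct Value {
  double number;
  // Answers truthiness directly instead of materializing a heap Boolean
  // and testing identity against it. False for +0, -0, and NaN.
  bool BooleanValue() const { return number != 0 && number == number; }
};

int main() {
  Value pi = { 3.14 };
  Value zero = { 0.0 };
  assert(pi.BooleanValue());
  assert(!zero.BooleanValue());
  return 0;
}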
// Identity testers.
bool IsNull() const {
@ -1417,7 +1437,7 @@ class VariableProxy: public Expression {
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
// Bind this proxy to the variable var.
// Bind this proxy to the variable var. Interfaces must match.
void BindTo(Variable* var);
protected:
@ -1512,6 +1532,22 @@ class Call: public Expression {
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
virtual bool IsMonomorphic() { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
void set_string_check(Handle<JSObject> holder) {
holder_ = holder;
check_type_ = STRING_CHECK;
}
void set_number_check(Handle<JSObject> holder) {
holder_ = holder;
check_type_ = NUMBER_CHECK;
}
void set_map_check() {
holder_ = Handle<JSObject>::null();
check_type_ = RECEIVER_MAP_CHECK;
}
Handle<JSFunction> target() { return target_; }
// A cache for the holder, set as a side effect of computing the target of the
@ -1575,6 +1611,7 @@ class CallNew: public Expression {
Handle<JSFunction> target() { return target_; }
BailoutId ReturnId() const { return return_id_; }
ElementsKind elements_kind() const { return elements_kind_; }
protected:
CallNew(Isolate* isolate,
@ -1586,7 +1623,8 @@ class CallNew: public Expression {
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
return_id_(GetNextId(isolate)) { }
return_id_(GetNextId(isolate)),
elements_kind_(GetInitialFastElementsKind()) { }
private:
Expression* expression_;
@ -1597,6 +1635,7 @@ class CallNew: public Expression {
Handle<JSFunction> target_;
const BailoutId return_id_;
ElementsKind elements_kind_;
};
@ -1777,9 +1816,6 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@ -1796,8 +1832,7 @@ class CompareOperation: public Expression {
op_(op),
left_(left),
right_(right),
pos_(pos),
compare_type_(NONE) {
pos_(pos) {
ASSERT(Token::IsCompareOp(op));
}
@ -1806,9 +1841,6 @@ class CompareOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
CompareTypeFeedback compare_type_;
};
@ -2479,40 +2511,51 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
class AstVisitor BASE_EMBEDDED {
public:
AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
AstVisitor() {}
virtual ~AstVisitor() { }
// Stack overflow check and dynamic dispatch.
void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
virtual void Visit(AstNode* node) = 0;
// Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
// Stack overflow tracking support.
bool HasStackOverflow() const { return stack_overflow_; }
bool CheckStackOverflow();
// If a stack-overflow exception is encountered when visiting a
// node, calling SetStackOverflow will make sure that the visitor
// bails out without visiting more nodes.
void SetStackOverflow() { stack_overflow_ = true; }
void ClearStackOverflow() { stack_overflow_ = false; }
// Individual AST nodes.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};
protected:
Isolate* isolate() { return isolate_; }
private:
Isolate* isolate_;
bool stack_overflow_;
};
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
virtual void Visit(AstNode* node) { \
if (!CheckStackOverflow()) node->Accept(this); \
} \
\
void SetStackOverflow() { stack_overflow_ = true; } \
void ClearStackOverflow() { stack_overflow_ = false; } \
bool HasStackOverflow() const { return stack_overflow_; } \
\
bool CheckStackOverflow() { \
if (stack_overflow_) return true; \
StackLimitCheck check(isolate_); \
if (!check.HasOverflowed()) return false; \
return (stack_overflow_ = true); \
} \
\
private: \
void InitializeAstVisitor() { \
isolate_ = Isolate::Current(); \
stack_overflow_ = false; \
} \
Isolate* isolate() { return isolate_; } \
\
Isolate* isolate_; \
bool stack_overflow_
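The macro above moves overflow-guarded dispatch out of the AstVisitor base class and pastes it into each concrete visitor. A compilable miniature of the same pattern; the real StackLimitCheck against the isolate's stack guard is stood in for by a plain depth threshold, and all names are illustrative.

#include <cstdio>

struct Visitor;

struct Node {
  int depth;
  void Accept(Visitor* v);  // would dispatch on the concrete node type
};

struct Visitor {
  virtual ~Visitor() {}
  virtual void Visit(Node* node) = 0;  // base class only declares dispatch
};

#define DEFINE_VISITOR_MEMBERS()                               \
 public:                                                       \
  virtual void Visit(Node* node) {                             \
    if (!CheckStackOverflow(node)) node->Accept(this);         \
  }                                                            \
  bool HasStackOverflow() const { return stack_overflow_; }    \
  bool CheckStackOverflow(Node* node) {                        \
    if (stack_overflow_) return true;                          \
    if (node->depth < 1000) return false;                      \
    return (stack_overflow_ = true);                           \
  }                                                            \
 private:                                                      \
  bool stack_overflow_ = false

struct PrintingVisitor : Visitor {
  DEFINE_VISITOR_MEMBERS();
};

void Node::Accept(Visitor* v) {
  (void)v;
  std::puts("visited");
}

int main() {
  PrintingVisitor visitor;
  Node shallow = {0};
  visitor.Visit(&shallow);  // dispatches normally
  Node deep = {5000};
  visitor.Visit(&deep);     // trips the overflow flag instead of recursing
  return visitor.HasStackOverflow() ? 0 : 1;
}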
// ----------------------------------------------------------------------------
@ -2647,6 +2690,11 @@ class AstNodeFactory BASE_EMBEDDED {
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
VISIT_AND_RETURN(ModuleStatement, stmt)
}
ExpressionStatement* NewExpressionStatement(Expression* expression) {
ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
VISIT_AND_RETURN(ExpressionStatement, stmt)

6
deps/v8/src/atomicops.h

@ -58,7 +58,7 @@ typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__APPLE__)
#if defined(__ILP32__) || defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64_t Atomic64;
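The new __ILP32__ test covers ABIs with 32-bit pointers on 64-bit hosts, where intptr_t is no longer 64 bits wide and Atomic64 must be spelled int64_t explicitly. The invariant can be pinned down with a one-line compile-time check, sketched here under the same #if.

#include <cstdint>

#if defined(__ILP32__) || defined(__APPLE__)
typedef int64_t Atomic64;   // intptr_t may be 32-bit or a distinct type here
#else
typedef intptr_t Atomic64;  // same width and implicit-conversion behavior
#endif

static_assert(sizeof(Atomic64) == 8, "Atomic64 must be 64 bits everywhere");

int main() { return 0; }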
@ -151,7 +151,9 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} } // namespace v8::internal
// Include our platform specific implementation.
#if defined(_MSC_VER) && \
#if defined(THREAD_SANITIZER)
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \

335
deps/v8/src/atomicops_internals_tsan.h

@ -0,0 +1,335 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_
// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
bool has_sse2; // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
typedef enum {
__tsan_memory_order_relaxed = (1 << 0) + 100500,
__tsan_memory_order_consume = (1 << 1) + 100500,
__tsan_memory_order_acquire = (1 << 2) + 100500,
__tsan_memory_order_release = (1 << 3) + 100500,
__tsan_memory_order_acq_rel = (1 << 4) + 100500,
__tsan_memory_order_seq_cst = (1 << 5) + 100500,
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed);
return cmp;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire);
return cmp;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release);
return cmp;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed);
return cmp;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire);
return cmp;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release);
return cmp;
}
inline void MemoryBarrier() {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
} // namespace internal
} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
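The wrappers in this file line up one-to-one with C++11 <atomic>. In particular, the CAS helpers return the value actually observed at *ptr, which is exactly what compare_exchange_strong leaves in its 'expected' argument on failure. A standalone sketch of that correspondence; this is an analogy, not V8 code.

#include <atomic>
#include <cassert>
#include <cstdint>

typedef int32_t Atomic32;

inline Atomic32 NoBarrier_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  ptr->compare_exchange_strong(cmp, new_value, std::memory_order_relaxed);
  return cmp;  // on success == old_value; on failure == current contents
}

int main() {
  std::atomic<Atomic32> x(7);
  assert(NoBarrier_CompareAndSwap(&x, 7, 9) == 7);   // swapped
  assert(NoBarrier_CompareAndSwap(&x, 7, 11) == 9);  // failed, reports 9
  assert(x.load() == 9);
  return 0;
}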

621
deps/v8/src/bootstrapper.cc

File diff suppressed because it is too large

19
deps/v8/src/bootstrapper.h

@ -54,8 +54,8 @@ class SourceCodeCache BASE_EMBEDDED {
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
if (str->IsEqualTo(name)) {
SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
if (str->IsUtf8EqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));
return true;
@ -65,7 +65,7 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
HandleScope scope;
HandleScope scope(shared->GetIsolate());
int length = cache_->length();
Handle<FixedArray> new_array =
FACTORY->NewFixedArray(length + 2, TENURED);
@ -95,7 +95,6 @@ class Bootstrapper {
// Creates a JavaScript Global Context with initial object graph.
// The returned value is a global handle cast to V8Environment*.
Handle<Context> CreateEnvironment(
Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions);
@ -132,6 +131,7 @@ class Bootstrapper {
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
private:
Isolate* isolate_;
typedef int NestingCounterType;
NestingCounterType nesting_;
SourceCodeCache extensions_cache_;
@ -144,7 +144,7 @@ class Bootstrapper {
friend class Isolate;
friend class NativesExternalStringResource;
Bootstrapper();
explicit Bootstrapper(Isolate* isolate);
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
@ -152,15 +152,18 @@ class Bootstrapper {
class BootstrapperActive BASE_EMBEDDED {
public:
BootstrapperActive() {
++Isolate::Current()->bootstrapper()->nesting_;
explicit BootstrapperActive(Bootstrapper* bootstrapper)
: bootstrapper_(bootstrapper) {
++bootstrapper_->nesting_;
}
~BootstrapperActive() {
--Isolate::Current()->bootstrapper()->nesting_;
--bootstrapper_->nesting_;
}
private:
Bootstrapper* bootstrapper_;
DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
};
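BootstrapperActive is an RAII nesting counter, and the change threads the Bootstrapper through the constructor instead of reaching for Isolate::Current(). The shape in isolation, with illustrative names only.

#include <cassert>

class Bootstrapper {
 public:
  Bootstrapper() : nesting(0) {}
  int nesting;
  bool IsActive() const { return nesting > 0; }
};

class ScopedActive {
 public:
  explicit ScopedActive(Bootstrapper* b) : b_(b) { ++b_->nesting; }
  ~ScopedActive() { --b_->nesting; }
 private:
  Bootstrapper* b_;
  ScopedActive(const ScopedActive&);    // non-copyable, in the spirit of
  void operator=(const ScopedActive&);  // DISALLOW_COPY_AND_ASSIGN
};

int main() {
  Bootstrapper b;
  {
    ScopedActive guard(&b);
    ScopedActive nested(&b);  // nesting is re-entrant
    assert(b.nesting == 2);
  }
  assert(!b.IsActive());      // fully unwound on scope exit
  return 0;
}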

821
deps/v8/src/builtins.cc

File diff suppressed because it is too large

114
deps/v8/src/builtins.h

@ -38,6 +38,25 @@ enum BuiltinExtraArguments {
};
#define CODE_AGE_LIST_WITH_ARG(V, A) \
V(Quadragenarian, A) \
V(Quinquagenarian, A) \
V(Sexagenarian, A) \
V(Septuagenarian, A) \
V(Octogenarian, A)
#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState) \
V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState)
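CODE_AGE_LIST_WITH_ARG is an X-macro: each age name is fed through DECLARE_CODE_AGE_BUILTIN, which stamps out an odd-marking and an even-marking builtin per age. A stripped-down expansion that compiles and runs; the ages and function names here are placeholders for the mechanism, not the real builtins.

#include <cstdio>

#define AGE_LIST(V) V(Quadragenarian) V(Quinquagenarian) V(Sexagenarian)

#define DECLARE_PAIR(C)                                 \
  void Make##C##YoungOdd()  { std::puts("odd " #C); }   \
  void Make##C##YoungEven() { std::puts("even " #C); }

AGE_LIST(DECLARE_PAIR)  // defines six functions, two per age
#undef DECLARE_PAIR

int main() {
  MakeQuadragenarianYoungOdd();
  MakeSexagenarianYoungEven();
  return 0;
}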
// Define list of builtins implemented in C++.
#define BUILTIN_LIST_C(V) \
V(Illegal, NO_EXTRA_ARGUMENTS) \
@ -68,6 +87,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(InstallRecompiledCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@ -88,6 +109,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
@ -113,14 +136,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
@ -130,48 +145,44 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC, \
V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
kStrictMode) \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
kStrictMode) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(TransitionElementsSmiToDouble, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@ -195,36 +206,36 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState)
Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState)
V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \
DEBUG_BREAK) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \
DEBUG_BREAK) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_STUB, \
DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
DEBUG_BREAK) \
V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
@ -263,6 +274,7 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
class BuiltinFunctionTable;
class ObjectVisitor;
@ -356,6 +368,7 @@ class Builtins {
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_InRecompileQueue(MacroAssembler* masm);
static void Generate_InstallRecompiledCode(MacroAssembler* masm);
static void Generate_ParallelRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
@ -367,6 +380,7 @@ class Builtins {
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
@ -379,6 +393,14 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
static void Generate_Make##C##CodeYoungAgainOddMarking( \
MacroAssembler* masm);
CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
static void InitBuiltinFunctionTable();
bool initialized_;

3
deps/v8/src/checks.cc

@ -46,7 +46,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
va_start(arguments, format);
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n\n");
i::OS::PrintError("\n#\n");
i::OS::DumpBacktrace();
}
// First two times we may try to print a stack dump.
if (fatal_error_handler_nesting_depth < 3) {

389
deps/v8/src/code-stubs-hydrogen.cc

@ -0,0 +1,389 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "code-stubs.h"
#include "hydrogen.h"
#include "lithium.h"
namespace v8 {
namespace internal {
static LChunk* OptimizeGraph(HGraph* graph) {
Isolate* isolate = graph->isolate();
AssertNoAllocation no_gc;
NoHandleAllocation no_handles(isolate);
HandleDereferenceGuard no_deref(isolate, HandleDereferenceGuard::DISALLOW);
ASSERT(graph != NULL);
SmartArrayPointer<char> bailout_reason;
if (!graph->Optimize(&bailout_reason)) {
FATAL(bailout_reason.is_empty() ? "unknown" : *bailout_reason);
}
LChunk* chunk = LChunk::NewChunk(graph);
if (chunk == NULL) {
FATAL(graph->info()->bailout_reason());
}
return chunk;
}
class CodeStubGraphBuilderBase : public HGraphBuilder {
public:
CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
: HGraphBuilder(&info_),
arguments_length_(NULL),
info_(stub, isolate),
context_(NULL) {
int major_key = stub->MajorKey();
descriptor_ = isolate->code_stub_interface_descriptor(major_key);
if (descriptor_->register_param_count_ < 0) {
stub->InitializeInterfaceDescriptor(isolate, descriptor_);
}
parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
}
virtual bool BuildGraph();
protected:
virtual HValue* BuildCodeStub() = 0;
HParameter* GetParameter(int parameter) {
ASSERT(parameter < descriptor_->register_param_count_);
return parameters_[parameter];
}
HValue* GetArgumentsLength() {
// This is initialized in BuildGraph().
ASSERT(arguments_length_ != NULL);
return arguments_length_;
}
CompilationInfo* info() { return &info_; }
HydrogenCodeStub* stub() { return info_.code_stub(); }
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
private:
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfoWithZone info_;
CodeStubInterfaceDescriptor* descriptor_;
HContext* context_;
};
bool CodeStubGraphBuilderBase::BuildGraph() {
if (FLAG_trace_hydrogen) {
const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling stub %s using hydrogen\n", name);
isolate()->GetHTracer()->TraceCompilation(&info_);
}
Zone* zone = this->zone();
int param_count = descriptor_->register_param_count_;
HEnvironment* start_environment = graph()->start_environment();
HBasicBlock* next_block = CreateBasicBlock(start_environment);
current_block()->Goto(next_block);
next_block->SetJoinId(BailoutId::StubEntry());
set_current_block(next_block);
HConstant* undefined_constant = new(zone) HConstant(
isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
graph()->set_undefined_constant(undefined_constant);
for (int i = 0; i < param_count; ++i) {
HParameter* param =
new(zone) HParameter(i, HParameter::REGISTER_PARAMETER);
AddInstruction(param);
start_environment->Bind(i, param);
parameters_[i] = param;
}
HInstruction* stack_parameter_count;
if (descriptor_->stack_parameter_count_ != NULL) {
ASSERT(descriptor_->environment_length() == (param_count + 1));
stack_parameter_count = new(zone) HParameter(param_count,
HParameter::REGISTER_PARAMETER);
// It's essential to bind this value to the environment in case of deopt.
start_environment->Bind(param_count, stack_parameter_count);
AddInstruction(stack_parameter_count);
arguments_length_ = stack_parameter_count;
} else {
ASSERT(descriptor_->environment_length() == param_count);
stack_parameter_count = graph()->GetConstantMinus1();
arguments_length_ = graph()->GetConstant0();
}
context_ = new(zone) HContext();
AddInstruction(context_);
start_environment->BindContext(context_);
AddSimulate(BailoutId::StubEntry());
HValue* return_value = BuildCodeStub();
HReturn* hreturn_instruction = new(zone) HReturn(return_value,
context_,
stack_parameter_count);
current_block()->Finish(hreturn_instruction);
return true;
}
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
explicit CodeStubGraphBuilder(Stub* stub)
: CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
protected:
virtual HValue* BuildCodeStub();
Stub* casted_stub() { return static_cast<Stub*>(stub()); }
};
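Each Hydrogen-compiled stub gets its own CodeStubGraphBuilder<Stub> specialization: the base class owns the shared graph pipeline, and the specialization supplies only BuildCodeStub(). The pattern in miniature, detached from Hydrogen, with illustrative types throughout.

#include <cstdio>

class BuilderBase {
 public:
  virtual ~BuilderBase() {}
  int Build() {  // the shared pipeline: prologue, body, epilogue
    std::puts("prologue");
    int result = BuildBody();
    std::puts("epilogue");
    return result;
  }
 protected:
  virtual int BuildBody() = 0;  // each stub supplies only this
};

template <class Stub>
class Builder : public BuilderBase {
 public:
  explicit Builder(Stub* stub) : stub_(stub) {}
 protected:
  virtual int BuildBody();  // specialized per stub type, below
  Stub* stub_;
};

struct FooStub { int length() const { return 3; } };

template <>
int Builder<FooStub>::BuildBody() { return stub_->length(); }

int main() {
  FooStub foo;
  Builder<FooStub> builder(&foo);
  std::printf("built: %d\n", builder.Build());
  return 0;
}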
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
HInstruction* boilerplate =
AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
GetParameter(1),
NULL,
FAST_ELEMENTS));
CheckBuilder builder(this, BailoutId::StubEntry());
builder.CheckNotUndefined(boilerplate);
int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
HValue* boilerplate_size =
AddInstruction(new(zone) HInstanceSize(boilerplate));
HValue* size_in_words =
AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2,
Representation::Integer32()));
builder.CheckIntegerEq(boilerplate_size, size_in_words);
HValue* size_in_bytes =
AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
if (FLAG_pretenure_literals) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
}
HInstruction* object =
AddInstruction(new(zone) HAllocate(context(),
size_in_bytes,
HType::JSObject(),
flags));
for (int i = 0; i < size; i += kPointerSize) {
HInstruction* value =
AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
value,
true, i));
AddSimulate(BailoutId::StubEntry());
}
builder.End();
return object;
}
Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
CodeStubGraphBuilder<FastCloneShallowObjectStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen(Code::COMPILED_STUB);
}
template <>
HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
false, Representation::Tagged());
AddInstruction(load);
return load;
}
Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen(Code::COMPILED_STUB);
}
template <>
HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
Zone* zone = this->zone();
HValue* js_array = GetParameter(0);
HValue* map = GetParameter(1);
info()->MarkAsSavesCallerDoubles();
AddInstruction(new(zone) HTrapAllocationMemento(js_array));
HInstruction* array_length =
AddInstruction(new(zone) HJSArrayLength(js_array,
js_array,
HType::Smi()));
Heap* heap = isolate()->heap();
const int kMinFreeNewSpaceAfterGC =
((heap->InitialSemiSpaceSize() - sizeof(FixedArrayBase)) / 2) /
kDoubleSize;
HConstant* max_alloc_size =
new(zone) HConstant(kMinFreeNewSpaceAfterGC, Representation::Integer32());
AddInstruction(max_alloc_size);
// Since we're forcing Integer32 representation for this HBoundsCheck,
// there's no need to Smi-check the index.
AddInstruction(
new(zone) HBoundsCheck(array_length, max_alloc_size,
DONT_ALLOW_SMI_KEY, Representation::Integer32()));
IfBuilder if_builder(this, BailoutId::StubEntry());
if_builder.BeginTrue(array_length, graph()->GetConstant0(), Token::EQ);
// Nothing to do, just change the map.
if_builder.BeginFalse();
HInstruction* elements =
AddInstruction(new(zone) HLoadElements(js_array, js_array));
HInstruction* elements_length =
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
ElementsKind to_kind = casted_stub()->to_kind();
HValue* new_elements =
BuildAllocateElements(context(), to_kind, elements_length);
// Fast elements kinds need to be initialized in case the statements below
// cause a garbage collection.
Factory* factory = isolate()->factory();
ASSERT(!IsFastSmiElementsKind(to_kind));
double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastObjectElementsKind(to_kind)
? AddInstruction(new(zone) HConstant(factory->the_hole_value(),
Representation::Tagged()))
: AddInstruction(new(zone) HConstant(nan_double,
Representation::Double()));
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
BailoutId::StubEntry());
HValue* zero = graph()->GetConstant0();
HValue* start = IsFastElementsKind(to_kind) ? zero : array_length;
HValue* key = builder.BeginBody(start, elements_length, Token::LT);
AddInstruction(new(zone) HStoreKeyed(new_elements, key, hole, to_kind));
AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
builder.EndBody();
BuildCopyElements(context(), elements,
casted_stub()->from_kind(), new_elements,
to_kind, array_length);
AddInstruction(new(zone) HStoreNamedField(js_array,
factory->elements_field_string(),
new_elements, true,
JSArray::kElementsOffset));
AddSimulate(BailoutId::StubEntry());
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
map, true, JSArray::kMapOffset));
AddSimulate(BailoutId::StubEntry());
return js_array;
}
template <>
HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
HInstruction* deopt = new(zone()) HSoftDeoptimize();
AddInstruction(deopt);
current_block()->MarkAsDeoptimizing();
return GetParameter(0);
}
Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
CodeStubGraphBuilder<ArrayNoArgumentConstructorStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen(Code::COMPILED_STUB);
}
template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
HInstruction* deopt = new(zone()) HSoftDeoptimize();
AddInstruction(deopt);
current_block()->MarkAsDeoptimizing();
return GetParameter(0);
}
Handle<Code> TransitionElementsKindStub::GenerateCode() {
CodeStubGraphBuilder<TransitionElementsKindStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen(Code::COMPILED_STUB);
}
Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
CodeStubGraphBuilder<ArraySingleArgumentConstructorStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen(Code::COMPILED_STUB);
}
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
HInstruction* deopt = new(zone()) HSoftDeoptimize();
AddInstruction(deopt);
current_block()->MarkAsDeoptimizing();
return GetParameter(0);
}
Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
CodeStubGraphBuilder<ArrayNArgumentsConstructorStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen(Code::COMPILED_STUB);
}
} } // namespace v8::internal

322
deps/v8/src/code-stubs.cc

@ -37,31 +37,17 @@
namespace v8 {
namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out) {
Heap* heap = Isolate::Current()->heap();
int index = heap->code_stubs()->FindEntry(GetKey());
bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
int index = stubs->FindEntry(GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
*code_out = Code::cast(heap->code_stubs()->ValueAt(index));
*code_out = Code::cast(stubs->ValueAt(index));
return true;
}
return false;
}
void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
masm->isolate()->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leaves.
AllowStubCallsScope allow_scope(masm, false);
// Generate the code for the stub.
masm->set_generating_stub(true);
NoCurrentFrameScope scope(masm);
Generate(masm);
}
SmartArrayPointer<const char> CodeStub::GetName() {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@ -72,8 +58,7 @@ SmartArrayPointer<const char> CodeStub::GetName() {
}
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
SmartArrayPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@ -87,24 +72,25 @@ int CodeStub::GetCodeKind() {
}
Handle<Code> CodeStub::GetCode() {
Handle<Code> PlatformCodeStub::GenerateCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
if (UseSpecialCache()
? FindCodeInSpecialCache(&code)
: FindCodeInCache(&code)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
return Handle<Code>(code);
}
{
HandleScope scope(isolate);
// Generate the new code.
MacroAssembler masm(isolate, NULL, 256);
GenerateCode(&masm);
{
// Update the static counter each time a new code stub is generated.
isolate->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leaves.
AllowStubCallsScope allow_scope(&masm, false);
// Generate the code for the stub.
masm.set_generating_stub(true);
NoCurrentFrameScope scope(&masm);
Generate(&masm);
}
// Create the code object.
CodeDesc desc;
@ -113,12 +99,34 @@ Handle<Code> CodeStub::GetCode() {
// Copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
GetICState());
GetICState(),
GetExtraICState(),
GetStubType(),
GetStubFlags());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
}
Handle<Code> CodeStub::GetCode(Isolate* isolate) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
if (UseSpecialCache()
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
return Handle<Code>(code);
}
{
HandleScope scope(isolate);
Handle<Code> new_object = GenerateCode();
new_object->set_major_key(MajorKey());
FinishCode(new_object);
RecordCodeGeneration(*new_object, &masm);
RecordCodeGeneration(*new_object, isolate);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
@ -169,20 +177,135 @@ void CodeStub::PrintName(StringStream* stream) {
}
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
AllowStubCallsScope allow_stub_calls(masm, true);
BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
// The OddballStub handles a number and an oddball, not two oddballs.
operands_type = BinaryOpIC::GENERIC;
}
switch (operands_type) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
case BinaryOpIC::INT32:
GenerateInt32Stub(masm);
break;
case BinaryOpIC::NUMBER:
GenerateNumberStub(masm);
break;
case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
UNREACHABLE();
}
}
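Max(left_type_, right_type_) works because the BinaryOpIC states form a totally ordered ladder from UNINITIALIZED up to GENERIC, so the state covering both operands is simply the larger one; the oddball/oddball corner is the lone exception, as the comment notes. A toy version of that combine step; the state names mirror the switch above, and the ordering is an assumption for illustration.

#include <algorithm>
#include <cassert>

enum TypeInfo { UNINITIALIZED, SMI, INT32, NUMBER, ODDBALL, STRING, GENERIC };

TypeInfo Combine(TypeInfo left, TypeInfo right) {
  if (left == ODDBALL && right == ODDBALL) return GENERIC;  // the corner case
  return std::max(left, right);
}

int main() {
  assert(Combine(SMI, NUMBER) == NUMBER);        // the wider operand wins
  assert(Combine(ODDBALL, ODDBALL) == GENERIC);  // two oddballs generalize
  return 0;
}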
#define __ ACCESS_MASM(masm)
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
#undef __
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
stream->Add("BinaryOpStub_%s_%s_%s+%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(left_type_),
BinaryOpIC::GetName(right_type_));
}
void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
GenerateBothStringStub(masm);
return;
}
// Try to add arguments as strings, otherwise, transition to the generic
// BinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
Factory* factory = isolate->factory();
return Map::UpdateCodeCache(known_map_,
strict() ?
-                                  factory->strict_compare_ic_symbol() :
-                                  factory->compare_ic_symbol(),
+                                  factory->strict_compare_ic_string() :
+                                  factory->compare_ic_string(),
new_object);
}
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
-  Isolate* isolate = known_map_->GetIsolate();
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
Factory* factory = isolate->factory();
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
@ -191,12 +314,18 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
Handle<Object> probe(
known_map_->FindInCodeCache(
strict() ?
-              *factory->strict_compare_ic_symbol() :
-              *factory->compare_ic_symbol(),
-          flags));
+              *factory->strict_compare_ic_string() :
+              *factory->compare_ic_string(),
+          flags),
+      isolate);
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
-    ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
+#ifdef DEBUG
+    Token::Value cached_op;
+    ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
+                                  &cached_op);
+    ASSERT(op_ == cached_op);
+#endif
return true;
}
return false;
@ -204,7 +333,33 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
int ICCompareStub::MinorKey() {
-  return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
+  return OpField::encode(op_ - Token::EQ) |
+         LeftStateField::encode(left_) |
+         RightStateField::encode(right_) |
+         HandlerStateField::encode(state_);
}
void ICCompareStub::DecodeMinorKey(int minor_key,
CompareIC::State* left_state,
CompareIC::State* right_state,
CompareIC::State* handler_state,
Token::Value* op) {
if (left_state) {
*left_state =
static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
}
if (right_state) {
*right_state =
static_cast<CompareIC::State>(RightStateField::decode(minor_key));
}
if (handler_state) {
*handler_state =
static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
}
if (op) {
*op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
}
}
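MinorKey() and DecodeMinorKey() round-trip four values through one integer via v8's BitField templates. A self-contained sketch of the same encode/decode pattern, with a simplified BitField and a made-up field layout (v8's real template differs in detail):

#include <cassert>

// Simplified stand-in for v8's BitField<T, shift, size> template.
template <typename T, int shift, int size>
struct BitField {
  static const int kMask = ((1 << size) - 1) << shift;
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int key) { return static_cast<T>((key & kMask) >> shift); }
};

enum CompareState { UNINIT, SMI, NUMBER, STRING };  // illustrative states

typedef BitField<int,          0, 3>  OpField;
typedef BitField<CompareState, 3, 4>  LeftStateField;
typedef BitField<CompareState, 7, 4>  RightStateField;
typedef BitField<CompareState, 11, 4> HandlerStateField;

int main() {
  int key = OpField::encode(2) |
            LeftStateField::encode(SMI) |
            RightStateField::encode(NUMBER) |
            HandlerStateField::encode(NUMBER);
  // Decoding recovers every field intact, as DecodeMinorKey does above.
  assert(OpField::decode(key) == 2);
  assert(LeftStateField::decode(key) == SMI);
  assert(RightStateField::decode(key) == NUMBER);
  assert(HandlerStateField::decode(key) == NUMBER);
  return 0;
}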
@ -213,27 +368,31 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
-    case CompareIC::SMIS:
+    case CompareIC::SMI:
       GenerateSmis(masm);
       break;
-    case CompareIC::HEAP_NUMBERS:
-      GenerateHeapNumbers(masm);
+    case CompareIC::NUMBER:
+      GenerateNumbers(masm);
       break;
-    case CompareIC::STRINGS:
+    case CompareIC::STRING:
       GenerateStrings(masm);
       break;
-    case CompareIC::SYMBOLS:
-      GenerateSymbols(masm);
+    case CompareIC::INTERNALIZED_STRING:
+      GenerateInternalizedStrings(masm);
       break;
+    case CompareIC::UNIQUE_NAME:
+      GenerateUniqueNames(masm);
+      break;
-    case CompareIC::OBJECTS:
+    case CompareIC::OBJECT:
       GenerateObjects(masm);
       break;
-    case CompareIC::KNOWN_OBJECTS:
+    case CompareIC::KNOWN_OBJECT:
       ASSERT(*known_map_ != NULL);
       GenerateKnownObjects(masm);
       break;
-    default:
-      UNREACHABLE();
+    case CompareIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
}
}
@ -269,36 +428,8 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
-  switch (elements_kind_) {
-    case FAST_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
-      break;
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
-      break;
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
-      break;
-    case DICTIONARY_ELEMENTS:
+void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
   KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
-      break;
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
}
@ -311,14 +442,14 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
-                                                       grow_mode_);
+                                                       store_mode_);
}
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
-                                                             grow_mode_);
+                                                             store_mode_);
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@ -446,32 +577,33 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
AllocationSiteMode mode = AllocationSiteInfo::GetMode(from_, to_);
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
if (IsFastSmiOrObjectElementsKind(to_)) {
if (IsFastSmiOrObjectElementsKind(from_)) {
ElementsTransitionGenerator::
-            GenerateMapChangeElementsTransition(masm);
+            GenerateMapChangeElementsTransition(masm, mode, &fail);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(!IsFastSmiElementsKind(to_));
-        ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+        ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
} else {
UNREACHABLE();
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
to_,
-                                                       grow_mode_);
+                                                       store_mode_);
} else if (IsFastSmiElementsKind(from_) &&
IsFastDoubleElementsKind(to_)) {
-      ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+      ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_,
-                                                             grow_mode_);
+                                                             store_mode_);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::
-          GenerateMapChangeElementsTransition(masm);
+          GenerateMapChangeElementsTransition(masm, mode, &fail);
} else {
UNREACHABLE();
}
@ -481,6 +613,14 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
}
void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
int i = 0;
for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
StubFailureTrampolineStub(i).GetCode(isolate);
}
}
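A recurring theme in this commit is threading an explicit Isolate* through APIs such as GetCode(isolate) above, instead of having callees reach for Isolate::Current(). A toy sketch of the difference, with invented types that only illustrate the pattern (they are not v8's classes):

#include <cstdio>

// Hypothetical per-"isolate" state.
struct Isolate {
  int stubs_generated;
};

// Before: the callee digs the context out of a global (or TLS) slot.
Isolate* g_current_isolate = nullptr;
void GenerateStubImplicit() { ++g_current_isolate->stubs_generated; }

// After: the caller passes the context explicitly, which avoids the
// global lookup and keeps multi-isolate code unambiguous.
void GenerateStubExplicit(Isolate* isolate) { ++isolate->stubs_generated; }

int main() {
  Isolate a = {0}, b = {0};
  GenerateStubExplicit(&a);  // always clear which isolate is meant
  GenerateStubExplicit(&b);
  g_current_isolate = &a;    // implicit style needs global state set up first
  GenerateStubImplicit();
  std::printf("a=%d b=%d\n", a.stubs_generated, b.stubs_generated);  // a=2 b=1
}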
FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;

807
deps/v8/src/code-stubs.h

File diff suppressed because it is too large

35
deps/v8/src/codegen.cc

@ -76,16 +76,22 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
if (FLAG_trace_codegen || print_source || print_ast) {
PrintF("*** Generate code for %s function: ", ftype);
if (info->IsStub()) {
const char* name =
CodeStub::MajorName(info->code_stub()->MajorKey(), true);
PrintF("%s", name == NULL ? "<unknown>" : name);
} else {
info->function()->name()->ShortPrint();
}
PrintF(" ***\n");
}
-  if (print_source) {
+  if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter().PrintProgram(info->function()));
}
-  if (print_ast) {
+  if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
AstPrinter().PrintProgram(info->function()));
}
@ -107,6 +113,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
if (!code.is_null()) {
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
code->set_prologue_offset(info->prologue_offset());
}
return code;
}
@ -116,24 +123,30 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
-      : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
+      : (FLAG_print_code ||
+         (info->IsStub() && FLAG_print_code_stubs) ||
+         (info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
if (code->kind() != Code::COMPILED_STUB) {
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
-      StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(function->start_position());
+      ConsStringIteratorOp op;
+      StringCharacterStream stream(String::cast(script->source()),
+                                   &op,
+                                   function->start_position());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
int source_len =
function->end_position() - function->start_position() + 1;
for (int i = 0; i < source_len; i++) {
-        if (stream.has_more()) PrintF("%c", stream.GetNext());
+        if (stream.HasMore()) PrintF("%c", stream.GetNext());
}
PrintF("\n\n");
}
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
PrintF("--- Unoptimized code ---\n");
@ -144,8 +157,13 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
} else {
PrintF("--- Code ---\n");
}
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
code->Disassemble(CodeStub::MajorName(major_key, false));
} else {
code->Disassemble(*function->debug_name()->ToCString());
}
}
#endif // ENABLE_DISASSEMBLER
}
@ -153,12 +171,13 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
Isolate* isolate = Isolate::Current();
-  if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
+  if (!isolate->logger()->is_logging() &&
+      !isolate->cpu_profiler()->is_profiling()) {
return false;
}
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
-    if (name->IsEqualTo(CStrVector("regexp")))
+    if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp")))
return true;
}
return false;

28
deps/v8/src/codegen.h

@ -90,19 +90,41 @@ namespace internal {
typedef double (*UnaryMathFunction)(double x);
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
-  static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
-  static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
-  static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
+  // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
+  // |allocation_site_info_found| may be NULL.
+  static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
+      AllocationSiteMode mode,
+      Label* allocation_site_info_found);
+  static void GenerateSmiToDouble(MacroAssembler* masm,
+                                  AllocationSiteMode mode,
+                                  Label* fail);
+  static void GenerateDoubleToObject(MacroAssembler* masm,
+                                     AllocationSiteMode mode,
+                                     Label* fail);
private:
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
class SeqStringSetCharGenerator : public AllStatic {
public:
static void Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value);
private:
DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
};
} } // namespace v8::internal
#endif // V8_CODEGEN_H_

54
deps/v8/src/collection.js

@ -88,6 +88,25 @@ function SetDelete(key) {
}
function SetGetSize() {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.size', this]);
}
return %SetGetSize(this);
}
function SetClear() {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.clear', this]);
}
// Replace the internal table with a new empty table.
%SetInitialize(this);
}
function MapConstructor() {
if (%_IsConstructCall()) {
%MapInitialize(this);
@ -145,6 +164,25 @@ function MapDelete(key) {
}
function MapGetSize() {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.size', this]);
}
return %MapGetSize(this);
}
function MapClear() {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.clear', this]);
}
// Replace the internal table with a new empty table.
%MapInitialize(this);
}
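SetClear and MapClear above empty a collection by re-running %SetInitialize/%MapInitialize, i.e. swapping in a fresh backing table instead of deleting entries one at a time. The same idiom in stand-alone C++ terms, purely as an illustration of the design choice:

#include <cstdio>
#include <string>
#include <unordered_map>

// Clear by replacing the backing table in one step -- the same idea as
// %MapInitialize(this) above -- rather than erasing entry by entry.
template <typename K, typename V>
void ClearByReplacing(std::unordered_map<K, V>* map) {
  std::unordered_map<K, V> empty;
  map->swap(empty);  // the old table (and its buckets) is dropped wholesale
}

int main() {
  std::unordered_map<std::string, int> m = {{"a", 1}, {"b", 2}};
  ClearByReplacing(&m);
  std::printf("size=%zu\n", m.size());  // size=0
}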
function WeakMapConstructor() {
if (%_IsConstructCall()) {
%WeakMapInitialize(this);
@ -159,7 +197,7 @@ function WeakMapGet(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.get', this]);
}
-  if (!IS_SPEC_OBJECT(key)) {
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapGet(this, key);
@ -171,7 +209,7 @@ function WeakMapSet(key, value) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.set', this]);
}
-  if (!IS_SPEC_OBJECT(key)) {
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapSet(this, key, value);
@ -183,7 +221,7 @@ function WeakMapHas(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.has', this]);
}
-  if (!IS_SPEC_OBJECT(key)) {
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapHas(this, key);
@ -195,7 +233,7 @@ function WeakMapDelete(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.delete', this]);
}
-  if (!IS_SPEC_OBJECT(key)) {
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapDelete(this, key);
@ -215,18 +253,22 @@ function WeakMapDelete(key) {
%SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
// Set up the non-enumerable functions on the Set prototype object.
InstallGetter($Set.prototype, "size", SetGetSize);
InstallFunctions($Set.prototype, DONT_ENUM, $Array(
"add", SetAdd,
"has", SetHas,
"delete", SetDelete
"delete", SetDelete,
"clear", SetClear
));
// Set up the non-enumerable functions on the Map prototype object.
InstallGetter($Map.prototype, "size", MapGetSize);
InstallFunctions($Map.prototype, DONT_ENUM, $Array(
"get", MapGet,
"set", MapSet,
"has", MapHas,
"delete", MapDelete
"delete", MapDelete,
"clear", MapClear
));
// Set up the WeakMap constructor function.

4
deps/v8/src/compilation-cache.cc

@ -67,7 +67,7 @@ CompilationCache::~CompilationCache() {}
static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
CALL_HEAP_FUNCTION(isolate,
-                     CompilationCacheTable::Allocate(size),
+                     CompilationCacheTable::Allocate(isolate->heap(), size),
CompilationCacheTable);
}
@ -98,7 +98,7 @@ void CompilationSubCache::Age() {
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
-  Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
+  Object* undefined = isolate()->heap()->undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);

248
deps/v8/src/compiler.cc

@ -33,6 +33,7 @@
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
@ -52,57 +53,69 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
-    : isolate_(script->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE)),
-      function_(NULL),
-      scope_(NULL),
-      global_scope_(NULL),
+    : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
-      extension_(NULL),
-      pre_parse_data_(NULL),
-      osr_ast_id_(BailoutId::None()),
-      zone_(zone),
-      deferred_handles_(NULL) {
-  Initialize(BASE);
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(script->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
-    : isolate_(shared_info->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE) |
-             IsLazy::encode(true)),
-      function_(NULL),
-      scope_(NULL),
-      global_scope_(NULL),
+    : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
-      extension_(NULL),
-      pre_parse_data_(NULL),
-      osr_ast_id_(BailoutId::None()),
-      zone_(zone),
-      deferred_handles_(NULL) {
-  Initialize(BASE);
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(script_->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
-    : isolate_(closure->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE) |
-             IsLazy::encode(true)),
-      function_(NULL),
-      scope_(NULL),
-      global_scope_(NULL),
+    : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
       closure_(closure),
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
-      extension_(NULL),
-      pre_parse_data_(NULL),
       context_(closure->context()),
-      osr_ast_id_(BailoutId::None()),
-      zone_(zone),
-      deferred_handles_(NULL) {
-  Initialize(BASE);
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(script_->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Isolate* isolate, Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
isolate_ = isolate;
function_ = NULL;
scope_ = NULL;
global_scope_ = NULL;
extension_ = NULL;
pre_parse_data_ = NULL;
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
if (mode == STUB) {
mode_ = STUB;
return;
}
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
if (!shared_info_.is_null()) {
ASSERT(language_mode() == CLASSIC_MODE);
SetLanguageMode(shared_info_->language_mode());
}
set_bailout_reason("unknown");
}
@ -111,6 +124,33 @@ CompilationInfo::~CompilationInfo() {
}
int CompilationInfo::num_parameters() const {
if (IsStub()) {
return 0;
} else {
return scope()->num_parameters();
}
}
int CompilationInfo::num_heap_slots() const {
if (IsStub()) {
return 0;
} else {
return scope()->num_heap_slots();
}
}
Code::Flags CompilationInfo::flags() const {
if (IsStub()) {
return Code::ComputeFlags(Code::COMPILED_STUB);
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
}
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
@ -194,6 +234,11 @@ void OptimizingCompiler::RecordOptimizationStats() {
code_size,
compilation_time);
}
if (FLAG_hydrogen_stats) {
isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
time_taken_to_optimize_,
time_taken_to_codegen_);
}
}
@ -233,7 +278,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
-  if (AlwaysFullCompiler(info()->isolate())) {
+  if (AlwaysFullCompiler(isolate())) {
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
}
@ -242,7 +287,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// the optimizing compiler.
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
-  if (info()->shared_info()->opt_count() > kMaxOptCount) {
+  if (info()->opt_count() > kMaxOptCount) {
info()->set_bailout_reason("optimized too many times");
return AbortOptimization();
}
@ -273,8 +318,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
if (*FLAG_hydrogen_filter != '\0') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if ((filter[0] == '-'
-         && name->IsEqualTo(filter.SubVector(1, filter.length())))
-        || (filter[0] != '-' && !name->IsEqualTo(filter))) {
+         && name->IsUtf8EqualTo(filter.SubVector(1, filter.length())))
+        || (filter[0] != '-' && !name->IsUtf8EqualTo(filter))) {
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
}
@ -284,10 +329,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
Timer t(this, &time_taken_to_create_graph_);
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
-    HPhase phase(HPhase::kFullCodeGen);
+    HPhase phase(HPhase::kFullCodeGen, isolate());
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
// optimized code.
@ -317,17 +361,18 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
-    HTracer::Instance()->TraceCompilation(info()->function());
+    isolate()->GetHTracer()->TraceCompilation(info());
}
Handle<Context> native_context(
info()->closure()->context()->native_context());
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
HPhase phase(HPhase::kTotal);
code, native_context, isolate(), info()->zone());
graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
-  if (info()->isolate()->has_pending_exception()) {
+  if (isolate()->has_pending_exception()) {
info()->SetCode(Handle<Code>::null());
return SetLastStatus(FAILED);
}
@ -350,7 +395,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
AssertNoAllocation no_gc;
-  NoHandleAllocation no_handles;
+  NoHandleAllocation no_handles(isolate());
+  HandleDereferenceGuard no_deref(isolate(), HandleDereferenceGuard::DISALLOW);
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
@ -371,15 +417,17 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
{ // Scope for timer.
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
ASSERT(graph_ != NULL);
-    Handle<Code> optimized_code = chunk_->Codegen();
+    Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
return AbortOptimization();
}
info()->SetCode(optimized_code);
}
RecordOptimizationStats();
return SetLastStatus(SUCCEEDED);
}
@ -390,6 +438,8 @@ static bool GenerateCode(CompilationInfo* info) {
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
Logger::TimerEventScope timer(
info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
return MakeCrankshaftCode(info);
} else {
if (info->IsOptimizing()) {
@ -397,6 +447,8 @@ static bool GenerateCode(CompilationInfo* info) {
// BASE or NONOPT.
info->DisableOptimization();
}
Logger::TimerEventScope timer(
info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
return FullCodeGenerator::MakeCode(info);
}
}
@ -425,6 +477,13 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
#endif
static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
bool allow_lazy_without_ctx = false) {
return LiveEditFunctionTracker::IsActive(info->isolate()) ||
(info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
}
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
@ -432,7 +491,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
script->set_context_data((*isolate->native_context())->data());
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(0));
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@ -460,8 +521,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// Only allow non-global compiles for eval.
ASSERT(info->is_eval() || info->is_global());
ParsingFlags flags = kNoParsingFlags;
if (info->pre_parse_data() != NULL ||
String::cast(script->source())->length() > FLAG_min_preparse_length) {
if ((info->pre_parse_data() != NULL ||
String::cast(script->source())->length() > FLAG_min_preparse_length) &&
!DebuggerWantsEagerCompilation(info)) {
flags = kAllowLazy;
}
if (!ParserApi::Parse(info, flags)) {
@ -620,6 +682,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
LanguageMode language_mode,
ParseRestriction restriction,
int scope_position) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
@ -646,6 +709,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetLanguageMode(language_mode);
info.SetParseRestriction(restriction);
info.SetContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
@ -688,7 +752,7 @@ static bool InstallFullCode(CompilationInfo* info) {
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
shared->set_scope_info(*scope_info);
-    shared->set_code(*code);
+    shared->ReplaceCode(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
@ -821,7 +885,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
if (info->IsOptimizing()) {
Handle<Code> code = info->code();
-    ASSERT(shared->scope_info() != ScopeInfo::Empty());
+    ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
InsertCodeIntoOptimizedCodeMap(info);
return true;
@ -837,10 +901,14 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
void Compiler::RecompileParallel(Handle<JSFunction> closure) {
if (closure->IsInRecompileQueue()) return;
ASSERT(closure->IsMarkedForParallelRecompilation());
Isolate* isolate = closure->GetIsolate();
// Here we prepare compile data for the parallel recompilation thread, but
// this still happens synchronously and interrupts execution.
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Compilation queue, will retry opting on next run.\n");
@ -849,7 +917,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
-  VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
+  VMState state(isolate, PARALLEL_COMPILER);
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
@ -860,7 +928,9 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
{
CompilationHandleScope handle_scope(*info);
-    if (InstallCodeFromOptimizedCodeMap(*info)) return;
+    if (InstallCodeFromOptimizedCodeMap(*info)) {
+      return;
+    }
if (ParserApi::Parse(*info, kNoParsingFlags)) {
LanguageMode language_mode = info->function()->language_mode();
@ -873,11 +943,12 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
new(info->zone()) OptimizingCompiler(*info);
OptimizingCompiler::Status status = compiler->CreateGraph();
if (status == OptimizingCompiler::SUCCEEDED) {
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
shared->code()->set_profiler_ticks(0);
closure->ReplaceCode(isolate->builtins()->builtin(
Builtins::kInRecompileQueue));
info.Detach();
shared->code()->set_profiler_ticks(0);
// Do a scavenge to put off the next scavenge as far as possible.
// This may ease the issue that GVN blocks the next scavenge.
isolate->heap()->CollectGarbage(NEW_SPACE, "parallel recompile");
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
@ -886,14 +957,44 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
}
if (isolate->has_pending_exception()) {
isolate->clear_pending_exception();
if (shared->code()->stack_check_patched_for_osr()) {
// At this point we either put the function on recompilation queue or
// aborted optimization. In either case we want to continue executing
// the unoptimized code without running into OSR. If the unoptimized
// code has been patched for OSR, unpatch it.
InterruptStub interrupt_stub;
Handle<Code> check_code = interrupt_stub.GetCode(isolate);
Handle<Code> replacement_code =
isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertStackCheckCode(shared->code(),
*check_code,
*replacement_code);
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
}
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
info->SetCode(Handle<Code>(info->shared_info()->code()));
InstallFullCode(*info);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** aborting optimization for ");
info->closure()->PrintName();
PrintF(" as it has been disabled.\n");
}
ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
return;
}
Isolate* isolate = info->isolate();
VMState state(isolate, PARALLEL_COMPILER);
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
@ -910,16 +1011,25 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
InstallCodeCommon(*info);
if (status == OptimizingCompiler::SUCCEEDED) {
Handle<Code> code = info->code();
-    ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
+    ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
if (info->shared_info()->SearchOptimizedCodeMap(
info->closure()->context()->native_context()) == -1) {
InsertCodeIntoOptimizedCodeMap(*info);
}
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Optimized code for ");
info->closure()->PrintName();
PrintF(" installed.\n");
}
} else {
info->SetCode(Handle<Code>(info->shared_info()->code()));
InstallFullCode(*info);
}
// Optimized code is finally replacing unoptimized code. Reset the latter's
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
}
@ -931,7 +1041,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
Isolate* isolate = info.isolate();
LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
@ -943,14 +1054,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Debug::FindSharedFunctionInfoInScript.
bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
bool allow_lazy = literal->AllowsLazyCompilation() &&
-                    !LiveEditFunctionTracker::IsActive(info.isolate()) &&
-                    (!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx);
+                    !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
-  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate));
// Generate code
if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
-    Handle<Code> code = info.isolate()->builtins()->LazyCompile();
+    Handle<Code> code = isolate->builtins()->LazyCompile();
info.SetCode(code);
} else if (GenerateCode(&info)) {
ASSERT(!info.code().is_null());
@ -1024,7 +1134,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (info->isolate()->logger()->is_logging_code_events() ||
-      CpuProfiler::is_profiling(info->isolate())) {
+      info->isolate()->cpu_profiler()->is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))

116
deps/v8/src/compiler.h

@ -35,7 +35,17 @@
namespace v8 {
namespace internal {
static const int kPrologueOffsetNotSet = -1;
class ScriptDataImpl;
class HydrogenCodeStub;
// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation. Restriction violations cause a syntax error.
enum ParseRestriction {
NO_PARSE_RESTRICTION, // All expressions are allowed.
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
@ -44,16 +54,15 @@ class CompilationInfo {
CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
-  virtual ~CompilationInfo();
+  ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
return isolate_;
}
-  Zone* zone() {
-    return zone_;
-  }
+  Zone* zone() { return zone_; }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@ -70,10 +79,15 @@ class CompilationInfo {
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
HydrogenCodeStub* code_stub() { return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
Code::Flags flags() const;
void MarkAsEval() {
ASSERT(!is_lazy());
@ -96,9 +110,47 @@ class CompilationInfo {
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
bool is_native() const {
return IsNative::decode(flags_);
}
bool is_calling() const {
return is_deferred_calling() || is_non_deferred_calling();
}
void MarkAsDeferredCalling() {
flags_ |= IsDeferredCalling::encode(true);
}
bool is_deferred_calling() const {
return IsDeferredCalling::decode(flags_);
}
void MarkAsNonDeferredCalling() {
flags_ |= IsNonDeferredCalling::encode(true);
}
bool is_non_deferred_calling() const {
return IsNonDeferredCalling::decode(flags_);
}
void MarkAsSavesCallerDoubles() {
flags_ |= SavesCallerDoubles::encode(true);
}
bool saves_caller_doubles() const {
return SavesCallerDoubles::decode(flags_);
}
void SetParseRestriction(ParseRestriction restriction) {
flags_ = ParseRestricitonField::update(flags_, restriction);
}
ParseRestriction parse_restriction() const {
return ParseRestricitonField::decode(flags_);
}
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
@ -149,6 +201,7 @@ class CompilationInfo {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
@ -186,6 +239,16 @@ class CompilationInfo {
const char* bailout_reason() const { return bailout_reason_; }
void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
int prologue_offset() const {
ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
return prologue_offset_;
}
void set_prologue_offset(int prologue_offset) {
ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
prologue_offset_ = prologue_offset;
}
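prologue_offset_ is a set-once field guarded by a sentinel: the getter asserts the value has been set and the setter asserts it has not. A minimal sketch of that accessor pattern outside of v8, using an invented class name:

#include <cassert>

// Set-once accessor pattern: kNotSet marks "unassigned", the getter
// rejects reads before the write, the setter rejects a second write.
class OffsetHolder {
 public:
  static const int kNotSet = -1;
  OffsetHolder() : offset_(kNotSet) {}
  int offset() const {
    assert(offset_ != kNotSet);  // must be set before the first read
    return offset_;
  }
  void set_offset(int offset) {
    assert(offset_ == kNotSet);  // may only be set once
    offset_ = offset;
  }
 private:
  int offset_;
};

int main() {
  OffsetHolder h;
  h.set_offset(42);
  assert(h.offset() == 42);
  return 0;
}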
private:
Isolate* isolate_;
@ -197,21 +260,11 @@ class CompilationInfo {
enum Mode {
BASE,
OPTIMIZE,
-    NONOPT
+    NONOPT,
+    STUB
};
-  void Initialize(Mode mode) {
-    mode_ = V8::UseCrankshaft() ? mode : NONOPT;
-    ASSERT(!script_.is_null());
-    if (script_->type()->value() == Script::TYPE_NATIVE) {
-      MarkAsNative();
-    }
-    if (!shared_info_.is_null()) {
-      ASSERT(language_mode() == CLASSIC_MODE);
-      SetLanguageMode(shared_info_->language_mode());
-    }
-    set_bailout_reason("unknown");
-  }
+  void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
@ -237,7 +290,16 @@ class CompilationInfo {
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
// If the compiled code contains calls that require building a frame.
class IsCalling: public BitField<bool, 9, 1> {};
// If the compiled code contains deferred calls that require building a frame.
class IsDeferredCalling: public BitField<bool, 10, 1> {};
// If the compiled code contains non-deferred calls that require building a
// frame.
class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
// If the compiled code saves double caller registers that it clobbers.
class SavesCallerDoubles: public BitField<bool, 12, 1> {};
// If the set of valid statements is restricted.
class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {};
unsigned flags_;
@ -249,6 +311,8 @@ class CompilationInfo {
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
// For compiled stubs, the stub object
HydrogenCodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
@ -285,6 +349,12 @@ class CompilationInfo {
const char* bailout_reason_;
int prologue_offset_;
// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
int opt_count_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@ -305,6 +375,10 @@ class CompilationInfoWithZone: public CompilationInfo {
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
: CompilationInfo(stub, isolate, &zone_),
zone_(isolate),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
private:
Zone zone_;
@ -330,7 +404,7 @@ class CompilationHandleScope BASE_EMBEDDED {
class HGraph;
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
@ -362,6 +436,7 @@ class OptimizingCompiler: public ZoneObject {
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
MUST_USE_RESULT Status AbortOptimization() {
info_->AbortOptimization();
@ -372,7 +447,7 @@ class OptimizingCompiler: public ZoneObject {
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
-  HGraphBuilder* graph_builder_;
+  HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
@ -441,6 +516,7 @@ class Compiler : public AllStatic {
Handle<Context> context,
bool is_global,
LanguageMode language_mode,
ParseRestriction restriction,
int scope_position);
// Compile from function info (used for lazy compilation). Returns true on

40
deps/v8/src/contexts.cc

@ -55,6 +55,15 @@ JSBuiltinsObject* Context::builtins() {
}
Context* Context::global_context() {
Context* current = this;
while (!current->IsGlobalContext()) {
current = current->previous();
}
return current;
}
Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the global
@ -183,6 +192,10 @@ Handle<Object> Context::Lookup(Handle<String> name,
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
case MODULE:
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
@ -251,8 +264,6 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
}
CHECK(function->next_function_link()->IsUndefined());
// Check that the context belongs to the weak native contexts list.
bool found = false;
Object* context = GetHeap()->native_contexts_list();
@ -265,6 +276,16 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
CHECK(found);
#endif
// If the function link field is already used then the function was
// enqueued as a code flushing candidate and we remove it now.
if (!function->next_function_link()->IsUndefined()) {
CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
flusher->EvictCandidate(function);
}
ASSERT(function->next_function_link()->IsUndefined());
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
set(OPTIMIZED_FUNCTIONS_LIST, function);
}
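The eviction above exists because a JSFunction carries a single next_function_link field, so it can sit on at most one intrusive list at a time; it must leave the code flusher's candidate list before joining the optimized-functions list. A stand-alone sketch of that invariant, with illustrative types rather than v8's:

#include <cassert>

// Each node owns one intrusive "next" pointer, so it can be on at most
// one list. Remove it from the old list before pushing onto a new one.
struct Node {
  Node* next = nullptr;
  bool linked = false;
};

struct IntrusiveList {
  Node* head = nullptr;
  void Push(Node* n) {
    assert(!n->linked);  // enforce the one-list-at-a-time invariant
    n->next = head;
    n->linked = true;
    head = n;
  }
  void Remove(Node* n) {
    Node** p = &head;
    while (*p != n) p = &(*p)->next;
    *p = n->next;
    n->next = nullptr;
    n->linked = false;
  }
};

int main() {
  IntrusiveList flush_candidates, optimized;
  Node f;
  flush_candidates.Push(&f);
  flush_candidates.Remove(&f);  // "evict" first, as the code above does
  optimized.Push(&f);           // safe: the link field is free again
  return 0;
}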
@ -306,14 +327,11 @@ void Context::ClearOptimizedFunctions() {
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
-  Handle<Object> result(error_message_for_code_gen_from_strings());
-  if (result->IsUndefined()) {
-    const char* error =
-        "Code generation from strings disallowed for this context";
-    Isolate* isolate = Isolate::Current();
-    result = isolate->factory()->NewStringFromAscii(i::CStrVector(error));
-  }
-  return result;
+  Handle<Object> result(error_message_for_code_gen_from_strings(),
+                        GetIsolate());
+  if (!result->IsUndefined()) return result;
+  return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector(
+      "Code generation from strings disallowed for this context"));
}
@ -322,7 +340,7 @@ bool Context::IsBootstrappingOrValidParentContext(
Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
-  if (Isolate::Current()->bootstrapper()->IsActive()) return true;
+  if (child->GetIsolate()->bootstrapper()->IsActive()) return true;
if (!object->IsContext()) return false;
Context* context = Context::cast(object);
return context->IsNativeContext() || context->IsGlobalContext() ||

20
deps/v8/src/contexts.h

@ -152,16 +152,19 @@ enum BindingFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
-  V(CONTEXT_DATA_INDEX, Object, data) \
+  V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(SYMBOL_DELEGATE_INDEX, JSObject, symbol_delegate) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
-  V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
+  V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@ -281,14 +284,17 @@ class Context: public FixedArray {
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
-    CONTEXT_DATA_INDEX,
+    EMBEDDER_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
SYMBOL_DELEGATE_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
-    PROXY_ENUMERATE,
+    PROXY_ENUMERATE_INDEX,
OBSERVERS_NOTIFY_CHANGE_INDEX,
OBSERVERS_DELIVER_CHANGES_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
@ -341,6 +347,9 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
// Get the innermost global context by traversing the context chain.
Context* global_context();
// Compute the native context by traversing the context chain.
Context* native_context();
@ -450,6 +459,9 @@ class Context: public FixedArray {
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
#endif
STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
} } // namespace v8::internal

8
deps/v8/src/conversions-inl.h

@ -212,7 +212,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
}
// Rounding up may cause overflow.
-  if ((number & ((int64_t)1 << 53)) != 0) {
+  if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
exponent++;
number >>= 1;
}
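The branch above handles significand overflow: an IEEE-754 double carries 53 significand bits, so when rounding up carries into bit 53 the code halves the significand and bumps the exponent, which leaves the represented value unchanged. A small self-contained demonstration of that carry:

#include <cassert>
#include <cstdint>

int main() {
  int64_t number = (static_cast<int64_t>(1) << 53) - 1;  // 53 bits, all ones
  int exponent = 0;
  number++;  // rounding up carries out: now exactly 1 << 53
  if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
    exponent++;    // multiply by 2 via the exponent...
    number >>= 1;  // ...and divide by 2 via the significand: value unchanged
  }
  assert(number == (static_cast<int64_t>(1) << 52));
  assert(exponent == 1);
  return 0;
}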
@ -481,9 +481,9 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
sign = NEGATIVE;
}
-  static const char kInfinitySymbol[] = "Infinity";
-  if (*current == kInfinitySymbol[0]) {
-    if (!SubStringEquals(&current, end, kInfinitySymbol)) {
+  static const char kInfinityString[] = "Infinity";
+  if (*current == kInfinityString[0]) {
+    if (!SubStringEquals(&current, end, kInfinityString)) {
return JunkStringValue();
}

7
deps/v8/src/counters.cc

@ -81,17 +81,22 @@ void HistogramTimer::Start() {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
if (FLAG_log_internal_timer_events) {
LOG(Isolate::Current(), TimerEvent(Logger::START, histogram_.name_));
}
}
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (histogram_.Enabled()) {
stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
histogram_.AddSample(milliseconds);
}
if (FLAG_log_internal_timer_events) {
LOG(Isolate::Current(), TimerEvent(Logger::END, histogram_.name_));
}
}
} } // namespace v8::internal

166
deps/v8/src/cpu-profiler.cc

@ -39,7 +39,6 @@
namespace v8 {
namespace internal {
static const int kEventsBufferSize = 256 * KB;
static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
@ -58,7 +57,7 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix,
-                                                  String* name,
+                                                  Name* name,
Address start) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
@ -74,7 +73,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                              String* name,
+                                              Name* name,
String* resource_name,
int line_number,
Address start,
@ -259,109 +258,66 @@ void ProfilerEventsProcessor::Run() {
}
void CpuProfiler::StartProfiling(const char* title) {
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
void CpuProfiler::StartProfiling(String* title) {
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
Isolate* isolate = Isolate::Current();
return is_profiling(isolate) ?
isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
}
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
Isolate* isolate = Isolate::Current();
return is_profiling(isolate) ?
isolate->cpu_profiler()->StopCollectingProfile(
security_token, title) : NULL;
}
int CpuProfiler::GetProfilesCount() {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
   // The count of profiles doesn't depend on a security token.
-  return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
-      TokenEnumerator::kNoSecurityToken)->length();
+  return profiles_->Profiles(TokenEnumerator::kNoSecurityToken)->length();
}
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
const int token = profiler->token_enumerator_->GetTokenId(security_token);
return profiler->profiles_->Profiles(token)->at(index);
const int token = token_enumerator_->GetTokenId(security_token);
return profiles_->Profiles(token)->at(index);
}
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
-  const int token = profiler->token_enumerator_->GetTokenId(security_token);
-  return profiler->profiles_->GetProfile(token, uid);
+  const int token = token_enumerator_->GetTokenId(security_token);
+  return profiles_->GetProfile(token, uid);
}
-TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
-  if (CpuProfiler::is_profiling(isolate)) {
-    return isolate->cpu_profiler()->processor_->TickSampleEvent();
-  } else {
+TickSample* CpuProfiler::TickSampleEvent() {
+  if (is_profiling_) return processor_->TickSampleEvent();
   return NULL;
-  }
 }
void CpuProfiler::DeleteAllProfiles() {
-  Isolate* isolate = Isolate::Current();
-  ASSERT(isolate->cpu_profiler() != NULL);
-  if (is_profiling(isolate)) {
-    isolate->cpu_profiler()->StopProcessor();
-  }
-  isolate->cpu_profiler()->ResetProfiles();
+  if (is_profiling_) StopProcessor();
+  ResetProfiles();
}
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
+  profiles_->RemoveProfile(profile);
delete profile;
}
bool CpuProfiler::HasDetachedProfiles() {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
+  return profiles_->HasDetachedProfiles();
}
-void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
-  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
+void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
+  processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, const char* comment) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+  processor_->CodeCreateEvent(
tag, comment, code->address(), code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Code* code, String* name) {
-  Isolate* isolate = Isolate::Current();
-  isolate->cpu_profiler()->processor_->CodeCreateEvent(
+                                  Code* code, Name* name) {
+  processor_->CodeCreateEvent(
tag,
name,
-      isolate->heap()->empty_string(),
+      isolate_->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@ -372,12 +328,11 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
-                                  String* name) {
-  Isolate* isolate = Isolate::Current();
-  isolate->cpu_profiler()->processor_->CodeCreateEvent(
+                                  Name* name) {
+  processor_->CodeCreateEvent(
tag,
name,
-      isolate->heap()->empty_string(),
+      isolate_->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@ -389,7 +344,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+  processor_->CodeCreateEvent(
tag,
shared->DebugName(),
source,
@ -402,7 +357,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+  processor_->CodeCreateEvent(
tag,
args_count,
code->address(),
@ -411,7 +366,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
+  processor_->CodeMoveEvent(from, to);
}
@ -420,19 +375,18 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
-  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
-  profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
+  processor_->SharedFunctionInfoMoveEvent(from, to);
}
-void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
-  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
+void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
+  processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "get ", name, entry_point);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
-  Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
+  processor_->RegExpCodeCreateEvent(
Logger::REG_EXP_TAG,
"RegExp: ",
source,
@ -441,14 +395,15 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
}
-void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
-  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
+void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
+  processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "set ", name, entry_point);
}
-CpuProfiler::CpuProfiler()
-    : profiles_(new CpuProfilesCollection()),
+CpuProfiler::CpuProfiler(Isolate* isolate)
+    : isolate_(isolate),
+      profiles_(new CpuProfilesCollection()),
next_profile_uid_(1),
token_enumerator_(new TokenEnumerator()),
generator_(NULL),
@ -469,43 +424,41 @@ void CpuProfiler::ResetProfiles() {
profiles_ = new CpuProfilesCollection();
}
-void CpuProfiler::StartCollectingProfile(const char* title) {
-  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
+void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
+  if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) {
StartProcessorIfNotStarted();
}
processor_->AddCurrentStack();
}
-void CpuProfiler::StartCollectingProfile(String* title) {
-  StartCollectingProfile(profiles_->GetName(title));
+void CpuProfiler::StartProfiling(String* title, bool record_samples) {
+  StartProfiling(profiles_->GetName(title), record_samples);
}
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
-    Isolate* isolate = Isolate::Current();
     // Disable logging when using the new implementation.
-    saved_logging_nesting_ = isolate->logger()->logging_nesting_;
-    isolate->logger()->logging_nesting_ = 0;
+    saved_logging_nesting_ = isolate_->logger()->logging_nesting_;
+    isolate_->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
NoBarrier_Store(&is_profiling_, true);
is_profiling_ = true;
processor_->Start();
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (isolate_->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
isolate->logger()->LogCodeObjects();
isolate_->logger()->LogCodeObjects();
FLAG_log_code = saved_log_code_flag;
}
isolate->logger()->LogCompiledFunctions();
isolate->logger()->LogAccessorCallbacks();
isolate_->logger()->LogCompiledFunctions();
isolate_->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
Sampler* sampler = reinterpret_cast<Sampler*>(isolate_->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
@ -515,7 +468,8 @@ void CpuProfiler::StartProcessorIfNotStarted() {
}
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile(title);
CpuProfile* result =
@ -529,8 +483,8 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
}
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
String* title) {
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
@ -545,14 +499,14 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
Logger* logger = Isolate::Current()->logger();
Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
NoBarrier_Store(&is_profiling_, false);
is_profiling_ = false;
processor_->Stop();
processor_->Join();
delete processor_;
@ -563,20 +517,4 @@ void CpuProfiler::StopProcessor() {
}
void CpuProfiler::SetUp() {
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() == NULL) {
isolate->set_cpu_profiler(new CpuProfiler());
}
}
void CpuProfiler::TearDown() {
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() != NULL) {
delete isolate->cpu_profiler();
}
isolate->set_cpu_profiler(NULL);
}
} } // namespace v8::internal
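The net effect of the cpu-profiler.cc hunks above: the profiler stops being process-global state behind static entry points and becomes an object owned by its Isolate, so every Isolate::Current() lookup collapses into the stored isolate_ member. A minimal self-contained sketch of that migration pattern, with toy stand-ins rather than the real v8 classes:

#include <cstdio>

struct Isolate;  // forward declaration; the profiler only needs a pointer

class Profiler {
 public:
  explicit Profiler(Isolate* isolate)
      : isolate_(isolate), is_profiling_(false) {}

  // Formerly static entry points, now instance methods.
  void StartProfiling(const char* title, bool record_samples = false) {
    is_profiling_ = true;
    std::printf("start '%s' on isolate %p (samples=%d)\n",
                title, static_cast<void*>(isolate_), record_samples);
  }
  void StopProfiling(const char* title) {
    is_profiling_ = false;
    std::printf("stop '%s'\n", title);
  }
  bool is_profiling() const { return is_profiling_; }

 private:
  Isolate* isolate_;   // replaces Isolate::Current() inside every method
  bool is_profiling_;
};

struct Isolate {
  Isolate() : profiler_(this) {}  // profiler lives and dies with its isolate,
                                  // which is what lets SetUp/TearDown go away
  Profiler* cpu_profiler() { return &profiler_; }
 private:
  Profiler profiler_;
};

int main() {
  Isolate a, b;  // two isolates now mean two independent profilers
  a.cpu_profiler()->StartProfiling("x", true);
  b.cpu_profiler()->StartProfiling("y");
  a.cpu_profiler()->StopProfiling("x");
  return 0;
}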

97
deps/v8/src/cpu-profiler.h

@ -134,10 +134,10 @@ class ProfilerEventsProcessor : public Thread {
// Events adding methods. Called by VM threads.
void CallbackCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
const char* prefix, Name* name,
Address start);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
Name* name,
String* resource_name, int line_number,
Address start, unsigned size,
Address shared);
@ -184,86 +184,71 @@ class ProfilerEventsProcessor : public Thread {
unsigned enqueue_order_;
};
} } // namespace v8::internal
#define PROFILE(isolate, Call) \
LOG_CODE_EVENT(isolate, Call); \
#define PROFILE(IsolateGetter, Call) \
do { \
if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
v8::internal::CpuProfiler::Call; \
Isolate* cpu_profiler_isolate = (IsolateGetter); \
LOG_CODE_EVENT(cpu_profiler_isolate, Call); \
CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
if (cpu_profiler->is_profiling()) { \
cpu_profiler->Call; \
} \
} while (false)
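Two details of the rewritten PROFILE macro deserve a note: the do { ... } while (false) wrapper turns the expansion into a single statement, so it stays safe inside an unbraced if/else, and copying IsolateGetter into a local guarantees the getter expression is evaluated exactly once even though the body uses the isolate several times. The same hygiene pattern reduced to a runnable toy (TRACE and GetContext are invented names):

#include <cstdio>

static int calls = 0;
static int GetContext() { ++calls; return 42; }

// Single statement, single evaluation of Getter, like the patched PROFILE.
#define TRACE(Getter, msg)               \
  do {                                   \
    int ctx = (Getter);                  \
    std::printf("[%d] %s\n", ctx, msg);  \
    std::printf("[%d] done\n", ctx);     \
  } while (false)

int main() {
  if (calls == 0)
    TRACE(GetContext(), "hello");  // expands to one statement; no dangling else
  else
    std::printf("unreachable\n");
  std::printf("getter ran %d time(s)\n", calls);  // prints 1, not 2
  return 0;
}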
namespace v8 {
namespace internal {
// TODO(isolates): isolatify this class.
class CpuProfiler {
public:
static void SetUp();
static void TearDown();
static void StartProfiling(const char* title);
static void StartProfiling(String* title);
static CpuProfile* StopProfiling(const char* title);
static CpuProfile* StopProfiling(Object* security_token, String* title);
static int GetProfilesCount();
static CpuProfile* GetProfile(Object* security_token, int index);
static CpuProfile* FindProfile(Object* security_token, unsigned uid);
static void DeleteAllProfiles();
static void DeleteProfile(CpuProfile* profile);
static bool HasDetachedProfiles();
explicit CpuProfiler(Isolate* isolate);
~CpuProfiler();
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(Object* security_token, String* title);
int GetProfilesCount();
CpuProfile* GetProfile(Object* security_token, int index);
CpuProfile* FindProfile(Object* security_token, unsigned uid);
void DeleteAllProfiles();
void DeleteProfile(CpuProfile* profile);
bool HasDetachedProfiles();
// Invoked from the stack sampler (thread or signal handler).
static TickSample* TickSampleEvent(Isolate* isolate);
TickSample* TickSampleEvent();
// Must be called via the PROFILE macro; otherwise this will crash when
// profiling is not enabled.
static void CallbackEvent(String* name, Address entry_point);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
void CallbackEvent(Name* name, Address entry_point);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, const char* comment);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, Name* name);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Name* name);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
static void CodeMovingGCEvent() {}
static void CodeMoveEvent(Address from, Address to);
static void CodeDeleteEvent(Address from);
static void GetterCallbackEvent(String* name, Address entry_point);
static void RegExpCodeCreateEvent(Code* code, String* source);
static void SetterCallbackEvent(String* name, Address entry_point);
static void SharedFunctionInfoMoveEvent(Address from, Address to);
// TODO(isolates): this doesn't have to use atomics anymore.
static INLINE(bool is_profiling(Isolate* isolate)) {
CpuProfiler* profiler = isolate->cpu_profiler();
return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
}
void CodeMovingGCEvent() {}
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
void GetterCallbackEvent(Name* name, Address entry_point);
void RegExpCodeCreateEvent(Code* code, String* source);
void SetterCallbackEvent(Name* name, Address entry_point);
void SharedFunctionInfoMoveEvent(Address from, Address to);
INLINE(bool is_profiling() const) { return is_profiling_; }
private:
CpuProfiler();
~CpuProfiler();
void StartCollectingProfile(const char* title);
void StartCollectingProfile(String* title);
void StartProcessorIfNotStarted();
CpuProfile* StopCollectingProfile(const char* title);
CpuProfile* StopCollectingProfile(Object* security_token, String* title);
void StopProcessorIfLastProfile(const char* title);
void StopProcessor();
void ResetProfiles();
Isolate* isolate_;
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
TokenEnumerator* token_enumerator_;
@ -271,7 +256,7 @@ class CpuProfiler {
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
bool need_to_stop_sampler_;
Atomic32 is_profiling_;
bool is_profiling_;
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
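Note also that is_profiling_ changes from Atomic32 with NoBarrier_Load/Store to a plain bool, cashing in the TODO that the atomics were no longer needed: once each profiler belongs to a single isolate and the flag is only consulted through that isolate's PROFILE macro on its VM thread, there is no cross-thread polling left to synchronize. A compressed before/after of just the flag (invented names; std::atomic stands in for the old Atomic32):

#include <atomic>

// Before: one process-global flag that any thread could poll.
static std::atomic<bool> g_profiling(false);
inline bool AnyThreadIsProfiling() {
  return g_profiling.load(std::memory_order_relaxed);
}

// After: one flag per profiler, read and written only on the VM thread
// that owns the isolate, so an ordinary bool suffices.
class PerIsolateProfiler {
 public:
  PerIsolateProfiler() : is_profiling_(false) {}
  void Start() { is_profiling_ = true; }
  void Stop() { is_profiling_ = false; }
  bool is_profiling() const { return is_profiling_; }
 private:
  bool is_profiling_;
};

int main() {
  PerIsolateProfiler p;
  p.Start();
  return p.is_profiling() && !AnyThreadIsProfiling() ? 0 : 1;
}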

32
deps/v8/src/d8-debug.cc

@ -54,7 +54,9 @@ void HandleDebugEvent(DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<Value> data) {
HandleScope scope;
// TODO(svenpanne) There should be a way to retrieve this in the callback.
Isolate* isolate = Isolate::GetCurrent();
HandleScope scope(isolate);
// Check for handled event.
if (event != Break && event != Exception && event != AfterCompile) {
@ -69,7 +71,7 @@ void HandleDebugEvent(DebugEvent event,
Function::Cast(*event_data->Get(to_json_fun_name));
Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate, &try_catch);
return;
}
@ -77,7 +79,7 @@ void HandleDebugEvent(DebugEvent event,
Handle<Object> details =
Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate, &try_catch);
return;
}
String::Utf8Value str(details->Get(String::New("text")));
@ -93,7 +95,7 @@ void HandleDebugEvent(DebugEvent event,
Local<Object> cmd_processor =
Object::Cast(*fun->Call(exec_state, 0, NULL));
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate, &try_catch);
return;
}
@ -114,7 +116,7 @@ void HandleDebugEvent(DebugEvent event,
Handle<Value> request =
Shell::DebugCommandToJSONRequest(String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate, &try_catch);
continue;
}
@ -138,7 +140,7 @@ void HandleDebugEvent(DebugEvent event,
args[0] = request;
Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate, &try_catch);
continue;
}
Handle<String> response = Handle<String>::Cast(response_val);
@ -146,7 +148,7 @@ void HandleDebugEvent(DebugEvent event,
// Convert the debugger response into text details and the running state.
Handle<Object> response_details = Shell::DebugMessageDetails(response);
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate, &try_catch);
continue;
}
String::Utf8Value text_str(response_details->Get(String::New("text")));
@ -159,8 +161,8 @@ void HandleDebugEvent(DebugEvent event,
}
void RunRemoteDebugger(int port) {
RemoteDebugger debugger(port);
void RunRemoteDebugger(Isolate* isolate, int port) {
RemoteDebugger debugger(isolate, port);
debugger.Run();
}
@ -273,15 +275,15 @@ RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
void RemoteDebugger::HandleMessageReceived(char* message) {
Locker lock;
HandleScope scope;
Locker lock(isolate_);
HandleScope scope(isolate_);
// Print the event details.
TryCatch try_catch;
Handle<Object> details =
Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
return;
}
@ -302,15 +304,15 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
void RemoteDebugger::HandleKeyboardCommand(char* command) {
Locker lock;
HandleScope scope;
Locker lock(isolate_);
HandleScope scope(isolate_);
// Convert the debugger command to a JSON debugger request.
TryCatch try_catch;
Handle<Value> request =
Shell::DebugCommandToJSONRequest(String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
return;
}
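Every d8-debug.cc hunk is an instance of one mechanical rule in this release: scopes, locks and helpers that used to pick up the current isolate implicitly now receive it as an argument. The resulting callback shape, sketched against the 3.17-era API used above (OnDebugEvent and ReportError are invented stand-ins for the real handlers):

#include <cstdio>
#include <v8.h>

using namespace v8;

// Invented helper in the role of Shell::ReportException above.
void ReportError(Isolate* isolate, TryCatch* try_catch) {
  HandleScope scope(isolate);
  String::Utf8Value text(try_catch->Exception());
  std::fprintf(stderr, "%s\n", *text);
}

void OnDebugEvent(Handle<Object> event_data) {
  (void)event_data;  // a real handler would call back into JS with this
  // Fetch the isolate once at the entry point...
  Isolate* isolate = Isolate::GetCurrent();
  // ...then thread it through everything that needs it.
  Locker lock(isolate);        // was: Locker lock;
  HandleScope scope(isolate);  // was: HandleScope scope;

  TryCatch try_catch;
  // ... call back into JavaScript here ...
  if (try_catch.HasCaught()) {
    ReportError(isolate, &try_catch);  // was: ReportError(&try_catch);
  }
}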

8
deps/v8/src/d8-debug.h

@ -43,7 +43,7 @@ void HandleDebugEvent(DebugEvent event,
// Start the remote debugger connecting to a V8 debugger agent on the specified
// port.
void RunRemoteDebugger(int port);
void RunRemoteDebugger(Isolate* isolate, int port);
// Forward declarations.
class RemoteDebuggerEvent;
@ -53,8 +53,9 @@ class ReceiverThread;
// Remote debugging class.
class RemoteDebugger {
public:
explicit RemoteDebugger(int port)
: port_(port),
explicit RemoteDebugger(Isolate* isolate, int port)
: isolate_(isolate),
port_(port),
event_access_(i::OS::CreateMutex()),
event_available_(i::OS::CreateSemaphore(0)),
head_(NULL), tail_(NULL) {}
@ -79,6 +80,7 @@ class RemoteDebugger {
// Get connection to agent in debugged V8.
i::Socket* conn() { return conn_; }
Isolate* isolate_;
int port_; // Port used to connect to debugger V8.
i::Socket* conn_; // Connection to debugger agent in debugged V8.

2
deps/v8/src/d8-posix.cc

@ -449,7 +449,7 @@ static bool WaitForChild(int pid,
// Implementation of the system() function (see d8.h for details).
Handle<Value> Shell::System(const Arguments& args) {
HandleScope scope;
HandleScope scope(args.GetIsolate());
int read_timeout = -1;
int total_timeout = -1;
if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined();

31
deps/v8/src/d8-readline.cc

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdio> // NOLINT
#include <string.h> // NOLINT
#include <readline/readline.h> // NOLINT
#include <readline/history.h> // NOLINT
@ -35,7 +35,6 @@
#include "d8.h"
// There are incompatibilities between different versions and different
// implementations of readline. This smooths out one known incompatibility.
#if RL_READLINE_VERSION >= 0x0500
@ -50,7 +49,7 @@ class ReadLineEditor: public LineEditor {
public:
ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
virtual Handle<String> Prompt(const char* prompt);
virtual bool Open();
virtual bool Open(Isolate* isolate);
virtual bool Close();
virtual void AddHistory(const char* str);
@ -58,9 +57,13 @@ class ReadLineEditor: public LineEditor {
static const int kMaxHistoryEntries;
private:
#ifndef V8_SHARED
static char** AttemptedCompletion(const char* text, int start, int end);
static char* CompletionGenerator(const char* text, int state);
#endif // V8_SHARED
static char kWordBreakCharacters[];
Isolate* isolate_;
};
@ -74,9 +77,19 @@ const char* ReadLineEditor::kHistoryFileName = ".d8_history";
const int ReadLineEditor::kMaxHistoryEntries = 1000;
bool ReadLineEditor::Open() {
bool ReadLineEditor::Open(Isolate* isolate) {
isolate_ = isolate;
rl_initialize();
#ifdef V8_SHARED
// Don't do completion in shared library mode
// http://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC24
rl_bind_key('\t', rl_insert);
#else
rl_attempted_completion_function = AttemptedCompletion;
#endif // V8_SHARED
rl_completer_word_break_characters = kWordBreakCharacters;
rl_bind_key('\t', rl_complete);
using_history();
@ -122,6 +135,7 @@ void ReadLineEditor::AddHistory(const char* str) {
}
#ifndef V8_SHARED
char** ReadLineEditor::AttemptedCompletion(const char* text,
int start,
int end) {
@ -134,12 +148,14 @@ char** ReadLineEditor::AttemptedCompletion(const char* text,
char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
static unsigned current_index;
static Persistent<Array> current_completions;
Isolate* isolate = read_line_editor.isolate_;
Locker lock(isolate);
if (state == 0) {
HandleScope scope;
Local<String> full_text = String::New(rl_line_buffer, rl_point);
Handle<Array> completions =
Shell::GetCompletions(String::New(text), full_text);
current_completions = Persistent<Array>::New(completions);
Shell::GetCompletions(isolate, String::New(text), full_text);
current_completions = Persistent<Array>::New(isolate, completions);
current_index = 0;
}
if (current_index < current_completions->Length()) {
@ -150,11 +166,12 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
String::Utf8Value str(str_obj);
return strdup(*str);
} else {
current_completions.Dispose();
current_completions.Dispose(isolate);
current_completions.Clear();
return NULL;
}
}
#endif // V8_SHARED
} // namespace v8
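The completion generator also shows the new persistent-handle discipline: Persistent<T>::New, Dispose and Clear all name the isolate explicitly. The bare pattern, using the same 3.17-era API (Remember and Forget are invented wrappers around what CompletionGenerator does above):

#include <v8.h>

using namespace v8;

static Persistent<Array> cache;  // outlives any HandleScope, like
                                 // current_completions above

void Remember(Isolate* isolate, Handle<Array> completions) {
  cache = Persistent<Array>::New(isolate, completions);  // was: New(completions)
}

void Forget(Isolate* isolate) {
  cache.Dispose(isolate);  // was: cache.Dispose()
  cache.Clear();           // leave the cell empty rather than dangling
}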

566
deps/v8/src/d8.cc

File diff suppressed because it is too large

8
deps/v8/src/d8.gyp

@ -45,6 +45,10 @@
'd8.cc',
],
'conditions': [
[ 'console=="readline"', {
'libraries': [ '-lreadline', ],
'sources': [ 'd8-readline.cc' ],
}],
[ 'component!="shared_library"', {
'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
'conditions': [
@ -57,10 +61,6 @@
'd8_js2c',
],
}],
[ 'console=="readline"', {
'libraries': [ '-lreadline', ],
'sources': [ 'd8-readline.cc' ],
}],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android")', {
'sources': [ 'd8-posix.cc', ]

51
deps/v8/src/d8.h

@ -123,17 +123,16 @@ class LineEditor {
virtual ~LineEditor() { }
virtual Handle<String> Prompt(const char* prompt) = 0;
virtual bool Open() { return true; }
virtual bool Open(Isolate* isolate) { return true; }
virtual bool Close() { return true; }
virtual void AddHistory(const char* str) { }
const char* name() { return name_; }
static LineEditor* Get();
static LineEditor* Get() { return current_; }
private:
Type type_;
const char* name_;
LineEditor* next_;
static LineEditor* first_;
static LineEditor* current_;
};
@ -158,7 +157,7 @@ class SourceGroup {
void End(int offset) { end_offset_ = offset; }
void Execute();
void Execute(Isolate* isolate);
#ifndef V8_SHARED
void StartExecuteInThread();
@ -187,7 +186,7 @@ class SourceGroup {
#endif // V8_SHARED
void ExitShell(int exit_code);
Handle<String> ReadFile(const char* name);
Handle<String> ReadFile(Isolate* isolate, const char* name);
const char** argv_;
int begin_offset_;
@ -266,22 +265,24 @@ class Shell : public i::AllStatic {
#endif // V8_SHARED
public:
static bool ExecuteString(Handle<String> source,
static bool ExecuteString(Isolate* isolate,
Handle<String> source,
Handle<Value> name,
bool print_result,
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
static Handle<String> ReadFile(const char* name);
static Persistent<Context> CreateEvaluationContext();
static int RunMain(int argc, char* argv[]);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Handle<String> ReadFile(Isolate* isolate, const char* name);
static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
static int RunMain(Isolate* isolate, int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
static void OnExit();
#ifndef V8_SHARED
static Handle<Array> GetCompletions(Handle<String> text,
static Handle<Array> GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full);
static void OnExit();
static int* LookupCounter(const char* name);
static void* CreateHistogram(const char* name,
int min,
@ -310,9 +311,9 @@ class Shell : public i::AllStatic {
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
static Handle<String> ReadFromStdin();
static Handle<String> ReadFromStdin(Isolate* isolate);
static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin();
return ReadFromStdin(args.GetIsolate());
}
static Handle<Value> Load(const Arguments& args);
static Handle<Value> ArrayBuffer(const Arguments& args);
@ -365,7 +366,6 @@ class Shell : public i::AllStatic {
static void AddOSMethods(Handle<ObjectTemplate> os_template);
static LineEditor* console;
static const char* kPrompt;
static ShellOptions options;
@ -382,17 +382,20 @@ class Shell : public i::AllStatic {
static i::Mutex* context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript();
static void InstallUtilityScript(Isolate* isolate);
#endif // V8_SHARED
static void Initialize();
static void RunShell();
static void Initialize(Isolate* isolate);
static void InitializeDebugger(Isolate* isolate);
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
Handle<Object> buffer,
int32_t size);
static Handle<Object> CreateExternalArray(Handle<Object> array,
static Handle<Object> CreateExternalArray(Isolate* isolate,
Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
@ -402,7 +405,9 @@ class Shell : public i::AllStatic {
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size);
static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
static void ExternalArrayWeakCallback(Isolate* isolate,
Persistent<Value> object,
void* data);
};

675
deps/v8/src/d8.js

@ -71,7 +71,7 @@ function GetCompletions(global, last, full) {
result.push(name);
}
}
current = ToInspectableObject(current.__proto__);
current = ToInspectableObject(Object.getPrototypeOf(current));
}
return result;
}
@ -123,10 +123,6 @@ Debug.State = {
var trace_compile = false; // Tracing all compile events?
var trace_debug_json = false; // Tracing all debug json packets?
var last_cmd = '';
//var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
var lol_next_dump_index = 0;
var kDefaultLolLinesToPrintAtATime = 10;
var kMaxLolLinesToPrintAtATime = 1000;
var repeat_cmd_line = '';
var is_running = true;
// Global variable used to store whether a handle was requested.
@ -507,13 +503,6 @@ function DebugRequest(cmd_line) {
this.request_ = void 0;
break;
case 'liveobjectlist':
case 'lol':
if (lol_is_enabled) {
this.request_ = this.lolToJSONRequest_(args, is_repeating);
break;
}
default:
throw new Error('Unknown command "' + cmd + '"');
}
@ -558,53 +547,10 @@ DebugRequest.prototype.createRequest = function(command) {
};
// Note: we use detected command repetition as a signal for continuation here.
DebugRequest.prototype.createLOLRequest = function(command,
start_index,
lines_to_dump,
is_continuation) {
if (is_continuation) {
start_index = lol_next_dump_index;
}
if (lines_to_dump) {
lines_to_dump = parseInt(lines_to_dump);
} else {
lines_to_dump = kDefaultLolLinesToPrintAtATime;
}
if (lines_to_dump > kMaxLolLinesToPrintAtATime) {
lines_to_dump = kMaxLolLinesToPrintAtATime;
}
// Save the next start_index to dump from:
lol_next_dump_index = start_index + lines_to_dump;
var request = this.createRequest(command);
request.arguments = {};
request.arguments.start = start_index;
request.arguments.count = lines_to_dump;
return request;
};
// Create a JSON request for the evaluation command.
DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
lookup_handle = null;
if (lol_is_enabled) {
// Check if the expression is a obj id in the form @<obj id>.
var obj_id_match = expression.match(/^@([0-9]+)$/);
if (obj_id_match) {
var obj_id = parseInt(obj_id_match[1]);
// Build a dump request.
var request = this.createRequest('getobj');
request.arguments = {};
request.arguments.obj_id = obj_id;
return request.toJSONProtocol();
}
}
// Check if the expression is a handle id in the form #<handle>#.
var handle_match = expression.match(/^#([0-9]*)#$/);
if (handle_match) {
@ -1170,10 +1116,6 @@ DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
// Build an evaluate request from the text command.
request = this.createRequest('frame');
last_cmd = 'info args';
} else if (lol_is_enabled &&
args && (args == 'liveobjectlist' || args == 'lol')) {
// Build an evaluate request from the text command.
return this.liveObjectListToJSONRequest_(null);
} else {
throw new Error('Invalid info arguments.');
}
@ -1224,262 +1166,6 @@ DebugRequest.prototype.gcToJSONRequest_ = function(args) {
};
// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>]
DebugRequest.prototype.lolMakeListRequest =
function(cmd, args, first_arg_index, is_repeating) {
var request;
var start_index = 0;
var dump_limit = void 0;
var type_filter = void 0;
var space_filter = void 0;
var prop_filter = void 0;
var is_verbose = false;
var i;
for (i = first_arg_index; i < args.length; i++) {
var arg = args[i];
// Check for [v[erbose]]:
if (arg === 'verbose' || arg === 'v') {
// Nothing to do. This is already implied by args.length > 3.
is_verbose = true;
// Check for [<N>]:
} else if (arg.match(/^[0-9]+$/)) {
dump_limit = arg;
is_verbose = true;
// Check for i[ndex] <i>:
} else if (arg === 'index' || arg === 'i') {
i++;
if (args.length < i) {
throw new Error('Missing index after ' + arg + '.');
}
start_index = parseInt(args[i]);
// The user input start index starts at 1:
if (start_index <= 0) {
throw new Error('Invalid index ' + args[i] + '.');
}
start_index -= 1;
is_verbose = true;
// Check for t[ype] <type>:
} else if (arg === 'type' || arg === 't') {
i++;
if (args.length < i) {
throw new Error('Missing type after ' + arg + '.');
}
type_filter = args[i];
// Check for space <heap space name>:
} else if (arg === 'space' || arg === 'sp') {
i++;
if (args.length < i) {
throw new Error('Missing space name after ' + arg + '.');
}
space_filter = args[i];
// Check for property <prop name>:
} else if (arg === 'property' || arg === 'prop') {
i++;
if (args.length < i) {
throw new Error('Missing property name after ' + arg + '.');
}
prop_filter = args[i];
} else {
throw new Error('Unknown args at ' + arg + '.');
}
}
// Build the verbose request:
if (is_verbose) {
request = this.createLOLRequest('lol-'+cmd,
start_index,
dump_limit,
is_repeating);
request.arguments.verbose = true;
} else {
request = this.createRequest('lol-'+cmd);
request.arguments = {};
}
request.arguments.filter = {};
if (type_filter) {
request.arguments.filter.type = type_filter;
}
if (space_filter) {
request.arguments.filter.space = space_filter;
}
if (prop_filter) {
request.arguments.filter.prop = prop_filter;
}
return request;
};
function extractObjId(args) {
var id = args;
id = id.match(/^@([0-9]+)$/);
if (id) {
id = id[1];
} else {
throw new Error('Invalid obj id ' + args + '.');
}
return parseInt(id);
}
DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) {
var request;
// Use default command if one is not specified:
if (!args) {
args = 'info';
}
var orig_args = args;
var first_arg_index;
var arg, i;
var args = args.split(/\s+/g);
var cmd = args[0];
var id;
// Command: <id> [v[erbose]] ...
if (cmd.match(/^[0-9]+$/)) {
// Convert to the padded list command:
// Command: l[ist] <dummy> <id> [v[erbose]] ...
// Insert the implicit 'list' in front and process as normal:
cmd = 'list';
args.unshift(cmd);
}
switch(cmd) {
// Command: c[apture]
case 'capture':
case 'c':
request = this.createRequest('lol-capture');
break;
// Command: clear|d[elete] <id>|all
case 'clear':
case 'delete':
case 'del': {
if (args.length < 2) {
throw new Error('Missing argument after ' + cmd + '.');
} else if (args.length > 2) {
throw new Error('Too many arguments after ' + cmd + '.');
}
id = args[1];
if (id.match(/^[0-9]+$/)) {
// Delete a specific lol record:
request = this.createRequest('lol-delete');
request.arguments = {};
request.arguments.id = parseInt(id);
} else if (id === 'all') {
// Delete all:
request = this.createRequest('lol-reset');
} else {
throw new Error('Invalid argument after ' + cmd + '.');
}
break;
}
// Command: diff <id1> <id2> [<dump options>]
case 'diff':
first_arg_index = 3;
// Command: list <dummy> <id> [<dump options>]
case 'list':
// Command: ret[ainers] <obj id> [<dump options>]
case 'retainers':
case 'ret':
case 'retaining-paths':
case 'rp': {
if (cmd === 'ret') cmd = 'retainers';
else if (cmd === 'rp') cmd = 'retaining-paths';
if (!first_arg_index) first_arg_index = 2;
if (args.length < first_arg_index) {
throw new Error('Too few arguments after ' + cmd + '.');
}
var request_cmd = (cmd === 'list') ? 'diff':cmd;
request = this.lolMakeListRequest(request_cmd,
args,
first_arg_index,
is_repeating);
if (cmd === 'diff') {
request.arguments.id1 = parseInt(args[1]);
request.arguments.id2 = parseInt(args[2]);
} else if (cmd == 'list') {
request.arguments.id1 = 0;
request.arguments.id2 = parseInt(args[1]);
} else {
request.arguments.id = extractObjId(args[1]);
}
break;
}
// Command: getid
case 'getid': {
request = this.createRequest('lol-getid');
request.arguments = {};
request.arguments.address = args[1];
break;
}
// Command: inf[o] [<N>]
case 'info':
case 'inf': {
if (args.length > 2) {
throw new Error('Too many arguments after ' + cmd + '.');
}
// Build the info request:
request = this.createLOLRequest('lol-info', 0, args[1], is_repeating);
break;
}
// Command: path <obj id 1> <obj id 2>
case 'path': {
request = this.createRequest('lol-path');
request.arguments = {};
if (args.length > 2) {
request.arguments.id1 = extractObjId(args[1]);
request.arguments.id2 = extractObjId(args[2]);
} else {
request.arguments.id1 = 0;
request.arguments.id2 = extractObjId(args[1]);
}
break;
}
// Command: print
case 'print': {
request = this.createRequest('lol-print');
request.arguments = {};
request.arguments.id = extractObjId(args[1]);
break;
}
// Command: reset
case 'reset': {
request = this.createRequest('lol-reset');
break;
}
default:
throw new Error('Invalid arguments.');
}
return request.toJSONProtocol();
};
// Create a JSON request for the threads command.
DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
// Build a threads request from the text command.
@ -1545,7 +1231,6 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('inf[o] br[eak] - prints info about breakpoints in use');
print('inf[o] ar[gs] - prints info about arguments of the current function');
print('inf[o] lo[cals] - prints info about locals in the current function');
print('inf[o] liveobjectlist|lol - same as \'lol info\'');
print('');
print('step [in | next | out| min [step count]]');
print('c[ontinue] - continue executing after a breakpoint');
@ -1566,49 +1251,6 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('');
print('gc - runs the garbage collector');
print('');
if (lol_is_enabled) {
print('liveobjectlist|lol <command> - live object list tracking.');
print(' where <command> can be:');
print(' c[apture] - captures a LOL list.');
print(' clear|del[ete] <id>|all - clears LOL of id <id>.');
print(' If \'all\' is unspecified instead, will clear all.');
print(' diff <id1> <id2> [<dump options>]');
print(' - prints the diff between LOLs id1 and id2.');
print(' - also see <dump options> below.');
print(' getid <address> - gets the obj id for the specified address if available.');
print(' The address must be in hex form prefixed with 0x.');
print(' inf[o] [<N>] - lists summary info of all LOL lists.');
print(' If N is specified, will print N items at a time.');
print(' [l[ist]] <id> [<dump options>]');
print(' - prints the listing of objects in LOL id.');
print(' - also see <dump options> below.');
print(' reset - clears all LOL lists.');
print(' ret[ainers] <id> [<dump options>]');
print(' - prints the list of retainers of obj id.');
print(' - also see <dump options> below.');
print(' path <id1> <id2> - prints the retaining path from obj id1 to id2.');
print(' If only one id is specified, will print the path from');
print(' roots to the specified object if available.');
print(' print <id> - prints the obj for the specified obj id if available.');
print('');
print(' <dump options> includes:');
print(' [v[erbose]] - do verbose dump.');
print(' [<N>] - dump N items at a time. Implies verbose dump.');
print(' If unspecified, N will default to '+
kDefaultLolLinesToPrintAtATime+'. Max N is '+
kMaxLolLinesToPrintAtATime+'.');
print(' [i[ndex] <i>] - start dump from index i. Implies verbose dump.');
print(' [t[ype] <type>] - filter by type.');
print(' [sp[ace] <space name>] - filter by heap space where <space name> is one of');
print(' { cell, code, lo, map, new, old-data, old-pointer }.');
print('');
print(' If the verbose option, or an option that implies a verbose dump');
print(' is specified, then a verbose dump will requested. Else, a summary dump');
print(' will be requested.');
print('');
}
print('trace compile');
// hidden command: trace debug json - toggles tracing of debug json packets
print('');
@ -1709,237 +1351,6 @@ function refObjectToString_(protocolPackage, handle) {
}
function decodeLolCaptureResponse(body) {
var result;
result = 'Captured live object list '+ body.id +
': count '+ body.count + ' size ' + body.size;
return result;
}
function decodeLolDeleteResponse(body) {
var result;
result = 'Deleted live object list '+ body.id;
return result;
}
function digitsIn(value) {
var digits = 0;
if (value === 0) value = 1;
while (value >= 1) {
digits++;
value /= 10;
}
return digits;
}
function padding(value, max_digits) {
var padding_digits = max_digits - digitsIn(value);
var padding = '';
while (padding_digits > 0) {
padding += ' ';
padding_digits--;
}
return padding;
}
function decodeLolInfoResponse(body) {
var result;
var lists = body.lists;
var length = lists.length;
var first_index = body.first_index + 1;
var has_more = ((first_index + length) <= body.count);
result = 'captured live object lists';
if (has_more || (first_index != 1)) {
result += ' ['+ length +' of '+ body.count +
': starting from '+ first_index +']';
}
result += ':\n';
var max_digits = digitsIn(body.count);
var last_count = 0;
var last_size = 0;
for (var i = 0; i < length; i++) {
var entry = lists[i];
var count = entry.count;
var size = entry.size;
var index = first_index + i;
result += ' [' + padding(index, max_digits) + index + '] id '+ entry.id +
': count '+ count;
if (last_count > 0) {
result += '(+' + (count - last_count) + ')';
}
result += ' size '+ size;
if (last_size > 0) {
result += '(+' + (size - last_size) + ')';
}
result += '\n';
last_count = count;
last_size = size;
}
result += ' total: '+length+' lists\n';
if (has_more) {
result += ' -- press <enter> for more --\n';
} else {
repeat_cmd_line = '';
}
if (length === 0) result += ' none\n';
return result;
}
function decodeLolListResponse(body, title) {
var result;
var total_count = body.count;
var total_size = body.size;
var length;
var max_digits;
var i;
var entry;
var index;
var max_count_digits = digitsIn(total_count);
var max_size_digits;
var summary = body.summary;
if (summary) {
var roots_count = 0;
var found_root = body.found_root || 0;
var found_weak_root = body.found_weak_root || 0;
// Print the summary result:
result = 'summary of objects:\n';
length = summary.length;
if (found_root !== 0) {
roots_count++;
}
if (found_weak_root !== 0) {
roots_count++;
}
max_digits = digitsIn(length + roots_count);
max_size_digits = digitsIn(total_size);
index = 1;
if (found_root !== 0) {
result += ' [' + padding(index, max_digits) + index + '] ' +
' count '+ 1 + padding(0, max_count_digits) +
' '+ padding(0, max_size_digits+1) +
' : <root>\n';
index++;
}
if (found_weak_root !== 0) {
result += ' [' + padding(index, max_digits) + index + '] ' +
' count '+ 1 + padding(0, max_count_digits) +
' '+ padding(0, max_size_digits+1) +
' : <weak root>\n';
index++;
}
for (i = 0; i < length; i++) {
entry = summary[i];
var count = entry.count;
var size = entry.size;
result += ' [' + padding(index, max_digits) + index + '] ' +
' count '+ count + padding(count, max_count_digits) +
' size '+ size + padding(size, max_size_digits) +
' : <' + entry.desc + '>\n';
index++;
}
result += '\n total count: '+(total_count+roots_count)+'\n';
if (body.size) {
result += ' total size: '+body.size+'\n';
}
} else {
// Print the full dump result:
var first_index = body.first_index + 1;
var elements = body.elements;
length = elements.length;
var has_more = ((first_index + length) <= total_count);
result = title;
if (has_more || (first_index != 1)) {
result += ' ['+ length +' of '+ total_count +
': starting from '+ first_index +']';
}
result += ':\n';
if (length === 0) result += ' none\n';
max_digits = digitsIn(length);
var max_id = 0;
var max_size = 0;
for (i = 0; i < length; i++) {
entry = elements[i];
if (entry.id > max_id) max_id = entry.id;
if (entry.size > max_size) max_size = entry.size;
}
var max_id_digits = digitsIn(max_id);
max_size_digits = digitsIn(max_size);
for (i = 0; i < length; i++) {
entry = elements[i];
index = first_index + i;
result += ' ['+ padding(index, max_digits) + index +']';
if (entry.id !== 0) {
result += ' @' + entry.id + padding(entry.id, max_id_digits) +
': size ' + entry.size + ', ' +
padding(entry.size, max_size_digits) + entry.desc + '\n';
} else {
// Must be a root or weak root:
result += ' ' + entry.desc + '\n';
}
}
if (has_more) {
result += ' -- press <enter> for more --\n';
} else {
repeat_cmd_line = '';
}
if (length === 0) result += ' none\n';
}
return result;
}
function decodeLolDiffResponse(body) {
var title = 'objects';
return decodeLolListResponse(body, title);
}
function decodeLolRetainersResponse(body) {
var title = 'retainers for @' + body.id;
return decodeLolListResponse(body, title);
}
function decodeLolPathResponse(body) {
return body.path;
}
function decodeLolResetResponse(body) {
return 'Reset all live object lists.';
}
function decodeLolGetIdResponse(body) {
if (body.id == 0) {
return 'Address is invalid, or object has been moved or collected';
}
return 'obj id is @' + body.id;
}
function decodeLolPrintResponse(body) {
return body.dump;
}
// Rounds number 'num' to 'length' decimal places.
function roundNumber(num, length) {
var factor = Math.pow(10, length);
@ -2276,34 +1687,6 @@ function DebugResponseDetails(response) {
}
break;
case 'lol-capture':
details.text = decodeLolCaptureResponse(body);
break;
case 'lol-delete':
details.text = decodeLolDeleteResponse(body);
break;
case 'lol-diff':
details.text = decodeLolDiffResponse(body);
break;
case 'lol-getid':
details.text = decodeLolGetIdResponse(body);
break;
case 'lol-info':
details.text = decodeLolInfoResponse(body);
break;
case 'lol-print':
details.text = decodeLolPrintResponse(body);
break;
case 'lol-reset':
details.text = decodeLolResetResponse(body);
break;
case 'lol-retainers':
details.text = decodeLolRetainersResponse(body);
break;
case 'lol-path':
details.text = decodeLolPathResponse(body);
break;
default:
details.text =
'Response for unknown command \'' + response.command() + '\'' +
@ -2811,3 +2194,59 @@ function SimpleArrayToJSON_(array) {
json += ']';
return json;
}
// A more universal stringify that supports more types than JSON.
// Used by the d8 shell to output results.
var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects
function Stringify(x, depth) {
if (depth === undefined)
depth = stringifyDepthLimit;
else if (depth === 0)
return "*";
switch (typeof x) {
case "undefined":
return "undefined";
case "boolean":
case "number":
case "function":
return x.toString();
case "string":
return "\"" + x.toString() + "\"";
// TODO(rossberg): add symbol case
case "object":
if (x === null) return "null";
if (x.constructor && x.constructor.name === "Array") {
var elems = [];
for (var i = 0; i < x.length; ++i) {
elems.push(
{}.hasOwnProperty.call(x, i) ? Stringify(x[i], depth - 1) : "");
}
return "[" + elems.join(", ") + "]";
}
try {
var string = String(x);
if (string && string !== "[object Object]") return string;
} catch(e) {}
var props = [];
for (var name in x) {
var desc = Object.getOwnPropertyDescriptor(x, name);
if (desc === void 0) continue;
if ("value" in desc) {
props.push(name + ": " + Stringify(desc.value, depth - 1));
}
if ("get" in desc) {
var getter = desc.get.toString();
props.push("get " + name + getter.slice(getter.indexOf('(')));
}
if ("set" in desc) {
var setter = desc.set.toString();
props.push("set " + name + setter.slice(setter.indexOf('(')));
}
}
return "{" + props.join(", ") + "}";
default:
return "[crazy non-standard shit]";
}
}

55
deps/v8/src/data-flow.h

@ -199,6 +199,61 @@ class BitVector: public ZoneObject {
uint32_t* data_;
};
class GrowableBitVector BASE_EMBEDDED {
public:
class Iterator BASE_EMBEDDED {
public:
Iterator(const GrowableBitVector* target, Zone* zone)
: it_(target->bits_ == NULL
? new(zone) BitVector(1, zone)
: target->bits_) { }
bool Done() const { return it_.Done(); }
void Advance() { it_.Advance(); }
int Current() const { return it_.Current(); }
private:
BitVector::Iterator it_;
};
GrowableBitVector() : bits_(NULL) { }
bool Contains(int value) const {
if (!InBitsRange(value)) return false;
return bits_->Contains(value);
}
void Add(int value, Zone* zone) {
EnsureCapacity(value, zone);
bits_->Add(value);
}
void Union(const GrowableBitVector& other, Zone* zone) {
for (Iterator it(&other, zone); !it.Done(); it.Advance()) {
Add(it.Current(), zone);
}
}
void Clear() { if (bits_ != NULL) bits_->Clear(); }
private:
static const int kInitialLength = 1024;
bool InBitsRange(int value) const {
return bits_ != NULL && bits_->length() > value;
}
void EnsureCapacity(int value, Zone* zone) {
if (InBitsRange(value)) return;
int new_length = bits_ == NULL ? kInitialLength : bits_->length();
while (new_length <= value) new_length *= 2;
BitVector* new_bits = new(zone) BitVector(new_length, zone);
if (bits_ != NULL) new_bits->CopyFrom(*bits_);
bits_ = new_bits;
}
BitVector* bits_;
};
} } // namespace v8::internal
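GrowableBitVector grows lazily: EnsureCapacity starts at kInitialLength and doubles until the requested bit fits, copying the old contents across. The same strategy in a self-contained form, with std::vector<bool> standing in for the zone-allocated BitVector:

#include <cassert>
#include <vector>

class GrowableBits {
 public:
  bool Contains(int value) const {
    return value < static_cast<int>(bits_.size()) && bits_[value];
  }
  void Add(int value) {
    EnsureCapacity(value);
    bits_[value] = true;
  }

 private:
  static const size_t kInitialLength = 1024;

  void EnsureCapacity(int value) {
    if (value < static_cast<int>(bits_.size())) return;
    size_t new_length = bits_.empty() ? kInitialLength : bits_.size();
    while (new_length <= static_cast<size_t>(value)) new_length *= 2;
    bits_.resize(new_length, false);  // old bits survive, as with CopyFrom
  }

  std::vector<bool> bits_;
};

int main() {
  GrowableBits v;
  v.Add(5000);  // grows 1024 -> 2048 -> 4096 -> 8192
  assert(v.Contains(5000));
  assert(!v.Contains(4999));
  return 0;
}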

2
deps/v8/src/date.js

@ -107,7 +107,7 @@ function MakeDay(year, month, date) {
}
// Now we rely on year and month being SMIs.
return %DateMakeDay(year, month) + date - 1;
return %DateMakeDay(year | 0, month | 0) + date - 1;
}

19
deps/v8/src/debug-agent.cc

@ -192,21 +192,14 @@ void DebuggerAgentSession::Run() {
}
// Convert UTF-8 to UTF-16.
unibrow::Utf8InputBuffer<> buf(msg, StrLength(msg));
int len = 0;
while (buf.has_more()) {
buf.GetNext();
len++;
}
ScopedVector<int16_t> temp(len + 1);
buf.Reset(msg, StrLength(msg));
for (int i = 0; i < len; i++) {
temp[i] = buf.GetNext();
}
unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg));
int utf16_length = decoder.Utf16Length();
ScopedVector<uint16_t> temp(utf16_length + 1);
decoder.WriteUtf16(temp.start(), utf16_length);
// Send the request received to the debugger.
v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
len,
v8::Debug::SendCommand(temp.start(),
utf16_length,
NULL,
reinterpret_cast<v8::Isolate*>(agent_->isolate()));
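The agent change replaces a hand-rolled conversion that assumed one 16-bit unit per code point with unibrow::Utf8Decoder, which measures the true UTF-16 length first (supplementary-plane characters need a surrogate pair, i.e. two units) and only then writes. A self-contained sketch of that two-pass shape; unlike a production decoder it assumes well-formed UTF-8:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Decode one UTF-8 code point starting at s[i], advancing i.
static uint32_t Next(const std::string& s, size_t& i) {
  unsigned char c = s[i++];
  if (c < 0x80) return c;
  int extra = (c >= 0xF0) ? 3 : (c >= 0xE0) ? 2 : 1;
  uint32_t cp = c & (0x3F >> extra);  // keep the lead byte's payload bits
  while (extra-- > 0) cp = (cp << 6) | (s[i++] & 0x3F);
  return cp;
}

// Two passes, as with Utf8Decoder: measure first, then write.
static std::vector<uint16_t> ToUtf16(const std::string& s) {
  size_t len = 0;
  for (size_t i = 0; i < s.size();) len += Next(s, i) > 0xFFFF ? 2 : 1;
  std::vector<uint16_t> out;
  out.reserve(len);
  for (size_t i = 0; i < s.size();) {
    uint32_t cp = Next(s, i);
    if (cp > 0xFFFF) {  // outside the BMP: encode as a surrogate pair
      cp -= 0x10000;
      out.push_back(static_cast<uint16_t>(0xD800 + (cp >> 10)));
      out.push_back(static_cast<uint16_t>(0xDC00 + (cp & 0x3FF)));
    } else {
      out.push_back(static_cast<uint16_t>(cp));
    }
  }
  return out;
}

int main() {
  // "A", U+20AC (3 bytes), U+1D11E (4 bytes): 3 code points, 4 UTF-16 units.
  std::vector<uint16_t> u16 = ToUtf16("A\xE2\x82\xAC\xF0\x9D\x84\x9E");
  std::printf("%zu UTF-16 units\n", u16.size());
  return 0;
}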

256
deps/v8/src/debug-debugger.js

@ -110,7 +110,6 @@ var debugger_flags = {
}
},
};
var lol_is_enabled = %HasLOLEnabled();
// Create a new break point object and add it to the list of break points.
@ -1306,9 +1305,12 @@ ProtocolMessage.prototype.setOption = function(name, value) {
};
ProtocolMessage.prototype.failed = function(message) {
ProtocolMessage.prototype.failed = function(message, opt_details) {
this.success = false;
this.message = message;
if (IS_OBJECT(opt_details)) {
this.error_details = opt_details;
}
};
@ -1355,6 +1357,9 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
if (this.message) {
json.message = this.message;
}
if (this.error_details) {
json.error_details = this.error_details;
}
json.running = this.running;
return JSON.stringify(json);
};
@ -1427,10 +1432,10 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.scopesRequest_(request, response);
} else if (request.command == 'scope') {
this.scopeRequest_(request, response);
} else if (request.command == 'setVariableValue') {
this.setVariableValueRequest_(request, response);
} else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response);
} else if (lol_is_enabled && request.command == 'getobj') {
this.getobjRequest_(request, response);
} else if (request.command == 'lookup') {
this.lookupRequest_(request, response);
} else if (request.command == 'references') {
@ -1460,28 +1465,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
} else if (request.command == 'gc') {
this.gcRequest_(request, response);
// LiveObjectList tools:
} else if (lol_is_enabled && request.command == 'lol-capture') {
this.lolCaptureRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-delete') {
this.lolDeleteRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-diff') {
this.lolDiffRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-getid') {
this.lolGetIdRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-info') {
this.lolInfoRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-reset') {
this.lolResetRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-retainers') {
this.lolRetainersRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-path') {
this.lolPathRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-print') {
this.lolPrintRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-stats') {
this.lolStatsRequest_(request, response);
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@ -1953,11 +1936,12 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
};
DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
function(scope_description) {
// Get the frame for which the scope or scopes are requested.
// With no frameNumber argument use the currently selected frame.
if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
frame_index = request.arguments.frameNumber;
if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
frame_index = scope_description.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
throw new Error('Invalid frame number');
}
@ -1971,13 +1955,13 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
// Gets scope host object from request. It is either a function
// ('functionHandle' argument must be specified) or a stack frame
// ('frameNumber' may be specified and the current frame is taken by default).
DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
function(request) {
if (request.arguments && "functionHandle" in request.arguments) {
if (!IS_NUMBER(request.arguments.functionHandle)) {
DebugCommandProcessor.prototype.resolveScopeHolder_ =
function(scope_description) {
if (scope_description && "functionHandle" in scope_description) {
if (!IS_NUMBER(scope_description.functionHandle)) {
throw new Error('Function handle must be a number');
}
var function_mirror = LookupMirror(request.arguments.functionHandle);
var function_mirror = LookupMirror(scope_description.functionHandle);
if (!function_mirror) {
throw new Error('Failed to find function object by handle');
}
@ -1992,14 +1976,14 @@ DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
}
// Get the frame for which the scopes are requested.
var frame = this.frameForScopeRequest_(request);
var frame = this.resolveFrameFromScopeDescription_(scope_description);
return frame;
}
}
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
var scope_holder = this.scopeHolderForScopeRequest_(request);
var scope_holder = this.resolveScopeHolder_(request.arguments);
// Fill all scopes for this frame or function.
var total_scopes = scope_holder.scopeCount();
@ -2018,7 +2002,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// Get the frame or function for which the scope is requested.
var scope_holder = this.scopeHolderForScopeRequest_(request);
var scope_holder = this.resolveScopeHolder_(request.arguments);
// With no scope argument just return top scope.
var scope_index = 0;
@ -2033,6 +2017,77 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
};
// Reads value from protocol description. Description may be in form of type
// (for singletons), raw value (primitive types supported in JSON),
// string value description plus type (for primitive values) or handle id.
// Returns raw value or throws exception.
DebugCommandProcessor.resolveValue_ = function(value_description) {
if ("handle" in value_description) {
var value_mirror = LookupMirror(value_description.handle);
if (!value_mirror) {
throw new Error("Failed to resolve value by handle, ' #" +
mapping.handle + "# not found");
}
return value_mirror.value();
} else if ("stringDescription" in value_description) {
if (value_description.type == BOOLEAN_TYPE) {
return Boolean(value_description.stringDescription);
} else if (value_description.type == NUMBER_TYPE) {
return Number(value_description.stringDescription);
} else if (value_description.type == STRING_TYPE) {
return String(value_description.stringDescription);
} else {
throw new Error("Unknown type");
}
} else if ("value" in value_description) {
return value_description.value;
} else if (value_description.type == UNDEFINED_TYPE) {
return void 0;
} else if (value_description.type == NULL_TYPE) {
return null;
} else {
throw new Error("Failed to parse value description");
}
};
DebugCommandProcessor.prototype.setVariableValueRequest_ =
function(request, response) {
if (!request.arguments) {
response.failed('Missing arguments');
return;
}
if (IS_UNDEFINED(request.arguments.name)) {
response.failed('Missing variable name');
}
var variable_name = request.arguments.name;
var scope_description = request.arguments.scope;
// Get the frame or function for which the scope is requested.
var scope_holder = this.resolveScopeHolder_(scope_description);
if (IS_UNDEFINED(scope_description.number)) {
response.failed('Missing scope number');
}
var scope_index = %ToNumber(scope_description.number);
var scope = scope_holder.scope(scope_index);
var new_value =
DebugCommandProcessor.resolveValue_(request.arguments.newValue);
scope.setVariableValue(variable_name, new_value);
var new_value_mirror = MakeMirror(new_value);
response.body = {
newValue: new_value_mirror
};
};
DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@ -2063,16 +2118,14 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
additional_context_object = {};
for (var i = 0; i < additional_context.length; i++) {
var mapping = additional_context[i];
if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
if (!IS_STRING(mapping.name)) {
return response.failed("Context element #" + i +
" must contain name:string and handle:number");
" doesn't contain name:string property");
}
var context_value_mirror = LookupMirror(mapping.handle);
if (!context_value_mirror) {
return response.failed("Context object '" + mapping.name +
"' #" + mapping.handle + "# not found");
}
additional_context_object[mapping.name] = context_value_mirror.value();
var raw_value = DebugCommandProcessor.resolveValue_(mapping);
additional_context_object[mapping.name] = raw_value;
}
}
@ -2113,24 +2166,6 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
};
DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
}
// Pull out arguments.
var obj_id = request.arguments.obj_id;
// Check for legal arguments.
if (IS_UNDEFINED(obj_id)) {
return response.failed('Argument "obj_id" missing');
}
// Dump the object.
response.body = MakeMirror(%GetLOLObj(obj_id));
};
DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@ -2387,8 +2422,17 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
var new_source = request.arguments.new_source;
var result_description = Debug.LiveEdit.SetScriptSource(the_script,
var result_description;
try {
result_description = Debug.LiveEdit.SetScriptSource(the_script,
new_source, preview_only, change_log);
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
response.failed(e.message, e.details);
return;
}
throw e;
}
response.body = {change_log: change_log, result: result_description};
if (!preview_only && !this.running_ && result_description.stack_modified) {
@ -2480,86 +2524,6 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
};
DebugCommandProcessor.prototype.lolCaptureRequest_ =
function(request, response) {
response.body = %CaptureLOL();
};
DebugCommandProcessor.prototype.lolDeleteRequest_ =
function(request, response) {
var id = request.arguments.id;
var result = %DeleteLOL(id);
if (result) {
response.body = { id: id };
} else {
response.failed('Failed to delete: live object list ' + id + ' not found.');
}
};
DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) {
var id1 = request.arguments.id1;
var id2 = request.arguments.id2;
var verbose = request.arguments.verbose;
var filter = request.arguments.filter;
if (verbose === true) {
var start = request.arguments.start;
var count = request.arguments.count;
response.body = %DumpLOL(id1, id2, start, count, filter);
} else {
response.body = %SummarizeLOL(id1, id2, filter);
}
};
DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) {
var address = request.arguments.address;
response.body = {};
response.body.id = %GetLOLObjId(address);
};
DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) {
var start = request.arguments.start;
var count = request.arguments.count;
response.body = %InfoLOL(start, count);
};
DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) {
%ResetLOL();
};
DebugCommandProcessor.prototype.lolRetainersRequest_ =
function(request, response) {
var id = request.arguments.id;
var verbose = request.arguments.verbose;
var start = request.arguments.start;
var count = request.arguments.count;
var filter = request.arguments.filter;
response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose,
start, count, filter);
};
DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) {
var id1 = request.arguments.id1;
var id2 = request.arguments.id2;
response.body = {};
response.body.path = %GetLOLPath(id1, id2, Mirror.prototype);
};
DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) {
var id = request.arguments.id;
response.body = {};
response.body.dump = %PrintLOLObj(id);
};
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {
@ -2663,3 +2627,7 @@ function ValueToProtocolValue_(value, mirror_serializer) {
}
return json;
}
Debug.TestApi = {
CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
};

148
deps/v8/src/debug.cc

@ -261,8 +261,12 @@ void BreakLocationIterator::Reset() {
// Create relocation iterators for the two code objects.
if (reloc_iterator_ != NULL) delete reloc_iterator_;
if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
reloc_iterator_ = new RelocIterator(debug_info_->code());
reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
reloc_iterator_ = new RelocIterator(
debug_info_->code(),
~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
reloc_iterator_original_ = new RelocIterator(
debug_info_->original_code(),
~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
// Position at the first break point.
break_point_ = -1;
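Both RelocIterators above now take a mode mask: RelocInfo::ModeMask(m) is the single-bit mask 1 << m, so passing its complement visits every relocation mode except CODE_AGE_SEQUENCE, presumably so the breakpoint machinery never mistakes a code-aging patch site for a break location. The arithmetic in miniature, with a toy enum:

#include <cassert>

// Toy relocation modes; the real list lives in src/assembler.h.
enum Mode { CODE_TARGET, EMBEDDED_OBJECT, CODE_AGE_SEQUENCE };

static int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  int mask = ~ModeMask(CODE_AGE_SEQUENCE);  // every mode but code aging
  assert((mask & ModeMask(CODE_TARGET)) != 0);        // still visited
  assert((mask & ModeMask(EMBEDDED_OBJECT)) != 0);    // still visited
  assert((mask & ModeMask(CODE_AGE_SEQUENCE)) == 0);  // filtered out
  return 0;
}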
@ -385,8 +389,8 @@ void BreakLocationIterator::ClearDebugBreak() {
}
void BreakLocationIterator::PrepareStepIn() {
HandleScope scope;
void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
HandleScope scope(isolate);
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
@ -613,9 +617,9 @@ void ScriptCache::Add(Handle<Script> script) {
Handle<Script> script_ =
Handle<Script>::cast(
(global_handles->Create(*script)));
global_handles->MakeWeak(
reinterpret_cast<Object**>(script_.location()),
global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
this,
NULL,
ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
@ -659,7 +663,9 @@ void ScriptCache::Clear() {
}
void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
void* data) {
ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
// Find the location of the global handle.
Script** location =
@ -672,7 +678,7 @@ void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
obj.Dispose();
obj.Dispose(isolate);
obj.Clear();
}
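HandleWeakScript above adjusts the weak-callback shape for the same isolate-threading reason: the callback now receives the v8::Isolate* as its first argument instead of reaching for Isolate::Current(), and the dying handle is disposed against that isolate. The bare pattern (ScriptEntry is an invented payload type):

#include <v8.h>

struct ScriptEntry { int id; };  // invented payload for illustration

// 3.17-era weak callback: the isolate is passed in, and the handle is
// disposed against it rather than against a global current isolate.
void HandleWeak(v8::Isolate* isolate,
                v8::Persistent<v8::Value> obj,
                void* data) {
  delete static_cast<ScriptEntry*>(data);  // free whatever the handle tracked
  obj.Dispose(isolate);                    // was: obj.Dispose()
  obj.Clear();
}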
@ -692,8 +698,10 @@ void Debug::SetUp(bool create_heap_objects) {
}
void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
Debug* debug = Isolate::Current()->debug();
void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
void* data) {
Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
@@ -717,9 +725,9 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
// Globalize the requested debug info object and make it weak.
debug_info_ = Handle<DebugInfo>::cast(
(global_handles->Create(debug_info)));
global_handles->MakeWeak(
reinterpret_cast<Object**>(debug_info_.location()),
global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
this,
NULL,
Debug::HandleWeakDebugInfo);
}
@@ -770,8 +778,11 @@ bool Debug::CompileDebuggerScript(int index) {
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> exception =
Execution::TryCall(function, Handle<Object>(context->global_object()),
0, NULL, &caught_exception);
Execution::TryCall(function,
Handle<Object>(context->global_object(), isolate),
0,
NULL,
&caught_exception);
// Check for caught exceptions.
if (caught_exception) {
@@ -782,9 +793,11 @@ bool Debug::CompileDebuggerScript(int index) {
"error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
isolate->clear_pending_exception();
}
return false;
}
@@ -817,7 +830,6 @@ bool Debug::Load() {
HandleScope scope(isolate_);
Handle<Context> context =
isolate_->bootstrapper()->CreateEnvironment(
isolate_,
Handle<Object>::null(),
v8::Handle<ObjectTemplate>(),
NULL);
@@ -830,12 +842,16 @@ bool Debug::Load() {
isolate_->set_context(*context);
// Expose the builtins object in the debugger context.
Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
Handle<String> key = isolate_->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("builtins"));
Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
NONE, kNonStrictMode),
JSReceiver::SetProperty(global,
key,
Handle<Object>(global->builtins(), isolate_),
NONE,
kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
@@ -940,10 +956,10 @@ Object* Debug::Break(Arguments args) {
// If there are one or more real break points, check whether any of these are
// triggered.
Handle<Object> break_points_hit(heap->undefined_value());
Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
if (break_location_iterator.HasBreakPoint()) {
Handle<Object> break_point_objects =
Handle<Object>(break_location_iterator.BreakPointObjects());
Handle<Object>(break_location_iterator.BreakPointObjects(), isolate_);
break_points_hit = CheckBreakPoints(break_point_objects);
}
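This hunk shows the pattern that recurs throughout the commit: the one-argument Handle<T>(T*) constructor, which had to look up the current isolate implicitly, is replaced by the explicit two-argument form. Before/after in one line each, assuming heap and isolate_ as in Debug::Break:

Handle<Object> before(heap->undefined_value());           // isolate inferred
Handle<Object> after(heap->undefined_value(), isolate_);  // isolate explicit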
@@ -1061,7 +1077,7 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
break_points_hit = factory->NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
Handle<Object> o(array->get(i));
Handle<Object> o(array->get(i), isolate_);
if (CheckBreakPoint(o)) {
break_points_hit->set(break_points_hit_count++, *o);
}
@@ -1093,12 +1109,13 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
if (!break_point_object->IsJSObject()) return true;
// Get the function IsBreakPointTriggered (defined in debug-debugger.js).
Handle<String> is_break_point_triggered_symbol =
factory->LookupAsciiSymbol("IsBreakPointTriggered");
Handle<String> is_break_point_triggered_string =
factory->InternalizeOneByteString(
STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
debug_context()->global_object()->GetPropertyNoExceptionThrown(
*is_break_point_triggered_symbol)));
*is_break_point_triggered_string)));
// Get the break id as an object.
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
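The other pervasive rename in this commit: the symbol table became the internalized-string table, so factory->LookupAsciiSymbol(...) gives way to factory->InternalizeOneByteString(STATIC_ASCII_VECTOR(...)). A one-line sketch of the new spelling:

// before: factory->LookupAsciiSymbol("IsBreakPointTriggered")
Handle<String> name = factory->InternalizeOneByteString(
    STATIC_ASCII_VECTOR("IsBreakPointTriggered"));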
@@ -1283,7 +1300,8 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function) {
void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
Handle<FixedArray> new_bindings(function->function_bindings());
Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex),
isolate_);
if (!bindee.is_null() && bindee->IsJSFunction() &&
!JSFunction::cast(*bindee)->IsBuiltin()) {
@@ -1481,7 +1499,8 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// from the code object.
Handle<Object> obj(
isolate_->heap()->code_stubs()->SlowReverseLookup(
*call_function_stub));
*call_function_stub),
isolate_);
ASSERT(!obj.is_null());
ASSERT(!(*obj)->IsUndefined());
ASSERT(obj->IsSmi());
@@ -1535,7 +1554,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
}
// Step in or Step in min
it.PrepareStepIn();
it.PrepareStepIn(isolate_);
ActivateStepIn(frame);
}
}
@@ -1579,7 +1598,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// object.
bool Debug::IsDebugBreak(Address addr) {
Code* code = Code::GetCodeFromTargetAddress(addr);
return code->ic_state() == DEBUG_BREAK;
return code->is_debug_break();
}
@@ -1654,10 +1673,12 @@ Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared) {
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
if (!HasDebugInfo(shared)) return Handle<Object>(heap->undefined_value());
if (!HasDebugInfo(shared)) {
return Handle<Object>(heap->undefined_value(), isolate);
}
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
if (debug_info->GetBreakPointCount() == 0) {
return Handle<Object>(heap->undefined_value());
return Handle<Object>(heap->undefined_value(), isolate);
}
Handle<FixedArray> locations =
isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
@@ -1692,9 +1713,10 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
Handle<Object> holder,
Address fp,
bool is_constructor) {
Isolate* isolate = function->GetIsolate();
// If the frame pointer is not supplied by the caller find it.
if (fp == 0) {
StackFrameIterator it;
StackFrameIterator it(isolate);
it.Advance();
// For constructor functions skip another frame.
if (is_constructor) {
@@ -1713,9 +1735,9 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
} else if (!function->IsBuiltin()) {
// Don't allow step into functions in the native context.
if (function->shared()->code() ==
Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
isolate->builtins()->builtin(Builtins::kFunctionApply) ||
function->shared()->code() ==
Isolate::Current()->builtins()->builtin(Builtins::kFunctionCall)) {
isolate->builtins()->builtin(Builtins::kFunctionCall)) {
// Handle function.apply and function.call separately to flood the
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
@@ -1978,7 +2000,7 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
Deoptimizer::DeoptimizeAll(isolate_);
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
@@ -1993,14 +2015,15 @@ void Debug::PrepareForBreakPoints() {
{
// We are going to iterate heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
Heap* heap = isolate_->heap();
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"preparing for breakpoints");
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
AssertNoAllocation no_allocation;
Object* active_code_marker = isolate_->heap()->the_hole_value();
Object* active_code_marker = heap->the_hole_value();
CollectActiveFunctionsFromThread(isolate_,
isolate_->thread_local_top(),
@@ -2014,7 +2037,7 @@ void Debug::PrepareForBreakPoints() {
// Scan the heap for all non-optimized functions which have no
// debug break slots and are not active or inlined into an active
// function and mark them for lazy compilation.
HeapIterator iterator;
HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
@@ -2109,11 +2132,12 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int target_start_position = RelocInfo::kNoPosition;
Handle<JSFunction> target_function;
Handle<SharedFunctionInfo> target;
Heap* heap = isolate_->heap();
while (!done) {
{ // Extra scope for iterator and no-allocation.
isolate_->heap()->EnsureHeapIsIterable();
heap->EnsureHeapIsIterable();
AssertNoAllocation no_alloc_during_heap_iteration;
HeapIterator iterator;
HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
bool found_next_candidate = false;
@@ -2173,9 +2197,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
} // End for loop.
} // End no-allocation scope.
if (target.is_null()) {
return isolate_->heap()->undefined_value();
}
if (target.is_null()) return heap->undefined_value();
// There will be at least one break point when we are done.
has_break_points_ = true;
@@ -2419,11 +2441,11 @@ void Debug::ClearMirrorCache() {
ASSERT(isolate_->context() == *Debug::debug_context());
// Clear the mirror cache.
Handle<String> function_name =
isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
Handle<String> function_name = isolate_->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("ClearMirrorCache"));
Handle<Object> fun(
Isolate::Current()->global_object()->GetPropertyNoExceptionThrown(
*function_name));
isolate_->global_object()->GetPropertyNoExceptionThrown(*function_name),
isolate_);
ASSERT(fun->IsJSFunction());
bool caught_exception;
Execution::TryCall(Handle<JSFunction>::cast(fun),
@@ -2449,7 +2471,7 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
HeapIterator iterator(heap);
AssertNoAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
@@ -2548,10 +2570,10 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
// Create the execution state object.
Handle<String> constructor_str =
isolate_->factory()->LookupSymbol(constructor_name);
isolate_->factory()->InternalizeUtf8String(constructor_name);
Handle<Object> constructor(
isolate_->global_object()->GetPropertyNoExceptionThrown(
*constructor_str));
isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str),
isolate_);
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
@@ -2639,7 +2661,7 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
bool* caught_exception) {
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_);
Handle<Object> argv[] = { exec_state, id_object };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
@@ -2778,11 +2800,14 @@ void Debugger::OnAfterCompile(Handle<Script> script,
// script. Make sure that these break points are set.
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
Handle<String> update_script_break_points_symbol =
isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<String> update_script_break_points_string =
isolate_->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
Handle<Object> update_script_break_points =
Handle<Object>(debug->debug_context()->global_object()->
GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
Handle<Object>(
debug->debug_context()->global_object()->GetPropertyNoExceptionThrown(
*update_script_break_points_string),
isolate_);
if (!update_script_break_points->IsJSFunction()) {
return;
}
@@ -2932,7 +2957,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
exec_state,
event_data,
event_listener_data_ };
@@ -3315,7 +3340,8 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
isolate_),
ARRAY_SIZE(argv),
argv,
pending_exception);
@@ -3541,7 +3567,8 @@ v8::Handle<v8::Object> MessageImpl::GetEventData() const {
v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::HandleScope scope;
v8::HandleScope scope(
reinterpret_cast<v8::Isolate*>(event_data_->GetIsolate()));
if (IsEvent()) {
// Call toJSONProtocol on the debug event object.
@@ -3758,6 +3785,7 @@ void MessageDispatchHelperThread::Schedule() {
void MessageDispatchHelperThread::Run() {
Isolate* isolate = Isolate::Current();
while (true) {
sem_->Wait();
{
@@ -3765,8 +3793,8 @@ void MessageDispatchHelperThread::Run() {
already_signalled_ = false;
}
{
Locker locker;
Isolate::Current()->debugger()->CallMessageDispatchHandler();
Locker locker(reinterpret_cast<v8::Isolate*>(isolate));
isolate->debugger()->CallMessageDispatchHandler();
}
}
}
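Run() now looks the isolate up once, outside the loop, and hands it to both the Locker and the debugger instead of calling Isolate::Current() repeatedly. The same migration in embedder terms (illustrative, not from the diff):

v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Locker locker(v8_isolate);  // was: v8::Locker locker;  (isolate-free ctor)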

14
deps/v8/src/debug.h

@@ -97,7 +97,7 @@ class BreakLocationIterator {
void ClearBreakPoint(Handle<Object> break_point_object);
void SetOneShot();
void ClearOneShot();
void PrepareStepIn();
void PrepareStepIn(Isolate* isolate);
bool IsExit() const;
bool HasBreakPoint();
bool IsDebugBreak();
@@ -189,7 +189,9 @@ class ScriptCache : private HashMap {
void Clear();
// Weak handle callback for scripts in the cache.
static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data);
static void HandleWeakScript(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
void* data);
// List used during GC to temporarily store id's of collected scripts.
List<int> collected_scripts_;
@@ -384,7 +386,9 @@ class Debug {
static const int kEstimatedNofBreakPointsInFunction = 16;
// Passed to MakeWeak.
static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data);
static void HandleWeakDebugInfo(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
void* data);
friend class Debugger;
friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
@@ -875,7 +879,9 @@ class Debugger {
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
bool live_edit_enabled() const { return live_edit_enabled_; }
bool live_edit_enabled() const {
return FLAG_enable_liveedit && live_edit_enabled_;
}
void set_force_debugger_active(bool force_debugger_active) {
force_debugger_active_ = force_debugger_active;
}

1263
deps/v8/src/deoptimizer.cc

File diff suppressed because it is too large

124
deps/v8/src/deoptimizer.h

@@ -87,19 +87,33 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
class OptimizedFunctionFilter BASE_EMBEDDED {
public:
virtual ~OptimizedFunctionFilter() {}
virtual bool TakeFunction(JSFunction* function) = 0;
};
class Deoptimizer;
class DeoptimizerData {
public:
DeoptimizerData();
explicit DeoptimizerData(MemoryAllocator* allocator);
~DeoptimizerData();
#ifdef ENABLE_DEBUGGER_SUPPORT
void Iterate(ObjectVisitor* v);
#endif
Code* FindDeoptimizingCode(Address addr);
void RemoveDeoptimizingCode(Code* code);
private:
MemoryAllocator* allocator_;
int eager_deoptimization_entry_code_entries_;
int lazy_deoptimization_entry_code_entries_;
MemoryChunk* eager_deoptimization_entry_code_;
MemoryChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
@@ -131,8 +145,14 @@ class Deoptimizer : public Malloced {
DEBUGGER
};
static bool TraceEnabledFor(BailoutType deopt_type,
StackFrame::Type frame_type);
static const char* MessageFor(BailoutType type);
int output_count() const { return output_count_; }
Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -171,17 +191,21 @@ class Deoptimizer : public Malloced {
static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
// Deoptimize all functions in the heap.
static void DeoptimizeAll();
static void DeoptimizeAll(Isolate* isolate);
static void DeoptimizeGlobalObject(JSObject* object);
static void DeoptimizeAllFunctionsWith(Isolate* isolate,
OptimizedFunctionFilter* filter);
static void DeoptimizeAllFunctionsForContext(
Context* context, OptimizedFunctionFilter* filter);
static void VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor);
static void VisitAllOptimizedFunctionsForGlobalObject(
JSObject* object, OptimizedFunctionVisitor* visitor);
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
static void VisitAllOptimizedFunctions(Isolate* isolate,
OptimizedFunctionVisitor* visitor);
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
@@ -226,8 +250,21 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
enum GetEntryMode {
CALCULATE_ENTRY_ADDRESS,
ENSURE_ENTRY_CODE
};
static Address GetDeoptimizationEntry(
Isolate* isolate,
int id,
BailoutType type,
GetEntryMode mode = ENSURE_ENTRY_CODE);
static int GetDeoptimizationId(Isolate* isolate,
Address addr,
BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
BailoutId node_id,
SharedFunctionInfo* shared);
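GetDeoptimizationEntry now takes the isolate plus a GetEntryMode that lets callers compute an entry address without forcing the entry code into existence. A hypothetical call site under the declaration above (bailout_id is assumed in scope):

// Hypothetical: resolve an eager-deopt entry address only; ENSURE_ENTRY_CODE
// (the default) would additionally materialize the entry code if missing.
Address entry = Deoptimizer::GetDeoptimizationEntry(
    isolate, bailout_id, Deoptimizer::EAGER,
    Deoptimizer::CALCULATE_ENTRY_ADDRESS);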
@@ -283,8 +320,17 @@ class Deoptimizer : public Malloced {
int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
static size_t GetMaxDeoptTableSize();
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id);
Isolate* isolate() const { return isolate_; }
private:
static const int kNumberOfEntries = 16384;
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
Deoptimizer(Isolate* isolate,
JSFunction* function,
@@ -293,6 +339,9 @@ class Deoptimizer : public Malloced {
Address from,
int fp_to_sp_delta,
Code* optimized_code);
Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
void Trace();
void PrintFunctionName();
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
@@ -305,6 +354,8 @@ class Deoptimizer : public Malloced {
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
void DoComputeCompiledStubFrame(TranslationIterator* iterator,
int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@@ -327,24 +378,35 @@ class Deoptimizer : public Malloced {
void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
static MemoryChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
// Weak handle callback for deoptimizing code objects.
static void HandleWeakDeoptimizedCode(
v8::Persistent<v8::Value> obj, void* data);
static Code* FindDeoptimizingCodeFromAddress(Address addr);
static void RemoveDeoptimizingCode(Code* code);
static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
void* data);
// Deoptimize function assuming that function->next_function_link() points
// to a list that contains all functions that share the same optimized code.
static void DeoptimizeFunctionWithPreparedFunctionList(JSFunction* function);
// Fill the input frame from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
// deoptimizations the input frame is filled in generated code.
void FillInputFrame(Address tos, JavaScriptFrame* frame);
// Fill the given output frame's registers to contain the failure handler
// address and the number of parameters for a stub failure trampoline.
void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
CodeStubInterfaceDescriptor* desc);
// Fill the given output frame's double registers with the original values
// from the input frame's double registers.
void CopyDoubleRegisters(FrameDescription* output_frame);
Isolate* isolate_;
JSFunction* function_;
Code* optimized_code_;
Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
Address from_;
@@ -364,6 +426,8 @@ class Deoptimizer : public Malloced {
List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
bool trace_;
static const int table_entry_size_;
friend class FrameDescription;
@@ -514,16 +578,13 @@ class FrameDescription {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
double double_registers_[DoubleRegister::kNumAllocatableRegisters];
double double_registers_[DoubleRegister::kMaxNumRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
intptr_t context_;
StackFrame::Type type_;
Smi* state_;
#ifdef DEBUG
Code::Kind kind_;
#endif
// Continuation is the PC where the execution continues after
// deoptimizing.
@@ -550,7 +611,7 @@ class TranslationBuffer BASE_EMBEDDED {
int CurrentIndex() const { return contents_.length(); }
void Add(int32_t value, Zone* zone);
Handle<ByteArray> CreateByteArray();
Handle<ByteArray> CreateByteArray(Factory* factory);
private:
ZoneList<uint8_t> contents_;
@@ -587,6 +648,7 @@ class Translation BASE_EMBEDDED {
GETTER_STUB_FRAME,
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
COMPILED_STUB_FRAME,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
@@ -617,6 +679,7 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
void BeginCompiledStubFrame();
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
@@ -630,7 +693,7 @@ class Translation BASE_EMBEDDED {
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(int args_index, int args_length);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
void MarkDuplicate();
Zone* zone() const { return zone_; }
@@ -688,36 +751,35 @@ class SlotRef BASE_EMBEDDED {
SlotRef(Address addr, SlotRepresentation representation)
: addr_(addr), representation_(representation) { }
explicit SlotRef(Object* literal)
: literal_(literal), representation_(LITERAL) { }
SlotRef(Isolate* isolate, Object* literal)
: literal_(literal, isolate), representation_(LITERAL) { }
Handle<Object> GetValue() {
Handle<Object> GetValue(Isolate* isolate) {
switch (representation_) {
case TAGGED:
return Handle<Object>(Memory::Object_at(addr_));
return Handle<Object>(Memory::Object_at(addr_), isolate);
case INT32: {
int value = Memory::int32_at(addr_);
if (Smi::IsValid(value)) {
return Handle<Object>(Smi::FromInt(value));
return Handle<Object>(Smi::FromInt(value), isolate);
} else {
return Isolate::Current()->factory()->NewNumberFromInt(value);
return isolate->factory()->NewNumberFromInt(value);
}
}
case UINT32: {
uint32_t value = Memory::uint32_at(addr_);
if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromInt(static_cast<int>(value)));
return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
} else {
return Isolate::Current()->factory()->NewNumber(
static_cast<double>(value));
return isolate->factory()->NewNumber(static_cast<double>(value));
}
}
case DOUBLE: {
double value = Memory::double_at(addr_);
return Isolate::Current()->factory()->NewNumber(value);
return isolate->factory()->NewNumber(value);
}
case LITERAL:

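SlotRef::GetValue now threads the caller's isolate through every handle it creates. The Smi-range guard it applies to 32-bit slots is worth restating as a sketch (same v8::internal context assumed):

// Sketch of the INT32 case above: a 32-bit value is only representable as a
// Smi when Smi::IsValid(value); otherwise a HeapNumber must be allocated.
int value = Memory::int32_at(addr_);
Handle<Object> result = Smi::IsValid(value)
    ? Handle<Object>(Smi::FromInt(value), isolate)
    : isolate->factory()->NewNumberFromInt(value);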
34
deps/v8/src/disassembler.cc

@@ -111,11 +111,12 @@ static void DumpBuffer(FILE* f, StringBuilder* out) {
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
static int DecodeIt(FILE* f,
static int DecodeIt(Isolate* isolate,
FILE* f,
const V8NameConverter& converter,
byte* begin,
byte* end) {
NoHandleAllocation ha;
NoHandleAllocation ha(isolate);
AssertNoAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
Heap* heap = HEAP;
@@ -281,13 +282,22 @@ static int DecodeIt(FILE* f,
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
}
} else if (rmode == RelocInfo::RUNTIME_ENTRY &&
Isolate::Current()->deoptimizer_data() != NULL) {
} else if (RelocInfo::IsRuntimeEntry(rmode) &&
isolate->deoptimizer_data() != NULL) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo.target_address();
int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
int id = Deoptimizer::GetDeoptimizationId(isolate,
addr,
Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
id = Deoptimizer::GetDeoptimizationId(isolate,
addr,
Deoptimizer::LAZY);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
} else {
out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
}
} else {
out.AddFormatted(" ;; deoptimization bailout %d", id);
}
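The disassembler previously only recognized eager bailouts; it now checks the eager table first and falls back to the lazy one before printing a plain reloc-mode name. The lookup order, condensed:

// Condensed from the hunk above (isolate and addr as in DecodeIt).
int id = Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
  id = Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::LAZY);
}
// Still kNotDeoptimizationEntry => not a deopt bailout at all.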
@@ -314,15 +324,17 @@ static int DecodeIt(FILE* f,
}
int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
V8NameConverter defaultConverter(NULL);
return DecodeIt(f, defaultConverter, begin, end);
return DecodeIt(isolate, f, defaultConverter, begin, end);
}
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
Isolate* isolate = code->GetIsolate();
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::COMPILED_STUB)
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
@@ -334,13 +346,15 @@ void Disassembler::Decode(FILE* f, Code* code) {
byte* begin = code->instruction_start();
byte* end = begin + decode_size;
V8NameConverter v8NameConverter(code);
DecodeIt(f, v8NameConverter, begin, end);
DecodeIt(isolate, f, v8NameConverter, begin, end);
}
#else // ENABLE_DISASSEMBLER
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
return 0;
}
void Disassembler::Decode(FILE* f, Code* code) {}
#endif // ENABLE_DISASSEMBLER

2
deps/v8/src/disassembler.h

@@ -41,7 +41,7 @@ class Disassembler : public AllStatic {
// Decode instructions in the interval [begin, end) and print the
// code into f. Returns the number of bytes disassembled or 1 if no
// instruction could be decoded.
static int Decode(FILE* f, byte* begin, byte* end);
static int Decode(Isolate* isolate, FILE* f, byte* begin, byte* end);
// Decode instructions in code.
static void Decode(FILE* f, Code* code);

9
deps/v8/src/elements-kind.cc

@@ -35,9 +35,14 @@ namespace v8 {
namespace internal {
void PrintElementsKind(FILE* out, ElementsKind kind) {
const char* ElementsKindToString(ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
return accessor->name();
}
void PrintElementsKind(FILE* out, ElementsKind kind) {
PrintF(out, "%s", ElementsKindToString(kind));
}
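Factoring ElementsKindToString out of PrintElementsKind exposes the kind's name without requiring a FILE*. A hypothetical caller:

// Hypothetical use: the name is now available for error messages and the
// like; PrintElementsKind is reduced to a thin wrapper over it.
const char* name = ElementsKindToString(FAST_SMI_ELEMENTS);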

8
deps/v8/src/elements-kind.h

@@ -77,6 +77,7 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
@@ -109,6 +110,13 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
}
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsFastDoubleElementsKind(kind) ||
kind == EXTERNAL_DOUBLE_ELEMENTS ||
kind == EXTERNAL_FLOAT_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||

872
deps/v8/src/elements.cc

File diff suppressed because it is too large

51
deps/v8/src/elements.h

@@ -71,6 +71,39 @@ class ElementsAccessor {
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Returns an element's attributes, or ABSENT if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Returns an element's type, or NONEXISTENT if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual PropertyType GetType(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Returns an element's accessors, or NULL if the element does not exist or
// is plain. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
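The three new ElementsAccessor queries (GetAttributes, GetType, GetAccessorPair) share one calling convention: receiver, holder, key, and an optional explicit backing store defaulting to holder->elements(). A hypothetical call sketch, assuming JSObject::GetElementsAccessor() as elsewhere in this tree:

// Hypothetical: query an element's attributes via the accessor for the
// holder's ElementsKind; omitting backing_store means holder->elements().
ElementsAccessor* accessor = holder->GetElementsAccessor();
PropertyAttributes attrs = accessor->GetAttributes(receiver, holder, key);
if (attrs == ABSENT) {
  // No own element at `key` (the prototype chain is not consulted).
}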
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
@@ -110,17 +143,17 @@ class ElementsAccessor {
MUST_USE_RESULT virtual MaybeObject* CopyElements(
JSObject* source_holder,
uint32_t source_start,
ElementsKind source_kind,
FixedArrayBase* destination,
ElementsKind destination_kind,
uint32_t destination_start,
int copy_size,
FixedArrayBase* source = NULL) = 0;
MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
FixedArrayBase* to,
ElementsKind to_kind,
ElementsKind from_kind,
FixedArrayBase* from = NULL) {
return CopyElements(from_holder, 0, to, to_kind, 0,
return CopyElements(from_holder, 0, from_kind, to, 0,
kCopyToEndAndInitializeToHole, from);
}
@@ -164,15 +197,11 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
bool allow_appending = false);
void CopyObjectToObjectElements(FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
ElementsKind to_kind,
uint32_t to_start,
int copy_size);
MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
JSArray* array, Arguments* args);
} } // namespace v8::internal

105
deps/v8/src/execution.cc

@@ -106,7 +106,7 @@ static Handle<Object> Invoke(bool is_construct,
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
NoHandleAllocation na;
NoHandleAllocation na(isolate);
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
@@ -124,10 +124,10 @@ static Handle<Object> Invoke(bool is_construct,
// Update the pending exception flag and return the value.
*has_pending_exception = value->IsException();
ASSERT(*has_pending_exception == Isolate::Current()->has_pending_exception());
ASSERT(*has_pending_exception == isolate->has_pending_exception());
if (*has_pending_exception) {
isolate->ReportPendingMessages();
if (isolate->pending_exception() == Failure::OutOfMemoryException()) {
if (isolate->pending_exception()->IsOutOfMemory()) {
if (!isolate->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("JS", true);
}
@@ -169,7 +169,9 @@ Handle<Object> Execution::Call(Handle<Object> callable,
// Under some circumstances, 'global' can be the JSBuiltinsObject
// In that case, don't rewrite. (FWIW, the same holds for
// GetIsolate()->global_object()->global_receiver().)
if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global);
if (!global->IsJSBuiltinsObject()) {
receiver = Handle<Object>(global, func->GetIsolate());
}
} else {
receiver = ToObject(receiver, pending_exception);
}
@@ -184,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
return Invoke(true, func, Isolate::Current()->global_object(), argc, argv,
return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv,
pending_exception);
}
@@ -206,11 +208,14 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
Isolate* isolate = func->GetIsolate();
if (*caught_exception) {
ASSERT(catcher.HasCaught());
Isolate* isolate = Isolate::Current();
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
}
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
@@ -220,8 +225,8 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
isolate->OptionalRescheduleException(true);
}
ASSERT(!Isolate::Current()->has_pending_exception());
ASSERT(!Isolate::Current()->external_caught_exception());
ASSERT(!isolate->has_pending_exception());
ASSERT(!isolate->external_caught_exception());
return result;
}
@@ -239,7 +244,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
if (fun->IsJSFunction()) return Handle<Object>(fun);
if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -263,7 +268,7 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
if (fun->IsJSFunction()) return Handle<Object>(fun);
if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -296,7 +301,7 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
if (fun->IsJSFunction()) return Handle<Object>(fun);
if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -324,7 +329,7 @@ Handle<Object> Execution::TryGetConstructorDelegate(
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
if (fun->IsJSFunction()) return Handle<Object>(fun);
if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -427,44 +432,6 @@ void StackGuard::TerminateExecution() {
}
bool StackGuard::IsRuntimeProfilerTick() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
}
void StackGuard::RequestRuntimeProfilerTick() {
// Ignore calls if we're not optimizing or if we can't get the lock.
if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
isolate_->heap()->SetStackLimits();
}
ExecutionAccess::Unlock(isolate_);
}
}
void StackGuard::RequestCodeReadyEvent() {
ASSERT(FLAG_parallel_recompilation);
if (ExecutionAccess::TryLock(isolate_)) {
thread_local_.interrupt_flags_ |= CODE_READY;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
isolate_->heap()->SetStackLimits();
}
ExecutionAccess::Unlock(isolate_);
}
}
bool StackGuard::IsCodeReadyEvent() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
}
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
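The deleted RUNTIME_PROFILER_TICK and CODE_READY plumbing followed StackGuard's one interrupt idiom: set the flag under the execution lock, then collapse the JS and C stack limits so the next stack check traps into the interrupt handler. Condensed from the removed methods (the helper name is hypothetical; the removed code used ExecutionAccess::TryLock/Unlock where this sketch uses the scoped lock):

void StackGuard::RequestInterrupt(InterruptFlag flag) {  // hypothetical helper
  ExecutionAccess access(isolate_);
  thread_local_.interrupt_flags_ |= flag;
  if (thread_local_.postpone_interrupts_nesting_ == 0) {
    // Force the next stack check to fail so the interrupt is serviced.
    thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
    isolate_->heap()->SetStackLimits();
  }
}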
@@ -615,22 +582,6 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
} while (false)
Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
// See the similar code in runtime.js:ToBoolean.
if (obj->IsBoolean()) return obj;
bool result = true;
if (obj->IsString()) {
result = Handle<String>::cast(obj)->length() != 0;
} else if (obj->IsNull() || obj->IsUndefined()) {
result = false;
} else if (obj->IsNumber()) {
double value = obj->Number();
result = !((value == 0) || isnan(value));
}
return Handle<Object>(HEAP->ToBoolean(result));
}
Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_number, { obj }, exc);
}
@@ -697,9 +648,8 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
return factory->undefined_value();
}
Handle<Object> char_at =
GetProperty(isolate->js_builtins_object(),
factory->char_at_symbol());
Handle<Object> char_at = GetProperty(
isolate, isolate->js_builtins_object(), factory->char_at_string());
if (!char_at->IsJSFunction()) {
return factory->undefined_value();
}
@@ -800,7 +750,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
args,
&caught_exception);
if (caught_exception || !result->IsString()) {
return isolate->factory()->empty_symbol();
return isolate->factory()->empty_string();
}
return Handle<String>::cast(result);
@@ -930,25 +880,10 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(GC_REQUEST);
}
if (stack_guard->IsCodeReadyEvent()) {
ASSERT(FLAG_parallel_recompilation);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** CODE_READY event received.\n");
}
stack_guard->Continue(CODE_READY);
}
if (!stack_guard->IsTerminateExecution()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
isolate->counters()->stack_interrupts()->Increment();
// If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
if (FLAG_count_based_interrupts ||
stack_guard->IsRuntimeProfilerTick()) {
isolate->counters()->runtime_profiler_ticks()->Increment();
stack_guard->Continue(RUNTIME_PROFILER_TICK);
isolate->runtime_profiler()->OptimizeNow();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();

11
deps/v8/src/execution.h

@@ -41,9 +41,7 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5,
GC_REQUEST = 1 << 6,
CODE_READY = 1 << 7
GC_REQUEST = 1 << 5
};
@@ -92,9 +90,6 @@ class Execution : public AllStatic {
Handle<Object> argv[],
bool* caught_exception);
// ECMA-262 9.2
static Handle<Object> ToBoolean(Handle<Object> obj);
// ECMA-262 9.3
static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
@@ -194,10 +189,6 @@ class StackGuard {
void Interrupt();
bool IsTerminateExecution();
void TerminateExecution();
bool IsRuntimeProfilerTick();
void RequestRuntimeProfilerTick();
bool IsCodeReadyEvent();
void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
bool IsDebugBreak();
void DebugBreak();

13
deps/v8/src/extensions/externalize-string-extension.cc

@@ -93,13 +93,13 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
return v8::ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
}
if (string->IsAsciiRepresentation() && !force_two_byte) {
char* data = new char[string->length()];
if (string->IsOneByteRepresentation() && !force_two_byte) {
uint8_t* data = new uint8_t[string->length()];
String::WriteToFlat(*string, data, 0, string->length());
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
data, string->length());
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
if (result && !string->IsSymbol()) {
if (result && !string->IsInternalizedString()) {
HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
@@ -109,7 +109,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsSymbol()) {
if (result && !string->IsInternalizedString()) {
HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
@@ -127,7 +127,8 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
return v8::ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
}
return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
return
Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
v8::True() : v8::False();
}

4
deps/v8/src/extensions/gc-extension.cc

@@ -40,7 +40,11 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
if (args[0]->BooleanValue()) {
HEAP->CollectGarbage(NEW_SPACE, "gc extension");
} else {
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
}
return v8::Undefined();
}

Some files were not shown because too many files changed in this diff
