
v8: upgrade to 3.22.24

This commit removes the simple/test-event-emitter-memory-leak test for
being unreliable with the new garbage collector: the memory pressure
exerted by the test case is too low for the garbage collector to kick
in. It can be made to work again by limiting the heap size with the
--max_old_space_size=x flag, but that won't be very reliable across
platforms and architectures.
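For illustration, a minimal embedder-side sketch of the same heap cap the flag applies. It assumes this release's constraints API (the max_old_space_size_ field and SetResourceConstraints appear in the v8.h diff below); the setter name is the conventional accessor for that field, and the value's units are whatever that API defines.

    #include <v8.h>

    // Sketch: cap the old generation so memory pressure triggers GC sooner,
    // the embedder equivalent of passing --max_old_space_size=x to node.
    void CapOldSpace(int max_old_space_size) {
      v8::ResourceConstraints constraints;
      constraints.set_max_old_space_size(max_old_space_size);
      v8::SetResourceConstraints(&constraints);  // returns false on failure
    }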
Tag: v0.11.9-release
Author: Ben Noordhuis (11 years ago)
Commit: f230a1cf74
Changed files (first 100 shown; the number is lines changed):

  1. configure | 10
  2. deps/v8/.gitignore | 8
  3. deps/v8/ChangeLog | 223
  4. deps/v8/Makefile | 23
  5. deps/v8/Makefile.nacl | 3
  6. deps/v8/OWNERS | 4
  7. deps/v8/PRESUBMIT.py | 18
  8. deps/v8/WATCHLISTS | 27
  9. deps/v8/benchmarks/deltablue.js | 26
  10. deps/v8/build/all.gyp | 1
  11. deps/v8/build/features.gypi | 16
  12. deps/v8/build/standalone.gypi | 19
  13. deps/v8/build/toolchain.gypi | 27
  14. deps/v8/include/v8-debug.h | 4
  15. deps/v8/include/v8-defaults.h | 54
  16. deps/v8/include/v8-preparser.h | 84
  17. deps/v8/include/v8-profiler.h | 25
  18. deps/v8/include/v8-testing.h | 4
  19. deps/v8/include/v8.h | 518
  20. deps/v8/include/v8config.h | 15
  21. deps/v8/preparser/preparser-process.cc | 372
  22. deps/v8/preparser/preparser.gyp | 58
  23. deps/v8/samples/lineprocessor.cc | 10
  24. deps/v8/samples/samples.gyp | 8
  25. deps/v8/samples/shell.cc | 18
  26. deps/v8/src/OWNERS | 2
  27. deps/v8/src/accessors.cc | 55
  28. deps/v8/src/accessors.h | 7
  29. deps/v8/src/allocation-site-scopes.cc | 108
  30. deps/v8/src/allocation-site-scopes.h | 115
  31. deps/v8/src/allocation-tracker.cc | 279
  32. deps/v8/src/allocation-tracker.h | 138
  33. deps/v8/src/api.cc | 877
  34. deps/v8/src/api.h | 26
  35. deps/v8/src/apinatives.js | 1
  36. deps/v8/src/arguments.cc | 13
  37. deps/v8/src/arguments.h | 18
  38. deps/v8/src/arm/assembler-arm-inl.h | 7
  39. deps/v8/src/arm/assembler-arm.cc | 1
  40. deps/v8/src/arm/assembler-arm.h | 81
  41. deps/v8/src/arm/builtins-arm.cc | 116
  42. deps/v8/src/arm/code-stubs-arm.cc | 1346
  43. deps/v8/src/arm/code-stubs-arm.h | 26
  44. deps/v8/src/arm/codegen-arm.cc | 115
  45. deps/v8/src/arm/codegen-arm.h | 1
  46. deps/v8/src/arm/deoptimizer-arm.cc | 107
  47. deps/v8/src/arm/frames-arm.h | 2
  48. deps/v8/src/arm/full-codegen-arm.cc | 171
  49. deps/v8/src/arm/ic-arm.cc | 6
  50. deps/v8/src/arm/lithium-arm.cc | 423
  51. deps/v8/src/arm/lithium-arm.h | 462
  52. deps/v8/src/arm/lithium-codegen-arm.cc | 671
  53. deps/v8/src/arm/lithium-codegen-arm.h | 68
  54. deps/v8/src/arm/lithium-gap-resolver-arm.cc | 4
  55. deps/v8/src/arm/macro-assembler-arm.cc | 266
  56. deps/v8/src/arm/macro-assembler-arm.h | 72
  57. deps/v8/src/arm/regexp-macro-assembler-arm.h | 5
  58. deps/v8/src/arm/simulator-arm.cc | 79
  59. deps/v8/src/arm/simulator-arm.h | 9
  60. deps/v8/src/arm/stub-cache-arm.cc | 242
  61. deps/v8/src/array-iterator.js | 8
  62. deps/v8/src/array.js | 26
  63. deps/v8/src/arraybuffer.js | 8
  64. deps/v8/src/assembler.cc | 42
  65. deps/v8/src/assembler.h | 18
  66. deps/v8/src/ast.cc | 61
  67. deps/v8/src/ast.h | 534
  68. deps/v8/src/bootstrapper.cc | 9
  69. deps/v8/src/builtins.cc | 122
  70. deps/v8/src/builtins.h | 52
  71. deps/v8/src/checks.cc | 46
  72. deps/v8/src/checks.h | 19
  73. deps/v8/src/code-stubs-hydrogen.cc | 292
  74. deps/v8/src/code-stubs.cc | 552
  75. deps/v8/src/code-stubs.h | 326
  76. deps/v8/src/codegen.cc | 14
  77. deps/v8/src/compiler.cc | 252
  78. deps/v8/src/compiler.h | 31
  79. deps/v8/src/contexts.cc | 2
  80. deps/v8/src/conversions-inl.h | 4
  81. deps/v8/src/conversions.cc | 16
  82. deps/v8/src/counters.h | 33
  83. deps/v8/src/cpu-profiler.cc | 30
  84. deps/v8/src/cpu-profiler.h | 2
  85. deps/v8/src/d8-debug.cc | 2
  86. deps/v8/src/d8-posix.cc | 51
  87. deps/v8/src/d8-readline.cc | 5
  88. deps/v8/src/d8.cc | 46
  89. deps/v8/src/d8.gyp | 8
  90. deps/v8/src/d8.h | 5
  91. deps/v8/src/d8.js | 14
  92. deps/v8/src/date.js | 32
  93. deps/v8/src/debug-debugger.js | 4
  94. deps/v8/src/debug.cc | 14
  95. deps/v8/src/debug.h | 1
  96. deps/v8/src/defaults.cc | 79
  97. deps/v8/src/deoptimizer.cc | 175
  98. deps/v8/src/deoptimizer.h | 111
  99. deps/v8/src/disassembler.cc | 2
  100. deps/v8/src/elements.cc | 2

configure | 10

@@ -435,9 +435,6 @@ def configure_arm(o):
def configure_node(o):
if options.dest_os == 'android':
o['variables']['OS'] = 'android'
o['variables']['v8_enable_gdbjit'] = 1 if options.gdb else 0
o['variables']['v8_no_strict_aliasing'] = 1 # Work around compiler bugs.
o['variables']['v8_random_seed'] = 0 # Use a random seed for hash tables.
o['variables']['node_prefix'] = os.path.expanduser(options.prefix or '')
o['variables']['node_install_npm'] = b(not options.without_npm)
o['default_configuration'] = 'Debug' if options.debug else 'Release'
@@ -565,8 +562,13 @@ def configure_libuv(o):
def configure_v8(o):
o['variables']['v8_use_snapshot'] = b(not options.without_snapshot)
o['variables']['node_shared_v8'] = b(options.shared_v8)
o['variables']['v8_enable_gdbjit'] = 1 if options.gdb else 0
o['variables']['v8_enable_i18n_support'] = 0 # Don't require libicu.
o['variables']['v8_no_strict_aliasing'] = 1 # Work around compiler bugs.
o['variables']['v8_optimized_debug'] = 0 # Compile with -O0 in debug builds.
o['variables']['v8_random_seed'] = 0 # Use a random seed for hash tables.
o['variables']['v8_use_snapshot'] = b(not options.without_snapshot)
# assume shared_v8 if one of these is set?
if options.shared_v8_libpath:

deps/v8/.gitignore | 8

@@ -37,24 +37,16 @@ shell_g
/out
/perf.data
/perf.data.old
/test/benchmarks/benchmarks.status2
/test/benchmarks/CHECKED_OUT_*
/test/benchmarks/downloaded_*
/test/benchmarks/kraken
/test/benchmarks/octane
/test/benchmarks/sunspider
/test/cctest/cctest.status2
/test/message/message.status2
/test/mjsunit/mjsunit.status2
/test/mozilla/CHECKED_OUT_VERSION
/test/mozilla/data
/test/mozilla/downloaded_*
/test/mozilla/mozilla.status2
/test/preparser/preparser.status2
/test/test262/data
/test/test262/test262-*
/test/test262/test262.status2
/test/webkit/webkit.status2
/third_party
/tools/jsfunfuzz
/tools/jsfunfuzz.zip

deps/v8/ChangeLog | 223

@@ -1,3 +1,226 @@
2013-10-31: Version 3.22.24
Fixed uint32-to-smi conversion in Lithium.
(Chromium issue 309623)
Performance and stability improvements on all platforms.
2013-10-28: Version 3.22.23
Renamed deprecated __attribute__((no_address_safety_analysis)) to
__attribute__((no_sanitize_address)) (Chromium issue 311283)
Defined DEBUG for v8_optimized_debug=2
Performance and stability improvements on all platforms.
2013-10-25: Version 3.22.22
Record allocation stack traces. (Chromium issue 277984,v8:2949)
Performance and stability improvements on all platforms.
2013-10-24: Version 3.22.21
Performance and stability improvements on all platforms.
2013-10-24: Version 3.22.20
Made Array.prototype.pop throw if the last element is not configurable.
Fixed HObjectAccess for loads from migrating prototypes.
(Chromium issue 305309)
Enabled preaging of code objects when --optimize-for-size.
(Chromium issue 280984)
Exposed v8::Function::GetDisplayName to public API.
(Chromium issue 17356)
Performance and stability improvements on all platforms.
2013-10-23: Version 3.22.19
Fix materialization of captured objects with field tracking.
(Chromium issue 298990)
Performance and stability improvements on all platforms.
2013-10-22: Version 3.22.18
Add tool to visualize machine code/lithium.
Handle misaligned loads and stores in load elimination. Do not track
misaligned loads and be conservative about invalidating misaligned
stores. (issue 2934)
Performance and stability improvements on all platforms.
2013-10-21: Version 3.22.17
Harmony: Implement Math.trunc and Math.sign. (issue 2938)
Performance and stability improvements on all platforms.
2013-10-21: Version 3.22.16
Performance and stability improvements on all platforms.
2013-10-18: Version 3.22.15
Enabled calling the SetReference* & SetObjectGroupId functions with a
Persistent<SubclassOfValue>.
Performance and stability improvements on all platforms.
2013-10-17: Version 3.22.14
Performance and stability improvements on all platforms.
2013-10-16: Version 3.22.13
Do not look up ArrayBuffer on global object in typed array constructor.
(issue 2931)
Performance and stability improvements on all platforms.
2013-10-15: Version 3.22.12
Added histograms to track fraction of heap spaces and percentage of
generated crankshaft code.
Moved v8_optimized_debug default value to standalone.gypi.
Track JS allocations as they arrive with no affection on performance
when tracking is switched off (Chromium issue 277984).
Performance and stability improvements on all platforms.
2013-10-14: Version 3.22.11
Performance and stability improvements on all platforms.
2013-10-11: Version 3.22.10
Fixed timezone issues with date-time/parse-* tests.
(Chromium issue 2919)
Added column getter to CpuProfileNode (Chromium issue 302537)
Performance and stability improvements on all platforms.
2013-10-10: Version 3.22.9
Ensure only whitelisted stubs have sse2 versions in the snapshot.
(fix for chromium 304565)
Implement ArrayBuffer.isView.
Performance and stability improvements on all platforms.
2013-10-04: Version 3.22.8
Performance and stability improvements on all platforms.
2013-10-03: Version 3.22.7
Debug: Allow stepping into on a given call frame
(Chromium issue 296963).
Always use timeGetTime() for TimeTicks::Now() on Windows
(Chromium issue 288924).
Performance and stability improvements on all platforms.
2013-10-02: Version 3.22.6
Performance and stability improvements on all platforms.
2013-10-01: Version 3.22.5
Disabled externalization of sliced/cons strings in old pointer space
(Chromium issue 276357).
Turned on handle zapping for release builds
Performance and stability improvements on all platforms.
2013-09-30: Version 3.22.4
Function::Call and Object::CallAsFunction APIs should allow v8::Value as
a receiver (issue 2915).
Removed unnecessary mutex (Chromium issue 291236).
Removed ArrayBufferView::BaseAddress method.
Performance and stability improvements on all platforms.
2013-09-27: Version 3.22.3
Added methods to enable configuration of ResourceConstraints based on
limits derived at runtime.
(Chromium issue 292928)
Added -optimize-for-size flag to optimize for memory size (will be used
by pre-aging CL), and removed the is_memory_constrained
ResourceConstraint.
(Chromium issue 292928)
Performance and stability improvements on all platforms.
2013-09-26: Version 3.22.2
Performance and stability improvements on all platforms.
2013-09-25: Version 3.22.1
Sped up creating typed arrays from array-like objects.
(Chromium issue 270507)
Performance and stability improvements on all platforms.
2013-09-23: Version 3.22.0
LiveEdit to mark more closure functions for re-instantiation when scope
layout changes.
(issue 2872)
Made bounds check elimination iterative instead of recursive.
(Chromium issue 289706)
Turned on i18n support by default.
Set the proper instance-type on HAllocate in BuildFastLiteral.
(Chromium issue 284577)
Performance and stability improvements on all platforms.
2013-09-18: Version 3.21.17
Implemented local load/store elimination on basic blocks.

deps/v8/Makefile | 23

@@ -76,10 +76,10 @@ ifeq ($(snapshot), off)
endif
# extrachecks=on/off
ifeq ($(extrachecks), on)
GYPFLAGS += -Dv8_enable_extra_checks=1
GYPFLAGS += -Dv8_enable_extra_checks=1 -Dv8_enable_handle_zapping=1
endif
ifeq ($(extrachecks), off)
GYPFLAGS += -Dv8_enable_extra_checks=0
GYPFLAGS += -Dv8_enable_extra_checks=0 -Dv8_enable_handle_zapping=0
endif
# gdbjit=on/off
ifeq ($(gdbjit), on)
@@ -124,10 +124,15 @@ endif
ifeq ($(regexp), interpreted)
GYPFLAGS += -Dv8_interpreted_regexp=1
endif
# i18nsupport=on
ifeq ($(i18nsupport), on)
GYPFLAGS += -Dv8_enable_i18n_support=1
# i18nsupport=off
ifeq ($(i18nsupport), off)
GYPFLAGS += -Dv8_enable_i18n_support=0
TESTFLAGS += --noi18n
endif
# deprecation_warnings=on
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
# arm specific flags.
# armv7=false/true
ifeq ($(armv7), false)
@@ -217,8 +222,8 @@ NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
build/toolchain.gypi preparser/preparser.gyp samples/samples.gyp \
src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
build/toolchain.gypi samples/samples.gyp src/d8.gyp \
test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
@@ -323,7 +328,7 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 \
--command-prefix="tools/android-run.py"
--command-prefix="tools/android-run.py" $(TESTFLAGS)
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
@@ -331,7 +336,7 @@ $(addsuffix .check, $(ANDROID_ARCHES)): \
$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 --nopresubmit \
--timeout=600 --nopresubmit --noi18n \
--command-prefix="tools/nacl-run.py"
$(addsuffix .check, $(NACL_ARCHES)): \

deps/v8/Makefile.nacl | 3

@@ -74,6 +74,9 @@ endif
# For mksnapshot host generation.
GYPENV += host_os=${HOST_OS}
# ICU doesn't support NaCl.
GYPENV += v8_enable_i18n_support=0
NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_ARCHES))
.SECONDEXPANSION:
# For some reason the $$(basename $$@) expansion didn't work here...

deps/v8/OWNERS | 4

@@ -2,12 +2,14 @@ bmeurer@chromium.org
danno@chromium.org
dslomov@chromium.org
hpayer@chromium.org
ishell@chromium.org
jkummerow@chromium.org
mmassi@chromium.org
machenbach@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
rossberg@chromium.org
svenpanne@chromium.org
titzer@chromium.org
ulan@chromium.org
vegorov@chromium.org
verwaest@chromium.org

deps/v8/PRESUBMIT.py | 18

@@ -58,6 +58,17 @@ def _CommonChecks(input_api, output_api):
return results
def _SkipTreeCheck(input_api, output_api):
"""Check the env var whether we want to skip tree check.
Only skip if src/version.cc has been updated."""
src_version = 'src/version.cc'
FilterFile = lambda file: file.LocalPath() == src_version
if not input_api.AffectedSourceFiles(
lambda file: file.LocalPath() == src_version):
return False
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
@@ -69,7 +80,8 @@ def CheckChangeOnCommit(input_api, output_api):
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
if not _SkipTreeCheck(input_api, output_api):
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results

deps/v8/tools/status-file-converter.py → deps/v8/WATCHLISTS | 27

@@ -1,6 +1,4 @@
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -27,13 +25,22 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Watchlist Rules
# Refer: http://dev.chromium.org/developers/contributing-code/watchlists
import sys
from testrunner.local import old_statusfile
# IMPORTANT: The regular expression filepath is tested against each path using
# re.search, so it is not usually necessary to add .*.
if len(sys.argv) != 2:
print "Usage: %s foo.status" % sys.argv[0]
print "Will read foo.status and print the converted version to stdout."
sys.exit(1)
{
'WATCHLIST_DEFINITIONS': {
'public_api': {
'filepath': 'include/',
},
},
print old_statusfile.ConvertNotation(sys.argv[1]).GetOutput()
'WATCHLISTS': {
'public_api': [
'phajdan.jr@chromium.org',
],
},
}

deps/v8/benchmarks/deltablue.js | 26

@@ -121,23 +121,23 @@ Strength.strongest = function (s1, s2) {
Strength.prototype.nextWeaker = function () {
switch (this.strengthValue) {
case 0: return Strength.WEAKEST;
case 1: return Strength.WEAK_DEFAULT;
case 2: return Strength.NORMAL;
case 3: return Strength.STRONG_DEFAULT;
case 4: return Strength.PREFERRED;
case 5: return Strength.REQUIRED;
case 0: return Strength.STRONG_PREFERRED;
case 1: return Strength.PREFERRED;
case 2: return Strength.STRONG_DEFAULT;
case 3: return Strength.NORMAL;
case 4: return Strength.WEAK_DEFAULT;
case 5: return Strength.WEAKEST;
}
}
// Strength constants.
Strength.REQUIRED = new Strength(0, "required");
Strength.STONG_PREFERRED = new Strength(1, "strongPreferred");
Strength.PREFERRED = new Strength(2, "preferred");
Strength.STRONG_DEFAULT = new Strength(3, "strongDefault");
Strength.NORMAL = new Strength(4, "normal");
Strength.WEAK_DEFAULT = new Strength(5, "weakDefault");
Strength.WEAKEST = new Strength(6, "weakest");
Strength.REQUIRED = new Strength(0, "required");
Strength.STRONG_PREFERRED = new Strength(1, "strongPreferred");
Strength.PREFERRED = new Strength(2, "preferred");
Strength.STRONG_DEFAULT = new Strength(3, "strongDefault");
Strength.NORMAL = new Strength(4, "normal");
Strength.WEAK_DEFAULT = new Strength(5, "weakDefault");
Strength.WEAKEST = new Strength(6, "weakest");
/* --- *
* C o n s t r a i n t

deps/v8/build/all.gyp | 1

@@ -8,7 +8,6 @@
'target_name': 'All',
'type': 'none',
'dependencies': [
'../preparser/preparser.gyp:*',
'../samples/samples.gyp:*',
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',

deps/v8/build/features.gypi | 16

@@ -54,7 +54,10 @@
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
'v8_enable_i18n_support%': 0,
'v8_enable_i18n_support%': 1,
# Enable compiler warnings when using V8_DEPRECATED apis.
'v8_deprecation_warnings%': 0,
},
'target_defaults': {
'conditions': [
@@ -76,6 +79,9 @@
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
['v8_deprecation_warnings==1', {
'defines': ['V8_DEPRECATION_WARNINGS',],
}],
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
@@ -89,21 +95,29 @@
'Debug': {
'variables': {
'v8_enable_extra_checks%': 1,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['v8_enable_handle_zapping==1', {
'defines': ['ENABLE_HANDLE_ZAPPING',],
}],
],
}, # Debug
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['v8_enable_handle_zapping==1', {
'defines': ['ENABLE_HANDLE_ZAPPING',],
}],
], # conditions
}, # Release
}, # configurations

deps/v8/build/standalone.gypi | 19

@@ -36,7 +36,7 @@
'clang%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 0,
'v8_enable_i18n_support%': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@@ -77,6 +77,23 @@
# as errors.
'v8_code%': 0,
# Speeds up Debug builds:
# 0 - Compiler optimizations off (debuggable) (default). This may
# be 5x slower than Release (or worse).
# 1 - Turn on compiler optimizations. This may be hard or impossible to
# debug. This may still be 2x slower than Release (or worse).
# 2 - Turn on optimizations, and also #undef DEBUG / #define NDEBUG
# (but leave V8_ENABLE_CHECKS and most other assertions enabled.
# This may cause some v8 tests to fail in the Debug configuration.
# This roughly matches the performance of a Release build and can
# be used by embedders that need to build their own code as debug
# but don't want or need a debug version of V8. This should produce
# near-release speeds.
'v8_optimized_debug%': 0,
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \

deps/v8/build/toolchain.gypi | 27

@@ -60,20 +60,6 @@
'v8_enable_backtrace%': 0,
# Speeds up Debug builds:
# 0 - Compiler optimizations off (debuggable) (default). This may
# be 5x slower than Release (or worse).
# 1 - Turn on compiler optimizations. This may be hard or impossible to
# debug. This may still be 2x slower than Release (or worse).
# 2 - Turn on optimizations, and also #undef DEBUG / #define NDEBUG
# (but leave V8_ENABLE_CHECKS and most other assertions enabled.
# This may cause some v8 tests to fail in the Debug configuration.
# This roughly matches the performance of a Release build and can
# be used by embedders that need to build their own code as debug
# but don't want or need a debug version of V8. This should produce
# near-release speeds.
'v8_optimized_debug%': 0,
# Enable profiling support. Only required on Windows.
'v8_enable_prof%': 0,
@@ -450,6 +436,7 @@
'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
'DEBUG'
],
'msvs_settings': {
'VCCLCompilerTool': {
@@ -517,15 +504,6 @@
},
},
'conditions': [
['v8_optimized_debug==2', {
'defines': [
'NDEBUG',
],
}, {
'defines': [
'DEBUG',
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
@@ -567,6 +545,9 @@
'-fdata-sections',
'-ffunction-sections',
],
'defines': [
'OPTIMIZED_DEBUG'
],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {

deps/v8/include/v8-debug.h | 4

@@ -212,9 +212,13 @@ class V8_EXPORT Debug {
// If no isolate is provided the default isolate is
// used.
// TODO(dcarney): remove
static void SendCommand(const uint16_t* command, int length,
ClientData* client_data = NULL,
Isolate* isolate = NULL);
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
ClientData* client_data = NULL);
// Dispatch interface.
static void SetHostDispatchHandler(HostDispatchHandler handler,

deps/v8/include/v8-defaults.h | 54

@@ -0,0 +1,54 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_DEFAULTS_H_
#define V8_V8_DEFAULTS_H_
#include "v8.h"
/**
* Default configuration support for the V8 JavaScript engine.
*/
namespace v8 {
/**
* Configures the constraints with reasonable default values based on the
* capabilities of the current device the VM is running on.
*/
bool V8_EXPORT ConfigureResourceConstraintsForCurrentPlatform(
ResourceConstraints* constraints);
/**
* Convience function which performs SetResourceConstraints with the settings
* returned by ConfigureResourceConstraintsForCurrentPlatform.
*/
bool V8_EXPORT SetDefaultResourceConstraintsForCurrentPlatform();
} // namespace v8
#endif // V8_V8_DEFAULTS_H_
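A hedged sketch of how an embedder might consume the new header. Both functions are declared above; ResourceConstraints and SetResourceConstraints come from v8.h, and ApplyPlatformDefaults is an illustrative name.

    #include <v8.h>
    #include <v8-defaults.h>

    // Sketch: size the heap from the device's capabilities instead of
    // hand-tuned constants. Both calls report failure by returning false.
    bool ApplyPlatformDefaults() {
      // One step: configure and apply together.
      if (v8::SetDefaultResourceConstraintsForCurrentPlatform()) return true;
      // Two steps: inspect or tweak the constraints before applying them.
      v8::ResourceConstraints constraints;
      if (!v8::ConfigureResourceConstraintsForCurrentPlatform(&constraints))
        return false;
      return v8::SetResourceConstraints(&constraints);
    }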

deps/v8/include/v8-preparser.h | 84

@@ -1,84 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef PREPARSER_H
#define PREPARSER_H
#include "v8.h"
#include "v8stdint.h"
namespace v8 {
// The result of preparsing is either a stack overflow error, or an opaque
// blob of data that can be passed back into the parser.
class V8_EXPORT PreParserData {
public:
PreParserData(size_t size, const uint8_t* data)
: data_(data), size_(size) { }
// Create a PreParserData value where stack_overflow reports true.
static PreParserData StackOverflow() { return PreParserData(0, NULL); }
// Whether the pre-parser stopped due to a stack overflow.
// If this is the case, size() and data() should not be used.
bool stack_overflow() { return size_ == 0u; }
// The size of the data in bytes.
size_t size() const { return size_; }
// Pointer to the data.
const uint8_t* data() const { return data_; }
private:
const uint8_t* const data_;
const size_t size_;
};
// Interface for a stream of Unicode characters.
class V8_EXPORT UnicodeInputStream { // NOLINT - V8_EXPORT is not a class name.
public:
virtual ~UnicodeInputStream();
// Returns the next Unicode code-point in the input, or a negative value when
// there is no more input in the stream.
virtual int32_t Next() = 0;
};
// Preparse a JavaScript program. The source code is provided as a
// UnicodeInputStream. The max_stack_size limits the amount of stack
// space that the preparser is allowed to use. If the preparser uses
// more stack space than the limit provided, the result's stack_overflow()
// method will return true. Otherwise the result contains preparser
// data that can be used by the V8 parser to speed up parsing.
PreParserData V8_EXPORT Preparse(UnicodeInputStream* input,
size_t max_stack_size);
} // namespace v8.
#endif // PREPARSER_H

deps/v8/include/v8-profiler.h | 25

@@ -57,16 +57,17 @@ class V8_EXPORT CpuProfileNode {
*/
int GetLineNumber() const;
/**
* Returns 1-based number of the column where the function originates.
* kNoColumnNumberInfo if no column number information is available.
*/
int GetColumnNumber() const;
/** Returns bailout reason for the function
* if the optimization was disabled for it.
*/
const char* GetBailoutReason() const;
/** DEPRECATED. Please use GetHitCount instead.
* Returns the count of samples where function was currently executing.
*/
V8_DEPRECATED(double GetSelfSamplesCount() const);
/**
* Returns the count of samples where the function was currently executing.
*/
@@ -85,6 +86,7 @@ class V8_EXPORT CpuProfileNode {
const CpuProfileNode* GetChild(int index) const;
static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
@@ -473,6 +475,19 @@ class V8_EXPORT HeapProfiler {
*/
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
/**
* Starts recording JS allocations immediately as they arrive and tracking of
* heap objects population statistics.
*/
void StartRecordingHeapAllocations();
/**
* Stops recording JS allocations and tracking of heap objects population
* statistics, cleans all collected heap objects population statistics data.
*/
void StopRecordingHeapAllocations();
private:
HeapProfiler();
~HeapProfiler();
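A sketch of driving the new allocation-recording pair. Isolate::GetHeapProfiler is assumed from the same API version; RunWorkload is a placeholder for embedder code.

    #include <v8.h>
    #include <v8-profiler.h>

    void RunWorkload();  // hypothetical embedder code under measurement

    // Sketch: bracket a workload so subsequent heap snapshots can attribute
    // objects to the JS allocation sites recorded in between.
    void ProfileAllocations(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      profiler->StartRecordingHeapAllocations();
      RunWorkload();
      profiler->StopRecordingHeapAllocations();  // also clears collected stats
    }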

deps/v8/include/v8-testing.h | 4

@@ -68,8 +68,4 @@ class V8_EXPORT Testing {
} // namespace v8
#undef V8_EXPORT
#endif // V8_V8_TEST_H_

deps/v8/include/v8.h | 518

@@ -135,6 +135,7 @@ class DeclaredAccessorDescriptor;
class ObjectOperationDescriptor;
class RawOperationDescriptor;
class CallHandlerHelper;
class EscapableHandleScope;
namespace internal {
class Arguments;
@@ -377,7 +378,6 @@ template <class T> class Local : public Handle<T> {
* The referee is kept alive by the local handle even when
* the original handle is destroyed/disposed.
*/
V8_INLINE static Local<T> New(Handle<T> that);
V8_INLINE static Local<T> New(Isolate* isolate, Handle<T> that);
template<class M>
V8_INLINE static Local<T> New(Isolate* isolate,
@@ -401,6 +401,7 @@ template <class T> class Local : public Handle<T> {
friend class Context;
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
friend class EscapableHandleScope;
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
};
@@ -479,6 +480,22 @@ class NonCopyablePersistentTraits {
};
/**
* Helper class traits to allow copying and assignment of Persistent.
* This will clone the contents of storage cell, but not any of the flags, etc.
*/
template<class T>
struct CopyablePersistentTraits {
typedef Persistent<T, CopyablePersistentTraits<T> > CopyablePersistent;
static const bool kResetInDestructor = true;
template<class S, class M>
static V8_INLINE void Copy(const Persistent<S, M>& source,
CopyablePersistent* dest) {
// do nothing, just allow copy
}
};
/**
* An object reference that is independent of any handle scope. Where
* a Local handle only lives as long as the HandleScope in which it was
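What the new traits buy, in a hedged sketch: a Persistent that may be held by value in standard containers, because copying clones the storage cell. Everything except the traits type is illustrative.

    #include <map>
    #include <v8.h>

    typedef v8::Persistent<v8::Value, v8::CopyablePersistentTraits<v8::Value> >
        CopyableValue;

    // Sketch: cache handles by key; copies are permitted and, because
    // kResetInDestructor is true, each copy resets itself on destruction.
    void Remember(std::map<int, CopyableValue>* cache, int key,
                  v8::Isolate* isolate, v8::Handle<v8::Value> value) {
      (*cache)[key] = CopyableValue(isolate, value);
    }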
@@ -567,9 +584,9 @@ template <class T, class M> class Persistent {
*/
template <class S, class M2>
V8_INLINE void Reset(Isolate* isolate, const Persistent<S, M2>& other);
// TODO(dcarney): deprecate
V8_INLINE void Dispose() { Reset(); }
V8_DEPRECATED(V8_INLINE void Dispose(Isolate* isolate)) { Reset(); }
V8_DEPRECATED("Use Reset instead",
V8_INLINE void Dispose()) { Reset(); }
V8_INLINE bool IsEmpty() const { return val_ == 0; }
@@ -625,22 +642,22 @@ template <class T, class M> class Persistent {
P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
// TODO(dcarney): deprecate
template<typename S, typename P>
V8_INLINE void MakeWeak(
P* parameter,
typename WeakReferenceCallbacks<S, P>::Revivable callback);
V8_DEPRECATED(
"Use SetWeak instead",
V8_INLINE void MakeWeak(
P* parameter,
typename WeakReferenceCallbacks<S, P>::Revivable callback));
// TODO(dcarney): deprecate
template<typename P>
V8_INLINE void MakeWeak(
P* parameter,
typename WeakReferenceCallbacks<T, P>::Revivable callback);
V8_DEPRECATED(
"Use SetWeak instead",
V8_INLINE void MakeWeak(
P* parameter,
typename WeakReferenceCallbacks<T, P>::Revivable callback));
V8_INLINE void ClearWeak();
V8_DEPRECATED(V8_INLINE void ClearWeak(Isolate* isolate)) { ClearWeak(); }
/**
* Marks the reference to this object independent. Garbage collector is free
* to ignore any object groups containing this object. Weak callback for an
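Migrating from the deprecated MakeWeak to SetWeak, sketched against the declarations above; the Wrapper type and function names are illustrative.

    #include <v8.h>

    struct Wrapper {
      v8::Persistent<v8::Object> handle;
    };

    // Called once the wrapped object is otherwise unreachable.
    void OnGone(const v8::WeakCallbackData<v8::Object, Wrapper>& data) {
      Wrapper* wrapper = data.GetParameter();
      wrapper->handle.Reset();  // release the storage cell
      delete wrapper;
    }

    // Sketch: SetWeak(parameter, callback) replaces the MakeWeak overloads
    // that took a WeakReferenceCallbacks::Revivable callback.
    void Track(v8::Isolate* isolate, v8::Handle<v8::Object> obj) {
      Wrapper* wrapper = new Wrapper;
      wrapper->handle.Reset(isolate, obj);
      wrapper->handle.SetWeak(wrapper, OnGone);
    }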
@@ -649,10 +666,6 @@ template <class T, class M> class Persistent {
*/
V8_INLINE void MarkIndependent();
V8_DEPRECATED(V8_INLINE void MarkIndependent(Isolate* isolate)) {
MarkIndependent();
}
/**
* Marks the reference to this object partially dependent. Partially dependent
* handles only depend on other partially dependent handles and these
@@ -663,56 +676,31 @@ template <class T, class M> class Persistent {
*/
V8_INLINE void MarkPartiallyDependent();
V8_DEPRECATED(V8_INLINE void MarkPartiallyDependent(Isolate* isolate)) {
MarkPartiallyDependent();
}
V8_INLINE bool IsIndependent() const;
V8_DEPRECATED(V8_INLINE bool IsIndependent(Isolate* isolate) const) {
return IsIndependent();
}
/** Checks if the handle holds the only reference to an object. */
V8_INLINE bool IsNearDeath() const;
V8_DEPRECATED(V8_INLINE bool IsNearDeath(Isolate* isolate) const) {
return IsNearDeath();
}
/** Returns true if the handle's reference is weak. */
V8_INLINE bool IsWeak() const;
V8_DEPRECATED(V8_INLINE bool IsWeak(Isolate* isolate) const) {
return IsWeak();
}
/**
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
* description in v8-profiler.h for details.
*/
V8_INLINE void SetWrapperClassId(uint16_t class_id);
V8_DEPRECATED(
V8_INLINE void SetWrapperClassId(Isolate * isolate, uint16_t class_id)) {
SetWrapperClassId(class_id);
}
/**
* Returns the class ID previously assigned to this handle or 0 if no class ID
* was previously assigned.
*/
V8_INLINE uint16_t WrapperClassId() const;
V8_DEPRECATED(V8_INLINE uint16_t WrapperClassId(Isolate* isolate) const) {
return WrapperClassId();
}
// TODO(dcarney): remove
V8_INLINE T* ClearAndLeak();
V8_DEPRECATED("This will be removed",
V8_INLINE T* ClearAndLeak());
// TODO(dcarney): remove
V8_INLINE void Clear() { val_ = 0; }
V8_DEPRECATED("This will be removed",
V8_INLINE void Clear()) { val_ = 0; }
// TODO(dcarney): remove
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
@@ -724,6 +712,7 @@ template <class T, class M> class Persistent {
V8_INLINE T* operator*() const { return val_; }
private:
friend class Isolate;
friend class Utils;
template<class F> friend class Handle;
template<class F> friend class Local;
@@ -757,27 +746,28 @@ class V8_EXPORT HandleScope {
~HandleScope();
/**
* Closes the handle scope and returns the value as a handle in the
* previous scope, which is the new current scope after the call.
*/
template <class T> Local<T> Close(Handle<T> value);
template <class T>
V8_DEPRECATED("Use EscapableHandleScope::Escape instead",
Local<T> Close(Handle<T> value));
/**
* Counts the number of allocated handles.
*/
static int NumberOfHandles();
private:
/**
* Creates a new handle with the given value.
*/
static internal::Object** CreateHandle(internal::Object* value);
static internal::Object** CreateHandle(internal::Isolate* isolate,
internal::Object* value);
// Faster version, uses HeapObject to obtain the current Isolate.
static internal::Object** CreateHandle(internal::HeapObject* value);
// Uses HeapObject to obtain the current Isolate.
static internal::Object** CreateHandle(internal::HeapObject* heap_object,
internal::Object* value);
V8_INLINE HandleScope() {}
void Initialize(Isolate* isolate);
private:
// Make it hard to create heap-allocated or illegal handle scopes by
// disallowing certain operations.
HandleScope(const HandleScope&);
@@ -798,19 +788,58 @@ class V8_EXPORT HandleScope {
}
};
void Initialize(Isolate* isolate);
void Leave();
internal::Isolate* isolate_;
internal::Object** prev_next_;
internal::Object** prev_limit_;
// TODO(dcarney): remove this field
// Allow for the active closing of HandleScopes which allows to pass a handle
// from the HandleScope being closed to the next top most HandleScope.
bool is_closed_;
internal::Object** RawClose(internal::Object** value);
friend class ImplementationUtilities;
friend class EscapableHandleScope;
template<class F> friend class Handle;
template<class F> friend class Local;
friend class Object;
friend class Context;
};
/**
* A HandleScope which first allocates a handle in the current scope
* which will be later filled with the escape value.
*/
class V8_EXPORT EscapableHandleScope : public HandleScope {
public:
EscapableHandleScope(Isolate* isolate);
V8_INLINE ~EscapableHandleScope() {}
/**
* Pushes the value into the previous scope and returns a handle to it.
* Cannot be called twice.
*/
template <class T>
V8_INLINE Local<T> Escape(Local<T> value) {
internal::Object** slot =
Escape(reinterpret_cast<internal::Object**>(*value));
return Local<T>(reinterpret_cast<T*>(slot));
}
private:
internal::Object** Escape(internal::Object** escape_value);
// Make it hard to create heap-allocated or illegal handle scopes by
// disallowing certain operations.
EscapableHandleScope(const EscapableHandleScope&);
void operator=(const EscapableHandleScope&);
void* operator new(size_t size);
void operator delete(void*, size_t);
internal::Object** escape_slot_;
};
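Usage in a hedged sketch; Escape replaces the deprecated HandleScope::Close for handing one handle out of a scope. MakeLocalArray is an illustrative name.

    #include <v8.h>

    // Sketch: temporaries die with the scope; exactly one handle escapes.
    v8::Local<v8::Array> MakeLocalArray(v8::Isolate* isolate) {
      v8::EscapableHandleScope scope(isolate);
      v8::Local<v8::Array> result = v8::Array::New(3);
      // ... intermediate handles created here stay in this scope ...
      return scope.Escape(result);  // moves the handle to the caller's scope
    }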
@@ -857,7 +886,9 @@ class V8_EXPORT ScriptData { // NOLINT
* \param input Pointer to UTF-8 script source code.
* \param length Length of UTF-8 script source code.
*/
static ScriptData* PreCompile(const char* input, int length);
static ScriptData* PreCompile(Isolate* isolate,
const char* input,
int length);
/**
* Pre-compiles the specified script (context-independent).
@@ -1009,9 +1040,8 @@ class V8_EXPORT Script {
/**
* Returns the script id value.
* DEPRECATED: Please use GetId().
*/
Local<Value> Id();
V8_DEPRECATED("Use GetId instead", Local<Value> Id());
/**
* Returns the script id.
@@ -1463,6 +1493,7 @@ class V8_EXPORT Value : public Data {
/** JS == */
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
bool SameValue(Handle<Value> that) const;
template <class T> V8_INLINE static Value* Cast(T* value);
@@ -1515,11 +1546,6 @@ class V8_EXPORT String : public Primitive {
*/
int Utf8Length() const;
/**
* This function is no longer useful.
*/
V8_DEPRECATED(V8_INLINE bool MayContainNonAscii() const) { return true; }
/**
* Returns whether this string is known to contain only one byte data.
* Does not read the string.
@@ -1570,11 +1596,6 @@ class V8_EXPORT String : public Primitive {
int start = 0,
int length = -1,
int options = NO_OPTIONS) const;
// ASCII characters.
V8_DEPRECATED(int WriteAscii(char* buffer,
int start = 0,
int length = -1,
int options = NO_OPTIONS) const);
// One byte characters.
int WriteOneByte(uint8_t* buffer,
int start = 0,
@@ -1705,24 +1726,29 @@ class V8_EXPORT String : public Primitive {
V8_INLINE static String* Cast(v8::Value* obj);
// TODO(dcarney): deprecate
/**
* Allocates a new string from either UTF-8 encoded or ASCII data.
* The second parameter 'length' gives the buffer length. If omitted,
* the function calls 'strlen' to determine the buffer length.
*/
V8_INLINE static Local<String> New(const char* data, int length = -1);
V8_DEPRECATED(
"Use NewFromOneByte instead",
V8_INLINE static Local<String> New(const char* data, int length = -1));
// TODO(dcarney): deprecate
/** Allocates a new string from 16-bit character codes.*/
V8_INLINE static Local<String> New(const uint16_t* data, int length = -1);
V8_DEPRECATED(
"Use NewFromTwoByte instead",
V8_INLINE static Local<String> New(
const uint16_t* data, int length = -1));
// TODO(dcarney): deprecate
/**
* Creates an internalized string (historically called a "symbol",
* not to be confused with ES6 symbols). Returns one if it exists already.
*/
V8_INLINE static Local<String> NewSymbol(const char* data, int length = -1);
V8_DEPRECATED(
"Use NewFromUtf8 instead",
V8_INLINE static Local<String> NewSymbol(
const char* data, int length = -1));
enum NewStringType {
kNormalString, kInternalizedString, kUndetectableString
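The replacements take the isolate explicitly; a hedged migration sketch using the NewStringType enum shown just above (MakeStrings is illustrative):

    #include <v8.h>

    // Sketch: deprecated isolate-less constructors and their replacements.
    void MakeStrings(v8::Isolate* isolate) {
      // Was: v8::String::New("hello");
      v8::Local<v8::String> str = v8::String::NewFromUtf8(isolate, "hello");
      // Was: v8::String::NewSymbol("name");
      v8::Local<v8::String> sym = v8::String::NewFromUtf8(
          isolate, "name", v8::String::kInternalizedString);
      (void)str; (void)sym;
    }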
@@ -1801,15 +1827,17 @@ class V8_EXPORT String : public Primitive {
*/
bool CanMakeExternal();
// TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
V8_INLINE static Local<String> NewUndetectable(const char* data,
int length = -1);
V8_DEPRECATED(
"Use NewFromUtf8 instead",
V8_INLINE static Local<String> NewUndetectable(const char* data,
int length = -1));
// TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied 16-bit character codes.*/
V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
int length = -1);
V8_DEPRECATED(
"Use NewFromTwoByte instead",
V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
int length = -1));
/**
* Converts an object to a UTF-8-encoded character array. Useful if
@@ -1843,8 +1871,8 @@ class V8_EXPORT String : public Primitive {
*/
class V8_EXPORT AsciiValue {
public:
// TODO(dcarney): deprecate
explicit AsciiValue(Handle<v8::Value> obj);
V8_DEPRECATED("Use Utf8Value instead",
explicit AsciiValue(Handle<v8::Value> obj));
~AsciiValue();
char* operator*() { return str_; }
const char* operator*() const { return str_; }
@@ -2265,7 +2293,7 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
Local<Value> CallAsFunction(Handle<Object> recv,
Local<Value> CallAsFunction(Handle<Value> recv,
int argc,
Handle<Value> argv[]);
@@ -2364,17 +2392,18 @@ class FunctionCallbackInfo {
V8_INLINE Isolate* GetIsolate() const;
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 6;
static const int kArgsLength = 7;
protected:
friend class internal::FunctionCallbackArguments;
friend class internal::CustomArguments<FunctionCallbackInfo>;
static const int kReturnValueIndex = 0;
static const int kReturnValueDefaultValueIndex = -1;
static const int kIsolateIndex = -2;
static const int kDataIndex = -3;
static const int kCalleeIndex = -4;
static const int kHolderIndex = -5;
static const int kHolderIndex = 0;
static const int kIsolateIndex = 1;
static const int kReturnValueDefaultValueIndex = 2;
static const int kReturnValueIndex = 3;
static const int kDataIndex = 4;
static const int kCalleeIndex = 5;
static const int kContextSaveIndex = 6;
V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values,
@@ -2406,12 +2435,12 @@ class PropertyCallbackInfo {
friend class MacroAssembler;
friend class internal::PropertyCallbackArguments;
friend class internal::CustomArguments<PropertyCallbackInfo>;
static const int kThisIndex = 0;
static const int kDataIndex = -1;
static const int kReturnValueIndex = -2;
static const int kReturnValueDefaultValueIndex = -3;
static const int kIsolateIndex = -4;
static const int kHolderIndex = -5;
static const int kHolderIndex = 0;
static const int kIsolateIndex = 1;
static const int kReturnValueDefaultValueIndex = 2;
static const int kReturnValueIndex = 3;
static const int kDataIndex = 4;
static const int kThisIndex = 5;
V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {}
internal::Object** args_;
@@ -2437,7 +2466,7 @@ class V8_EXPORT Function : public Object {
Local<Object> NewInstance() const;
Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
Local<Value> Call(Handle<Value> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
@@ -2449,6 +2478,12 @@ class V8_EXPORT Function : public Object {
*/
Handle<Value> GetInferredName() const;
/**
* User-defined name assigned to the "displayName" property of this function.
* Used to facilitate debugging and profiling of JavaScript code.
*/
Handle<Value> GetDisplayName() const;
/**
* Returns zero based line number of function body and
* kLineOffsetNotFound if no information available.
@@ -2460,11 +2495,15 @@ class V8_EXPORT Function : public Object {
*/
int GetScriptColumnNumber() const;
/**
* Tells whether this function is builtin.
*/
bool IsBuiltin() const;
/**
* Returns scriptId object.
* DEPRECATED: use ScriptId() instead.
*/
Handle<Value> GetScriptId() const;
V8_DEPRECATED("Use ScriptId instead", Handle<Value> GetScriptId()) const;
/**
* Returns scriptId.
@@ -2627,10 +2666,6 @@ class V8_EXPORT ArrayBufferView : public Object {
* Size of a view in bytes.
*/
size_t ByteLength();
/**
* Base address of a view.
*/
void* BaseAddress();
V8_INLINE static ArrayBufferView* Cast(Value* obj);
@@ -2830,9 +2865,9 @@ class V8_EXPORT Date : public Object {
public:
static Local<Value> New(double time);
// Deprecated, use Date::ValueOf() instead.
// TODO(svenpanne) Actually deprecate when Chrome is adapted.
double NumberValue() const { return ValueOf(); }
V8_DEPRECATED(
"Use ValueOf instead",
double NumberValue()) const { return ValueOf(); }
/**
* A specialization of Value::NumberValue that is more efficient
@@ -2868,9 +2903,9 @@ class V8_EXPORT NumberObject : public Object {
public:
static Local<Value> New(double value);
// Deprecated, use NumberObject::ValueOf() instead.
// TODO(svenpanne) Actually deprecate when Chrome is adapted.
double NumberValue() const { return ValueOf(); }
V8_DEPRECATED(
"Use ValueOf instead",
double NumberValue()) const { return ValueOf(); }
/**
* Returns the Number held by the object.
@@ -2891,9 +2926,9 @@ class V8_EXPORT BooleanObject : public Object {
public:
static Local<Value> New(bool value);
// Deprecated, use BooleanObject::ValueOf() instead.
// TODO(svenpanne) Actually deprecate when Chrome is adapted.
bool BooleanValue() const { return ValueOf(); }
V8_DEPRECATED(
"Use ValueOf instead",
bool BooleanValue()) const { return ValueOf(); }
/**
* Returns the Boolean held by the object.
@@ -2914,9 +2949,9 @@ class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Handle<String> value);
// Deprecated, use StringObject::ValueOf() instead.
// TODO(svenpanne) Actually deprecate when Chrome is adapted.
Local<String> StringValue() const { return ValueOf(); }
V8_DEPRECATED(
"Use ValueOf instead",
Local<String> StringValue()) const { return ValueOf(); }
/**
* Returns the String held by the object.
@@ -2939,9 +2974,9 @@ class V8_EXPORT SymbolObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Handle<Symbol> value);
// Deprecated, use SymbolObject::ValueOf() instead.
// TODO(svenpanne) Actually deprecate when Chrome is adapted.
Local<Symbol> SymbolValue() const { return ValueOf(); }
V8_DEPRECATED(
"Use ValueOf instead",
Local<Symbol> SymbolValue()) const { return ValueOf(); }
/**
* Returns the Symbol held by the object.
@@ -3744,23 +3779,18 @@ class V8_EXPORT ResourceConstraints {
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
Maybe<bool> is_memory_constrained() const { return is_memory_constrained_; }
// If set to true, V8 will limit it's memory usage, at the potential cost of
// lower performance. Note, this option is a tentative addition to the API
// and may be removed or modified without warning.
void set_memory_constrained(bool value) {
is_memory_constrained_ = Maybe<bool>(value);
}
private:
int max_young_space_size_;
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
Maybe<bool> is_memory_constrained_;
};
/**
* Sets the given ResourceConstraints on the current isolate.
*/
bool V8_EXPORT SetResourceConstraints(ResourceConstraints* constraints);
@@ -3773,13 +3803,9 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
/**
* Schedules an exception to be thrown when returning to JavaScript. When an
* exception has been scheduled it is illegal to invoke any JavaScript
* operation; the caller must return immediately and only after the exception
* has been handled does it become legal to invoke JavaScript operations.
*/
Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception);
V8_DEPRECATED(
"Use Isolate::ThrowException instead",
Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception));
/**
* Create new error objects by calling the corresponding error object
@@ -3870,8 +3896,6 @@ enum GCCallbackFlags {
typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCCallback)();
/**
* Collection of V8 heap information.
@@ -4019,9 +4043,30 @@ class V8_EXPORT Isolate {
*/
CpuProfiler* GetCpuProfiler();
/** Returns true if this isolate has a current context. */
bool InContext();
/** Returns the context that is on the top of the stack. */
Local<Context> GetCurrentContext();
/**
* Returns the context of the calling JavaScript code. That is the
* context of the top-most JavaScript frame. If there are no
* JavaScript frames an empty handle is returned.
*/
Local<Context> GetCallingContext();
/** Returns the last entered context. */
Local<Context> GetEnteredContext();
/**
* Schedules an exception to be thrown when returning to JavaScript. When an
* exception has been scheduled it is illegal to invoke any JavaScript
* operation; the caller must return immediately and only after the exception
* has been handled does it become legal to invoke JavaScript operations.
*/
Local<Value> ThrowException(Local<Value> exception);
/**
* Allows the host application to group objects together. If one
* object in the group is alive, all objects in the group are alive.
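A hedged sketch of the per-isolate throw, which replaces the file-scope v8::ThrowException deprecated later in this diff; Fail is an illustrative callback.

    #include <v8.h>

    // Sketch: schedule an exception from a native callback, then return
    // immediately; no JS may run until the exception has been handled.
    void Fail(const v8::FunctionCallbackInfo<v8::Value>& info) {
      v8::Isolate* isolate = info.GetIsolate();
      isolate->ThrowException(v8::String::NewFromUtf8(isolate, "not allowed"));
    }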
@@ -4033,8 +4078,8 @@ class V8_EXPORT Isolate {
* garbage collection types it is sufficient to provide object groups
* for partially dependent handles only.
*/
void SetObjectGroupId(const Persistent<Value>& object,
UniqueId id);
template<typename T> void SetObjectGroupId(const Persistent<T>& object,
UniqueId id);
/**
* Allows the host application to declare implicit references from an object
@@ -4043,8 +4088,8 @@ class V8_EXPORT Isolate {
* are removed. It is intended to be used in the before-garbage-collection
* callback function.
*/
void SetReferenceFromGroup(UniqueId id,
const Persistent<Value>& child);
template<typename T> void SetReferenceFromGroup(UniqueId id,
const Persistent<T>& child);
/**
* Allows the host application to declare implicit references from an object
@@ -4052,8 +4097,53 @@ class V8_EXPORT Isolate {
* too. After each garbage collection, all implicit references are removed. It
* is intended to be used in the before-garbage-collection callback function.
*/
void SetReference(const Persistent<Object>& parent,
const Persistent<Value>& child);
template<typename T, typename S>
void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
typedef void (*GCPrologueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags);
typedef void (*GCEpilogueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags);
/**
* Enables the host application to receive a notification before a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects. It is possible
* to specify the GCType filter for your callback. But it is not possible to
* register the same callback function two times with different
* GCType filters.
*/
void AddGCPrologueCallback(
GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
/**
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
void RemoveGCPrologueCallback(GCPrologueCallback callback);
/**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects. It is possible
* to specify the GCType filter for your callback. But it is not possible to
* register the same callback function two times with different
* GCType filters.
*/
void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
/**
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
private:
Isolate();
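The new per-isolate hooks in a hedged sketch (signatures as declared above; kGCTypeScavenge is this API's scavenge filter, and the callback must not allocate on the V8 heap). Names other than the API calls are illustrative.

    #include <v8.h>

    static int scavenge_count = 0;

    // Sketch: count scavenges using embedder-side state only; no allocation.
    void CountScavenge(v8::Isolate* isolate, v8::GCType type,
                       v8::GCCallbackFlags flags) {
      ++scavenge_count;
    }

    void InstallHooks(v8::Isolate* isolate) {
      isolate->AddGCPrologueCallback(CountScavenge, v8::kGCTypeScavenge);
      // Later: isolate->RemoveGCPrologueCallback(CountScavenge);
    }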
@@ -4062,8 +4152,11 @@ class V8_EXPORT Isolate {
Isolate& operator=(const Isolate&);
void* operator new(size_t size);
void operator delete(void*, size_t);
};
void SetObjectGroupId(internal::Object** object, UniqueId id);
void SetReferenceFromGroup(UniqueId id, internal::Object** object);
void SetReference(internal::Object** parent, internal::Object** child);
};
class V8_EXPORT StartupData {
public:
@@ -4411,16 +4504,6 @@ class V8_EXPORT V8 {
*/
static void RemoveGCPrologueCallback(GCPrologueCallback callback);
/**
* The function is deprecated. Please use AddGCPrologueCallback instead.
* Enables the host application to receive a notification before a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects.
*/
V8_DEPRECATED(static void SetGlobalGCPrologueCallback(GCCallback));
/**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are not allowed in the
@@ -4440,16 +4523,6 @@ class V8_EXPORT V8 {
*/
static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
/**
* The function is deprecated. Please use AddGCEpilogueCallback instead.
* Enables the host application to receive a notification after a
* major garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects.
*/
V8_DEPRECATED(static void SetGlobalGCEpilogueCallback(GCCallback));
/**
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
@@ -4497,11 +4570,6 @@ class V8_EXPORT V8 {
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver return_address_resolver);
/**
* Deprecated, use the variant with the Isolate parameter below instead.
*/
V8_DEPRECATED(static bool SetFunctionEntryHook(FunctionEntryHook entry_hook));
/**
* Allows the host application to provide the address of a function that's
* invoked on entry to every V8-generated function.
@@ -4541,10 +4609,10 @@ class V8_EXPORT V8 {
static void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
// TODO(svenpanne) Really deprecate me when Chrome is fixed.
/** Deprecated. Use Isolate::AdjustAmountOfExternalAllocatedMemory instead. */
static intptr_t AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes);
V8_DEPRECATED(
"Use Isolate::AdjustAmountOfExternalAllocatedMemory instead",
static intptr_t AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes));
/**
* Forcefully terminate the current thread of JavaScript execution
@@ -4599,9 +4667,6 @@ class V8_EXPORT V8 {
*/
static bool Dispose();
/** Deprecated. Use Isolate::GetHeapStatistics instead. */
V8_DEPRECATED(static void GetHeapStatistics(HeapStatistics* heap_statistics));
/**
* Iterates through all external resources referenced from current isolate
* heap. GC is not invoked prior to iterating, therefore there is no
@@ -4899,25 +4964,14 @@ class V8_EXPORT Context {
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
Handle<Value> global_object = Handle<Value>());
/** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(static Persistent<Context> New(
ExtensionConfiguration* extensions = NULL,
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
Handle<Value> global_object = Handle<Value>()));
V8_DEPRECATED("Use Isolate::GetEnteredContext instead",
static Local<Context> GetEntered());
/** Returns the last entered context. */
static Local<Context> GetEntered();
V8_DEPRECATED("Use Isolate::GetCurrentContext instead",
static Local<Context> GetCurrent());
// TODO(svenpanne) Actually deprecate this.
/** Deprecated. Use Isolate::GetCurrentContext instead. */
static Local<Context> GetCurrent();
/**
* Returns the context of the calling JavaScript code. That is the
* context of the top-most JavaScript frame. If there are no
* JavaScript frames, an empty handle is returned.
*/
static Local<Context> GetCalling();
V8_DEPRECATED("Use Isolate::GetCallingContext instead",
static Local<Context> GetCalling());
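// Hedged migration sketch for the three deprecations above (an 'isolate'
// pointer is assumed to be in scope):
//   v8::Local<v8::Context> entered = isolate->GetEnteredContext();
//   v8::Local<v8::Context> current = isolate->GetCurrentContext();
//   v8::Local<v8::Context> calling = isolate->GetCallingContext();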
/**
* Sets the security token for the context. To access an object in
@@ -4948,8 +5002,8 @@ class V8_EXPORT Context {
/** Returns true if the context has experienced an out of memory situation. */
bool HasOutOfMemoryException();
/** Returns true if V8 has a current context. */
static bool InContext();
V8_DEPRECATED("Use Isolate::InContext instead",
static bool InContext());
/** Returns an isolate associated with a current context. */
v8::Isolate* GetIsolate();
@@ -5020,8 +5074,9 @@ class V8_EXPORT Context {
explicit V8_INLINE Scope(Handle<Context> context) : context_(context) {
context_->Enter();
}
// TODO(dcarney): deprecate
V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context) // NOLINT
V8_DEPRECATED(
"Use Handle version instead",
V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
: context_(Handle<Context>::New(isolate, context)) {
context_->Enter();
}
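// Typical use of the Handle-based constructor kept above (a sketch; the
// 'isolate' pointer is assumed):
//   v8::HandleScope handle_scope(isolate);
//   v8::Handle<v8::Context> context = v8::Context::New(isolate);
//   {
//     v8::Context::Scope context_scope(context);  // calls context->Enter()
//     // ... run scripts against 'context' ...
//   }                                             // ~Scope calls Exit()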
@@ -5125,9 +5180,6 @@ class V8_EXPORT Unlocker {
*/
V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
/** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(Unlocker());
~Unlocker();
private:
void Initialize(Isolate* isolate);
@@ -5143,9 +5195,6 @@ class V8_EXPORT Locker {
*/
V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
/** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(Locker());
~Locker();
/**
@@ -5155,12 +5204,12 @@ class V8_EXPORT Locker {
* that will switch between multiple threads that are in contention
* for the V8 lock.
*/
static void StartPreemption(int every_n_ms);
static void StartPreemption(Isolate* isolate, int every_n_ms);
/**
* Stop preemption.
*/
static void StopPreemption();
static void StopPreemption(Isolate* isolate);
/**
* Returns whether or not the locker for a given isolate is locked by the
@@ -5359,7 +5408,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptyStringRootIndex = 131;
static const int kEmptyStringRootIndex = 132;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5370,7 +5419,7 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
static const int kJSObjectType = 0xb1;
static const int kJSObjectType = 0xb2;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -5378,7 +5427,7 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
static void CheckInitializedImpl(v8::Isolate* isolate);
V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
CheckInitializedImpl(isolate);
@@ -5493,19 +5542,6 @@ template <class T>
Local<T>::Local() : Handle<T>() { }
template <class T>
Local<T> Local<T>::New(Handle<T> that) {
if (that.IsEmpty()) return Local<T>();
T* that_ptr = *that;
internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
if (internal::Internals::CanCastToHeapObject(that_ptr)) {
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
reinterpret_cast<internal::HeapObject*>(*p))));
}
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
}
template <class T>
Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
return New(isolate, that.val_);
@@ -5847,7 +5883,7 @@ FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Object** implicit_args,
template<typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
return Local<Value>(reinterpret_cast<Value*>(values_ - i));
}
@@ -5929,7 +5965,8 @@ Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
Handle<Boolean> Boolean::New(bool value) {
return value ? True() : False();
Isolate* isolate = Isolate::GetCurrent();
return value ? True(isolate) : False(isolate);
}
@@ -5941,6 +5978,7 @@ void Template::Set(const char* name, v8::Handle<Data> value) {
Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
typedef internal::HeapObject HO;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
@@ -5948,7 +5986,7 @@ Local<Value> Object::GetInternalField(int index) {
if (I::GetInstanceType(obj) == I::kJSObjectType) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
O** result = HandleScope::CreateHandle(value);
O** result = HandleScope::CreateHandle(reinterpret_cast<HO*>(obj), value);
return Local<Value>(reinterpret_cast<Value*>(result));
}
#endif
@@ -6397,11 +6435,41 @@ void* Isolate::GetData() {
}
template<typename T>
void Isolate::SetObjectGroupId(const Persistent<T>& object,
UniqueId id) {
TYPE_CHECK(Value, T);
SetObjectGroupId(reinterpret_cast<v8::internal::Object**>(object.val_), id);
}
template<typename T>
void Isolate::SetReferenceFromGroup(UniqueId id,
const Persistent<T>& object) {
TYPE_CHECK(Value, T);
SetReferenceFromGroup(id,
reinterpret_cast<v8::internal::Object**>(object.val_));
}
template<typename T, typename S>
void Isolate::SetReference(const Persistent<T>& parent,
const Persistent<S>& child) {
TYPE_CHECK(Object, T);
TYPE_CHECK(Value, S);
SetReference(reinterpret_cast<v8::internal::Object**>(parent.val_),
reinterpret_cast<v8::internal::Object**>(child.val_));
}
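// Hedged sketch of describing embedder retention with the helpers above
// (the Persistent handles 'wrapper_a', 'wrapper_b', 'parent' and 'child'
// are illustrative):
//   v8::UniqueId group(1);  // embedder-chosen id
//   isolate->SetObjectGroupId(wrapper_a, group);
//   isolate->SetObjectGroupId(wrapper_b, group);
//   isolate->SetReferenceFromGroup(group, child);  // group keeps child alive
//   isolate->SetReference(parent, child);          // direct edge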
Local<Value> Context::GetEmbedderData(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
typedef internal::HeapObject HO;
typedef internal::Internals I;
O** result = HandleScope::CreateHandle(I::ReadEmbedderData<O*>(this, index));
HO* context = *reinterpret_cast<HO**>(this);
O** result =
HandleScope::CreateHandle(context, I::ReadEmbedderData<O*>(this, index));
return Local<Value>(reinterpret_cast<Value*>(result));
#else
return SlowGetEmbedderData(index);

15
deps/v8/include/v8config.h

@@ -245,6 +245,7 @@
// older compilers.
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
@@ -320,12 +321,16 @@
// A macro to mark classes or functions as deprecated.
#if !V8_DISABLE_DEPRECATIONS && V8_HAS_ATTRIBUTE_DEPRECATED
# define V8_DEPRECATED(declarator) declarator __attribute__((deprecated))
#elif !V8_DISABLE_DEPRECATIONS && V8_HAS_DECLSPEC_DEPRECATED
# define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
# define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated(message)))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
# define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
# define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#else
# define V8_DEPRECATED(declarator) declarator
# define V8_DEPRECATED(message, declarator) declarator
#endif
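// For illustration, with V8_DEPRECATION_WARNINGS defined and a compiler that
// supports deprecated(message), a declaration such as
//   V8_DEPRECATED("Use Isolate::GetCurrentContext instead",
//                 static Local<Context> GetCurrent());
// expands roughly to
//   static Local<Context> GetCurrent()
//       __attribute__((deprecated("Use Isolate::GetCurrentContext instead")));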

372
deps/v8/preparser/preparser-process.cc

@@ -1,372 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "../include/v8.h"
#include "../include/v8stdint.h"
#include "../include/v8-preparser.h"
#include "../src/preparse-data-format.h"
namespace i = v8::internal;
// This file is only used for testing the preparser.
// The first argument must be the path of a JavaScript source file, or
// the flag "-e", in which case the next argument is the source of a
// JavaScript program.
// Optionally, this can be followed by the word "throws" (case sensitive),
// which signals that parsing is expected to throw; the default is to
// expect parsing not to throw.
// The command line can further be followed by a message text (the
// *type* of the exception expected to be thrown) and, even more
// optionally, the start and end position reported with the exception.
//
// This source file is preparsed and tested against the expectations, and if
// successful, the resulting preparser data is written to stdout.
// Diagnostic output is written to stderr.
// The source file must contain only ASCII characters (UTF-8 isn't supported).
// The file is read into memory, so it should have a reasonable size.
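// For illustration only -- hedged sample invocations matching the format
// above (the file name, exception type and positions are hypothetical):
//   preparser script.js
//   preparser -e "var x = (;" throws unexpected_token 8 9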
// Adapts an ASCII string to the UnicodeInputStream interface.
class AsciiInputStream : public v8::UnicodeInputStream {
public:
AsciiInputStream(const uint8_t* buffer, size_t length)
: buffer_(buffer),
end_offset_(static_cast<int>(length)),
offset_(0) { }
virtual ~AsciiInputStream() { }
virtual void PushBack(int32_t ch) {
offset_--;
#ifdef DEBUG
if (offset_ < 0 ||
(ch != ((offset_ >= end_offset_) ? -1 : buffer_[offset_]))) {
fprintf(stderr, "Invalid pushback: '%c' at offset %d.", ch, offset_);
exit(1);
}
#endif
}
virtual int32_t Next() {
if (offset_ >= end_offset_) {
offset_++; // Increment anyway to allow symmetric pushbacks.
return -1;
}
uint8_t next_char = buffer_[offset_];
#ifdef DEBUG
if (next_char > 0x7fu) {
fprintf(stderr, "Non-ASCII character in input: '%c'.", next_char);
exit(1);
}
#endif
offset_++;
return static_cast<int32_t>(next_char);
}
private:
const uint8_t* buffer_;
const int end_offset_;
int offset_;
};
bool ReadBuffer(FILE* source, void* buffer, size_t length) {
size_t actually_read = fread(buffer, 1, length, source);
return (actually_read == length);
}
bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
size_t actually_written = fwrite(buffer, 1, length, dest);
return (actually_written == length);
}
class PreparseDataInterpreter {
public:
PreparseDataInterpreter(const uint8_t* data, int length)
: data_(data), length_(length), message_(NULL) { }
~PreparseDataInterpreter() {
if (message_ != NULL) delete[] message_;
}
bool valid() {
int header_length =
i::PreparseDataConstants::kHeaderSize * sizeof(int); // NOLINT
return length_ >= header_length;
}
bool throws() {
return valid() &&
word(i::PreparseDataConstants::kHasErrorOffset) != 0;
}
const char* message() {
if (message_ != NULL) return message_;
if (!throws()) return NULL;
int text_pos = i::PreparseDataConstants::kHeaderSize +
i::PreparseDataConstants::kMessageTextPos;
int length = word(text_pos);
char* buffer = new char[length + 1];
for (int i = 1; i <= length; i++) {
int character = word(text_pos + i);
buffer[i - 1] = character;
}
buffer[length] = '\0';
message_ = buffer;
return buffer;
}
int beg_pos() {
if (!throws()) return -1;
return word(i::PreparseDataConstants::kHeaderSize +
i::PreparseDataConstants::kMessageStartPos);
}
int end_pos() {
if (!throws()) return -1;
return word(i::PreparseDataConstants::kHeaderSize +
i::PreparseDataConstants::kMessageEndPos);
}
private:
int word(int offset) {
const int* word_data = reinterpret_cast<const int*>(data_);
if (word_data + offset < reinterpret_cast<const int*>(data_ + length_)) {
return word_data[offset];
}
return -1;
}
const uint8_t* const data_;
const int length_;
const char* message_;
};
template <typename T>
class ScopedPointer {
public:
explicit ScopedPointer() : pointer_(NULL) {}
explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
~ScopedPointer() { if (pointer_ != NULL) delete[] pointer_; }
T& operator[](int index) { return pointer_[index]; }
T* operator*() { return pointer_; }
T* operator=(T* new_value) {
if (pointer_ != NULL) delete[] pointer_;
pointer_ = new_value;
return new_value;
}
private:
T* pointer_;
};
void fail(v8::PreParserData* data, const char* message, ...) {
va_list args;
va_start(args, message);
vfprintf(stderr, message, args);
va_end(args);
fflush(stderr);
if (data != NULL) {
// Print preparser data to stdout.
uint32_t size = static_cast<uint32_t>(data->size());
fprintf(stderr, "LOG: data size: %u\n", size);
if (!WriteBuffer(stdout, data->data(), size)) {
perror("ERROR: Writing data");
fflush(stderr);
}
}
exit(EXIT_FAILURE);
}
bool IsFlag(const char* arg) {
// Anything starting with '-' is considered a flag.
// It's summarily ignored for now.
return arg[0] == '-';
}
struct ExceptionExpectation {
ExceptionExpectation()
: throws(false), type(NULL), beg_pos(-1), end_pos(-1) { }
bool throws;
const char* type;
int beg_pos;
int end_pos;
};
void CheckException(v8::PreParserData* data,
ExceptionExpectation* expects) {
PreparseDataInterpreter reader(data->data(), static_cast<int>(data->size()));
if (expects->throws) {
if (!reader.throws()) {
if (expects->type == NULL) {
fail(data, "Didn't throw as expected\n");
} else {
fail(data, "Didn't throw \"%s\" as expected\n", expects->type);
}
}
if (expects->type != NULL) {
const char* actual_message = reader.message();
if (strcmp(expects->type, actual_message)) {
fail(data, "Wrong error message. Expected <%s>, found <%s> at %d..%d\n",
expects->type, actual_message, reader.beg_pos(), reader.end_pos());
}
}
if (expects->beg_pos >= 0) {
if (expects->beg_pos != reader.beg_pos()) {
fail(data, "Wrong error start position: Expected %i, found %i\n",
expects->beg_pos, reader.beg_pos());
}
}
if (expects->end_pos >= 0) {
if (expects->end_pos != reader.end_pos()) {
fail(data, "Wrong error end position: Expected %i, found %i\n",
expects->end_pos, reader.end_pos());
}
}
} else if (reader.throws()) {
const char* message = reader.message();
fail(data, "Throws unexpectedly with message: %s at location %d-%d\n",
message, reader.beg_pos(), reader.end_pos());
}
}
ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
// Parse ["throws" [<exn-type> [<start> [<end>]]]].
ExceptionExpectation expects;
int arg_index = 0;
while (argc > arg_index && strncmp("throws", argv[arg_index], 7)) {
arg_index++;
}
if (argc > arg_index) {
expects.throws = true;
arg_index++;
if (argc > arg_index && !IsFlag(argv[arg_index])) {
expects.type = argv[arg_index];
arg_index++;
if (argc > arg_index && !IsFlag(argv[arg_index])) {
expects.beg_pos = atoi(argv[arg_index]); // NOLINT
arg_index++;
if (argc > arg_index && !IsFlag(argv[arg_index])) {
expects.end_pos = atoi(argv[arg_index]); // NOLINT
}
}
}
}
return expects;
}
int main(int argc, const char* argv[]) {
// Parse command line.
// Format: preparser (<scriptfile> | -e "<source>")
// ["throws" [<exn-type> [<start> [<end>]]]]
// Any flags (except an initial -e) are ignored.
// Flags must not separate "throws" and its arguments.
// Check for mandatory filename argument.
int arg_index = 1;
if (argc <= arg_index) {
fail(NULL, "ERROR: No filename on command line.\n");
}
const uint8_t* source = NULL;
const char* filename = argv[arg_index];
if (!strcmp(filename, "-e")) {
arg_index++;
if (argc <= arg_index) {
fail(NULL, "ERROR: No source after -e on command line.\n");
}
source = reinterpret_cast<const uint8_t*>(argv[arg_index]);
}
// Check remainder of command line for exception expectations.
arg_index++;
ExceptionExpectation expects =
ParseExpectation(argc - arg_index, argv + arg_index);
v8::V8::Initialize();
ScopedPointer<uint8_t> buffer;
size_t length;
if (source == NULL) {
// Open JS file.
FILE* input = fopen(filename, "rb");
if (input == NULL) {
perror("ERROR: Error opening file");
fflush(stderr);
return EXIT_FAILURE;
}
// Find length of JS file.
if (fseek(input, 0, SEEK_END) != 0) {
perror("ERROR: Error during seek");
fflush(stderr);
return EXIT_FAILURE;
}
length = static_cast<size_t>(ftell(input));
rewind(input);
// Read JS file into memory buffer.
buffer = new uint8_t[length];
if (!ReadBuffer(input, *buffer, length)) {
perror("ERROR: Reading file");
fflush(stderr);
return EXIT_FAILURE;
}
fclose(input);
source = *buffer;
} else {
length = strlen(reinterpret_cast<const char*>(source));
}
// Preparse input file.
AsciiInputStream input_buffer(source, length);
size_t kMaxStackSize = 64 * 1024 * sizeof(void*); // NOLINT
v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
// Fail if stack overflow.
if (data.stack_overflow()) {
fail(&data, "ERROR: Stack overflow\n");
}
// Check that the expected exception is thrown, if an exception is
// expected.
CheckException(&data, &expects);
return EXIT_SUCCESS;
}

58
deps/v8/preparser/preparser.gyp

@@ -1,58 +0,0 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'v8_code': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
{
'target_name': 'preparser',
'type': 'executable',
'conditions': [
# preparser can't link against a shared library, so link against
# the underlying static targets.
['v8_use_snapshot=="true"', {
'dependencies': ['../tools/gyp/v8.gyp:v8_snapshot'],
}, {
'dependencies': [
'../tools/gyp/v8.gyp:v8_nosnapshot.<(v8_target_arch)',
],
}],
],
'include_dirs+': [
'../src',
],
'sources': [
'preparser-process.cc',
'../include/v8-preparser.h',
'../src/preparser-api.cc',
],
},
],
}

10
deps/v8/samples/lineprocessor.cc

@@ -259,7 +259,7 @@ int RunMain(int argc, char* argv[]) {
if (cycle_type == CycleInCpp) {
bool res = RunCppCycle(script,
v8::Context::GetCurrent(),
isolate->GetCurrentContext(),
report_exceptions);
return !res;
} else {
@@ -296,7 +296,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> input_line = ReadLine();
if (input_line == v8::Undefined()) {
if (input_line == v8::Undefined(isolate)) {
continue;
}
@@ -306,7 +306,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Handle<v8::Value> result;
{
v8::TryCatch try_catch;
result = process_fun->Call(v8::Context::GetCurrent()->Global(),
result = process_fun->Call(isolate->GetCurrentContext()->Global(),
argc, argv);
if (try_catch.HasCaught()) {
if (report_exceptions)
@@ -417,7 +417,7 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// function is called. Reads a string from standard input and returns.
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0) {
v8::ThrowException(v8::String::New("Unexpected arguments"));
args.GetIsolate()->ThrowException(v8::String::New("Unexpected arguments"));
return;
}
args.GetReturnValue().Set(ReadLine());
@@ -436,7 +436,7 @@ v8::Handle<v8::String> ReadLine() {
res = fgets(buffer, kBufferSize, stdin);
}
if (res == NULL) {
v8::Handle<v8::Primitive> t = v8::Undefined();
v8::Handle<v8::Primitive> t = v8::Undefined(v8::Isolate::GetCurrent());
return v8::Handle<v8::String>::Cast(t);
}
// Remove newline char
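// Hedged recap of the migration pattern in this sample: the parameterless
// singleton accessors now take the isolate explicitly, e.g.
//   v8::Handle<v8::Primitive> undef = v8::Undefined(isolate);
// and calls on the current context go through the isolate:
//   isolate->GetCurrentContext()->Global();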

8
deps/v8/samples/samples.gyp

@@ -28,7 +28,7 @@
{
'variables': {
'v8_code': 1,
'v8_enable_i18n_support%': 0,
'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'target_defaults': {
@@ -42,13 +42,13 @@
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icudata',
'<(icu_gyp_path):icudata',
],
}],
],

18
deps/v8/samples/shell.cc

@@ -140,17 +140,20 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// the argument into a JavaScript string.
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
v8::ThrowException(v8::String::New("Bad parameters"));
args.GetIsolate()->ThrowException(
v8::String::New("Bad parameters"));
return;
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
v8::ThrowException(v8::String::New("Error loading file"));
args.GetIsolate()->ThrowException(
v8::String::New("Error loading file"));
return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
v8::ThrowException(v8::String::New("Error loading file"));
args.GetIsolate()->ThrowException(
v8::String::New("Error loading file"));
return;
}
args.GetReturnValue().Set(source);
@@ -165,12 +168,14 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
v8::ThrowException(v8::String::New("Error loading file"));
args.GetIsolate()->ThrowException(
v8::String::New("Error loading file"));
return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
v8::ThrowException(v8::String::New("Error loading file"));
args.GetIsolate()->ThrowException(
v8::String::New("Error loading file"));
return;
}
if (!ExecuteString(args.GetIsolate(),
@@ -178,7 +183,8 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::String::New(*file),
false,
false)) {
v8::ThrowException(v8::String::New("Error executing file"));
args.GetIsolate()->ThrowException(
v8::String::New("Error executing file"));
return;
}
}
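// Recap of the pattern applied throughout this sample: the static
// v8::ThrowException(...) becomes a call on the isolate owning the
// arguments, e.g.:
//   args.GetIsolate()->ThrowException(v8::String::New("Error loading file"));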

2
deps/v8/src/OWNERS

@@ -0,0 +1,2 @@
per-file i18n.*=cira@chromium.org
per-file i18n.*=mnita@google.com

55
deps/v8/src/accessors.cc

@@ -78,6 +78,61 @@ MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
}
static V8_INLINE bool CheckForName(Handle<String> name,
String* property_name,
int offset,
int* object_offset) {
if (name->Equals(property_name)) {
*object_offset = offset;
return true;
}
return false;
}
bool Accessors::IsJSObjectFieldAccessor(
Handle<Map> map, Handle<String> name,
int* object_offset) {
Isolate* isolate = map->GetIsolate();
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return
CheckForName(name, isolate->heap()->length_string(),
JSArray::kLengthOffset, object_offset);
case JS_TYPED_ARRAY_TYPE:
return
CheckForName(name, isolate->heap()->length_string(),
JSTypedArray::kLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
JSTypedArray::kByteOffsetOffset, object_offset) ||
CheckForName(name, isolate->heap()->buffer_string(),
JSTypedArray::kBufferOffset, object_offset);
case JS_ARRAY_BUFFER_TYPE:
return
CheckForName(name, isolate->heap()->byte_length_string(),
JSArrayBuffer::kByteLengthOffset, object_offset);
case JS_DATA_VIEW_TYPE:
return
CheckForName(name, isolate->heap()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
JSDataView::kByteOffsetOffset, object_offset) ||
CheckForName(name, isolate->heap()->buffer_string(),
JSDataView::kBufferOffset, object_offset);
default: {
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return
CheckForName(name, isolate->heap()->length_string(),
String::kLengthOffset, object_offset);
}
return false;
}
}
}
//
// Accessors::ArrayLength
//

7
deps/v8/src/accessors.h

@@ -86,6 +86,13 @@ class Accessors : public AllStatic {
static Handle<AccessorInfo> MakeModuleExport(
Handle<String> name, int index, PropertyAttributes attributes);
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
static bool IsJSObjectFieldAccessor(
Handle<Map> map, Handle<String> name,
int* object_offset);
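// Hedged usage sketch (handle names are illustrative): a compiler that
// knows the receiver's map can replace the accessor call with a direct
// field load:
//   int offset;
//   if (Accessors::IsJSObjectFieldAccessor(map, name, &offset)) {
//     // emit an in-object load at 'offset' instead of calling the accessor
//   }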
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionSetPrototype(Isolate* isolate,

108
deps/v8/src/allocation-site-scopes.cc

@@ -0,0 +1,108 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "allocation-site-scopes.h"
namespace v8 {
namespace internal {
Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
Handle<AllocationSite> scope_site;
if (top().is_null()) {
// We are creating the top level AllocationSite as opposed to a nested
// AllocationSite.
InitializeTraversal(isolate()->factory()->NewAllocationSite());
scope_site = Handle<AllocationSite>(*top(), isolate());
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating top level AllocationSite %p\n",
static_cast<void*>(*scope_site));
}
} else {
ASSERT(!current().is_null());
scope_site = isolate()->factory()->NewAllocationSite();
if (FLAG_trace_creation_allocation_sites) {
PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
static_cast<void*>(*top()),
static_cast<void*>(*current()),
static_cast<void*>(*scope_site));
}
current()->set_nested_site(*scope_site);
update_current_site(*scope_site);
}
ASSERT(!scope_site.is_null());
return scope_site;
}
void AllocationSiteCreationContext::ExitScope(
Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
if (!object.is_null() && !object->IsFailure()) {
bool top_level = !scope_site.is_null() &&
top().is_identical_to(scope_site);
scope_site->set_transition_info(*object);
if (FLAG_trace_creation_allocation_sites) {
if (top_level) {
PrintF("*** Setting AllocationSite %p transition_info %p\n",
static_cast<void*>(*scope_site),
static_cast<void*>(*object));
} else {
PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
static_cast<void*>(*top()),
static_cast<void*>(*scope_site),
static_cast<void*>(*object));
}
}
}
}
Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
ASSERT(nested_site->IsAllocationSite());
update_current_site(AllocationSite::cast(nested_site));
}
return Handle<AllocationSite>(*current(), isolate());
}
void AllocationSiteUsageContext::ExitScope(
Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
ASSERT(object.is_null() || *object == scope_site->transition_info());
}
} } // namespace v8::internal

115
deps/v8/src/allocation-site-scopes.h

@@ -0,0 +1,115 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
#include "ast.h"
#include "handles.h"
#include "objects.h"
#include "zone.h"
namespace v8 {
namespace internal {
// AllocationSiteContext is the base class for walking and copying a nested
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
public:
AllocationSiteContext(Isolate* isolate, bool activated) {
isolate_ = isolate;
activated_ = activated;
}
virtual ~AllocationSiteContext() {}
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
// If activated, then recursively create mementos
bool activated() const { return activated_; }
// Returns the AllocationSite that matches this scope.
virtual Handle<AllocationSite> EnterNewScope() = 0;
// scope_site should be the handle returned by the matching EnterNewScope()
virtual void ExitScope(Handle<AllocationSite> scope_site,
Handle<JSObject> object) = 0;
protected:
void update_current_site(AllocationSite* site) {
*(current_.location()) = site;
}
Isolate* isolate() { return isolate_; }
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
current_ = Handle<AllocationSite>(*top_, isolate());
}
private:
Isolate* isolate_;
Handle<AllocationSite> top_;
Handle<AllocationSite> current_;
bool activated_;
};
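// Hedged sketch of the Enter/Exit protocol above (the boilerplate factory
// call is hypothetical); each EnterNewScope() is paired with an ExitScope()
// on the same handle while walking one (sub)literal:
//   AllocationSiteCreationContext context(isolate);
//   Handle<AllocationSite> site = context.EnterNewScope();
//   Handle<JSObject> boilerplate = CreateLiteralBoilerplate(...);
//   context.ExitScope(site, boilerplate);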
// AllocationSiteCreationContext aids in the creation of AllocationSites to
// accompany object literals.
class AllocationSiteCreationContext : public AllocationSiteContext {
public:
explicit AllocationSiteCreationContext(Isolate* isolate)
: AllocationSiteContext(isolate, true) { }
virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
virtual void ExitScope(Handle<AllocationSite> site,
Handle<JSObject> object) V8_OVERRIDE;
};
// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
// behind some/all components of a copied object literal.
class AllocationSiteUsageContext : public AllocationSiteContext {
public:
AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
bool activated)
: AllocationSiteContext(isolate, activated),
top_site_(site) { }
virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
virtual void ExitScope(Handle<AllocationSite> site,
Handle<JSObject> object) V8_OVERRIDE;
private:
Handle<AllocationSite> top_site_;
};
} } // namespace v8::internal
#endif // V8_ALLOCATION_SITE_SCOPES_H_

279
deps/v8/src/allocation-tracker.cc

@@ -0,0 +1,279 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "allocation-tracker.h"
#include "heap-snapshot-generator.h"
#include "frames-inl.h"
namespace v8 {
namespace internal {
AllocationTraceNode::AllocationTraceNode(
AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
: tree_(tree),
function_id_(shared_function_info_id),
total_size_(0),
allocation_count_(0),
id_(tree->next_node_id()) {
}
AllocationTraceNode::~AllocationTraceNode() {
}
AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
for (int i = 0; i < children_.length(); i++) {
AllocationTraceNode* node = children_[i];
if (node->function_id() == id) return node;
}
return NULL;
}
AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
AllocationTraceNode* child = FindChild(id);
if (child == NULL) {
child = new AllocationTraceNode(tree_, id);
children_.Add(child);
}
return child;
}
void AllocationTraceNode::AddAllocation(unsigned size) {
total_size_ += size;
++allocation_count_;
}
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
if (tracker != NULL) {
const char* name = "<unknown function>";
if (function_id_ != 0) {
AllocationTracker::FunctionInfo* info =
tracker->GetFunctionInfo(function_id_);
if (info != NULL) {
name = info->name;
}
}
OS::Print("%s #%u", name, id_);
} else {
OS::Print("%u #%u", function_id_, id_);
}
OS::Print("\n");
indent += 2;
for (int i = 0; i < children_.length(); i++) {
children_[i]->Print(indent, tracker);
}
}
AllocationTraceTree::AllocationTraceTree()
: next_node_id_(1),
root_(this, 0) {
}
AllocationTraceTree::~AllocationTraceTree() {
}
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
const Vector<SnapshotObjectId>& path) {
AllocationTraceNode* node = root();
for (SnapshotObjectId* entry = path.start() + path.length() - 1;
entry != path.start() - 1;
--entry) {
node = node->FindOrAddChild(*entry);
}
return node;
}
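// Worked example (illustrative ids): the loop above starts at the last
// element, so a trace captured leaf-first is stored root-first:
//   SnapshotObjectId path[] = { 30, 20, 10 };  // leaf, caller, outermost
//   tree->AddPathFromEnd(Vector<SnapshotObjectId>(path, 3));
//   // extends root -> 10 -> 20 -> 30 and returns the node for id 30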
void AllocationTraceTree::Print(AllocationTracker* tracker) {
OS::Print("[AllocationTraceTree:]\n");
OS::Print("Total size | Allocation count | Function id | id\n");
root()->Print(0, tracker);
}
void AllocationTracker::DeleteUnresolvedLocation(
UnresolvedLocation** location) {
delete *location;
}
AllocationTracker::FunctionInfo::FunctionInfo()
: name(""),
script_name(""),
script_id(0),
line(-1),
column(-1) {
}
static bool AddressesMatch(void* key1, void* key2) {
return key1 == key2;
}
AllocationTracker::AllocationTracker(
HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
id_to_function_info_(AddressesMatch) {
}
AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
}
void AllocationTracker::PrepareForSerialization() {
List<UnresolvedLocation*> copy(unresolved_locations_.length());
copy.AddAll(unresolved_locations_);
unresolved_locations_.Clear();
for (int i = 0; i < copy.length(); i++) {
copy[i]->Resolve();
delete copy[i];
}
}
void AllocationTracker::NewObjectEvent(Address addr, int size) {
DisallowHeapAllocation no_allocation;
Heap* heap = ids_->heap();
// Mark the new block as FreeSpace to make sure the heap is iterable
// while we are capturing the stack trace.
FreeListNode::FromAddress(addr)->set_size(heap, size);
ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
Isolate* isolate = heap->isolate();
int length = 0;
StackTraceFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo* shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindEntry(shared->address());
allocation_trace_buffer_[length++] = id;
AddFunctionInfo(shared, id);
it.Advance();
}
AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
top_node->AddAllocation(size);
}
static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
return ComputeIntegerHash(static_cast<uint32_t>(id),
v8::internal::kZeroHashSeed);
}
AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
SnapshotObjectId id) {
HashMap::Entry* entry = id_to_function_info_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
if (entry == NULL) {
return NULL;
}
return reinterpret_cast<FunctionInfo*>(entry->value);
}
void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
SnapshotObjectId id) {
HashMap::Entry* entry = id_to_function_info_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
info->name = names_->GetFunctionName(shared->DebugName());
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
if (script->name()->IsName()) {
Name* name = Name::cast(script->name());
info->script_name = names_->GetName(name);
}
info->script_id = script->id()->value();
// Converting the start offset into line and column may cause heap
// allocations, so we postpone them until snapshot serialization.
unresolved_locations_.Add(new UnresolvedLocation(
script,
shared->start_position(),
info));
}
entry->value = info;
}
}
AllocationTracker::UnresolvedLocation::UnresolvedLocation(
Script* script, int start, FunctionInfo* info)
: start_position_(start),
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
GlobalHandles::MakeWeak(
reinterpret_cast<Object**>(script_.location()),
this, &HandleWeakScript);
}
AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
if (!script_.is_null()) {
script_->GetIsolate()->global_handles()->Destroy(
reinterpret_cast<Object**>(script_.location()));
}
}
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* data) {
UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
location->script_ = Handle<Script>::null();
obj->Dispose();
}
} } // namespace v8::internal

138
deps/v8/src/allocation-tracker.h

@@ -0,0 +1,138 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ALLOCATION_TRACKER_H_
#define V8_ALLOCATION_TRACKER_H_
namespace v8 {
namespace internal {
class HeapObjectsMap;
class AllocationTraceTree;
class AllocationTraceNode {
public:
AllocationTraceNode(AllocationTraceTree* tree,
SnapshotObjectId shared_function_info_id);
~AllocationTraceNode();
AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
void AddAllocation(unsigned size);
SnapshotObjectId function_id() const { return function_id_; }
unsigned allocation_size() const { return total_size_; }
unsigned allocation_count() const { return allocation_count_; }
unsigned id() const { return id_; }
Vector<AllocationTraceNode*> children() const { return children_.ToVector(); }
void Print(int indent, AllocationTracker* tracker);
private:
AllocationTraceTree* tree_;
SnapshotObjectId function_id_;
unsigned total_size_;
unsigned allocation_count_;
unsigned id_;
List<AllocationTraceNode*> children_;
DISALLOW_COPY_AND_ASSIGN(AllocationTraceNode);
};
class AllocationTraceTree {
public:
AllocationTraceTree();
~AllocationTraceTree();
AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
AllocationTraceNode* root() { return &root_; }
unsigned next_node_id() { return next_node_id_++; }
void Print(AllocationTracker* tracker);
private:
unsigned next_node_id_;
AllocationTraceNode root_;
DISALLOW_COPY_AND_ASSIGN(AllocationTraceTree);
};
class AllocationTracker {
public:
struct FunctionInfo {
FunctionInfo();
const char* name;
const char* script_name;
int script_id;
int line;
int column;
};
AllocationTracker(HeapObjectsMap* ids, StringsStorage* names);
~AllocationTracker();
void PrepareForSerialization();
void NewObjectEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
HashMap* id_to_function_info() { return &id_to_function_info_; }
FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
private:
void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
class UnresolvedLocation {
public:
UnresolvedLocation(Script* script, int start, FunctionInfo* info);
~UnresolvedLocation();
void Resolve();
private:
static void HandleWeakScript(v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* data);
Handle<Script> script_;
int start_position_;
FunctionInfo* info_;
};
static void DeleteUnresolvedLocation(UnresolvedLocation** location);
static const int kMaxAllocationTraceLength = 64;
HeapObjectsMap* ids_;
StringsStorage* names_;
AllocationTraceTree trace_tree_;
SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
HashMap id_to_function_info_;
List<UnresolvedLocation*> unresolved_locations_;
DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};
} } // namespace v8::internal
#endif // V8_ALLOCATION_TRACKER_H_

877
deps/v8/src/api.cc

File diff suppressed because it is too large

26
deps/v8/src/api.h

@@ -542,12 +542,12 @@ class HandleScopeImplementer {
inline void DecrementCallDepth() { call_depth_--; }
inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Object> context);
inline bool LeaveLastContext();
inline void EnterContext(Handle<Context> context);
inline bool LeaveContext(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
inline Handle<Object> LastEnteredContext();
inline Handle<Context> LastEnteredContext();
inline void SaveContext(Context* context);
inline Context* RestoreContext();
@@ -592,7 +592,7 @@ class HandleScopeImplementer {
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
List<Handle<Object> > entered_contexts_;
List<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
Object** spare_;
@@ -630,21 +630,23 @@ bool HandleScopeImplementer::HasSavedContexts() {
}
void HandleScopeImplementer::EnterContext(Handle<Object> context) {
entered_contexts_.Add(context);
void HandleScopeImplementer::EnterContext(Handle<Context> context) {
entered_contexts_.Add(*context);
}
bool HandleScopeImplementer::LeaveLastContext() {
bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
if (entered_contexts_.is_empty()) return false;
// TODO(dcarney): figure out what's wrong here
// if (entered_contexts_.last() != *context) return false;
entered_contexts_.RemoveLast();
return true;
}
Handle<Object> HandleScopeImplementer::LastEnteredContext() {
if (entered_contexts_.is_empty()) return Handle<Object>::null();
return entered_contexts_.last();
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
if (entered_contexts_.is_empty()) return Handle<Context>::null();
return Handle<Context>(entered_contexts_.last());
}
@@ -665,7 +667,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#ifdef DEBUG
// SealHandleScope may make the prev_limit to point inside the block.
if (block_start <= prev_limit && prev_limit <= block_limit) {
#ifdef ENABLE_EXTRA_CHECKS
#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(prev_limit, block_limit);
#endif
break;
@@ -675,7 +677,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#endif
blocks_.RemoveLast();
#ifdef ENABLE_EXTRA_CHECKS
#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(block_start, block_limit);
#endif
if (spare_ != NULL) {

1
deps/v8/src/apinatives.js

@@ -71,7 +71,6 @@ function InstantiateFunction(data, name) {
(serialNumber in cache) && (cache[serialNumber] != kUninitialized);
if (!isFunctionCached) {
try {
cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
var flags = %GetTemplateField(data, kApiFlagOffset);

13
deps/v8/src/arguments.cc

@@ -38,7 +38,7 @@ template<typename T>
template<typename V>
v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
Object** handle = &this->end()[kReturnValueOffset];
Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set, return empty handle as per previous behaviour.
if ((*handle)->IsTheHole()) return v8::Handle<V>();
return Utils::Convert<Object, V>(Handle<Object>(handle));
@@ -49,7 +49,7 @@ v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(end(),
FunctionCallbackInfo<v8::Value> info(begin(),
argv_,
argc_,
is_construct_call_);
@@ -63,7 +63,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(end()); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -75,7 +75,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(end()); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -88,7 +88,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(end()); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -101,7 +101,7 @@ void PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(end()); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
}
@@ -118,4 +118,3 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
} } // namespace v8::internal

18
deps/v8/src/arguments.h

@@ -137,7 +137,7 @@ class CustomArgumentsBase : public Relocatable {
v->VisitPointers(values_, values_ + kArrayLength);
}
protected:
inline Object** end() { return values_ + kArrayLength - 1; }
inline Object** begin() { return values_; }
explicit inline CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
Object* values_[kArrayLength];
@@ -151,7 +151,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
this->end()[kReturnValueOffset] =
this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
@@ -162,7 +162,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
v8::Handle<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
return reinterpret_cast<Isolate*>(this->end()[T::kIsolateIndex]);
return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
}
};
@@ -185,7 +185,7 @@ class PropertyCallbackArguments
Object* self,
JSObject* holder)
: Super(isolate) {
Object** values = this->end();
Object** values = this->begin();
values[T::kThisIndex] = self;
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
@@ -237,6 +237,13 @@ class FunctionCallbackArguments
typedef FunctionCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kCalleeIndex = T::kCalleeIndex;
static const int kContextSaveIndex = T::kContextSaveIndex;
FunctionCallbackArguments(internal::Isolate* isolate,
internal::Object* data,
@@ -249,10 +256,11 @@ class FunctionCallbackArguments
argv_(argv),
argc_(argc),
is_construct_call_(is_construct_call) {
Object** values = end();
Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
// It cannot escape into js as it's remove in Call below.

7
deps/v8/src/arm/assembler-arm-inl.h

@@ -208,6 +208,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
static const int kNoCodeAgeSequenceLength = 3;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on Arm.
return Handle<Object>();
}
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(

1
deps/v8/src/arm/assembler-arm.cc

@@ -50,6 +50,7 @@ bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;

81
deps/v8/src/arm/assembler-arm.h

@@ -64,23 +64,41 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
return (supported_ & (1u << f)) != 0;
return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
return (found_by_runtime_probing_only_ &
(static_cast<uint64_t>(1) << f)) != 0;
return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
return (IsSupported(f) &&
return Check(f, cross_compile_) ||
(IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
static unsigned cache_line_size() { return cache_line_size_; }
static bool VerifyCrossCompiling() {
return cross_compile_ == 0;
}
static bool VerifyCrossCompiling(CpuFeature f) {
unsigned mask = flag2set(f);
return cross_compile_ == 0 ||
(cross_compile_ & mask) == mask;
}
private:
static bool Check(CpuFeature f, unsigned set) {
return (set & flag2set(f)) != 0;
}
static unsigned flag2set(CpuFeature f) {
return 1u << f;
}
#ifdef DEBUG
static bool initialized_;
#endif
@@ -88,7 +106,10 @@ class CpuFeatures : public AllStatic {
static unsigned found_by_runtime_probing_only_;
static unsigned cache_line_size_;
static unsigned cross_compile_;
friend class ExternalReference;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
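// Hedged sketch of the new cross-compile checks (ARMv7 is assumed to be a
// CpuFeature value on this port):
//   CpuFeatures::VerifyCrossCompiling();       // true when no target is set
//   CpuFeatures::VerifyCrossCompiling(ARMv7);  // also true when ARMv7 is in
//                                              // the cross-compile target set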
@@ -114,21 +135,47 @@ class CpuFeatures : public AllStatic {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
// These constants are used in several locations, including static initializers
const int kRegister_no_reg_Code = -1;
const int kRegister_r0_Code = 0;
const int kRegister_r1_Code = 1;
const int kRegister_r2_Code = 2;
const int kRegister_r3_Code = 3;
const int kRegister_r4_Code = 4;
const int kRegister_r5_Code = 5;
const int kRegister_r6_Code = 6;
const int kRegister_r7_Code = 7;
const int kRegister_r8_Code = 8;
const int kRegister_r9_Code = 9;
const int kRegister_r10_Code = 10;
const int kRegister_fp_Code = 11;
const int kRegister_ip_Code = 12;
const int kRegister_sp_Code = 13;
const int kRegister_lr_Code = 14;
const int kRegister_pc_Code = 15;
// Core register
struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters = 8;
static const int kMaxNumAllocatableRegisters =
FLAG_enable_ool_constant_pool ? 8 : 9;
static const int kSizeInBytes = 4;
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
if (FLAG_enable_ool_constant_pool && (reg.code() >= kRegister_r8_Code)) {
return reg.code() - 1;
}
ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
if (FLAG_enable_ool_constant_pool && (index >= 7)) {
return from_code(index + 1);
}
return from_code(index);
}
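With the out-of-line constant pool enabled, r7 is reserved as the constant pool pointer, so the allocator sees r0-r6 plus r8, and every register code at or above r8's shifts down by one allocation index. A hedged sketch of that mapping, simplified from the two methods above:

// Illustrative index mapping when register code 7 (r7) is reserved.
int ToAllocationIndex(int code, bool ool_constant_pool) {
  return (ool_constant_pool && code >= 8) ? code - 1 : code;
}
int FromAllocationIndex(int index, bool ool_constant_pool) {
  return (ool_constant_pool && index >= 7) ? index + 1 : index;
}
// ToAllocationIndex(8, true) == 7 and FromAllocationIndex(7, true) == 8:
// r8 takes the allocation slot r7 would otherwise occupy.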
@@ -143,7 +190,11 @@ struct Register {
"r5",
"r6",
"r7",
"r8",
};
if (FLAG_enable_ool_constant_pool && (index >= 7)) {
return names[index + 1];
}
return names[index];
}
@@ -172,25 +223,6 @@ struct Register {
int code_;
};
// These constants are used in several locations, including static initializers
const int kRegister_no_reg_Code = -1;
const int kRegister_r0_Code = 0;
const int kRegister_r1_Code = 1;
const int kRegister_r2_Code = 2;
const int kRegister_r3_Code = 3;
const int kRegister_r4_Code = 4;
const int kRegister_r5_Code = 5;
const int kRegister_r6_Code = 6;
const int kRegister_r7_Code = 7;
const int kRegister_r8_Code = 8;
const int kRegister_r9_Code = 9;
const int kRegister_r10_Code = 10;
const int kRegister_fp_Code = 11;
const int kRegister_ip_Code = 12;
const int kRegister_sp_Code = 13;
const int kRegister_lr_Code = 14;
const int kRegister_pc_Code = 15;
const Register no_reg = { kRegister_no_reg_Code };
const Register r0 = { kRegister_r0_Code };
@@ -200,6 +232,7 @@ const Register r3 = { kRegister_r3_Code };
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r8 = { kRegister_r8_Code };

116
deps/v8/src/arm/builtins-arm.cc

@@ -193,14 +193,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register argument = r2;
Label not_cached, argument_is_string;
NumberToStringStub::GenerateLookupNumberStringCache(
masm,
r0, // Input.
argument, // Result.
r3, // Scratch.
r4, // Scratch.
r5, // Scratch.
&not_cached);
__ LookupNumberStringCache(r0, // Input.
argument, // Result.
r3, // Scratch.
r4, // Scratch.
r5, // Scratch.
&not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
@@ -447,9 +445,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
@@ -457,14 +454,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmp(r0, r6);
__ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
__ cmp(r0, ip);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(r5, r0, r7);
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
__ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
}
__ InitializeFieldsWithFiller(r5, r6, r7);
__ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
__ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -529,16 +528,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
if (count_constructions) {
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
} else if (FLAG_debug_code) {
__ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
__ cmp(r7, r8);
__ Assert(eq, kUndefinedValueNotLoaded);
}
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
__ str(r7, MemOperand(r2, kPointerSize, PostIndex));
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
@@ -702,7 +695,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
// r5-r7, cp may be clobbered
// r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@@ -742,7 +735,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
__ mov(r7, Operand(r4));
if (!FLAG_enable_ool_constant_pool) {
__ mov(r7, Operand(r4));
}
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -807,12 +802,13 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - function object
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(1, 0, r1);
__ PrepareCallCFunction(1, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 1);
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
@@ -830,6 +826,39 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection, which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(1, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(2 * kPointerSize));
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
__ mov(pc, r0);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -895,21 +924,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// For now, we are relying on the fact that Runtime::NotifyOSR
// doesn't do any garbage collection, which allows us to save/restore
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kNotifyOSR, 0);
}
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -956,6 +970,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// We check the stack limit as an indicator that recompilation might be done.
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
__ bind(&ok);
__ Ret();
}
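The new builtin only re-checks the stack limit before tail-calling the regular OSR entry point; if the limit was hit, the runtime gets a chance to finish recompilation first. Roughly, as a hedged compilable sketch (the two helpers are stand-ins, not V8 APIs):

// Sketch of the control flow in Generate_OsrAfterStackCheck above.
#include <cstdint>

void CallRuntimeStackGuard();     // stand-in for CallRuntime(kStackGuard)
void JumpToOnStackReplacement();  // stand-in for the tail jump

void OsrAfterStackCheck(uintptr_t sp, uintptr_t stack_limit) {
  if (sp >= stack_limit) return;  // no pending interrupt: plain return
  CallRuntimeStackGuard();        // let the runtime service the interrupt
  JumpToOnStackReplacement();     // then attempt OSR proper
}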
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments

1346
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

26
deps/v8/src/arm/code-stubs-arm.h

@@ -106,7 +106,6 @@ class StringHelper : public AllStatic {
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
int flags);
@@ -257,31 +256,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
};
class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache, the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache, the code jumps to
// the label not_found with only the content of register object unchanged.
static void GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found);
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
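The comment block deleted here documents the cache contract that moved to the macro assembler's LookupNumberStringCache: a hit falls through with the result, a miss jumps away with the input register untouched. As a rough illustration of that contract only (the map-based storage below is an assumption for the sketch; V8's cache is a fixed-size array of key/value pairs, not a map):

// Illustrative lookup contract; not V8's actual cache layout.
#include <map>
#include <string>

const std::string* LookupNumberStringCache(
    double number, const std::map<double, std::string>& cache) {
  auto it = cache.find(number);
  return it == cache.end() ? nullptr      // "not_found": input unchanged
                           : &it->second; // hit: fall through with result
}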
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,

115
deps/v8/src/arm/codegen-arm.cc

@@ -55,7 +55,7 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
return Simulator::current(Isolate::Current())->CallFP(
return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
fast_exp_arm_machine_code, x, 0);
}
#endif
@@ -402,8 +402,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, allocation_memento_found);
__ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
}
// Set transitioned map.
@@ -432,8 +431,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, fail);
__ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -444,15 +442,16 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedArray
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
__ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
// r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
@@ -483,15 +482,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
__ add(r6, r7, Operand(r5, LSL, 2));
__ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
__ add(r6, r9, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
// r9: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
@@ -514,30 +513,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Convert and copy elements.
__ bind(&loop);
__ ldr(r9, MemOperand(r3, 4, PostIndex));
// r9: current element
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
__ ldr(lr, MemOperand(r3, 4, PostIndex));
// lr: current element
__ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
__ vmov(s0, r9);
__ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
__ add(r7, r7, Operand(8));
__ vstr(d0, r9, 0);
__ add(r9, r9, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
__ SmiTag(r9);
__ orr(r9, r9, Operand(1));
__ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
__ SmiTag(lr);
__ orr(lr, lr, Operand(1));
__ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
__ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
__ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
__ bind(&entry);
__ cmp(r7, r6);
__ cmp(r9, r6);
__ b(lt, &loop);
__ pop(lr);
@@ -558,8 +557,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, fail);
__ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -577,7 +575,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
__ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
__ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
@@ -589,14 +587,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
__ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offset addresses in r4 to take full advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
// r7: the-hole pointer
// r9: heap number map
__ b(&entry);
@@ -608,7 +604,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
// lr: current element's upper 32 bit
// r1: current element's upper 32 bit
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
@@ -631,7 +627,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ str(r7, MemOperand(r3, 4, PostIndex));
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ str(r0, MemOperand(r3, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
@@ -775,50 +772,65 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
Label done;
Label zero, infinity, done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ vldr(double_scratch1, ExpConstant(0, temp3));
__ vmov(result, kDoubleRegZero);
__ VFPCompareAndSetFlags(double_scratch1, input);
__ b(ge, &done);
__ b(ge, &zero);
__ vldr(double_scratch2, ExpConstant(1, temp3));
__ VFPCompareAndSetFlags(input, double_scratch2);
__ vldr(result, ExpConstant(2, temp3));
__ b(ge, &done);
__ b(ge, &infinity);
__ vldr(double_scratch1, ExpConstant(3, temp3));
__ vldr(result, ExpConstant(4, temp3));
__ vmul(double_scratch1, double_scratch1, input);
__ vadd(double_scratch1, double_scratch1, result);
__ vmov(temp2, temp1, double_scratch1);
__ VmovLow(temp2, double_scratch1);
__ vsub(double_scratch1, double_scratch1, result);
__ vldr(result, ExpConstant(6, temp3));
__ vldr(double_scratch2, ExpConstant(5, temp3));
__ vmul(double_scratch1, double_scratch1, double_scratch2);
__ vsub(double_scratch1, double_scratch1, input);
__ vsub(result, result, double_scratch1);
__ vmul(input, double_scratch1, double_scratch1);
__ vmul(result, result, input);
__ mov(temp1, Operand(temp2, LSR, 11));
__ vmul(double_scratch2, double_scratch1, double_scratch1);
__ vmul(result, result, double_scratch2);
__ vldr(double_scratch2, ExpConstant(7, temp3));
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
__ vldr(double_scratch2, ExpConstant(8, temp3));
// Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
ASSERT(*reinterpret_cast<double*>
(ExternalReference::math_exp_constants(8).address()) == 1);
__ vmov(double_scratch2, 1);
__ vadd(result, result, double_scratch2);
__ movw(ip, 0x7ff);
__ and_(temp2, temp2, Operand(ip));
__ mov(temp1, Operand(temp2, LSR, 11));
__ Ubfx(temp2, temp2, 0, 11);
__ add(temp1, temp1, Operand(0x3ff));
__ mov(temp1, Operand(temp1, LSL, 20));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
__ add(temp3, temp3, Operand(kPointerSize));
__ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
__ orr(temp1, temp1, temp2);
__ vmov(input, ip, temp1);
__ vmul(result, result, input);
__ add(temp3, temp3, Operand(temp2, LSL, 3));
__ ldm(ia, temp3, temp2.bit() | temp3.bit());
// The first word loaded goes into the lower-numbered register.
if (temp2.code() < temp3.code()) {
__ orr(temp1, temp3, Operand(temp1, LSL, 20));
__ vmov(double_scratch1, temp2, temp1);
} else {
__ orr(temp1, temp2, Operand(temp1, LSL, 20));
__ vmov(double_scratch1, temp3, temp1);
}
__ vmul(result, result, double_scratch1);
__ b(&done);
__ bind(&zero);
__ vmov(result, kDoubleRegZero);
__ b(&done);
__ bind(&infinity);
__ vldr(result, ExpConstant(2, temp3));
__ bind(&done);
}
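The rewritten tail above splits the fixed-point value in temp2 into an 11-bit table index and a power-of-two exponent, loads the table entry with a single ldm, and merges in the biased exponent with an orr. The power-of-two half is the classic IEEE-754 bit trick; a hedged scalar sketch (assuming k stays in the normal double range):

// Sketch: materialize 2^k by writing the biased exponent (k + 1023) into
// bits 62..52 of an IEEE-754 double, as the shifted add above does with
// the high word only.
#include <cstdint>
#include <cstring>

double PowerOfTwo(int k) {
  uint64_t bits = static_cast<uint64_t>(k + 1023) << 52;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;  // PowerOfTwo(3) == 8.0
}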
@@ -859,7 +871,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
*age = kNoAge;
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
@@ -870,16 +882,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
void Code::PatchPlatformCodeAge(byte* sequence,
void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
if (age == kNoAge) {
if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(age, parity);
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));

1
deps/v8/src/arm/codegen-arm.h

@ -97,6 +97,7 @@ class StringCharLoadGenerator : public AllStatic {
class MathExpGenerator : public AllStatic {
public:
// Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,

107
deps/v8/src/arm/deoptimizer-arm.cc

@@ -81,100 +81,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
// e1 2f ff 3c blx ip
// ok-label
//
// We patch the code to the following form:
//
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// ok-label
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->nop();
// Replace the call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
// Restore the original call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
}
#ifdef DEBUG
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
uint32_t interrupt_address_offset =
Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return PATCHED_FOR_OSR;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return NOT_PATCHED;
}
}
#endif // DEBUG
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -201,10 +107,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->register_param_count_;
if (descriptor->stack_parameter_count_ != NULL) {
params++;
}
int params = descriptor->environment_length();
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}
@@ -362,8 +265,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
__ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r6);
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
@@ -409,9 +312,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ InitializeRootRegister();
__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack
__ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(r7);
__ Jump(ip);
__ stop("Unreachable.");
}

2
deps/v8/src/arm/frames-arm.h

@@ -64,7 +64,7 @@ const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
1 << 7 | // r7 v4
1 << 7 | // r7 v4 (pp in JavaScript code)
1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7

171
deps/v8/src/arm/full-codegen-arm.cc

@@ -148,13 +148,10 @@ void FullCodeGenerator::Generate() {
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ cmp(r5, Operand::Zero());
__ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
__ str(r2, MemOperand(sp, receiver_offset), ne);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -163,16 +160,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
{
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
__ nop(ip.code());
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
__ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1167,7 +1155,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ Move(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
@@ -1651,13 +1639,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
expr->depth() > 1) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -3592,8 +3578,8 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
VisitForStackValue(args->at(0));
// Load the argument into r0 and call the stub.
VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3964,9 +3950,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
@@ -3984,19 +3969,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Register string = r4;
Register element = r5;
Register elements_end = r6;
Register scratch1 = r7;
Register scratch2 = r9;
Register scratch = r9;
// Separator operand is on the stack.
__ pop(separator);
// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
__ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
__ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
__ b(ne, &bailout);
// Check that the array has fast elements.
__ CheckFastElements(scratch1, scratch2, &bailout);
__ CheckFastElements(scratch, array_length, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -4033,11 +4017,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
__ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -4058,23 +4042,23 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that the separator is a flat ASCII string.
__ JumpIfSmi(separator, &bailout);
__ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// a smi but the other values are, so the result is a smi.
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
__ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch));
__ smull(scratch, ip, array_length, scratch);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
__ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
__ tst(scratch2, Operand(0x80000000));
__ tst(scratch, Operand(0x80000000));
__ b(ne, &bailout);
__ add(string_length, string_length, Operand(scratch2), SetCC);
__ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
@@ -4091,9 +4075,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// array_length: Length of the array.
__ AllocateAsciiString(result,
string_length,
scratch1,
scratch2,
elements_end,
scratch,
string, // used as scratch
elements_end, // used as scratch
&bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
@@ -4106,8 +4090,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ cmp(scratch1, Operand(Smi::FromInt(1)));
__ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ cmp(scratch, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -4125,7 +4109,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4157,7 +4141,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4178,7 +4162,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
separator,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4187,7 +4171,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4894,6 +4878,91 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc,
BackEdgeState target_state,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
Address branch_address = pc - 3 * kInstrSize;
CodePatcher patcher(branch_address, 1);
switch (target_state) {
case INTERRUPT:
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
// e1 2f ff 3c blx ip
// ok-label
patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
break;
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// ok-label
patcher.masm()->nop();
break;
}
Address pc_immediate_load_address = pc - 2 * kInstrSize;
// Replace the call address.
uint32_t interrupt_address_offset =
Memory::uint16_at(pc_immediate_load_address) & 0xfff;
Address interrupt_address_pointer = pc + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_immediate_load_address, replacement_code);
}
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate,
Code* unoptimized_code,
Address pc) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
Address branch_address = pc - 3 * kInstrSize;
Address pc_immediate_load_address = pc - 2 * kInstrSize;
uint32_t interrupt_address_offset =
Memory::uint16_at(pc_immediate_load_address) & 0xfff;
Address interrupt_address_pointer = pc + interrupt_address_offset;
if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
reinterpret_cast<uint32_t>(
isolate->builtins()->InterruptCheck()->entry()));
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_immediate_load_address)));
return INTERRUPT;
}
ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_immediate_load_address)));
if (Memory::uint32_at(interrupt_address_pointer) ==
reinterpret_cast<uint32_t>(
isolate->builtins()->OnStackReplacement()->entry())) {
return ON_STACK_REPLACEMENT;
}
ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
reinterpret_cast<uint32_t>(
isolate->builtins()->OsrAfterStackCheck()->entry()));
return OSR_AFTER_STACK_CHECK;
}
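Read together, PatchAt and GetBackEdgeState form a small state machine over the patched instruction: a live bpl means the interrupt check is still armed, a NOP means some OSR entry is, and the call target then separates the two OSR flavors. A simplified decision sketch, with boolean inputs standing in for the memory reads above:

// Illustrative classification mirroring GetBackEdgeState; not V8 code.
enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

BackEdgeState Classify(bool branch_is_bpl, bool target_is_osr_builtin) {
  if (branch_is_bpl) return INTERRUPT;  // original interrupt sequence intact
  return target_is_osr_builtin ? ON_STACK_REPLACEMENT
                               : OSR_AFTER_STACK_CHECK;
}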
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

6
deps/v8/src/arm/ic-arm.cc

@@ -656,7 +656,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -1394,7 +1394,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register receiver = r2;
Register receiver_map = r3;
Register elements_map = r6;
Register elements = r7; // Elements array of the receiver.
Register elements = r9; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1487,7 +1487,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(

423
deps/v8/src/arm/lithium-arm.cc

@@ -412,18 +412,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
int LPlatformChunk::GetNextSpillIndex(bool is_double) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot when allocating a double-width slot.
if (is_double) spill_slot_count_++;
if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
int index = GetNextSpillIndex(kind);
if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
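A double-width spill slot occupies two consecutive word-sized indices, so the counter is bumped once before the index is handed out and the returned index identifies the pair. A small sketch of the same allocation rule (illustrative only):

// Illustrative spill-slot counter: doubles consume two indices.
int NextSpillIndex(int* spill_slot_count, bool is_double) {
  if (is_double) ++*spill_slot_count;  // reserve the extra word first
  return (*spill_slot_count)++;        // index handed to the slot
}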
@@ -439,7 +440,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
chunk_->GetNextSpillIndex(false);
chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -655,7 +656,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -710,51 +711,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
bool does_deopt = false;
if (right_value->IsConstant()) {
HConstant* constant = HConstant::cast(right_value);
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
bool does_deopt = false;
if (right_value->IsConstant()) {
HConstant* constant = HConstant::cast(right_value);
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseRegisterAtStart(right_value);
}
} else {
right = UseRegisterAtStart(right_value);
}
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
if (op == Token::SHR && constant_value == 0) {
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
if (op == Token::SHR && constant_value == 0) {
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
}
LInstruction* result =
DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
LInstruction* result =
DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
} else {
return DoArithmeticT(op, instr);
}
}
@@ -763,29 +757,34 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineAsRegister(result);
if (op == Token::MOD) {
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = UseFixedDouble(instr->right(), d2);
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
// We call a C function for double modulo. It can't trigger a GC. We need
// to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
return MarkAsCall(DefineFixedDouble(result, d1), instr);
} else {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineAsRegister(result);
}
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr) {
ASSERT(op == Token::ADD ||
op == Token::DIV ||
op == Token::MOD ||
op == Token::MUL ||
op == Token::SUB);
HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
new(zone()) LArithmeticT(op, left_operand, right_operand);
new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -861,9 +860,31 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
HValue* first_operand = current->OperandCount() == 0
? graph()->GetConstant1()
: current->OperandAt(0);
instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand)));
for (int i = 1; i < current->OperandCount(); ++i) {
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
chunk_->AddInstruction(dummy, current_block_);
}
} else {
instr = current->CompileToLithium(this);
}
argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
if (instr != NULL) {
// Associate the hydrogen instruction first, since we may need it for
// the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
instr->set_hydrogen_value(current);
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -893,14 +914,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -992,19 +1011,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
if (value->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new(zone()) LGoto(successor->block_id());
}
LInstruction* goto_instr = CheckElideControlInstruction(instr);
if (goto_instr != NULL) return goto_instr;
HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
@@ -1047,9 +1062,10 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
UseFixed(instr->right(), r1));
new(zone()) LInstanceOf(context, UseFixed(instr->left(), r0),
UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1057,18 +1073,14 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
FixedTemp(r4));
new(zone()) LInstanceOfKnownGlobal(
UseFixed(instr->context(), cp),
UseFixed(instr->left(), r0),
FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LInstanceSize(object));
}
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1091,7 +1103,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
LOperand* argument = Use(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1122,14 +1133,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
// If there is a non-return use, the context must be allocated in a register.
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->IsReturn()) {
return DefineAsRegister(new(zone()) LContext);
}
if (instr->HasNoUses()) return NULL;
if (info()->IsStub()) {
return DefineFixed(new(zone()) LContext, cp);
}
return NULL;
return DefineAsRegister(new(zone()) LContext);
}
@@ -1140,7 +1150,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
return MarkAsCall(new(zone()) LDeclareGlobals, instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
@@ -1158,15 +1169,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(function);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1206,8 +1216,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
Representation r = instr->value()->representation();
LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
? NULL
: UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
LMathAbs* result = new(zone()) LMathAbs(input);
LMathAbs* result = new(zone()) LMathAbs(context, input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -1243,7 +1257,7 @@ LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseTempRegister(instr->value());
LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
@@ -1269,57 +1283,57 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), r2);
return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
return MarkAsCall(
DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(constructor);
LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(constructor);
LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
instr);
return MarkAsCall(
DefineFixed(new(zone()) LCallFunction(context, function), r0), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
}
@@ -1347,41 +1361,34 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsSmiOrInteger32()) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div =
new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
return AssignEnvironment(DefineSameAsFirst(div));
LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
return AssignEnvironment(DefineAsRegister(div));
}
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
return DoArithmeticT(Token::DIV, instr);
}
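DoDiv now dispatches purely on representation, and DoMod below is reshaped the same way: integer inputs get the specialized LDivI, doubles go through DoArithmeticD, and everything else falls back to the generic stub call in DoArithmeticT. An illustrative, self-contained restatement of that dispatch:

#include <cstdio>

enum class Rep { kSmiOrInteger32, kDouble, kTagged };

// Mirrors the order of the checks in DoDiv above; names are illustrative.
const char* SelectDivLowering(Rep rep) {
  switch (rep) {
    case Rep::kSmiOrInteger32: return "LDivI (may deoptimize on corner cases)";
    case Rep::kDouble:         return "DoArithmeticD -> LArithmeticD";
    default:                   return "DoArithmeticT -> generic stub call";
  }
}

int main() { std::printf("%s\n", SelectDivLowering(Rep::kTagged)); }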
@@ -1502,17 +1509,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
// We call a C function for double modulo. It can't trigger a GC. We need
// to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
UseFixedDouble(left, d1),
UseFixedDouble(right, d2));
return MarkAsCall(DefineFixedDouble(mod, d1), instr);
return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1679,7 +1679,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1737,9 +1736,10 @@ LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LCmpT* result = new(zone()) LCmpT(left, right);
LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1766,6 +1766,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LInstruction* goto_instr = CheckElideControlInstruction(instr);
if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1774,8 +1776,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
return new(zone()) LCmpHoleAndBranch(object);
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1813,10 +1815,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LStringCompareAndBranch* result =
new(zone()) LStringCompareAndBranch(left, right);
new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1883,11 +1886,9 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegister(instr->index());
LOperand* value = UseTempRegister(instr->value());
LSeqStringSetChar* result =
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
return DefineAsRegister(result);
LOperand* index = UseRegisterOrConstant(instr->index());
LOperand* value = UseRegister(instr->value());
return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
}
@@ -1905,9 +1906,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
return NULL;
}
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r0);
return MarkAsCall(new(zone()) LThrow(value), instr);
return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1936,7 +1945,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -2006,8 +2014,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result =
DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
: DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2040,12 +2049,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
return new(zone())
LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2093,8 +2096,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
: NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
return new(zone()) LReturn(UseFixed(instr->value(), r0),
return new(zone()) LReturn(UseFixed(instr->value(), r0), context,
parameter_count);
}
@@ -2127,8 +2133,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r0);
LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2144,10 +2152,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r1);
LOperand* value = UseFixed(instr->value(), r0);
LStoreGlobalGeneric* result =
new(zone()) LStoreGlobalGeneric(global_object, value);
new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -2182,8 +2191,10 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r0);
LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
LInstruction* result =
DefineFixed(new(zone()) LLoadNamedGeneric(context, object), r0);
return MarkAsCall(result, instr);
}
@@ -2195,6 +2206,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
return DefineAsRegister(new(zone()) LLoadRoot);
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2211,7 +2227,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseTempRegister(instr->elements());
obj = UseRegister(instr->elements());
} else {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
@@ -2239,18 +2255,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r1);
LOperand* key = UseFixed(instr->key(), r0);
LInstruction* result =
DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), r0);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2260,15 +2275,19 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
val = UseTempRegister(instr->value());
val = UseRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
object = UseTempRegister(instr->elements());
val = needs_write_barrier ? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
key = needs_write_barrier ? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
if (needs_write_barrier) {
object = UseTempRegister(instr->elements());
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
object = UseRegisterAtStart(instr->elements());
val = UseRegisterAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
}
return new(zone()) LStoreKeyed(object, key, val);
@@ -2276,17 +2295,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
(instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
(instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
@@ -2294,6 +2309,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
LOperand* val = UseFixed(instr->value(), r0);
@@ -2302,7 +2318,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
return MarkAsCall(
new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
@@ -2312,11 +2329,12 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg);
new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, NULL);
new(zone()) LTransitionElementsKind(object, context, NULL);
return AssignPointerMap(result);
}
}
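The two branches above reflect two costs: a simple map change only writes the new map (a temp register, no call), while a real transition calls into the runtime, which is why that path now threads the context and is given a pointer map. A small hedged sketch of the decision:

// Illustrative only; the real predicate is IsSimpleMapChangeTransition().
struct TransitionPlan {
  bool needs_context;      // must pass cp to the runtime
  bool needs_pointer_map;  // must record a safepoint for GC
};

TransitionPlan PlanTransition(bool simple_map_change) {
  if (simple_map_change) return {false, false};  // in-place map write
  return {true, true};                           // runtime call
}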
@@ -2375,56 +2393,68 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r1);
LOperand* val = UseFixed(instr->value(), r0);
LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
instr);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
LOperand* context = UseAny(instr->context());
LStringCharFromCode* result =
new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(
DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(
DefineFixed(new(zone()) LFunctionLiteral(context), r0), instr);
}
@@ -2471,8 +2501,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
}
@@ -2517,7 +2547,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
LOperand* context = UseFixed(instr->context(), cp);
LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2556,10 +2587,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
return MarkAsCall(new(zone()) LStackCheck, instr);
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
LOperand* context = UseAny(instr->context());
return AssignEnvironment(
AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
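Both stack-check flavors now carry the context, but with different strength: the function-entry check is a real call, so its context is pinned to cp, while the back-edge check only needs the value to be recoverable by the deopt environment, hence UseAny. A hedged one-function illustration:

// Illustrative: how strictly each stack-check flavor constrains the context.
enum class ContextUse { kFixedToCp, kAnyLocation };

ContextUse StackCheckContextUse(bool is_function_entry) {
  return is_function_entry ? ContextUse::kFixedToCp : ContextUse::kAnyLocation;
}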
@@ -2592,7 +2626,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
argument_count_ -= argument_count;
ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2604,8 +2638,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), r0);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}

462
deps/v8/src/arm/lithium-arm.h

@@ -105,7 +105,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -113,13 +112,13 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -185,6 +184,7 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
@@ -216,7 +216,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -257,15 +256,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
// The 31-bit PositionBits field is used to store the int position value,
// which may be RelocInfo::kNoPosition (-1). The accessors always add or
// subtract 1 so that the encoded value of position in bit_field_ is always
// >= 0 and fits into the 31-bit PositionBits field.
void set_position(int pos) {
bit_field_ = PositionBits::update(bit_field_, pos + 1);
}
int position() { return PositionBits::decode(bit_field_) - 1; }
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -277,7 +267,7 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
bool ClobbersDoubleRegisters() const { return IsCall(); }
virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -305,7 +295,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -404,17 +393,17 @@ class LInstructionGap V8_FINAL : public LGap {
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
int block_id() const { return block_->block_id(); }
private:
int block_id_;
HBasicBlock* block_;
};
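LGoto now stores the target HBasicBlock* and derives block_id() on demand, presumably so the id is read only after control-flow elision and any renumbering have settled. A stand-alone sketch of the same idea (stand-in types):

struct BlockSketch {
  int id;  // may change until the graph is final
  int block_id() const { return id; }
};

struct GotoSketch {
  explicit GotoSketch(BlockSketch* block) : block_(block) {}
  // Resolve the id late, through the pointer, instead of caching the int.
  int block_id() const { return block_->block_id(); }
 private:
  BlockSketch* block_;
};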
@@ -483,8 +472,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -785,12 +780,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LMathAbs(LOperand* value) {
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
inputs_[0] = value;
}
LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
@@ -939,19 +936,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
};
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1002,15 +986,17 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
inputs_[1] = left;
inputs_[2] = right;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -1086,15 +1072,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
inputs_[1] = left;
inputs_[2] = right;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -1103,28 +1091,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
inputs_[1] = left;
inputs_[2] = right;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
inputs_[1] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1145,19 +1137,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
};
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1318,7 +1297,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
Handle<Map> map() const { return hydrogen()->map(); }
Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1373,8 +1352,8 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
@@ -1405,13 +1384,15 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
LThrow(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
LOperand* value() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1507,16 +1488,21 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
LArithmeticT(Token::Value op,
LOperand* context,
LOperand* left,
LOperand* right)
: op_(op) {
inputs_[0] = left;
inputs_[1] = right;
inputs_[0] = context;
inputs_[1] = left;
inputs_[2] = right;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
virtual Opcode opcode() const V8_OVERRIDE {
@@ -1530,11 +1516,12 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
inputs_[1] = parameter_count;
inputs_[1] = context;
inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
@@ -1546,7 +1533,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
LOperand* parameter_count() { return inputs_[1]; }
LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1565,13 +1552,15 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
LLoadNamedGeneric(LOperand* context, LOperand* object) {
inputs_[0] = context;
inputs_[1] = object;
}
LOperand* object() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1593,6 +1582,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
Heap::RootListIndex index() const { return hydrogen()->index(); }
};
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1631,15 +1629,17 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LLoadKeyedGeneric(LOperand* object, LOperand* key) {
inputs_[0] = object;
inputs_[1] = key;
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = key;
}
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1652,13 +1652,15 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
inputs_[0] = context;
inputs_[1] = global_object;
}
LOperand* global_object() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* global_object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1683,16 +1685,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
inputs_[0] = global_object;
inputs_[1] = value;
LStoreGlobalGeneric(LOperand* context,
LOperand* global_object,
LOperand* value) {
inputs_[0] = context;
inputs_[1] = global_object;
inputs_[2] = value;
}
LOperand* global_object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* global_object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1822,8 +1827,14 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
@@ -1865,13 +1876,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
inputs_[1] = function;
}
LOperand* function() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
@@ -1882,13 +1895,15 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
LCallKeyed(LOperand* context, LOperand* key) {
inputs_[0] = context;
inputs_[1] = key;
}
LOperand* key() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1900,8 +1915,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNamed(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
@@ -1912,13 +1933,15 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
inputs_[1] = function;
}
LOperand* function() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1927,8 +1950,14 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallGlobal(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1950,13 +1979,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
inputs_[1] = constructor;
}
LOperand* constructor() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1967,13 +1998,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
inputs_[1] = constructor;
}
LOperand* constructor() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
@@ -1984,13 +2017,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
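Making ClobbersDoubleRegisters() virtual in LInstruction pays off here: a runtime call that saves FP registers can report that it does not clobber them, sparing the allocator double spills around such calls. A minimal sketch of the override, with stand-in names mirroring V8's SaveFPRegsMode:

enum SaveFPRegsModeSketch { kDontSaveFPRegsSketch, kSaveFPRegsSketch };

struct LInstructionSketch {
  virtual ~LInstructionSketch() {}
  // Default: any call may clobber double registers.
  virtual bool ClobbersDoubleRegisters() const { return true; }
};

struct LCallRuntimeSketch : LInstructionSketch {
  explicit LCallRuntimeSketch(SaveFPRegsModeSketch mode) : mode_(mode) {}
  // If the runtime call saves FP registers, they are not clobbered.
  virtual bool ClobbersDoubleRegisters() const {
    return mode_ == kDontSaveFPRegsSketch;
  }
 private:
  SaveFPRegsModeSketch mode_;
};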
@@ -2031,6 +2075,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToSmi(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
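A dedicated LUint32ToSmi exists because the uint32 range check differs from the int32 one: on this 32-bit target a smi payload is 31 signed bits, so any uint32 value above 2^30 - 1 cannot be represented and the instruction must deoptimize (DoChange above only skips the check when range analysis proves the value fits). A hedged stand-alone version of the check:

#include <cstdint>

// Illustrative: the largest uint32 that fits a 31-bit signed smi payload.
bool Uint32FitsInSmi(uint32_t value) {
  return value <= 0x3FFFFFFFu;  // 2^30 - 1
}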
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -2119,7 +2176,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2191,15 +2248,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
inputs_[1] = value;
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
}
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2242,17 +2301,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = value;
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
LOperand* key,
LOperand* value) {
inputs_[0] = context;
inputs_[1] = obj;
inputs_[2] = key;
inputs_[3] = value;
}
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2263,14 +2327,17 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
LOperand* new_map_temp) {
inputs_[0] = object;
inputs_[1] = context;
temps_[0] = new_map_temp;
}
LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
@@ -2280,8 +2347,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
return hydrogen()->transitioned_map().handle();
}
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2303,15 +2372,17 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
inputs_[1] = left;
inputs_[2] = right;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
@@ -2319,28 +2390,32 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
inputs_[1] = string;
inputs_[2] = index;
}
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
LOperand* context() { return inputs_[0]; }
LOperand* string() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
inputs_[1] = char_code;
}
LOperand* char_code() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* char_code() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
@@ -2451,12 +2526,17 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
LAllocate(LOperand* context,
LOperand* size,
LOperand* temp1,
LOperand* temp2) {
inputs_[0] = context;
inputs_[1] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* context() { return inputs_[0]; }
LOperand* size() { return inputs_[1]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
@@ -2466,15 +2546,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
@@ -2493,13 +2585,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
LOperand* value() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2546,8 +2640,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2558,13 +2658,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
inputs_[1] = object;
}
LOperand* object() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
@@ -2620,8 +2722,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
int GetNextSpillIndex(RegisterKind kind);
LOperand* GetNextSpillSlot(RegisterKind kind);
};
@@ -2645,6 +2747,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Build the sequence for the graph.
LPlatformChunk* Build();
LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2778,7 +2882,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;

671
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

68
deps/v8/src/arm/lithium-codegen-arm.h

@@ -32,6 +32,7 @@
#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -43,43 +44,26 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
class LCodeGen V8_FINAL BASE_EMBEDDED {
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple),
old_position_(RelocInfo::kNoPosition) {
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -178,30 +162,15 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
enum Status {
UNUSED,
GENERATING,
DONE,
ABORTED
};
bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk()->graph(); }
Register scratch0() { return r9; }
LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -214,14 +183,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
@@ -249,7 +216,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -258,9 +226,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr);
LInstruction* instr,
LOperand* context);
enum R1State {
R1_UNINITIALIZED,
@@ -276,8 +246,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallKind call_kind,
R1State r1_state);
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -320,8 +288,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
void RecordAndUpdatePosition(int position);
void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -383,7 +351,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Register scratch,
LEnvironment* environment);
void EnsureSpaceForLazyDeopt();
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -391,24 +359,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -420,8 +378,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,

4
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -252,7 +252,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsInteger32(constant_source)) {
__ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
__ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DwVfpRegister result = cgen_->ToDoubleRegister(destination);
@@ -267,7 +267,7 @@ void LGapResolver::EmitMove(int index) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(kSavedValueRegister,
__ Move(kSavedValueRegister,
cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));

266
deps/v8/src/arm/macro-assembler-arm.cc

@@ -35,6 +35,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
@@ -233,7 +234,19 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, Operand(value));
AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
mov(dst, Operand(value));
} else {
ASSERT(value->IsHeapObject());
if (isolate()->heap()->InNewSpace(*value)) {
Handle<Cell> cell = isolate()->factory()->NewCell(value);
mov(dst, Operand(cell));
ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
} else {
mov(dst, Operand(value));
}
}
}
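Move now subsumes the deleted LoadHeapObject (removed below): smis and old-space objects can be baked directly into the instruction stream, but a new-space object must be referenced indirectly through a Cell so the GC can update the slot when the object moves. The rule, restated as a hedged sketch:

// Illustrative classification of how Move() embeds a handle.
enum class Embedding { kImmediate, kViaCell };

Embedding ChooseEmbedding(bool is_smi, bool in_new_space) {
  if (is_smi) return Embedding::kImmediate;     // the value is the payload
  return in_new_space ? Embedding::kViaCell     // GC may move the object
                      : Embedding::kImmediate;  // old space: stable address
}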
@@ -394,19 +407,6 @@ void MacroAssembler::StoreRoot(Register source,
}
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
mov(result, Operand(cell));
ldr(result, FieldMemOperand(result, Cell::kValueOffset));
} else {
mov(result, Operand(object));
}
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -478,11 +478,6 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
ASSERT(!address.is(cp) && !value.is(cp));
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
@@ -733,9 +728,11 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
bind(&fpscr_done);
}
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
vsub(value, value, kDoubleRegZero, cond);
vsub(dst, src, kDoubleRegZero, cond);
}
@@ -919,6 +916,33 @@ void MacroAssembler::LoadNumberAsInt32(Register object,
}
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
add(fp, sp, Operand(2 * kPointerSize));
} else {
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
if (isolate()->IsCodePreAgingActive()) {
// Pre-age the code.
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
add(r0, pc, Operand(-8));
ldr(pc, MemOperand(pc, -4));
dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
} else {
stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
nop(ip.code());
// Adjust FP to point to saved FP.
add(fp, sp, Operand(2 * kPointerSize));
}
}
}
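The new Prologue helper centralizes the code-aging contract: the young-code frame setup and the jump to the pre-aged stub must occupy exactly the same number of bytes so that one form can later be patched over the other in place. A compile-time restatement of that invariant, with assumed ARM values:

// Assumed ARM values: 4-byte instructions, three-instruction age sequence.
const int kInstrSizeSketch = 4;
const int kNoCodeAgeSequenceLengthSketch = 3;

// Both prologue forms must fit in this many bytes for in-place patching.
static_assert(kNoCodeAgeSequenceLengthSketch * kInstrSizeSketch == 12,
              "young and pre-aged prologues must be the same size");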
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
@ -1020,7 +1044,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
Register argument_count,
bool restore_context) {
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@ -1035,10 +1060,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
ldr(cp, MemOperand(ip));
if (restore_context) {
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
ldr(cp, MemOperand(ip));
}
#ifdef DEBUG
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
str(r3, MemOperand(ip));
#endif
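The new restore_context flag lets a caller that has already reloaded cp from its own saved slot (see CallApiFunctionAndReturn below) skip the reload from Isolate::kContextAddress. A sketch of the contract, with made-up names:

#include <cstdint>

// Hypothetical model: 'cp' stands in for the context register and
// 'isolate_context_slot' for the Isolate::kContextAddress slot.
void LeaveExitFrameModel(bool restore_context, uintptr_t* cp,
                         const uintptr_t* isolate_context_slot) {
  if (restore_context) {
    *cp = *isolate_context_slot;  // ldr(cp, MemOperand(ip)) above.
  }
  // Debug builds then clobber the slot so stale reads are caught early.
}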
@ -1256,7 +1285,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
LoadHeapObject(r1, function);
Move(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
@ -1330,7 +1359,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
// For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
// We will build up the handler from the bottom by pushing on the stack.
// Set up the code object (r5) and the state (r6) for pushing.
unsigned state =
@ -1341,9 +1370,9 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
mov(ip, Operand::Zero()); // NULL frame pointer.
stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
@ -2280,12 +2309,14 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
}
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Address function_address,
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
int return_value_offset) {
void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference function,
Address function_address,
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@ -2296,13 +2327,15 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference::handle_scope_level_address(isolate()),
next_address);
ASSERT(!thunk_last_arg.is(r3));
// Allocate HandleScope in callee-save registers.
mov(r7, Operand(next_address));
ldr(r4, MemOperand(r7, kNextOffset));
ldr(r5, MemOperand(r7, kLimitOffset));
ldr(r6, MemOperand(r7, kLevelOffset));
mov(r9, Operand(next_address));
ldr(r4, MemOperand(r9, kNextOffset));
ldr(r5, MemOperand(r9, kLimitOffset));
ldr(r6, MemOperand(r9, kLevelOffset));
add(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
str(r6, MemOperand(r9, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
@ -2313,7 +2346,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
PopSafepointRegisters();
}
ASSERT(!thunk_last_arg.is(r3));
Label profiler_disabled;
Label end_profiler_check;
bool* is_profiling_flag =
@ -2349,24 +2381,25 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
}
Label promote_scheduled_exception;
Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
// load value from ReturnValue
ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
ldr(r0, return_value_operand);
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
str(r4, MemOperand(r7, kNextOffset));
str(r4, MemOperand(r9, kNextOffset));
if (emit_debug_code()) {
ldr(r1, MemOperand(r7, kLevelOffset));
ldr(r1, MemOperand(r9, kLevelOffset));
cmp(r1, r6);
Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
}
sub(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
ldr(ip, MemOperand(r7, kLimitOffset));
str(r6, MemOperand(r9, kLevelOffset));
ldr(ip, MemOperand(r9, kLimitOffset));
cmp(r5, ip);
b(ne, &delete_allocated_handles);
@ -2377,21 +2410,29 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
bind(&exception_handled);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
ldr(cp, *context_restore_operand);
}
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
LeaveExitFrame(false, r4);
LeaveExitFrame(false, r4, !restore_context);
mov(pc, lr);
bind(&promote_scheduled_exception);
TailCallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException, isolate()),
0,
1);
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
str(r5, MemOperand(r7, kLimitOffset));
str(r5, MemOperand(r9, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(1, r5);
mov(r0, Operand(ExternalReference::isolate_address(isolate())));
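Apart from the r7-to-r9 move (r7 becomes pp, the constant pool pointer, in the header change below), the handle-scope bookkeeping is unchanged: save next/limit, bump level, and on the way out restore next, decrement level, and treat a changed limit as "extensions were allocated, delete them". A compact model under those assumptions:

#include <cassert>

// Illustrative only; the fields follow the kNextOffset/kLimitOffset/
// kLevelOffset slots used above.
struct HandleScopeData {
  void** next;
  void** limit;
  int level;
};

// Returns true when extension blocks must be deleted (the
// delete_allocated_handles path above).
bool LeaveApiScope(HandleScopeData* d, void** saved_next, void** saved_limit,
                   int saved_level) {
  d->next = saved_next;
  assert(d->level == saved_level + 1);  // The debug Check() above.
  d->level--;
  return d->limit != saved_limit;
}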
@ -2603,7 +2644,8 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments) {
int num_arguments,
SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
@ -2620,21 +2662,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
mov(r0, Operand(num_arguments));
mov(r1, Operand(ExternalReference(f, isolate())));
CEntryStub stub(1);
CallStub(&stub);
}
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
CEntryStub stub(1, kSaveFPRegs);
CEntryStub stub(1, save_doubles);
CallStub(&stub);
}
@ -3079,6 +3107,88 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
void MacroAssembler::LookupNumberStringCache(Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found) {
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
Register mask = scratch3;
// Load the number string cache.
LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
// Divide length by two (length is a smi).
mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
sub(mask, mask, Operand(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
// doubles is the xor of the upper and lower words. See
// Heap::GetNumberStringCache.
Label is_smi;
Label load_result_from_cache;
JumpIfSmi(object, &is_smi);
CheckMap(object,
scratch1,
Heap::kHeapNumberMapRootIndex,
not_found,
DONT_DO_SMI_CHECK);
STATIC_ASSERT(8 == kDoubleSize);
add(scratch1,
object,
Operand(HeapNumber::kValueOffset - kHeapObjectTag));
ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
eor(scratch1, scratch1, Operand(scratch2));
and_(scratch1, scratch1, Operand(mask));
// Calculate address of entry in string cache: each entry consists
// of two pointer-sized fields.
add(scratch1,
number_string_cache,
Operand(scratch1, LSL, kPointerSizeLog2 + 1));
Register probe = mask;
ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
JumpIfSmi(probe, not_found);
sub(scratch2, object, Operand(kHeapObjectTag));
vldr(d0, scratch2, HeapNumber::kValueOffset);
sub(probe, probe, Operand(kHeapObjectTag));
vldr(d1, probe, HeapNumber::kValueOffset);
VFPCompareAndSetFlags(d0, d1);
b(ne, not_found); // The cache did not contain this value.
b(&load_result_from_cache);
bind(&is_smi);
Register scratch = scratch1;
and_(scratch, mask, Operand(object, ASR, 1));
// Calculate address of entry in string cache: each entry consists
// of two pointer-sized fields.
add(scratch,
number_string_cache,
Operand(scratch, LSL, kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
cmp(object, probe);
b(ne, not_found);
// Get the result from the cache.
bind(&load_result_from_cache);
ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
IncrementCounter(isolate()->counters()->number_to_string_native(),
1,
scratch1,
scratch2);
}
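The hashing scheme the comments describe can be restated in a few lines: the mask is half the backing-store length minus one (two slots per entry), smis hash to their value, and doubles hash to the xor of their two 32-bit halves. A standalone sketch of the index computation (semantics assumed from the comments, not taken verbatim from Heap::GetNumberStringCache):

#include <cstdint>
#include <cstring>
#include <iostream>

uint32_t NumberStringCacheEntry(double number, bool is_smi,
                                uint32_t backing_length) {
  uint32_t mask = backing_length / 2 - 1;  // Two fields per cache entry.
  uint32_t hash;
  if (is_smi) {
    hash = static_cast<uint32_t>(static_cast<int32_t>(number));
  } else {
    uint32_t halves[2];
    std::memcpy(halves, &number, sizeof number);  // No aliasing tricks.
    hash = halves[0] ^ halves[1];  // eor(scratch1, scratch1, scratch2).
  }
  return (hash & mask) * 2;  // First field of the (number, string) pair.
}

int main() {
  std::cout << NumberStringCacheEntry(42.0, true, 128) << "\n";  // 84
}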
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@ -3191,20 +3301,19 @@ void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
bind(&align_loop);
cmp(length, Operand::Zero());
b(eq, &done);
cmp(length, Operand(kPointerSize));
b(le, &byte_loop);
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
b(ne, &byte_loop_1);
b(&align_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
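The rewritten loop structure is the classic unaligned-prefix copy: fall back to a byte loop for short lengths, otherwise copy single bytes until src is word-aligned and then move word-sized chunks. The same shape in plain C++ (tail handling included for completeness):

#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyBytesModel(const uint8_t* src, uint8_t* dst, std::size_t length) {
  // align_loop_1: single bytes until src is word-aligned.
  while (length > 0 &&
         reinterpret_cast<uintptr_t>(src) % sizeof(uintptr_t) != 0) {
    *dst++ = *src++;
    --length;
  }
  // word_loop: word-sized chunks.
  while (length >= sizeof(uintptr_t)) {
    uintptr_t word;
    std::memcpy(&word, src, sizeof word);
    std::memcpy(dst, &word, sizeof word);
    src += sizeof word;
    dst += sizeof word;
    length -= sizeof word;
  }
  // byte_loop: remaining tail.
  while (length-- > 0) *dst++ = *src++;
}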
@ -3776,8 +3885,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg) {
Label no_memento_available;
Register scratch_reg,
Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@ -3785,15 +3894,14 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
add(scratch_reg, receiver_reg,
Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Operand(new_space_start));
b(lt, &no_memento_available);
b(lt, no_memento_found);
mov(ip, Operand(new_space_allocation_top));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
b(gt, &no_memento_available);
b(gt, no_memento_found);
ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
cmp(scratch_reg,
Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
bind(&no_memento_available);
Operand(isolate()->factory()->allocation_memento_map()));
}
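The probe relies on one layout fact, assumed here from the code: a live AllocationMemento sits immediately after the JSArray in new space, so it suffices to range-check receiver + JSArray::kSize + AllocationMemento::kSize against [new_space_start, top] and map-check the word just behind the array. Roughly:

#include <cstdint>

// Word-level model (addresses as indices into a heap array): a memento, if
// present, sits immediately after the array, so range-check its end against
// new space and compare its map word. Names are illustrative.
bool HasAllocationMemento(const uintptr_t* heap, uintptr_t array_index,
                          uintptr_t array_words, uintptr_t memento_words,
                          uintptr_t memento_map, uintptr_t new_space_start,
                          uintptr_t new_space_top) {
  uintptr_t probe_end = array_index + array_words + memento_words;
  if (probe_end < new_space_start) return false;  // b(lt, no_memento_found)
  if (probe_end > new_space_top) return false;    // b(gt, no_memento_found)
  // The memento's first word (its map) is memento_words before probe_end.
  return heap[probe_end - memento_words] == memento_map;
}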

72
deps/v8/src/arm/macro-assembler-arm.h

@ -45,8 +45,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register kRootRegister = { 10 }; // Roots array pointer.
const Register pp = { kRegister_r7_Code }; // Constant pool pointer.
const Register cp = { kRegister_r8_Code }; // JavaScript context pointer.
const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@ -169,17 +170,6 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
Move(result, object);
}
}
// ---------------------------------------------------------------------------
// GC Support
@ -469,8 +459,13 @@ class MacroAssembler: public Assembler {
void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value; otherwise, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister value,
void VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void VFPCanonicalizeNaN(const DwVfpRegister value,
const Condition cond = al) {
VFPCanonicalizeNaN(value, value, cond);
}
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
@ -533,6 +528,8 @@ class MacroAssembler: public Assembler {
LowDwVfpRegister double_scratch1,
Label* not_int32);
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@ -541,7 +538,9 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count);
void LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@ -1037,11 +1036,18 @@ class MacroAssembler: public Assembler {
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
void CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}
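This is the header half of the CallRuntime consolidation: the separate CallRuntimeSaveDoubles body disappears and both entry points funnel into one CallRuntime taking a SaveFPRegsMode that defaults to kDontSaveFPRegs. The shape of the refactor, modeled standalone:

#include <iostream>

// Standalone model: one implementation parameterized by SaveFPRegsMode,
// with the old SaveDoubles entry point reduced to an inline wrapper. Names
// mirror the header; the body is illustrative.
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

void CallRuntimeModel(const char* name, int nargs,
                      SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
  std::cout << name << ": " << nargs << " args, save_doubles="
            << (save_doubles == kSaveFPRegs) << "\n";
}

void CallRuntimeSaveDoublesModel(const char* name, int nargs) {
  CallRuntimeModel(name, nargs, kSaveFPRegs);  // Thin wrapper, as above.
}

int main() {
  CallRuntimeModel("ToNumber", 1);
  CallRuntimeSaveDoublesModel("NumberToString", 1);
}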
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@ -1111,7 +1117,8 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
int return_value_offset_from_fp);
MemOperand return_value_operand,
MemOperand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@ -1286,6 +1293,18 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
void LookupNumberStringCache(Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found);
// Checks if both objects are sequential ASCII strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
@ -1360,9 +1379,20 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, condition flags are set to eq
// If allocation info is present, condition flags are set to eq.
void TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg);
Register scratch_reg,
Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
&no_memento_found);
b(eq, memento_found);
bind(&no_memento_found);
}
private:
void CallCFunctionHelper(Register function,

5
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -223,11 +223,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
// Manage a small pre-allocated pool for writing label targets
// to for pushing backtrack addresses.
int backtrack_constant_pool_offset_;
int backtrack_constant_pool_capacity_;
// Labels used internally.
Label entry_label_;
Label start_label_;

79
deps/v8/src/arm/simulator-arm.cc

@ -912,6 +912,12 @@ double Simulator::get_double_from_register_pair(int reg) {
}
void Simulator::set_register_pair_from_double(int reg, double* value) {
ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
memcpy(registers_ + reg, value, sizeof(*value));
}
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@ -1026,27 +1032,22 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
}
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are consructed here.
// from r0-r3 or d0 and d1.
// Runtime FP routines take:
// - two double arguments
// - one double argument and zero or one integer argument.
// All are constructed here from r0-r3 or d0, d1 and r0.
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = vfp_registers_[1];
*z = registers_[1];
*x = get_double_from_d_register(0);
*y = get_double_from_d_register(1);
*z = get_register(0);
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
*x = get_double_from_register_pair(0);
// Register 2 and 3 -> y.
OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
OS::MemCopy(y, buffer, sizeof(*y));
*y = get_double_from_register_pair(2);
// Register 2 -> z
memcpy(buffer, registers_ + 2, sizeof(*z));
memcpy(z, buffer, sizeof(*z));
*z = get_register(2);
}
}
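The cleanup replaces raw register-array peeks with the accessors, and the soft-float path now reuses get_double_from_register_pair instead of a hand-rolled char-buffer copy. The underlying trick is unchanged: a double passed in two 32-bit core registers is reassembled bit-for-bit. In C++ terms (little-endian register file assumed, as in the simulator):

#include <cstdint>
#include <cstring>
#include <iostream>

// Model of get_double_from_register_pair: memcpy, not type punning, so the
// copy stays well-defined under strict aliasing.
double DoubleFromRegisterPair(const int32_t* registers, int reg) {
  double d;
  std::memcpy(&d, registers + reg, sizeof d);
  return d;
}

int main() {
  int32_t regs[4] = {0, 0, 0, 0};
  const double value = 1.5;
  std::memcpy(regs, &value, sizeof value);  // Pretend r0:r1 hold a double.
  std::cout << DoubleFromRegisterPair(regs, 0) << "\n";  // 1.5
}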
@ -1718,32 +1719,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
if (use_eabi_hardfloat()) {
// With the hard floating point calling convention, double
// arguments are passed in VFP registers. Fetch the arguments
// from there and call the builtin using soft floating point
// convention.
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
arg2 = vfp_registers_[2];
arg3 = vfp_registers_[3];
break;
case ExternalReference::BUILTIN_FP_CALL:
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
arg2 = get_register(0);
break;
default:
break;
}
}
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
@ -3816,19 +3791,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
double Simulator::CallFP(byte* entry, double d0, double d1) {
void Simulator::CallFP(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
OS::MemCopy(buffer, &d0, sizeof(d0));
set_dw_register(0, buffer);
OS::MemCopy(buffer, &d1, sizeof(d1));
set_dw_register(2, buffer);
set_register_pair_from_double(0, &d0);
set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
}
int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
CallFP(entry, d0, d1);
int32_t result = get_register(r0);
return result;
}
double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
CallFP(entry, d0, d1);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {

9
deps/v8/src/arm/simulator-arm.h

@ -163,6 +163,7 @@ class Simulator {
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
@ -220,7 +221,9 @@ class Simulator {
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@ -444,6 +447,10 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_FP_INT(entry, p0, p1) \
Simulator::current(Isolate::Current())->CallFPReturnsInt( \
FUNCTION_ADDR(entry), p0, p1)
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)

242
deps/v8/src/arm/stub-cache-arm.cc

@ -380,31 +380,27 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss,
bool support_wrappers) {
Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
support_wrappers ? &check_wrapper : miss);
GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
if (support_wrappers) {
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ cmp(scratch1, Operand(JS_VALUE_TYPE));
__ b(ne, miss);
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ cmp(scratch1, Operand(JS_VALUE_TYPE));
__ b(ne, miss);
// Unwrap the value and check if the wrapped value is a string.
__ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
__ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
__ Ret();
}
// Unwrap the value and check if the wrapped value is a string.
__ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
__ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
__ Ret();
}
@ -437,7 +433,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@ -457,19 +453,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name,
Register receiver_reg,
Register storage_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss_label,
Label* slow) {
void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name,
Register receiver_reg,
Register storage_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss_label,
Label* slow) {
// r0 : value
Label exit;
@ -481,7 +477,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ LoadObject(scratch1, constant);
__ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
@ -621,15 +617,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Register receiver_reg,
Register name_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Label* miss_label) {
void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Register receiver_reg,
Register name_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Label* miss_label) {
// r0 : value
Label exit;
@ -740,9 +736,9 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name) {
void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Operand(name));
@ -843,25 +839,26 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
int argc,
bool restore_context) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : isolate
// -- sp[16] : ReturnValue default value
// -- sp[20] : ReturnValue
// -- sp[24] : last JS argument
// -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
// : holder (set by CheckPrototypes)
// -- sp[28] : last JS argument
// -- ...
// -- sp[(argc + 5) * 4] : first JS argument
// -- sp[(argc + 6) * 4] : receiver
// -- sp[(argc + 6) * 4] : first JS argument
// -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
typedef FunctionCallbackArguments FCA;
// Save calling context.
__ str(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(r5, function);
__ Move(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
__ str(r5, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
// Pass the additional arguments.
// Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@ -870,15 +867,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ Move(r6, call_data);
}
__ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
// Store JS function, call data, isolate ReturnValue default and ReturnValue.
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
// Store call data.
__ str(r6, MemOperand(sp, FCA::kDataIndex * kPointerSize));
// Store isolate.
__ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
__ str(r5, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
// Store ReturnValue default and ReturnValue.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ str(r5, MemOperand(sp, 4 * kPointerSize));
__ str(r5, MemOperand(sp, 5 * kPointerSize));
__ str(r5, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
__ str(r5, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
__ add(r2, sp, Operand(5 * kPointerSize));
__ mov(r2, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@ -887,18 +887,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
// r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
// v8::Arguments::implicit_args_
// FunctionCallbackInfo::implicit_args_
__ str(r2, MemOperand(r0, 0 * kPointerSize));
// v8::Arguments::values_
__ add(ip, r2, Operand(argc * kPointerSize));
// FunctionCallbackInfo::values_
__ add(ip, r2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
// v8::Arguments::length_ = argc
// FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
// v8::Arguments::is_construct_call = 0
// FunctionCallbackInfo::is_construct_call = 0
__ mov(ip, Operand::Zero());
__ str(ip, MemOperand(r0, 3 * kPointerSize));
@ -916,12 +916,19 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
MemOperand return_value_operand(fp,
(2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
r1,
kStackUnwindSpace,
kFastApiCallArguments + 1);
return_value_operand,
restore_context ?
&context_restore_operand : NULL);
}
@ -935,11 +942,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
typedef FunctionCallbackArguments FCA;
const int stack_space = kFastApiCallArguments + argc + 1;
// Assign stack space for the call arguments.
__ sub(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
__ str(receiver, MemOperand(sp, 0));
__ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ str(receiver, MemOperand(sp, index * kPointerSize));
@ -950,7 +958,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ str(receiver, MemOperand(sp, index-- * kPointerSize));
}
GenerateFastApiDirectCall(masm, optimization, argc);
GenerateFastApiDirectCall(masm, optimization, argc, true);
}
@ -1064,7 +1072,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
GenerateFastApiDirectCall(
masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@ -1202,8 +1211,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
__ str(reg, MemOperand(sp));
__ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Check the maps in the prototype chain.
@ -1262,7 +1272,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
__ str(reg, MemOperand(sp));
__ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
@ -1294,9 +1304,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
Label* success,
Label* miss) {
void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ b(success);
__ bind(miss);
@ -1305,9 +1315,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
Label* success,
Label* miss) {
void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ b(success);
GenerateRestoreName(masm(), miss, name);
@ -1316,7 +1326,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Register LoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@ -1363,7 +1373,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void LoadStubCompiler::NonexistentHandlerFrontend(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
@ -1383,10 +1393,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
}
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
Representation representation) {
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@ -1402,36 +1412,36 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(r0, value);
__ Move(r0, value);
__ Ret();
}
void BaseLoadStubCompiler::GenerateLoadCallback(
void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
void BaseLoadStubCompiler::GenerateLoadCallback(
void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
__ push(receiver());
__ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
__ Move(scratch3(), callback);
__ ldr(scratch3(), FieldMemOperand(scratch3(),
@ -1445,19 +1455,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Push(scratch3(), scratch4());
__ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch4(), reg, name());
__ Push(scratch4(), reg);
__ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
__ push(name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with
// Create PropertyAccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
const int kStackUnwindSpace = kFastApiCallArguments + 1;
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
@ -1475,11 +1487,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
r2,
kStackUnwindSpace,
6);
MemOperand(fp, 6 * kPointerSize),
NULL);
}
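The STATIC_ASSERT block earlier in this hunk documents the real change: PropertyCallbackArguments indices flipped from descending (kThisIndex == 0, kHolderIndex == -5) to ascending (kHolderIndex == 0, kThisIndex == 5), so args_ now points at the lowest slot and everything is addressed upward. A sketch of slot addressing under the new layout:

// Indices copied from the asserts above; the addressing helper itself is
// illustrative, not V8's.
enum PropertyCallbackIndex {
  kHolderIndex = 0,
  kIsolateIndex = 1,
  kReturnValueDefaultValueIndex = 2,
  kReturnValueOffset = 3,
  kDataIndex = 4,
  kThisIndex = 5,
  kArgsLength = 6
};

inline void* const* PropertySlot(void* const* args_base,
                                 PropertyCallbackIndex index) {
  return args_base + index;  // Slots ascend from args_ after the reversal.
}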
void BaseLoadStubCompiler::GenerateLoadInterceptor(
void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> interceptor_holder,
@ -1839,15 +1852,15 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(r3, r7, &not_fast_object);
__ CheckFastObjectElements(r3, r9, &not_fast_object);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r7, &call_builtin);
__ CheckFastSmiElements(r3, r9, &call_builtin);
__ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r7, ip);
__ cmp(r9, ip);
__ b(eq, &call_builtin);
// edx: receiver
// r3: map
@ -1855,7 +1868,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
r7,
r9,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@ -1868,7 +1881,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
r7,
r9,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@ -1901,7 +1914,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ bind(&attempt_to_grow_elements);
// r0: array's length + 1.
// r4: elements' length.
if (!FLAG_inline_new) {
__ b(&call_builtin);
@ -1912,8 +1924,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
__ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastObjectElements(r7, r7, &call_builtin);
__ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastObjectElements(r9, r9, &call_builtin);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
@ -1925,8 +1937,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Load top and check if it is the end of elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r7, Operand(new_space_allocation_top));
__ ldr(r3, MemOperand(r7));
__ mov(r4, Operand(new_space_allocation_top));
__ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
@ -1938,7 +1950,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// We fit and could grow elements.
// Update new_space_allocation_top.
__ str(r3, MemOperand(r7));
__ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
@ -1949,6 +1961,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Update elements' and array's sizes.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
@ -2539,7 +2552,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
GenerateFastApiDirectCall(masm(), optimization, argc);
GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
@ -2991,6 +3004,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- r0 : receiver
@ -3002,7 +3016,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
__ push(r0);
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,

8
deps/v8/src/array-iterator.js

@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
var iteratorObjectSymbol = %CreateSymbol(void 0);
var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0);
var arrayIterationKindSymbol = %CreateSymbol(void 0);
var iteratorObjectSymbol = %CreateSymbol(UNDEFINED);
var arrayIteratorNextIndexSymbol = %CreateSymbol(UNDEFINED);
var arrayIterationKindSymbol = %CreateSymbol(UNDEFINED);
function ArrayIterator() {}
@ -74,7 +74,7 @@ function ArrayIteratorNext() {
if (index >= length) {
iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
return CreateIteratorResultObject(void 0, true);
return CreateIteratorResultObject(UNDEFINED, true);
}
iterator[arrayIteratorNextIndexSymbol] = index + 1;

26
deps/v8/src/array.js

@ -399,14 +399,13 @@ function ObservedArrayPop(n) {
n--;
var value = this[n];
EnqueueSpliceRecord(this, n, [value], 0);
try {
BeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, n, [value], 0);
}
return value;
@ -431,7 +430,7 @@ function ArrayPop() {
n--;
var value = this[n];
delete this[n];
Delete(this, ToName(n), true);
this.length = n;
return value;
}
@ -441,8 +440,6 @@ function ObservedArrayPush() {
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
EnqueueSpliceRecord(this, n, [], m);
try {
BeginPerformSplice(this);
for (var i = 0; i < m; i++) {
@ -451,6 +448,7 @@ function ObservedArrayPush() {
this.length = n + m;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, n, [], m);
}
return this.length;
@ -581,14 +579,13 @@ function ArrayReverse() {
function ObservedArrayShift(len) {
var first = this[0];
EnqueueSpliceRecord(this, 0, [first], 0);
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, 0, [first], 0);
}
return first;
@ -627,8 +624,6 @@ function ObservedArrayUnshift() {
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
EnqueueSpliceRecord(this, 0, [], num_arguments);
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
@ -638,6 +633,7 @@ function ObservedArrayUnshift() {
this.length = len + num_arguments;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, 0, [], num_arguments);
}
return len + num_arguments;
@ -681,7 +677,7 @@ function ArraySlice(start, end) {
var start_i = TO_INTEGER(start);
var end_i = len;
if (end !== void 0) end_i = TO_INTEGER(end);
if (!IS_UNDEFINED(end)) end_i = TO_INTEGER(end);
if (start_i < 0) {
start_i += len;
@ -1020,7 +1016,7 @@ function ArraySort(comparefn) {
var proto_length = indices;
for (var i = from; i < proto_length; i++) {
if (proto.hasOwnProperty(i)) {
obj[i] = void 0;
obj[i] = UNDEFINED;
}
}
} else {
@ -1028,7 +1024,7 @@ function ArraySort(comparefn) {
var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index &&
proto.hasOwnProperty(index)) {
obj[index] = void 0;
obj[index] = UNDEFINED;
}
}
}
@ -1065,7 +1061,7 @@ function ArraySort(comparefn) {
if (first_undefined < last_defined) {
// Fill in hole or undefined.
obj[first_undefined] = obj[last_defined];
obj[last_defined] = void 0;
obj[last_defined] = UNDEFINED;
}
}
// If there were any undefineds in the entire array, first_undefined
@ -1077,12 +1073,12 @@ function ArraySort(comparefn) {
// an undefined should be and vice versa.
var i;
for (i = first_undefined; i < length - num_holes; i++) {
obj[i] = void 0;
obj[i] = UNDEFINED;
}
for (i = length - num_holes; i < length; i++) {
// For compatibility with Webkit, do not expose elements in the prototype.
if (i in %GetPrototype(obj)) {
obj[i] = void 0;
obj[i] = UNDEFINED;
} else {
delete obj[i];
}

8
deps/v8/src/arraybuffer.js

@ -81,6 +81,10 @@ function ArrayBufferSlice(start, end) {
return result;
}
function ArrayBufferIsView(obj) {
return %ArrayBufferIsView(obj);
}
function SetUpArrayBuffer() {
%CheckIsBootstrapping();
@ -93,6 +97,10 @@ function SetUpArrayBuffer() {
InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(
"isView", ArrayBufferIsView
));
InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
"slice", ArrayBufferSlice
));

42
deps/v8/src/assembler.cc

@ -98,6 +98,7 @@ struct DoubleConstant BASE_EMBEDDED {
double negative_infinity;
double canonical_non_hole_nan;
double the_hole_nan;
double uint32_bias;
};
static DoubleConstant double_constants;
@ -206,6 +207,24 @@ CpuFeatureScope::~CpuFeatureScope() {
#endif
// -----------------------------------------------------------------------------
// Implementation of PlatformFeatureScope
PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
: old_cross_compile_(CpuFeatures::cross_compile_) {
// CpuFeatures is a global singleton; therefore this is only safe in
// single-threaded code.
ASSERT(Serializer::enabled());
uint64_t mask = static_cast<uint64_t>(1) << f;
CpuFeatures::cross_compile_ |= mask;
}
PlatformFeatureScope::~PlatformFeatureScope() {
CpuFeatures::cross_compile_ = old_cross_compile_;
}
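The scope is a textbook RAII toggle over a global bitmask, which is why the comment restricts it to single-threaded (serializer) code. Reduced to its essentials:

#include <cassert>
#include <cstdint>

// Simplified standalone equivalent; 'cross_compile_' models the
// CpuFeatures::cross_compile_ mask above.
struct FeatureScopeModel {
  static uint64_t cross_compile_;
  uint64_t old_;
  explicit FeatureScopeModel(int feature) : old_(cross_compile_) {
    cross_compile_ |= uint64_t{1} << feature;  // Enable for the scope.
  }
  ~FeatureScopeModel() { cross_compile_ = old_; }  // Restore on exit.
};
uint64_t FeatureScopeModel::cross_compile_ = 0;

int main() {
  {
    FeatureScopeModel scope(3);
    assert(FeatureScopeModel::cross_compile_ & (uint64_t{1} << 3));
  }
  assert(FeatureScopeModel::cross_compile_ == 0);  // Scope restored it.
}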
// -----------------------------------------------------------------------------
// Implementation of Label
@ -890,6 +909,8 @@ void ExternalReference::SetUp() {
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
math_exp_data_mutex = new Mutex();
}
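The new constant is exactly 2^32 (4294967296.0). Its assumed use: when a 32-bit lane holding an unsigned value reaches a double through a signed conversion path, negative results are off by exactly 2^32, and adding the bias corrects them:

#include <cstdint>
#include <iostream>

double Uint32ToDouble(int32_t bits_seen_as_signed) {
  const double kUint32Bias =  // Same expression as in SetUp() above: 2^32.
      static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
  double d = static_cast<double>(bits_seen_as_signed);
  return d < 0 ? d + kUint32Bias : d;  // Undo the signed interpretation.
}

int main() {
  std::cout << Uint32ToDouble(-1) << "\n";  // 4294967295
}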
@ -1067,6 +1088,13 @@ ExternalReference ExternalReference::get_make_code_young_function(
}
ExternalReference ExternalReference::get_mark_code_as_executed_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
}
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@ -1315,6 +1343,20 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
}
ExternalReference ExternalReference::record_object_allocation_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate,
FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
}
ExternalReference ExternalReference::address_of_uint32_bias() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.uint32_bias));
}
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(

18
deps/v8/src/assembler.h

@ -134,6 +134,18 @@ class CpuFeatureScope BASE_EMBEDDED {
};
// Enable an unsupported feature within a scope for cross-compiling for a
// different CPU.
class PlatformFeatureScope BASE_EMBEDDED {
public:
explicit PlatformFeatureScope(CpuFeature f);
~PlatformFeatureScope();
private:
uint64_t old_cross_compile_;
};
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote known or (yet)
@ -389,6 +401,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<Cell> target_cell_handle());
INLINE(void set_target_cell(Cell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
@ -715,6 +728,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
// New heap objects tracking support.
static ExternalReference record_object_allocation_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
@ -798,6 +815,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_negative_infinity();
static ExternalReference address_of_canonical_non_hole_nan();
static ExternalReference address_of_the_hole_nan();
static ExternalReference address_of_uint32_bias();
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);

61
deps/v8/src/ast.cc

@ -82,14 +82,13 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) {
}
VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
: Expression(isolate),
VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
: Expression(isolate, position),
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
is_lvalue_(false),
position_(RelocInfo::kNoPosition),
interface_(var->interface()) {
BindTo(var);
}
@ -100,13 +99,12 @@ VariableProxy::VariableProxy(Isolate* isolate,
bool is_this,
Interface* interface,
int position)
: Expression(isolate),
: Expression(isolate, position),
name_(name),
var_(NULL),
is_this_(is_this),
is_trivial_(false),
is_lvalue_(false),
position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsInternalizedString());
@ -133,15 +131,15 @@ Assignment::Assignment(Isolate* isolate,
Expression* target,
Expression* value,
int pos)
: Expression(isolate),
: Expression(isolate, pos),
op_(op),
target_(target),
value_(value),
pos_(pos),
binary_operation_(NULL),
assignment_id_(GetNextId(isolate)),
is_monomorphic_(false),
is_uninitialized_(false),
is_pre_monomorphic_(false),
store_mode_(STANDARD_STORE) { }
@ -234,33 +232,6 @@ bool ObjectLiteral::Property::emit_store() {
}
bool IsEqualString(void* first, void* second) {
ASSERT((*reinterpret_cast<String**>(first))->IsString());
ASSERT((*reinterpret_cast<String**>(second))->IsString());
Handle<String> h1(reinterpret_cast<String**>(first));
Handle<String> h2(reinterpret_cast<String**>(second));
return (*h1)->Equals(*h2);
}
bool IsEqualNumber(void* first, void* second) {
ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());
Handle<Object> h1(reinterpret_cast<Object**>(first));
Handle<Object> h2(reinterpret_cast<Object**>(second));
if (h1->IsSmi()) {
return h2->IsSmi() && *h1 == *h2;
}
if (h2->IsSmi()) return false;
Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
ASSERT(std::isfinite(n1->value()));
ASSERT(std::isfinite(n2->value()));
return n1->value() == n2->value();
}
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
ZoneAllocationPolicy allocator(zone);
@ -456,14 +427,13 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;
is_pre_monomorphic_ = oracle->LoadIsPreMonomorphic(this);
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
StringLengthStub string_stub(Code::LOAD_IC, false);
if (oracle->LoadIsStub(this, &string_stub)) {
is_string_length_ = true;
} else if (oracle->LoadIsStub(this, &proto_stub)) {
if (oracle->LoadIsStub(this, &proto_stub)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
@ -474,8 +444,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
zone);
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), zone);
} else if (oracle->LoadIsPolymorphic(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
@ -490,7 +459,10 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
TypeFeedbackId id = AssignmentFeedbackId();
is_uninitialized_ = oracle->StoreIsUninitialized(id);
if (is_uninitialized_) return;
is_pre_monomorphic_ = oracle->StoreIsPreMonomorphic(id);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
@ -655,7 +627,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
#ifdef DEBUG
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
@ -1067,9 +1039,9 @@ CaseClause::CaseClause(Isolate* isolate,
Expression* label,
ZoneList<Statement*>* statements,
int pos)
: label_(label),
: AstNode(pos),
label_(label),
statements_(statements),
position_(pos),
compare_type_(Type::None(), isolate),
compare_id_(AstNode::GetNextId(isolate)),
entry_id_(AstNode::GetNextId(isolate)) {
@ -1111,6 +1083,7 @@ REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
REGULAR_NODE(CaseClause)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ArrayLiteral)
@ -1146,7 +1119,7 @@ DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)

534
deps/v8/src/ast.h

File diff suppressed because it is too large

9
deps/v8/src/bootstrapper.cc

@ -824,7 +824,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
// work in the snapshot case is done in HookUpInnerGlobal.
void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
// --- G l o b a l C o n t e x t ---
// --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
native_context()->set_previous(NULL);
@ -1043,7 +1043,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // -- J S O N
Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
JSFunction::SetInstancePrototype(cons,
@ -2067,6 +2067,11 @@ bool Genesis::InstallExperimentalNatives() {
"native harmony-array.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_maths &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native harmony-math.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
InstallExperimentalNativeFunctions();

122
deps/v8/src/builtins.cc

@ -195,79 +195,6 @@ BUILTIN(EmptyFunction) {
}
static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
Isolate* isolate,
JSFunction* constructor) {
ASSERT(args->length() >= 1);
Heap* heap = isolate->heap();
isolate->counters()->array_function_runtime()->Increment();
JSArray* array;
if (CalledAsConstructor(isolate)) {
array = JSArray::cast((*args)[0]);
// Initialize elements and length in case later allocations fail so that the
// array object is initialized in a valid state.
MaybeObject* maybe_array = array->Initialize(0);
if (maybe_array->IsFailure()) return maybe_array;
AllocationMemento* memento = AllocationMemento::FindForJSObject(array);
if (memento != NULL && memento->IsValid()) {
AllocationSite* site = memento->GetAllocationSite();
ElementsKind to_kind = site->GetElementsKind();
if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
to_kind)) {
// We have advice that we should change the elements kind
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n",
reinterpret_cast<void*>(array),
ElementsKindToString(array->GetElementsKind()),
ElementsKindToString(to_kind));
}
maybe_array = array->TransitionElementsKind(to_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
}
if (!FLAG_smi_only_arrays) {
Context* native_context = isolate->context()->native_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
!native_context->js_array_maps()->IsUndefined()) {
FixedArray* map_array =
FixedArray::cast(native_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
}
}
} else {
// Allocate the JS Array
MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
if (!maybe_obj->To(&array)) return maybe_obj;
}
Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
ASSERT(adjusted_arguments.length() < 1 ||
adjusted_arguments[0] == (*args)[1]);
return ArrayConstructInitializeElements(array, &adjusted_arguments);
}
BUILTIN(InternalArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
isolate->context()->native_context()->internal_array_function());
}
BUILTIN(ArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
isolate->context()->native_context()->array_function());
}
static void MoveDoubleElements(FixedDoubleArray* dst,
int dst_index,
FixedDoubleArray* src,
@ -346,10 +273,20 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
return FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + to_trim * entry_size));
FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + size_delta));
HeapProfiler* profiler = heap->isolate()->heap_profiler();
if (profiler->is_profiling()) {
profiler->ObjectMoveEvent(elms->address(),
new_elms->address(),
new_elms->Size());
if (profiler->is_tracking_allocations()) {
// Report filler object as a new allocation.
// Otherwise it will become an untracked object.
profiler->NewObjectEvent(elms->address(), elms->Size());
}
}
return new_elms;
}
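
Left-trimming in the hunk above drops to_trim leading slots by advancing the array's base address by size_delta, and the patch now reports that move to the heap profiler so the filler left behind stays tracked. A toy illustration of the address arithmetic (constants are illustrative):

#include <cstdio>
#include <cstdint>

int main() {
  // Trimming to_trim leading entries advances the array's base address;
  // the vacated prefix becomes a filler object that the profiler must see.
  const int kEntrySize = 8;  // pointer-sized elements
  uintptr_t old_address = 0x100000;
  int to_trim = 3;
  int size_delta = to_trim * kEntrySize;
  uintptr_t new_address = old_address + size_delta;
  std::printf("moved %#lx -> %#lx (filler of %d bytes reported as new)\n",
              static_cast<unsigned long>(old_address),
              static_cast<unsigned long>(new_address), size_delta);
  return 0;
}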
@ -1392,7 +1329,8 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
LoadStubCompiler::GenerateLoadViaGetter(
masm, LoadStubCompiler::registers()[0], Handle<JSFunction>());
}
@ -1451,6 +1389,11 @@ static void Generate_StoreIC_Slow(MacroAssembler* masm) {
}
static void Generate_StoreIC_Slow_Strict(MacroAssembler* masm) {
StoreIC::GenerateSlow(masm);
}
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
@ -1546,6 +1489,11 @@ static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
}
static void Generate_KeyedStoreIC_Slow_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateSlow(masm);
}
static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
@ -1728,8 +1676,19 @@ void Builtins::InitBuiltinFunctionTable() {
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
#define DEF_FUNCTION_PTR_H(aname, kind, extra) \
functions->generator = FUNCTION_ADDR(Generate_##aname); \
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
functions->flags = Code::ComputeFlags( \
Code::HANDLER, MONOMORPHIC, extra, Code::NORMAL, Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
#undef DEF_FUNCTION_PTR_C
@ -1854,8 +1813,15 @@ Handle<Code> Builtins::name() { \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
#define DEFINE_BUILTIN_ACCESSOR_H(name, kind, extra) \
Handle<Code> Builtins::name() { \
Code** code_address = \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A

52
deps/v8/src/builtins.h

@ -50,6 +50,10 @@ enum BuiltinExtraArguments {
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define CODE_AGE_LIST_WITH_NO_AGE(V) \
V(NoAge) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState) \
@ -63,9 +67,6 @@ enum BuiltinExtraArguments {
\
V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
\
V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
\
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
V(ArrayShift, NO_EXTRA_ARGUMENTS) \
@ -110,8 +111,6 @@ enum BuiltinExtraArguments {
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
@ -120,29 +119,19 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
@ -162,8 +151,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Generic, STORE_IC, GENERIC, \
@ -176,8 +163,6 @@ enum BuiltinExtraArguments {
kStrictMode) \
V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
@ -219,10 +204,29 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
V(LoadIC_Slow, LOAD_IC, Code::kNoExtraICState) \
V(KeyedLoadIC_Slow, KEYED_LOAD_IC, Code::kNoExtraICState) \
V(StoreIC_Slow, STORE_IC, Code::kNoExtraICState) \
V(StoreIC_Slow_Strict, STORE_IC, kStrictMode) \
V(KeyedStoreIC_Slow, KEYED_STORE_IC, Code::kNoExtraICState)\
V(KeyedStoreIC_Slow_Strict, KEYED_STORE_IC, kStrictMode) \
V(LoadIC_Normal, LOAD_IC, Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, Code::kNoExtraICState) \
V(StoreIC_Normal_Strict, STORE_IC, kStrictMode)
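
BUILTIN_LIST_H is a new X-macro list: each consumer passes its own V macro (DEF_ENUM_H, DEF_FUNCTION_PTR_H, and DECLARE_BUILTIN_ACCESSOR_H above) to stamp out enum values, function-table entries, and accessors from one source of truth. A trimmed, self-contained sketch of the expansion (list contents abbreviated; the extra-IC-state column is omitted):

#include <cstdio>

// Trimmed stand-in for the new handler list.
#define MY_BUILTIN_LIST_H(V)   \
  V(LoadIC_Slow, LOAD_IC)      \
  V(StoreIC_Slow, STORE_IC)    \
  V(StoreIC_Normal, STORE_IC)

enum Kind { LOAD_IC, STORE_IC };

enum Name {
#define DEF_ENUM_H(name, kind) k##name,  // expands to kLoadIC_Slow, ...
  MY_BUILTIN_LIST_H(DEF_ENUM_H)
#undef DEF_ENUM_H
  builtin_count
};

int main() {
  std::printf("handler builtins declared: %d\n",
              static_cast<int>(builtin_count));
  return 0;
}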
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
@ -310,8 +314,10 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
#define DEF_ENUM_H(name, kind, extra) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_H(DEF_ENUM_H)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
@ -335,8 +341,10 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_H(name, kind, extra) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
@ -391,7 +399,6 @@ class Builtins {
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
@ -403,7 +410,7 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);
@ -415,6 +422,9 @@ class Builtins {
CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
static void InitBuiltinFunctionTable();
bool initialized_;

46
deps/v8/src/checks.cc

@ -25,11 +25,48 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
#include "checks.h"
#include "v8.h"
#if V8_LIBC_GLIBC || V8_OS_BSD
# include <cxxabi.h>
# include <execinfo.h>
#endif // V8_LIBC_GLIBC || V8_OS_BSD
#include <stdio.h>
#include "platform.h"
#include "v8.h"
// Attempts to dump a backtrace (if supported).
static V8_INLINE void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
int size = backtrace(trace, ARRAY_SIZE(trace));
char** symbols = backtrace_symbols(trace, size);
i::OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
i::OS::PrintError("(empty)\n");
} else if (symbols == NULL) {
i::OS::PrintError("(no symbols)\n");
} else {
for (int i = 1; i < size; ++i) {
i::OS::PrintError("%2d: ", i);
char mangled[201];
if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
int status;
size_t length;
char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
free(demangled);
} else {
i::OS::PrintError("??\n");
}
}
}
free(symbols);
#endif // V8_LIBC_GLIBC || V8_OS_BSD
}
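
DumpBacktrace above only compiles on glibc and the BSDs, and backtrace_symbols() can only name functions present in the dynamic symbol table, so binaries are typically linked with -rdynamic to get useful output; the hunk demangles names via abi::__cxa_demangle. A minimal standalone variant that skips demangling (pipe the output through c++filt instead):

#include <execinfo.h>
#include <cstdio>
#include <cstdlib>

// Prints the current C stack; build with: g++ -g -rdynamic bt.cc
void PrintCurrentBacktrace() {
  void* trace[32];
  int size = backtrace(trace, 32);
  char** symbols = backtrace_symbols(trace, size);
  if (symbols == NULL) return;
  for (int i = 0; i < size; ++i) std::printf("%2d: %s\n", i, symbols[i]);
  std::free(symbols);  // one free() releases the whole symbol array
}

int main() {
  PrintCurrentBacktrace();
  return 0;
}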
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
@ -43,7 +80,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
i::OS::DumpBacktrace();
DumpBacktrace();
fflush(stderr);
i::OS::Abort();
}
@ -91,8 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) {
namespace v8 { namespace internal {
bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal

19
deps/v8/src/checks.h

@ -272,7 +272,24 @@ template <int> class StaticAssertionHelper { };
#endif
#ifdef DEBUG
#ifndef OPTIMIZED_DEBUG
#define ENABLE_SLOW_ASSERTS 1
#endif
#endif
namespace v8 {
namespace internal {
#ifdef ENABLE_SLOW_ASSERTS
#define SLOW_ASSERT(condition) \
CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
extern bool FLAG_enable_slow_asserts;
#else
#define SLOW_ASSERT(condition) ((void) 0)
const bool FLAG_enable_slow_asserts = false;
#endif
} // namespace internal
} // namespace v8
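
The effect of the relocated SLOW_ASSERT: in ENABLE_SLOW_ASSERTS builds it defers to the runtime flag, otherwise it compiles to a no-op while FLAG_enable_slow_asserts remains usable as a constant. A standalone sketch with CHECK simplified to abort-on-failure (V8's real CHECK routes through V8_Fatal):

#include <cstdio>
#include <cstdlib>

#define ENABLE_SLOW_ASSERTS 1  // comment out to compile SLOW_ASSERT away

#define CHECK(condition)                                        \
  do {                                                          \
    if (!(condition)) {                                         \
      std::fprintf(stderr, "check failed: %s\n", #condition);   \
      std::abort();                                             \
    }                                                           \
  } while (0)

#ifdef ENABLE_SLOW_ASSERTS
static bool FLAG_enable_slow_asserts = true;  // V8 wires this to a real --flag
#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
static const bool FLAG_enable_slow_asserts = false;
#define SLOW_ASSERT(condition) ((void) 0)
#endif

int main() {
  SLOW_ASSERT(1 + 1 == 2);  // checked only when slow asserts are compiled in
  std::printf("ok\n");
  return 0;
}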
// The ASSERT macro is equivalent to CHECK except that it only
@ -285,7 +302,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
@ -294,7 +310,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts have no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression

292
deps/v8/src/code-stubs-hydrogen.cc

@ -146,14 +146,10 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
int param_count = descriptor_->register_param_count_;
HEnvironment* start_environment = graph()->start_environment();
HBasicBlock* next_block = CreateBasicBlock(start_environment);
current_block()->Goto(next_block);
Goto(next_block);
next_block->SetJoinId(BailoutId::StubEntry());
set_current_block(next_block);
HConstant* undefined_constant =
Add<HConstant>(isolate()->factory()->undefined_value());
graph()->set_undefined_constant(undefined_constant);
for (int i = 0; i < param_count; ++i) {
HParameter* param =
Add<HParameter>(i, HParameter::REGISTER_PARAMETER);
@ -162,7 +158,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
}
HInstruction* stack_parameter_count;
if (descriptor_->stack_parameter_count_ != NULL) {
if (descriptor_->stack_parameter_count_.is_valid()) {
ASSERT(descriptor_->environment_length() == (param_count + 1));
stack_parameter_count = New<HParameter>(param_count,
HParameter::REGISTER_PARAMETER,
@ -178,8 +174,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
arguments_length_ = graph()->GetConstant0();
}
context_ = New<HContext>();
AddInstruction(context_);
context_ = Add<HContext>();
start_environment->BindContext(context_);
Add<HSimulate>(BailoutId::StubEntry());
@ -207,8 +202,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
if (current_block() != NULL) {
HReturn* hreturn_instruction = New<HReturn>(return_value,
stack_pop_count);
current_block()->Finish(hreturn_instruction);
set_current_block(NULL);
FinishCurrentBlock(hreturn_instruction);
}
return true;
}
@ -298,12 +292,21 @@ static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
// the runtime that is significantly faster than using the standard
// stub-failure deopt mechanism.
if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
ASSERT(descriptor->stack_parameter_count_ == NULL);
ASSERT(!descriptor->stack_parameter_count_.is_valid());
return stub->GenerateLightweightMissCode(isolate);
}
ElapsedTimer timer;
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
CodeStubGraphBuilder<Stub> builder(isolate, stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen();
Handle<Code> code = chunk->Codegen();
if (FLAG_profile_hydrogen_code_stub_compilation) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms);
}
return code;
}
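
The new FLAG_profile_hydrogen_code_stub_compilation path wraps graph building and codegen in an ElapsedTimer and prints milliseconds. V8's ElapsedTimer is internal; here is a std::chrono analogue of the same measurement (the loop is a stand-in for CreateGraph/OptimizeGraph/Codegen):

#include <chrono>
#include <cstdio>

int main() {
  auto start = std::chrono::steady_clock::now();
  double work = 0;
  for (int i = 0; i < 1000000; ++i) work = work + i * 0.5;  // stand-in workload
  double ms = std::chrono::duration<double, std::milli>(
                  std::chrono::steady_clock::now() - start).count();
  std::printf("[Lazy compilation of SomeStub took %0.3f ms] (work=%f)\n",
              ms, work);
  return 0;
}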
@ -338,6 +341,19 @@ Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
}
template <>
HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
info()->MarkAsSavesCallerDoubles();
HValue* number = GetParameter(NumberToStringStub::kNumber);
return BuildNumberToString(number, handle(Type::Number(), isolate()));
}
Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
@ -355,42 +371,48 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
undefined);
checker.Then();
HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
HValue* push_value;
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
if_fixed_cow.Then();
environment()->Push(BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
0/*copy-on-write*/));
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
0/*copy-on-write*/);
environment()->Push(push_value);
if_fixed_cow.Else();
IfBuilder if_fixed(this);
if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then();
environment()->Push(BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
length));
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
length);
environment()->Push(push_value);
if_fixed.Else();
environment()->Push(BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
FAST_DOUBLE_ELEMENTS,
length));
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
FAST_DOUBLE_ELEMENTS,
length);
environment()->Push(push_value);
} else {
ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
environment()->Push(BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
elements_kind,
length));
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
alloc_site_mode,
elements_kind,
length);
environment()->Push(push_value);
}
checker.ElseDeopt("Uninitialized boilerplate literals");
@ -407,23 +429,33 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
Zone* zone = this->zone();
HValue* undefined = graph()->GetConstantUndefined();
HInstruction* boilerplate = Add<HLoadKeyed>(GetParameter(0),
GetParameter(1),
static_cast<HValue*>(NULL),
FAST_ELEMENTS);
HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
GetParameter(1),
static_cast<HValue*>(NULL),
FAST_ELEMENTS);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate,
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
undefined);
checker.And();
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
HValue* boilerplate_size =
AddInstruction(new(zone) HInstanceSize(boilerplate));
HValue* size_in_words = Add<HConstant>(size >> kPointerSizeLog2);
int object_size = size;
if (FLAG_allocation_site_pretenuring) {
size += AllocationMemento::kSize;
}
HValue* boilerplate_map = Add<HLoadNamedField>(
boilerplate, HObjectAccess::ForMap());
HValue* boilerplate_size = Add<HLoadNamedField>(
boilerplate_map, HObjectAccess::ForMapInstanceSize());
HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
checker.If<HCompareNumericAndBranch>(boilerplate_size,
size_in_words, Token::EQ);
checker.Then();
@ -433,12 +465,17 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
for (int i = 0; i < size; i += kPointerSize) {
for (int i = 0; i < object_size; i += kPointerSize) {
HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
Add<HStoreNamedField>(object, access,
Add<HLoadNamedField>(boilerplate, access));
}
ASSERT(FLAG_allocation_site_pretenuring || (size == object_size));
if (FLAG_allocation_site_pretenuring) {
BuildCreateAllocationMemento(object, object_size, allocation_site);
}
environment()->Push(object);
checker.ElseDeopt("Uninitialized boilerplate in fast clone");
checker.End();
@ -459,24 +496,39 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
JS_OBJECT_TYPE);
// Store the map
Handle<Map> allocation_site_map(isolate()->heap()->allocation_site_map(),
isolate());
Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
AddStoreMapConstant(object, allocation_site_map);
// Store the payload (smi elements kind)
HValue* initial_elements_kind = Add<HConstant>(GetInitialFastElementsKind());
Add<HStoreNamedField>(object,
HObjectAccess::ForAllocationSiteTransitionInfo(),
HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset),
initial_elements_kind);
// Unlike literals, constructed arrays don't have nested sites
Add<HStoreNamedField>(object,
HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kNestedSiteOffset),
graph()->GetConstant0());
// Store an empty fixed array for the code dependency.
HConstant* empty_fixed_array =
Add<HConstant>(isolate()->factory()->empty_fixed_array());
HStoreNamedField* store = Add<HStoreNamedField>(
object,
HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kDependentCodeOffset),
empty_fixed_array);
// Link the object to the allocation site list
HValue* site_list = Add<HConstant>(
ExternalReference::allocation_sites_list_address(isolate()));
HValue* site = Add<HLoadNamedField>(site_list,
HObjectAccess::ForAllocationSiteList());
HStoreNamedField* store =
Add<HStoreNamedField>(object, HObjectAccess::ForAllocationSiteWeakNext(),
site);
store = Add<HStoreNamedField>(object,
HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
site);
store->SkipWriteBarrier();
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
@ -519,7 +571,7 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
return AddLoadNamedField(GetParameter(0), access);
}
@ -534,7 +586,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
return AddLoadNamedField(GetParameter(0), access);
}
@ -640,14 +692,13 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HValue* constant_zero = graph()->GetConstant0();
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* argument = AddInstruction(
new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero));
HInstruction* argument = Add<HAccessArgumentsAt>(
elements, constant_one, constant_zero);
HConstant* max_alloc_length =
Add<HConstant>(JSObject::kInitialMaxFastElementArray);
const int initial_capacity = JSArray::kPreallocatedArrayElements;
HConstant* initial_capacity_node = New<HConstant>(initial_capacity);
AddInstruction(initial_capacity_node);
HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
HInstruction* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
IfBuilder if_builder(this);
@ -690,8 +741,8 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
HValue* start = graph()->GetConstant0();
HValue* key = builder.BeginBody(start, length, Token::LT);
HInstruction* argument_elements = Add<HArgumentsElements>(false);
HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt(
argument_elements, length, key));
HInstruction* argument = Add<HAccessArgumentsAt>(
argument_elements, length, key);
Add<HStoreKeyed>(elements, key, argument, kind);
builder.EndBody();
@ -792,7 +843,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
Handle<Type> type = stub->GetType(isolate, sentinel_map);
BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation);
BuildCompareNil(GetParameter(0), type, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
@ -811,6 +862,115 @@ Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
}
template <>
HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
BinaryOpStub* stub = casted_stub();
HValue* left = GetParameter(0);
HValue* right = GetParameter(1);
Handle<Type> left_type = stub->GetLeftType(isolate());
Handle<Type> right_type = stub->GetRightType(isolate());
Handle<Type> result_type = stub->GetResultType(isolate());
ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
(stub->HasSideEffects(isolate()) || !result_type->Is(Type::None())));
HValue* result = NULL;
if (stub->operation() == Token::ADD &&
(left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
!left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
// For the generic add stub a fast case for string addition is performance
// critical.
if (left_type->Maybe(Type::String())) {
IfBuilder if_leftisstring(this);
if_leftisstring.If<HIsStringAndBranch>(left);
if_leftisstring.Then();
{
Push(AddInstruction(BuildBinaryOperation(
stub->operation(), left, right,
handle(Type::String(), isolate()), right_type,
result_type, stub->fixed_right_arg(), true)));
}
if_leftisstring.Else();
{
Push(AddInstruction(BuildBinaryOperation(
stub->operation(), left, right,
left_type, right_type, result_type,
stub->fixed_right_arg(), true)));
}
if_leftisstring.End();
result = Pop();
} else {
IfBuilder if_rightisstring(this);
if_rightisstring.If<HIsStringAndBranch>(right);
if_rightisstring.Then();
{
Push(AddInstruction(BuildBinaryOperation(
stub->operation(), left, right,
left_type, handle(Type::String(), isolate()),
result_type, stub->fixed_right_arg(), true)));
}
if_rightisstring.Else();
{
Push(AddInstruction(BuildBinaryOperation(
stub->operation(), left, right,
left_type, right_type, result_type,
stub->fixed_right_arg(), true)));
}
if_rightisstring.End();
result = Pop();
}
} else {
result = AddInstruction(BuildBinaryOperation(
stub->operation(), left, right,
left_type, right_type, result_type,
stub->fixed_right_arg(), true));
}
// If we encounter a generic argument, the number conversion is
// observable, thus we cannot afford to bail out after the fact.
if (!stub->HasSideEffects(isolate())) {
if (result_type->Is(Type::Smi())) {
if (stub->operation() == Token::SHR) {
// TODO(olivf) Replace this by a SmiTagU Instruction.
// 0x40000000: this number would convert to negative when interpreting
// the register as a signed value;
IfBuilder if_of(this);
if_of.IfNot<HCompareNumericAndBranch>(result,
Add<HConstant>(static_cast<int>(SmiValuesAre32Bits()
? 0x80000000 : 0x40000000)), Token::EQ_STRICT);
if_of.Then();
if_of.ElseDeopt("UInt->Smi overflow");
if_of.End();
}
}
result = EnforceNumberType(result, result_type);
}
// Reuse the double box of one of the operands if we are allowed to (i.e.
// chained binops).
if (stub->CanReuseDoubleBox()) {
HValue* operand = (stub->mode() == OVERWRITE_LEFT) ? left : right;
IfBuilder if_heap_number(this);
if_heap_number.IfNot<HIsSmiAndBranch>(operand);
if_heap_number.Then();
Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result);
Push(operand);
if_heap_number.Else();
Push(result);
if_heap_number.End();
result = Pop();
}
return result;
}
Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
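
The SHR special case above exists because a smi holds only 31 signed bits on 32-bit targets: an unsigned shift result of 0x40000000 or above (0x80000000 once smis are 32-bit) cannot be tagged and must deopt to a heap number. A tiny illustration of the boundary:

#include <cstdio>
#include <cstdint>

int main() {
  // 31-bit smis store values up to 0x3fffffff; anything >= 0x40000000 deopts.
  uint32_t shr_result = 0x80000000u >> 1;  // 0x40000000
  bool fits_in_smi = shr_result < 0x40000000u;
  std::printf("0x%x fits in a 31-bit smi: %s\n",
              static_cast<unsigned>(shr_result),
              fits_in_smi ? "yes" : "no");
  return 0;
}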
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
@ -918,8 +1078,7 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
HValue* native_context,
HValue* code_object) {
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized(),
context());
AddIncrementCounter(counters->fast_new_closure_install_optimized());
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
@ -967,7 +1126,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
is_optimized.Else();
{
AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
AddIncrementCounter(counters->fast_new_closure_try_optimized());
// optimized_map points to fixed array of 3-element entries
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
@ -1012,8 +1171,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
restore_check.Else();
{
HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key,
shared_function_entry_length));
HValue* keyed_minus = AddUncasted<HSub>(
key, shared_function_entry_length);
HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
IfBuilder done_check(this);
@ -1022,8 +1181,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
done_check.Then();
{
// Hit: fetch the optimized code.
HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(),
keyed_minus, graph()->GetConstant1()));
HValue* keyed_plus = AddUncasted<HAdd>(
keyed_minus, graph()->GetConstant1());
HValue* code_object = Add<HLoadKeyed>(optimized_map,
keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
BuildInstallOptimizedCode(js_function, native_context, code_object);
@ -1052,11 +1211,12 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Add<HConstant>(factory->empty_fixed_array());
HValue* shared_info = GetParameter(0);
AddIncrementCounter(counters->fast_new_closure_total());
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
AddIncrementCounter(counters->fast_new_closure_total(), context());
int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->is_generator());

552
deps/v8/src/code-stubs.cc

@ -41,7 +41,7 @@ namespace internal {
CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
: register_param_count_(-1),
stack_parameter_count_(NULL),
stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
@ -129,6 +129,11 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
}
void CodeStub::VerifyPlatformFeatures(Isolate* isolate) {
ASSERT(CpuFeatures::VerifyCrossCompiling());
}
Handle<Code> CodeStub::GetCode(Isolate* isolate) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
@ -137,9 +142,14 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
#ifdef DEBUG
VerifyPlatformFeatures(isolate);
#endif
{
HandleScope scope(isolate);
@ -203,119 +213,471 @@ void CodeStub::PrintName(StringStream* stream) {
}
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
AllowStubCallsScope allow_stub_calls(masm, true);
void BinaryOpStub::PrintBaseName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* ovr = "";
if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft";
if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight";
stream->Add("BinaryOpStub_%s%s", op_name, ovr);
}
BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
// The OddballStub handles a number and an oddball, not two oddballs.
operands_type = BinaryOpIC::GENERIC;
void BinaryOpStub::PrintState(StringStream* stream) {
stream->Add("(");
stream->Add(StateToName(left_state_));
stream->Add("*");
if (fixed_right_arg_.has_value) {
stream->Add("%d", fixed_right_arg_.value);
} else {
stream->Add(StateToName(right_state_));
}
switch (operands_type) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
case BinaryOpIC::INT32:
GenerateInt32Stub(masm);
break;
case BinaryOpIC::NUMBER:
GenerateNumberStub(masm);
break;
case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
UNREACHABLE();
stream->Add("->");
stream->Add(StateToName(result_state_));
stream->Add(")");
}
Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left,
Handle<Object> right,
Isolate* isolate) {
Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_);
Object* builtin = builtins->javascript_builtin(func);
Handle<JSFunction> builtin_function =
Handle<JSFunction>(JSFunction::cast(builtin), isolate);
bool caught_exception;
Handle<Object> result = Execution::Call(isolate, builtin_function, left,
1, &right, &caught_exception);
return Maybe<Handle<Object> >(!caught_exception, result);
}
void BinaryOpStub::Initialize() {
fixed_right_arg_.has_value = false;
left_state_ = right_state_ = result_state_ = NONE;
}
void BinaryOpStub::Generate(Token::Value op,
State left,
State right,
State result,
OverwriteMode mode,
Isolate* isolate) {
BinaryOpStub stub(INITIALIZED);
stub.op_ = op;
stub.left_state_ = left;
stub.right_state_ = right;
stub.result_state_ = result;
stub.mode_ = mode;
stub.GetCode(isolate);
}
void BinaryOpStub::Generate(Token::Value op,
State left,
int right,
State result,
OverwriteMode mode,
Isolate* isolate) {
BinaryOpStub stub(INITIALIZED);
stub.op_ = op;
stub.left_state_ = left;
stub.fixed_right_arg_.has_value = true;
stub.fixed_right_arg_.value = right;
stub.right_state_ = SMI;
stub.result_state_ = result;
stub.mode_ = mode;
stub.GetCode(isolate);
}
void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL,
Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND,
Token::BIT_XOR, Token::SHL, Token::SHR};
for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) {
BinaryOpStub stub(UNINITIALIZED);
stub.op_ = binop[i];
stub.GetCode(isolate);
}
// TODO(olivf) We should investigate why adding stubs to the snapshot is so
// expensive at runtime. When solved we should be able to add most binops to
// the snapshot instead of hand-picking them.
// Generated list of commonly used stubs
Generate(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::MOD, SMI, 2, SMI, NO_OVERWRITE, isolate);
Generate(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE, isolate);
Generate(Token::MOD, SMI, 32, SMI, NO_OVERWRITE, isolate);
Generate(Token::MOD, SMI, 4, SMI, NO_OVERWRITE, isolate);
Generate(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::MOD, SMI, 8, SMI, NO_OVERWRITE, isolate);
Generate(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
Generate(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
Generate(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE, isolate);
Generate(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
Generate(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE, isolate);
Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
}
#define __ ACCESS_MASM(masm)
bool BinaryOpStub::can_encode_arg_value(int32_t value) const {
return op_ == Token::MOD && value > 0 && IsPowerOf2(value) &&
FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
}
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
break;
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
break;
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
int BinaryOpStub::encode_arg_value(int32_t value) const {
ASSERT(can_encode_arg_value(value));
return WhichPowerOf2(value);
}
int32_t BinaryOpStub::decode_arg_value(int value) const {
return 1 << value;
}
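
can_encode_arg_value/encode_arg_value/decode_arg_value above restrict the fixed right argument of MOD to positive powers of two, so only the exponent has to fit in FixedRightArgValueBits. A standalone sketch of the round trip (IsPowerOf2/WhichPowerOf2 re-implemented here for illustration; V8's live in utils.h):

#include <cstdio>
#include <cstdint>

static bool IsPowerOf2(int32_t v) { return v > 0 && (v & (v - 1)) == 0; }
static int WhichPowerOf2(int32_t v) {
  int n = 0;
  while (v > 1) { v >>= 1; ++n; }
  return n;
}

int main() {
  const int32_t values[] = {2, 4, 16, 32, 2048};
  for (int32_t v : values) {
    if (!IsPowerOf2(v)) continue;
    int encoded = WhichPowerOf2(v);        // what the stub key stores
    std::printf("fixed right arg %d encodes as %d, decodes to %d\n",
                v, encoded, 1 << encoded);  // decode_arg_value: 1 << value
  }
  return 0;
}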
int BinaryOpStub::encode_token(Token::Value op) const {
ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN);
return op - FIRST_TOKEN;
}
Token::Value BinaryOpStub::decode_token(int op) const {
int res = op + FIRST_TOKEN;
ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN);
return static_cast<Token::Value>(res);
}
const char* BinaryOpStub::StateToName(State state) {
switch (state) {
case NONE:
return "None";
case SMI:
return "Smi";
case INT32:
return "Int32";
case NUMBER:
return "Number";
case STRING:
return "String";
case GENERIC:
return "Generic";
}
return "";
}
void BinaryOpStub::UpdateStatus(Handle<Object> left,
Handle<Object> right,
Maybe<Handle<Object> > result) {
int old_state = GetExtraICState();
UpdateStatus(left, &left_state_);
UpdateStatus(right, &right_state_);
int32_t value;
bool new_has_fixed_right_arg =
right->ToInt32(&value) && can_encode_arg_value(value) &&
(left_state_ == SMI || left_state_ == INT32) &&
(result_state_ == NONE || !fixed_right_arg_.has_value);
fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value);
if (result.has_value) UpdateStatus(result.value, &result_state_);
State max_input = Max(left_state_, right_state_);
if (!has_int_result() && op_ != Token::SHR &&
max_input <= NUMBER && max_input > result_state_) {
result_state_ = max_input;
}
ASSERT(result_state_ <= (has_int_result() ? INT32 : NUMBER) ||
op_ == Token::ADD);
if (old_state == GetExtraICState()) {
// Tagged operations can lead to non-truncating HChanges
if (left->IsUndefined() || left->IsBoolean()) {
left_state_ = GENERIC;
} else if (right->IsUndefined() || right->IsBoolean()) {
right_state_ = GENERIC;
} else {
// Since the FPU is too precise, we might bail out on numbers which
// would actually truncate with 64-bit precision.
ASSERT(!CpuFeatures::IsSupported(SSE2) &&
result_state_ <= INT32);
result_state_ = NUMBER;
}
}
}
void BinaryOpStub::UpdateStatus(Handle<Object> object,
State* state) {
bool is_truncating = (op_ == Token::BIT_AND || op_ == Token::BIT_OR ||
op_ == Token::BIT_XOR || op_ == Token::SAR ||
op_ == Token::SHL || op_ == Token::SHR);
v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object);
if (object->IsBoolean() && is_truncating) {
// Booleans are converted by truncation in HChange.
type = TypeInfo::Integer32();
}
if (object->IsUndefined()) {
// Undefined will be automatically truncated for us by HChange.
type = is_truncating ? TypeInfo::Integer32() : TypeInfo::Double();
}
State int_state = SmiValuesAre32Bits() ? NUMBER : INT32;
State new_state = NONE;
if (type.IsSmi()) {
new_state = SMI;
} else if (type.IsInteger32()) {
new_state = int_state;
} else if (type.IsNumber()) {
new_state = NUMBER;
} else if (object->IsString() && operation() == Token::ADD) {
new_state = STRING;
} else {
new_state = GENERIC;
}
if ((new_state <= NUMBER && *state > NUMBER) ||
(new_state > NUMBER && *state <= NUMBER && *state != NONE)) {
new_state = GENERIC;
}
*state = Max(*state, new_state);
}
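
UpdateStatus above merges new type feedback into a small lattice (NONE < SMI < INT32 < NUMBER < STRING < GENERIC) and collapses to GENERIC whenever number-like and non-number feedback mix. A self-contained model of that join (enum values mirror the patch's State; these are not V8's actual declarations):

#include <algorithm>
#include <cstdio>

enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

State Join(State old_state, State new_state) {
  // Number-like and string-like feedback do not mix; generalize to GENERIC.
  if ((new_state <= NUMBER && old_state > NUMBER) ||
      (new_state > NUMBER && old_state <= NUMBER && old_state != NONE)) {
    return GENERIC;
  }
  return std::max(old_state, new_state);
}

int main() {
  std::printf("SMI+INT32 -> %d, INT32+STRING -> %d, NONE+STRING -> %d\n",
              Join(SMI, INT32), Join(INT32, STRING), Join(NONE, STRING));
  return 0;
}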
Handle<Type> BinaryOpStub::StateToType(State state,
Isolate* isolate) {
Handle<Type> t = handle(Type::None(), isolate);
switch (state) {
case NUMBER:
t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate);
// Fall through.
case INT32:
t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate);
// Fall through.
case SMI:
t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
case STRING:
t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
case GENERIC:
return handle(Type::Any(), isolate);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
case NONE:
break;
default:
UNREACHABLE();
}
return t;
}
#undef __
Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const {
return StateToType(left_state_, isolate);
}
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
stream->Add("BinaryOpStub_%s_%s_%s+%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(left_type_),
BinaryOpIC::GetName(right_type_));
Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const {
return StateToType(right_state_, isolate);
}
void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
GenerateBothStringStub(masm);
return;
Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const {
if (HasSideEffects(isolate)) return StateToType(NONE, isolate);
if (result_state_ == GENERIC && op_ == Token::ADD) {
return handle(Type::Union(handle(Type::Number(), isolate),
handle(Type::String(), isolate)), isolate);
}
ASSERT(result_state_ != GENERIC);
if (result_state_ == NUMBER && op_ == Token::SHR) {
return handle(Type::Unsigned32(), isolate);
}
// Try to add arguments as strings; otherwise, transition to the generic
// BinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
return StateToType(result_state_, isolate);
}
@ -759,6 +1121,12 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
}
void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
NumberToStringStub stub;
InstallDescriptor(isolate, &stub);
}
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
FastNewClosureStub stub(STRICT_MODE, false);
InstallDescriptor(isolate, &stub);

326
deps/v8/src/code-stubs.h

@ -30,8 +30,9 @@
#include "allocation.h"
#include "assembler.h"
#include "globals.h"
#include "codegen.h"
#include "globals.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
@ -200,19 +201,21 @@ class CodeStub BASE_EMBEDDED {
virtual void PrintName(StringStream* stream);
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
protected:
static bool CanUseFPRegisters();
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
virtual void VerifyPlatformFeatures(Isolate* isolate);
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintBaseName(StringStream* stream);
virtual void PrintState(StringStream* stream) { }
@ -278,7 +281,7 @@ enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
struct CodeStubInterfaceDescriptor {
CodeStubInterfaceDescriptor();
int register_param_count_;
const Register* stack_parameter_count_;
Register stack_parameter_count_;
// if hint_stack_parameter_count_ > 0, the code stub can optimize the
// return sequence. Default value is -1, which means it is ignored.
int hint_stack_parameter_count_;
@ -287,7 +290,7 @@ struct CodeStubInterfaceDescriptor {
Address deoptimization_handler_;
int environment_length() const {
if (stack_parameter_count_ != NULL) {
if (stack_parameter_count_.is_valid()) {
return register_param_count_ + 1;
}
return register_param_count_;
@ -318,7 +321,7 @@ struct CodeStubInterfaceDescriptor {
// defined outside of the platform directories
#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \
((index) == (descriptor)->register_param_count_) \
? *((descriptor)->stack_parameter_count_) \
? (descriptor)->stack_parameter_count_ \
: (descriptor)->register_params_[(index)]
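The two hunks above change stack_parameter_count_ from a const Register* (NULL meaning "no stack parameter count register") to a by-value Register queried with is_valid(). A minimal sketch of that sentinel pattern, with a toy Register standing in for V8's (assuming, as in V8's assembler headers, that an invalid register is one whose code is -1):

#include <cassert>

struct Register {
  int code_;
  bool is_valid() const { return code_ != -1; }
};

struct Descriptor {
  int register_param_count_;
  Register stack_parameter_count_;  // by value; no pointer, no NULL checks

  int environment_length() const {
    // One extra environment slot when a register carries the stack count.
    return register_param_count_ + (stack_parameter_count_.is_valid() ? 1 : 0);
  }
};

int main() {
  Register no_reg = { -1 };
  Descriptor d = { 2, no_reg };
  assert(d.environment_length() == 2);  // no stack-count register
  d.stack_parameter_count_.code_ = 3;   // say r3 now holds the count
  assert(d.environment_length() == 3);  // one extra environment slot
  return 0;
}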
@ -402,9 +405,7 @@ enum StringAddFlags {
// Check right parameter.
STRING_ADD_CHECK_RIGHT = 1 << 1,
// Check both parameters.
STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
// Stub needs a frame before calling the runtime
STRING_ADD_ERECT_FRAME = 1 << 2
STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
};
} } // namespace v8::internal
@ -464,6 +465,27 @@ class ToNumberStub: public HydrogenCodeStub {
};
class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
public:
NumberToStringStub() {}
virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kNumber = 0;
private:
virtual Major MajorKey() V8_OVERRIDE { return NumberToString; }
virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
};
class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
@ -830,19 +852,12 @@ class FunctionPrototypeStub: public ICStub {
class StringLengthStub: public ICStub {
public:
StringLengthStub(Code::Kind kind, bool support_wrapper)
: ICStub(kind), support_wrapper_(support_wrapper) { }
explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
virtual void Generate(MacroAssembler* masm);
private:
STATIC_ASSERT(KindBits::kSize == 4);
class WrapperModeBits: public BitField<bool, 4, 1> {};
virtual CodeStub::Major MajorKey() { return StringLength; }
virtual int MinorKey() {
return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
}
bool support_wrapper_;
virtual CodeStub::Major MajorKey() { return StringLength; }
};
@ -892,7 +907,7 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
virtual int GetStubFlags() { return kind(); }
protected:
@ -983,156 +998,177 @@ class KeyedLoadFieldStub: public LoadFieldStub {
};
class BinaryOpStub: public PlatformCodeStub {
class BinaryOpStub: public HydrogenCodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
platform_specific_bit_(false),
left_type_(BinaryOpIC::UNINITIALIZED),
right_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED),
encoded_right_arg_(false, encode_arg_value(1)) {
: HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) {
ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN);
Initialize();
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
BinaryOpStub(
int key,
BinaryOpIC::TypeInfo left_type,
BinaryOpIC::TypeInfo right_type,
BinaryOpIC::TypeInfo result_type,
Maybe<int32_t> fixed_right_arg)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
platform_specific_bit_(PlatformSpecificBits::decode(key)),
left_type_(left_type),
right_type_(right_type),
result_type_(result_type),
encoded_right_arg_(fixed_right_arg.has_value,
encode_arg_value(fixed_right_arg.value)) { }
explicit BinaryOpStub(Code::ExtraICState state)
: op_(decode_token(OpBits::decode(state))),
mode_(OverwriteModeField::decode(state)),
fixed_right_arg_(
Maybe<int>(HasFixedRightArgBits::decode(state),
decode_arg_value(FixedRightArgValueBits::decode(state)))),
left_state_(LeftStateField::decode(state)),
right_state_(fixed_right_arg_.has_value
? ((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32)
: RightStateField::decode(state)),
result_state_(ResultStateField::decode(state)) {
// We don't deserialize the SSE2 Field, since this is only used to be able
// to include SSE2 as well as non-SSE2 versions in the snapshot. For code
// generation we always want it to reflect the current state.
ASSERT(!fixed_right_arg_.has_value ||
can_encode_arg_value(fixed_right_arg_.value));
}
static const int FIRST_TOKEN = Token::BIT_OR;
static const int LAST_TOKEN = Token::MOD;
static void decode_types_from_minor_key(int minor_key,
BinaryOpIC::TypeInfo* left_type,
BinaryOpIC::TypeInfo* right_type,
BinaryOpIC::TypeInfo* result_type) {
*left_type =
static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
*right_type =
static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
*result_type =
static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
static void GenerateAheadOfTime(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
static void InitializeForIsolate(Isolate* isolate) {
BinaryOpStub binopStub(UNINITIALIZED);
binopStub.InitializeInterfaceDescriptor(
isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp));
}
virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
if (Max(left_state_, right_state_) == NONE) {
return ::v8::internal::UNINITIALIZED;
}
if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC;
return MONOMORPHIC;
}
static Token::Value decode_op_from_minor_key(int minor_key) {
return static_cast<Token::Value>(OpBits::decode(minor_key));
virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
}
static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
return Maybe<int>(
HasFixedRightArgBits::decode(minor_key),
decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
virtual Code::ExtraICState GetExtraICState() {
bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI &&
CpuFeatures::IsSafeForSnapshot(SSE2);
return OpBits::encode(encode_token(op_))
| LeftStateField::encode(left_state_)
| RightStateField::encode(fixed_right_arg_.has_value
? NONE : right_state_)
| ResultStateField::encode(result_state_)
| HasFixedRightArgBits::encode(fixed_right_arg_.has_value)
| FixedRightArgValueBits::encode(fixed_right_arg_.has_value
? encode_arg_value(
fixed_right_arg_.value)
: 0)
| SSE2Field::encode(sse_field)
| OverwriteModeField::encode(mode_);
}
int fixed_right_arg_value() const {
return decode_arg_value(encoded_right_arg_.value);
bool CanReuseDoubleBox() {
return result_state_ <= NUMBER && result_state_ > SMI &&
((left_state_ > SMI && left_state_ <= NUMBER &&
mode_ == OVERWRITE_LEFT) ||
(right_state_ > SMI && right_state_ <= NUMBER &&
mode_ == OVERWRITE_RIGHT));
}
static bool can_encode_arg_value(int32_t value) {
return value > 0 &&
IsPowerOf2(value) &&
FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
bool HasSideEffects(Isolate* isolate) const {
Handle<Type> left = GetLeftType(isolate);
Handle<Type> right = GetRightType(isolate);
return left->Maybe(Type::Receiver()) || right->Maybe(Type::Receiver());
}
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
virtual Handle<Code> GenerateCode(Isolate* isolate);
private:
Token::Value op_;
OverwriteMode mode_;
bool platform_specific_bit_; // Indicates SSE3 on IA32.
Maybe<Handle<Object> > Result(Handle<Object> left,
Handle<Object> right,
Isolate* isolate);
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo left_type_;
BinaryOpIC::TypeInfo right_type_;
BinaryOpIC::TypeInfo result_type_;
Token::Value operation() const { return op_; }
OverwriteMode mode() const { return mode_; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
Maybe<int> encoded_right_arg_;
Handle<Type> GetLeftType(Isolate* isolate) const;
Handle<Type> GetRightType(Isolate* isolate) const;
Handle<Type> GetResultType(Isolate* isolate) const;
static int encode_arg_value(int32_t value) {
ASSERT(can_encode_arg_value(value));
return WhichPowerOf2(value);
}
void UpdateStatus(Handle<Object> left,
Handle<Object> right,
Maybe<Handle<Object> > result);
static int32_t decode_arg_value(int value) {
return 1 << value;
void PrintState(StringStream* stream);
private:
explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state),
op_(Token::ADD),
mode_(NO_OVERWRITE) {
Initialize();
}
void Initialize();
virtual void PrintName(StringStream* stream);
enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
// We truncate the last bit of the token.
STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5));
class LeftStateField: public BitField<State, 0, 3> {};
// When fixed right arg is set, we don't need to store the right state.
// Thus the two fields can overlap.
class HasFixedRightArgBits: public BitField<bool, 4, 1> {};
class FixedRightArgValueBits: public BitField<int, 5, 4> {};
class RightStateField: public BitField<State, 5, 3> {};
class ResultStateField: public BitField<State, 9, 3> {};
class SSE2Field: public BitField<bool, 12, 1> {};
class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {};
class OpBits: public BitField<int, 15, 5> {};
virtual CodeStub::Major MajorKey() { return BinaryOp; }
virtual int NotMissMinorKey() { return GetExtraICState(); }
// Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
// Note: We actually do not need 7 bits for the operation, just 4 bits to
// encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class PlatformSpecificBits: public BitField<bool, 9, 1> {};
class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
class FixedRightArgValueBits: public BitField<int, 20, 5> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| PlatformSpecificBits::encode(platform_specific_bit_)
| LeftTypeBits::encode(left_type_)
| RightTypeBits::encode(right_type_)
| ResultTypeBits::encode(result_type_)
| HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
| FixedRightArgValueBits::encode(encoded_right_arg_.value);
}
static Handle<Type> StateToType(State state,
Isolate* isolate);
static void Generate(Token::Value op,
State left,
int right,
State result,
OverwriteMode mode,
Isolate* isolate);
// Platform-independent implementation.
void Generate(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
static void Generate(Token::Value op,
State left,
State right,
State result,
OverwriteMode mode,
Isolate* isolate);
// Platform-independent signature, platform-specific implementation.
void Initialize();
void GenerateAddStrings(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateNumberStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
// Entirely platform-specific methods are defined as static helper
// functions in the <arch>/code-stubs-<arch>.cc files.
void UpdateStatus(Handle<Object> object,
State* state);
virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
bool can_encode_arg_value(int32_t value) const;
int encode_arg_value(int32_t value) const;
int32_t decode_arg_value(int value) const;
int encode_token(Token::Value op) const;
Token::Value decode_token(int op) const;
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(Max(left_type_, right_type_));
bool has_int_result() const {
return op_ == Token::BIT_XOR || op_ == Token::BIT_AND ||
op_ == Token::BIT_OR || op_ == Token::SAR || op_ == Token::SHL;
}
virtual void FinishCode(Handle<Code> code) {
code->set_stub_info(MinorKey());
}
const char* StateToName(State state);
void PrintBaseName(StringStream* stream);
friend class CodeGenerator;
Token::Value op_;
OverwriteMode mode_;
Maybe<int> fixed_right_arg_;
State left_state_;
State right_state_;
State result_state_;
};
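The key detail in the new extra-IC-state layout is the comment above: when fixed_right_arg_ is set, the right operand's state is implied (SMI or INT32), so RightStateField and FixedRightArgValueBits may occupy the same bit positions, and the argument itself is stored as WhichPowerOf2(value) (see encode_arg_value/decode_arg_value in the code-stubs.cc hunk earlier). A standalone sketch of the trick, with a hand-rolled BitField assumed to match the semantics of V8's template:

#include <cassert>
#include <cstdint>

// Hand-rolled equivalent of V8's BitField<T, shift, size>.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

typedef BitField<bool, 4, 1>  HasFixedRightArgBits;
typedef BitField<int, 5, 4>   FixedRightArgValueBits;  // bits 5..8
typedef BitField<State, 5, 3> RightStateField;         // bits 5..7, overlaps

int main() {
  // Fixed right argument present: bits 5..8 hold WhichPowerOf2(value),
  // so a fixed operand of 8 is stored as 3 and decoded as 1 << 3.
  uint32_t key = HasFixedRightArgBits::encode(true) |
                 FixedRightArgValueBits::encode(3);
  assert(HasFixedRightArgBits::decode(key));
  assert((1 << FixedRightArgValueBits::decode(key)) == 8);

  // No fixed argument: the very same bits carry the right operand's state.
  key = HasFixedRightArgBits::encode(false) | RightStateField::encode(NUMBER);
  assert(RightStateField::decode(key) == NUMBER);
  return 0;
}

The power-of-two restriction is what makes the overlap affordable: four bits cover fixed operands from 1 to 1 << 15, which is enough for the modulus-style uses the stub cares about.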
@ -1318,6 +1354,11 @@ class CEntryStub : public PlatformCodeStub {
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
protected:
virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
};
private:
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
@ -1705,7 +1746,9 @@ class DoubleToIStub : public PlatformCodeStub {
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath);
SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
Register source() {
@ -1734,6 +1777,11 @@ class DoubleToIStub : public PlatformCodeStub {
virtual bool SometimesSetsUpAFrame() { return false; }
protected:
virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
}
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
@ -1748,6 +1796,8 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
class SSEBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
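The new SSEBits field packs the snapshot-safe SSE level into two bits via the nested conditional above: 0 when SSE2 is unavailable, 1 for SSE2 only, 2 when SSE3 is also safe. A toy encoder making the three levels explicit (stand-in booleans; V8 queries CpuFeatures::IsSafeForSnapshot):

#include <cassert>

// Mirrors the encoder in the minor-key hunk: 2 if SSE3 is snapshot-safe,
// else 1 if SSE2 is, else 0.
int EncodeSSELevel(bool sse2, bool sse3) {
  return sse2 ? (sse3 ? 2 : 1) : 0;
}

int main() {
  assert(EncodeSSELevel(false, false) == 0);  // no SSE2: level 0
  assert(EncodeSSELevel(true, false) == 1);   // SSE2 only: level 1
  assert(EncodeSSELevel(true, true) == 2);    // SSE3 implies SSE2: level 2
  return 0;
}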

14
deps/v8/src/codegen.cc

@ -113,10 +113,12 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
masm->GetCode(&desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
false, is_crankshafted);
false, is_crankshafted,
info->prologue_offset());
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
code->set_prologue_offset(info->prologue_offset());
isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
code->instruction_size());
return code;
}
@ -132,7 +134,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::FUNCTION;
if (print_source) {
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
@ -160,12 +164,16 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
} else {
PrintF("--- Code ---\n");
}
if (print_source) {
PrintF("source_position = %d\n", function->start_position());
}
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
code->Disassemble(CodeStub::MajorName(major_key, false));
} else {
code->Disassemble(*function->debug_name()->ToCString());
}
PrintF("--- End code ---\n");
}
#endif // ENABLE_DISASSEMBLER
}

252
deps/v8/src/compiler.cc

@ -112,7 +112,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
prologue_offset_ = Code::kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
@ -123,7 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
mode_ = STUB;
return;
}
mode_ = isolate->use_crankshaft() ? mode : NONOPT;
mode_ = mode;
abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
@ -260,7 +260,7 @@ static bool AlwaysFullCompiler(Isolate* isolate) {
}
void OptimizingCompiler::RecordOptimizationStats() {
void RecompileJob::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
@ -297,23 +297,60 @@ void OptimizingCompiler::RecordOptimizationStats() {
// A return value of true indicates the compilation pipeline is still
// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
OptimizingCompiler compiler(info);
OptimizingCompiler::Status status = compiler.CreateGraph();
RecompileJob job(info);
RecompileJob::Status status = job.CreateGraph();
if (status != OptimizingCompiler::SUCCEEDED) {
return status != OptimizingCompiler::FAILED;
if (status != RecompileJob::SUCCEEDED) {
return status != RecompileJob::FAILED;
}
status = compiler.OptimizeGraph();
if (status != OptimizingCompiler::SUCCEEDED) {
status = compiler.AbortOptimization();
return status != OptimizingCompiler::FAILED;
status = job.OptimizeGraph();
if (status != RecompileJob::SUCCEEDED) {
status = job.AbortOptimization();
return status != RecompileJob::FAILED;
}
status = compiler.GenerateAndInstallCode();
return status != OptimizingCompiler::FAILED;
status = job.GenerateAndInstallCode();
return status != RecompileJob::FAILED;
}
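MakeCrankshaftCode drives the renamed RecompileJob through its three phases, and the comment above it is easy to misread: a true return only means the pipeline did not hard-fail, so a bail-out (falling back to full codegen) still returns true. A toy model of the status protocol, with placeholder phase bodies rather than V8's logic:

#include <cassert>

enum Status { FAILED, BAILED_OUT, SUCCEEDED };

struct ToyJob {
  // Placeholder phases; the real ones build, optimize and emit a graph.
  Status CreateGraph() { return SUCCEEDED; }
  Status OptimizeGraph() { return BAILED_OUT; }  // e.g. unsupported construct
  Status AbortOptimization() { return BAILED_OUT; }
  Status GenerateAndInstallCode() { return SUCCEEDED; }
};

// Same control flow as MakeCrankshaftCode: any non-FAILED status keeps the
// pipeline "still going", even when optimization itself bailed out.
bool MakeOptimizedCode(ToyJob* job) {
  Status status = job->CreateGraph();
  if (status != SUCCEEDED) return status != FAILED;
  status = job->OptimizeGraph();
  if (status != SUCCEEDED) {
    status = job->AbortOptimization();
    return status != FAILED;
  }
  return job->GenerateAndInstallCode() != FAILED;
}

int main() {
  ToyJob job;
  assert(MakeOptimizedCode(&job));  // bailed out, but pipeline continues
  return 0;
}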
OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
class HOptimizedGraphBuilderWithPotisions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPotisions(CompilationInfo* info)
: HOptimizedGraphBuilder(info) {
}
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) V8_OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
}
EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) V8_OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
}
STATEMENT_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) V8_OVERRIDE { \
HOptimizedGraphBuilder::Visit##type(node); \
}
MODULE_NODE_LIST(DEF_VISIT)
DECLARATION_NODE_LIST(DEF_VISIT)
AUXILIARY_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};
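The new builder subclass overrides every expression and statement visitor through the DEF_VISIT / *_NODE_LIST X-macro idiom, recording each node's source position before delegating to the base builder. A self-contained miniature of the idiom with a two-entry toy node list (V8's real lists enumerate dozens of AST types):

#include <cstdio>

#define TOY_NODE_LIST(V) \
  V(Literal)             \
  V(Assignment)

struct Literal { int position() const { return 1; } };
struct Assignment { int position() const { return 2; } };

struct BaseBuilder {
  // One virtual per list entry, generated by applying the macro argument.
#define DECLARE_VISIT(type) \
  virtual void Visit##type(type* node) { /* build graph for node */ }
  TOY_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
  virtual ~BaseBuilder() {}
};

struct BuilderWithPositions : BaseBuilder {
  // Each override records the position first, then delegates: the same
  // shape as the DEF_VISIT blocks in the hunk above.
#define DEF_VISIT(type)                                 \
  virtual void Visit##type(type* node) {                \
    std::printf("at position %d\n", node->position());  \
    BaseBuilder::Visit##type(node);                     \
  }
  TOY_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};

int main() {
  BuilderWithPositions builder;
  Literal lit;
  builder.VisitLiteral(&lit);  // prints "at position 1"
  return 0;
}

Keeping the list in one macro means adding an AST node type updates every visitor in the codebase from a single place, which is why the real change can guard only expressions and statements with the position check while modules and declarations delegate unchanged.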
RecompileJob::Status RecompileJob::CreateGraph() {
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
@ -419,7 +456,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());
graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
graph_builder_ = FLAG_emit_opt_code_positions
? new(info()->zone()) HOptimizedGraphBuilderWithPotisions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@ -452,7 +491,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
RecompileJob::Status RecompileJob::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@ -475,7 +514,7 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
}
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
@ -555,6 +594,33 @@ static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
}
// Sets the expected number of properties based on estimate from compiler.
void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
int estimate) {
// See the comment in SetExpectedNofProperties.
if (shared->live_objects_may_exist()) return;
// If no properties are added in the constructor, they are more likely
// to be added later.
if (estimate == 0) estimate = 2;
// TODO(yangguo): check whether those heuristics are still up-to-date.
// We do not shrink objects that go into a snapshot (yet), so we adjust
// the estimate conservatively.
if (Serializer::enabled()) {
estimate += 2;
} else if (FLAG_clever_optimizations) {
// Inobject slack tracking will reclaim redundant inobject space later,
// so we can afford to adjust the estimate generously.
estimate += 8;
} else {
estimate += 3;
}
shared->set_expected_nof_properties(estimate);
}
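To make the adjustment above concrete, here is a sketch of just its arithmetic, with the flags reduced to booleans (FLAG_clever_optimizations is assumed on, as it was by default):

#include <cassert>

// Mirrors only the estimate adjustment in
// SetExpectedNofPropertiesFromEstimate.
int AdjustEstimate(int estimate, bool serializer_enabled,
                   bool clever_optimizations) {
  if (estimate == 0) estimate = 2;  // properties likely added later
  if (serializer_enabled) return estimate + 2;    // snapshot: conservative
  if (clever_optimizations) return estimate + 8;  // slack tracking reclaims
  return estimate + 3;
}

int main() {
  // A constructor that adds no properties, outside a snapshot, with
  // clever optimizations: 0 -> 2 -> 10 expected in-object slots.
  assert(AdjustEstimate(0, false, true) == 10);
  // Inside a snapshot the serializer branch wins over the flag.
  assert(AdjustEstimate(5, true, true) == 7);
  return 0;
}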
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
@ -599,66 +665,70 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
}
}
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
HistogramTimer* rate = info->is_eval()
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);
// Compile the code.
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
Handle<SharedFunctionInfo> result;
{
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
HistogramTimer* rate = info->is_eval()
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);
// Allocate function.
ASSERT(!info->code().is_null());
Handle<SharedFunctionInfo> result =
isolate->factory()->NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
lit->is_generator(),
info->code(),
ScopeInfo::Create(info->scope(), info->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
if (script->name()->IsString()) {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code(),
info));
} else {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
// Compile the code.
if (!MakeCode(info)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
ASSERT(!info->code().is_null());
result =
isolate->factory()->NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
lit->is_generator(),
info->code(),
ScopeInfo::Create(info->scope(), info->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
if (script->name()->IsString()) {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code(),
info));
} else {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(result,
lit->expected_property_count());
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
@ -1032,16 +1102,15 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
info->SaveHandles();
if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
OptimizingCompiler* compiler =
new(info->zone()) OptimizingCompiler(*info);
OptimizingCompiler::Status status = compiler->CreateGraph();
if (status == OptimizingCompiler::SUCCEEDED) {
RecompileJob* job = new(info->zone()) RecompileJob(*info);
RecompileJob::Status status = job->CreateGraph();
if (status == RecompileJob::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
isolate->optimizing_compiler_thread()->QueueForOptimization(job);
ASSERT(!isolate->has_pending_exception());
return true;
} else if (status == OptimizingCompiler::BAILED_OUT) {
} else if (status == RecompileJob::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
@ -1054,9 +1123,8 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
}
Handle<Code> Compiler::InstallOptimizedCode(
OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
SmartPointer<CompilationInfo> info(job->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
@ -1077,24 +1145,24 @@ Handle<Code> Compiler::InstallOptimizedCode(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
RecompileJob::Status status = job->last_status();
if (info->HasAbortedDueToDependencyChange()) {
info->set_bailout_reason(kBailedOutDueToDependencyChange);
status = optimizing_compiler->AbortOptimization();
} else if (status != OptimizingCompiler::SUCCEEDED) {
status = job->AbortOptimization();
} else if (status != RecompileJob::SUCCEEDED) {
info->set_bailout_reason(kFailedBailedOutLastTime);
status = optimizing_compiler->AbortOptimization();
status = job->AbortOptimization();
} else if (isolate->DebuggerHasBreakPoints()) {
info->set_bailout_reason(kDebuggerIsActive);
status = optimizing_compiler->AbortOptimization();
status = job->AbortOptimization();
} else {
status = optimizing_compiler->GenerateAndInstallCode();
ASSERT(status == OptimizingCompiler::SUCCEEDED ||
status == OptimizingCompiler::BAILED_OUT);
status = job->GenerateAndInstallCode();
ASSERT(status == RecompileJob::SUCCEEDED ||
status == RecompileJob::BAILED_OUT);
}
InstallCodeCommon(*info);
if (status == OptimizingCompiler::SUCCEEDED) {
if (status == RecompileJob::SUCCEEDED) {
Handle<Code> code = info->code();
ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
@ -1115,8 +1183,8 @@ Handle<Code> Compiler::InstallOptimizedCode(
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
ASSERT(!info->closure()->IsInRecompileQueue());
return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
: Handle<Code>::null();
return (status == RecompileJob::SUCCEEDED) ? info->code()
: Handle<Code>::null();
}

31
deps/v8/src/compiler.h

@ -35,8 +35,6 @@
namespace v8 {
namespace internal {
static const int kPrologueOffsetNotSet = -1;
class ScriptDataImpl;
class HydrogenCodeStub;
@ -86,6 +84,7 @@ class CompilationInfo {
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
uint32_t osr_pc_offset() const { return osr_pc_offset_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
@ -268,12 +267,12 @@ class CompilationInfo {
void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
int prologue_offset() const {
ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
return prologue_offset_;
}
void set_prologue_offset(int prologue_offset) {
ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
prologue_offset_ = prologue_offset;
}
@ -505,14 +504,15 @@ class LChunk;
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
class OptimizingCompiler: public ZoneObject {
class RecompileJob: public ZoneObject {
public:
explicit OptimizingCompiler(CompilationInfo* info)
explicit RecompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
last_status_(FAILED) { }
last_status_(FAILED),
awaiting_install_(false) { }
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
@ -532,6 +532,13 @@ class OptimizingCompiler: public ZoneObject {
return SetLastStatus(BAILED_OUT);
}
void WaitForInstall() {
ASSERT(info_->is_osr());
awaiting_install_ = true;
}
bool IsWaitingForInstall() { return awaiting_install_; }
private:
CompilationInfo* info_;
HOptimizedGraphBuilder* graph_builder_;
@ -541,6 +548,7 @@ class OptimizingCompiler: public ZoneObject {
TimeDelta time_taken_to_optimize_;
TimeDelta time_taken_to_codegen_;
Status last_status_;
bool awaiting_install_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
@ -549,9 +557,8 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
Timer(OptimizingCompiler* compiler, TimeDelta* location)
: compiler_(compiler),
location_(location) {
Timer(RecompileJob* job, TimeDelta* location)
: job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
}
@ -560,7 +567,7 @@ class OptimizingCompiler: public ZoneObject {
*location_ += timer_.Elapsed();
}
OptimizingCompiler* compiler_;
RecompileJob* job_;
ElapsedTimer timer_;
TimeDelta* location_;
};
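Timer is a small RAII helper: it starts on construction and adds the elapsed span to the given TimeDelta slot on destruction, so each phase of a RecompileJob is timed just by declaring one in scope. A freestanding equivalent using std::chrono (semantics only; V8 uses its own ElapsedTimer and TimeDelta types):

#include <chrono>
#include <cstdio>
#include <thread>

struct ScopedTimer {
  std::chrono::steady_clock::duration* location_;
  std::chrono::steady_clock::time_point start_;

  explicit ScopedTimer(std::chrono::steady_clock::duration* location)
      : location_(location), start_(std::chrono::steady_clock::now()) {}

  // Accumulates (+=) rather than overwrites, matching Timer::~Timer, so
  // repeated runs of a phase sum into one per-phase total.
  ~ScopedTimer() { *location_ += std::chrono::steady_clock::now() - start_; }
};

int main() {
  std::chrono::steady_clock::duration time_taken_to_optimize{};
  for (int i = 0; i < 2; ++i) {
    ScopedTimer timer(&time_taken_to_optimize);
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  long long us = std::chrono::duration_cast<std::chrono::microseconds>(
      time_taken_to_optimize).count();
  std::printf("optimize phases took %lld us total\n", us);
  return 0;
}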
@ -625,7 +632,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
static Handle<Code> InstallOptimizedCode(RecompileJob* job);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);

2
deps/v8/src/contexts.cc

@ -259,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
#ifdef DEBUG
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {

4
deps/v8/src/conversions-inl.h

@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
ASSERT(buffer_pos < kBufferSize);
SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@ -692,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent--;
}
ASSERT(buffer_pos < kBufferSize);
SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);

16
deps/v8/src/conversions.cc

@ -31,6 +31,7 @@
#include "conversions-inl.h"
#include "dtoa.h"
#include "list-inl.h"
#include "strtod.h"
#include "utils.h"
@ -45,8 +46,11 @@ namespace internal {
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);
return InternalStringToDouble(unicode_cache, str, end, flags,
// We cast to const uint8_t* here to avoid instantiating the
// InternalStringToDouble() template for const char* as well.
const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
const uint8_t* end = start + StrLength(str);
return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
@ -55,11 +59,15 @@ double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val) {
const char* end = str.start() + str.length();
return InternalStringToDouble(unicode_cache, str.start(), end, flags,
// We cast to const uint8_t* here to avoid instantiating the
// InternalStringToDouble() template for const char* as well.
const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
const uint8_t* end = start + str.length();
return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
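Both overloads now widen const char* to const uint8_t* before calling into the template, exactly as the comments say: with the cast at each call site, InternalStringToDouble<const uint8_t*> is the only instantiation the compiler emits for 8-bit input, instead of a char* and a uint8_t* copy of the same code. A reduced illustration of the trick (toy parser; the names are made up):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Stands in for InternalStringToDouble: one instantiation per iterator type.
template <class Iterator>
double ParseDigits(Iterator current, Iterator end) {
  double value = 0;
  for (; current != end; ++current) value = value * 10 + (*current - '0');
  return value;
}

double StringToDouble(const char* str) {
  // Casting here means char* callers share the ParseDigits<const uint8_t*>
  // instantiation with uint8_t* callers instead of producing a second one.
  const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
  return ParseDigits(start, start + std::strlen(str));
}

int main() {
  std::printf("%f\n", StringToDouble("42"));  // prints 42.000000
  return 0;
}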
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,

33
deps/v8/src/counters.h

@ -259,22 +259,51 @@ class HistogramTimer : public Histogram {
return Enabled() && timer_.IsStarted();
}
// TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
#ifdef DEBUG
ElapsedTimer* timer() { return &timer_; }
#endif
private:
ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
// TODO(bmeurer): The ifdeffery is an ugly hack around the fact that the
// Parser is currently reentrant (when it throws an error, we call back
// into JavaScript and all bets are off), but ElapsedTimer is not
// reentry-safe. Fix this properly and remove |allow_nesting|.
class HistogramTimerScope BASE_EMBEDDED {
public:
explicit HistogramTimerScope(HistogramTimer* timer) :
timer_(timer) {
explicit HistogramTimerScope(HistogramTimer* timer,
bool allow_nesting = false)
#ifdef DEBUG
: timer_(timer),
skipped_timer_start_(false) {
if (timer_->timer()->IsStarted() && allow_nesting) {
skipped_timer_start_ = true;
} else {
timer_->Start();
}
#else
: timer_(timer) {
timer_->Start();
#endif
}
~HistogramTimerScope() {
#ifdef DEBUG
if (!skipped_timer_start_) {
timer_->Stop();
}
#else
timer_->Stop();
#endif
}
private:
HistogramTimer* timer_;
#ifdef DEBUG
bool skipped_timer_start_;
#endif
};
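The DEBUG-only bookkeeping above exists because, per the TODO, the parser can re-enter itself through JavaScript error handling while an ElapsedTimer is running, and ElapsedTimer asserts against double starts. Passing allow_nesting = true lets an inner scope skip Start/Stop instead of tripping those assertions. A runnable toy reproducing the guard:

#include <cassert>

// Toy stand-ins for ElapsedTimer/HistogramTimer, just to show the guard.
struct ToyTimer {
  bool started_;
  ToyTimer() : started_(false) {}
  void Start() { assert(!started_); started_ = true; }
  void Stop() { assert(started_); started_ = false; }
  bool IsStarted() const { return started_; }
};

struct ToyTimerScope {
  ToyTimer* timer_;
  bool skipped_timer_start_;
  explicit ToyTimerScope(ToyTimer* timer, bool allow_nesting = false)
      : timer_(timer), skipped_timer_start_(false) {
    if (timer_->IsStarted() && allow_nesting) {
      skipped_timer_start_ = true;  // reentrant: leave outer timing running
    } else {
      timer_->Start();
    }
  }
  ~ToyTimerScope() {
    if (!skipped_timer_start_) timer_->Stop();
  }
};

int main() {
  ToyTimer parse_timer;
  ToyTimerScope outer(&parse_timer, true);
  {
    // Simulates the parser re-entering itself: without allow_nesting this
    // inner Start() would assert, which is the bug the TODO describes.
    ToyTimerScope inner(&parse_timer, true);
    assert(inner.skipped_timer_start_);
  }
  assert(parse_timer.IsStarted());  // outer scope still owns the timing
  return 0;
}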

30
deps/v8/src/cpu-profiler.cc

@ -64,14 +64,15 @@ void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
TickSampleEventRecord record(last_code_event_id_);
TickSample* sample = &record.sample;
sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
for (StackTraceFrameIterator it(isolate);
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
sample->stack[sample->frames_count++] = it.frame()->pc();
RegisterState regs;
StackFrameIterator it(isolate);
if (!it.done()) {
StackFrame* frame = it.frame();
regs.sp = frame->sp();
regs.fp = frame->fp();
regs.pc = frame->pc();
}
record.sample.Init(isolate, regs);
ticks_from_vm_buffer_.Enqueue(record);
}
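AddCurrentStack no longer walks the stack itself; it captures the innermost frame's sp/fp/pc in a RegisterState and defers the unwinding to TickSample::Init, the same path interrupt-driven samples take. A toy sketch of just that capture step (stand-in types; the real ones live in V8's sampler and frames headers):

#include <cstdio>

struct RegisterState {
  void* pc; void* sp; void* fp;
  RegisterState() : pc(0), sp(0), fp(0) {}
};
struct Frame { void* pc_; void* sp_; void* fp_; };

// Seed the sampler's register state from the innermost frame, mirroring the
// new AddCurrentStack body; unwinding happens later in the sample init.
RegisterState CaptureTopFrame(const Frame* top) {
  RegisterState regs;
  if (top != 0) {  // "!it.done()" in the original
    regs.sp = top->sp_;
    regs.fp = top->fp_;
    regs.pc = top->pc_;
  }
  return regs;
}

int main() {
  Frame top = { (void*)0x1, (void*)0x2, (void*)0x3 };
  RegisterState regs = CaptureTopFrame(&top);
  std::printf("pc=%p sp=%p fp=%p\n", regs.pc, regs.sp, regs.fp);
  return 0;
}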
@ -260,7 +261,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source, int line) {
Name* source, int line, int column) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@ -270,7 +271,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
profiles_->GetFunctionName(shared->DebugName()),
CodeEntry::kEmptyNamePrefix,
profiles_->GetName(source),
line);
line,
column);
if (info) {
rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
}
@ -435,8 +437,18 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
#if V8_CC_MSVC && (_MSC_VER >= 1800)
// VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor'
// : object allocated on the heap may not be aligned 64". We need to
// figure out if this is a legitimate warning or a compiler bug.
#pragma warning(push)
#pragma warning(disable:4316)
#endif
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
#if V8_CC_MSVC && (_MSC_VER >= 1800)
#pragma warning(pop)
#endif
is_profiling_ = true;
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());

2
deps/v8/src/cpu-profiler.h

@ -238,7 +238,7 @@ class CpuProfiler : public CodeEventListener {
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source, int line);
Name* source, int line, int column);
virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
virtual void CodeMovingGCEvent() {}

2
deps/v8/src/d8-debug.cc

@ -30,8 +30,6 @@
#include "d8.h"
#include "d8-debug.h"
#include "debug-agent.h"
#include "platform.h"
#include "platform/socket.h"
namespace v8 {

51
deps/v8/src/d8-posix.cc

@ -245,7 +245,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[3]->IsNumber()) {
*total_timeout = args[3]->Int32Value();
} else {
ThrowException(String::New("system: Argument 4 must be a number"));
args.GetIsolate()->ThrowException(
String::New("system: Argument 4 must be a number"));
return false;
}
}
@ -253,7 +254,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[2]->IsNumber()) {
*read_timeout = args[2]->Int32Value();
} else {
ThrowException(String::New("system: Argument 3 must be a number"));
args.GetIsolate()->ThrowException(
String::New("system: Argument 3 must be a number"));
return false;
}
}
@ -456,7 +458,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
ThrowException(String::New("system: Argument 2 must be an array"));
args.GetIsolate()->ThrowException(
String::New("system: Argument 2 must be an array"));
return;
}
command_args = Handle<Array>::Cast(args[1]);
@ -464,11 +467,13 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
command_args = Array::New(0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
ThrowException(String::New("Too many arguments to system()"));
args.GetIsolate()->ThrowException(
String::New("Too many arguments to system()"));
return;
}
if (args.Length() < 1) {
ThrowException(String::New("Too few arguments to system()"));
args.GetIsolate()->ThrowException(
String::New("Too few arguments to system()"));
return;
}
@ -483,11 +488,13 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
int stdout_fds[2];
if (pipe(exec_error_fds) != 0) {
ThrowException(String::New("pipe syscall failed."));
args.GetIsolate()->ThrowException(
String::New("pipe syscall failed."));
return;
}
if (pipe(stdout_fds) != 0) {
ThrowException(String::New("pipe syscall failed."));
args.GetIsolate()->ThrowException(
String::New("pipe syscall failed."));
return;
}
@ -531,17 +538,17 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
if (chdir(*directory) != 0) {
ThrowException(String::New(strerror(errno)));
args.GetIsolate()->ThrowException(String::New(strerror(errno)));
return;
}
}
@ -550,7 +557,7 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
if (args[0]->IsNumber()) {
@ -560,7 +567,7 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
} else {
const char* message = "umask() argument must be numeric";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
}
@ -616,18 +623,18 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mask = args[1]->Int32Value();
} else {
const char* message = "mkdirp() second argument must be numeric";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
mkdirp(*directory, mask);
@ -637,13 +644,13 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
rmdir(*directory);
@ -653,7 +660,7 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value var(args[0]);
@ -661,13 +668,13 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
setenv(*var, *value, 1);
@ -677,14 +684,14 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
ThrowException(String::New(message));
args.GetIsolate()->ThrowException(String::New(message));
return;
}
unsetenv(*var);

5
deps/v8/src/d8-readline.cc

@ -150,7 +150,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
static Persistent<Array> current_completions;
Isolate* isolate = read_line_editor.isolate_;
Locker lock(isolate);
HandleScope scope;
HandleScope scope(isolate);
Handle<Array> completions;
if (state == 0) {
Local<String> full_text = String::New(rl_line_buffer, rl_point);
@ -167,8 +167,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
String::Utf8Value str(str_obj);
return strdup(*str);
} else {
current_completions.Dispose(isolate);
current_completions.Clear();
current_completions.Reset();
return NULL;
}
}

46
deps/v8/src/d8.cc

@ -49,6 +49,7 @@
#endif // !V8_SHARED
#ifdef V8_SHARED
#include "../include/v8-defaults.h"
#include "../include/v8-testing.h"
#endif // V8_SHARED
@ -66,6 +67,7 @@
#include "natives.h"
#include "platform.h"
#include "v8.h"
#include "v8-defaults.h"
#endif // V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
@ -158,6 +160,7 @@ i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
i::Mutex Shell::context_mutex_;
const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow();
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
@ -263,7 +266,8 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
data_->realms_ = new Persistent<Context>[1];
data_->realms_[0].Reset(data_->isolate_, Context::GetEntered());
data_->realms_[0].Reset(data_->isolate_,
data_->isolate_->GetEnteredContext());
data_->realm_shared_.Clear();
}
@ -286,11 +290,20 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
}
#ifndef V8_SHARED
// performance.now() returns a time stamp as double, measured in milliseconds.
void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
args.GetReturnValue().Set(delta.InMillisecondsF());
}
#endif // V8_SHARED
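The shell's new performance.now() subtracts a fixed start tick from a monotonic high-resolution clock and returns fractional milliseconds, so timings are immune to wall-clock adjustments. A freestanding equivalent with std::chrono (semantics only; d8 uses i::TimeTicks::HighResolutionNow() and i::TimeDelta):

#include <chrono>
#include <cstdio>

// Process-wide origin, like Shell::kInitialTicks above.
static const std::chrono::steady_clock::time_point kInitialTicks =
    std::chrono::steady_clock::now();

// Milliseconds since startup as a double, like the shell's performance.now().
double PerformanceNow() {
  std::chrono::duration<double, std::milli> delta =
      std::chrono::steady_clock::now() - kInitialTicks;
  return delta.count();
}

int main() {
  double t0 = PerformanceNow();
  for (volatile int i = 0; i < 1000000; ++i) {}  // busy work to measure
  std::printf("took %.3f ms\n", PerformanceNow() - t0);
  return 0;
}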
// Realm.current() returns the index of the currently active realm.
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
int index = data->RealmFind(Context::GetEntered());
int index = data->RealmFind(isolate->GetEnteredContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@ -869,6 +882,13 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
RealmSharedGet, RealmSharedSet);
global_template->Set(String::New("Realm"), realm_template);
#ifndef V8_SHARED
Handle<ObjectTemplate> performance_template = ObjectTemplate::New();
performance_template->Set(String::New("now"),
FunctionTemplate::New(PerformanceNow));
global_template->Set(String::New("performance"), performance_template);
#endif // V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
AddOSMethods(os_templ);
@ -939,8 +959,8 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
factory->NewFixedArray(js_args.argc());
for (int j = 0; j < js_args.argc(); j++) {
factory->NewFixedArray(js_args.argc);
for (int j = 0; j < js_args.argc; j++) {
i::Handle<i::String> arg =
factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
@ -1228,6 +1248,7 @@ SourceGroup::~SourceGroup() {
void SourceGroup::Execute(Isolate* isolate) {
bool exception_was_thrown = false;
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@ -1236,7 +1257,8 @@ void SourceGroup::Execute(Isolate* isolate) {
Handle<String> file_name = String::New("unnamed");
Handle<String> source = String::New(argv_[i + 1]);
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
Shell::Exit(1);
exception_was_thrown = true;
break;
}
++i;
} else if (arg[0] == '-') {
@ -1251,10 +1273,14 @@ void SourceGroup::Execute(Isolate* isolate) {
Shell::Exit(1);
}
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
Shell::Exit(1);
exception_was_thrown = true;
break;
}
}
}
if (exception_was_thrown != Shell::options.expected_to_throw) {
Shell::Exit(1);
}
}
@ -1410,6 +1436,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.dump_heap_constants = true;
argv[i] = NULL;
#endif
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = NULL;
}
#ifdef V8_SHARED
else if (strcmp(argv[i], "--dump-counters") == 0) {
@ -1525,7 +1554,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
// Start preemption if threads have been created and preemption is enabled.
if (threads.length() > 0
&& options.use_preemption) {
Locker::StartPreemption(options.preemption_interval);
Locker::StartPreemption(isolate, options.preemption_interval);
}
#endif // V8_SHARED
}
@ -1543,7 +1572,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
if (threads.length() > 0 && options.use_preemption) {
Locker lock(isolate);
Locker::StopPreemption();
Locker::StopPreemption(isolate);
}
#endif // V8_SHARED
return 0;
@ -1648,6 +1677,7 @@ int Shell::Main(int argc, char* argv[]) {
#else
SetStandaloneFlagsViaCommandLine();
#endif
v8::SetDefaultResourceConstraintsForCurrentPlatform();
ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
int result = 0;

8
deps/v8/src/d8.gyp

@ -31,7 +31,7 @@
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 0,
'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
@ -81,13 +81,13 @@
}],
['v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icudata',
'<(icu_gyp_path):icudata',
],
}],
],

5
deps/v8/src/d8.h

@ -232,6 +232,7 @@ class ShellOptions {
interactive_shell(false),
test_shell(false),
dump_heap_constants(false),
expected_to_throw(false),
num_isolates(1),
isolate_sources(NULL) { }
@ -256,6 +257,7 @@ class ShellOptions {
bool interactive_shell;
bool test_shell;
bool dump_heap_constants;
bool expected_to_throw;
int num_isolates;
SourceGroup* isolate_sources;
};
@ -300,6 +302,8 @@ class Shell : public i::AllStatic {
Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
#endif // V8_SHARED
static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
@ -391,6 +395,7 @@ class Shell : public i::AllStatic {
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
static i::Mutex context_mutex_;
static const i::TimeTicks kInitialTicks;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);

14
deps/v8/src/d8.js

@ -40,7 +40,7 @@ function log10(num) {
function ToInspectableObject(obj) {
if (!obj && typeof obj === 'object') {
return void 0;
return UNDEFINED;
} else {
return Object(obj);
}
@ -333,7 +333,7 @@ function DebugRequest(cmd_line) {
}
if ((cmd === undefined) || !cmd) {
this.request_ = void 0;
this.request_ = UNDEFINED;
return;
}
@ -492,7 +492,7 @@ function DebugRequest(cmd_line) {
case 'trace':
case 'tr':
// Return undefined to indicate command handled internally (no JSON).
this.request_ = void 0;
this.request_ = UNDEFINED;
this.traceCommand_(args);
break;
@ -500,7 +500,7 @@ function DebugRequest(cmd_line) {
case '?':
this.helpCommand_(args);
// Return undefined to indicate command handled internally (no JSON).
this.request_ = void 0;
this.request_ = UNDEFINED;
break;
default:
@ -2124,7 +2124,7 @@ function SimpleObjectToJSON_(object) {
var property_value_json;
switch (typeof property_value) {
case 'object':
if (property_value === null) {
if (IS_NULL(property_value)) {
property_value_json = 'null';
} else if (typeof property_value.toJSONProtocol == 'function') {
property_value_json = property_value.toJSONProtocol(true);
@ -2217,7 +2217,7 @@ function Stringify(x, depth) {
case "symbol":
return "Symbol(" + (x.name ? Stringify(x.name, depth) : "") + ")"
case "object":
if (x === null) return "null";
if (IS_NULL(x)) return "null";
if (x.constructor && x.constructor.name === "Array") {
var elems = [];
for (var i = 0; i < x.length; ++i) {
@ -2233,7 +2233,7 @@ function Stringify(x, depth) {
var props = [];
for (var name in x) {
var desc = Object.getOwnPropertyDescriptor(x, name);
if (desc === void 0) continue;
if (IS_UNDEFINED(desc)) continue;
if ("value" in desc) {
props.push(name + ": " + Stringify(desc.value, depth - 1));
}

32
deps/v8/src/date.js

@ -41,7 +41,7 @@ function ThrowDateTypeError() {
}
var timezone_cache_time = $NaN;
var timezone_cache_time = NAN;
var timezone_cache_timezone;
function LocalTimezone(t) {
@ -66,10 +66,10 @@ function UTC(time) {
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
if (!$isFinite(hour)) return $NaN;
if (!$isFinite(min)) return $NaN;
if (!$isFinite(sec)) return $NaN;
if (!$isFinite(ms)) return $NaN;
if (!$isFinite(hour)) return NAN;
if (!$isFinite(min)) return NAN;
if (!$isFinite(sec)) return NAN;
if (!$isFinite(ms)) return NAN;
return TO_INTEGER(hour) * msPerHour
+ TO_INTEGER(min) * msPerMinute
+ TO_INTEGER(sec) * msPerSecond
@ -90,7 +90,7 @@ function TimeInYear(year) {
// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
function MakeDay(year, month, date) {
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return NAN;
// Convert to integer and map -0 to 0.
year = TO_INTEGER_MAP_MINUS_ZERO(year);
@ -99,7 +99,7 @@ function MakeDay(year, month, date) {
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth) {
return $NaN;
return NAN;
}
// Now we rely on year and month being SMIs.
@ -115,15 +115,15 @@ function MakeDate(day, time) {
// is no way that the time can be within range even after UTC
// conversion we return NaN immediately instead of relying on
// TimeClip to do it.
if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
if ($abs(time) > MAX_TIME_BEFORE_UTC) return NAN;
return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
if (!$isFinite(time)) return $NaN;
if ($abs(time) > MAX_TIME_MS) return $NaN;
if (!$isFinite(time)) return NAN;
if ($abs(time) > MAX_TIME_MS) return NAN;
return TO_INTEGER(time);
}
@ -132,7 +132,7 @@ function TimeClip(time) {
// strings over and over again.
var Date_cache = {
// Cached time value.
time: $NaN,
time: NAN,
// String input for which the cached time is valid.
string: null
};
@ -269,7 +269,7 @@ var parse_buffer = $Array(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
var arr = %DateParseString(ToString(string), parse_buffer);
if (IS_NULL(arr)) return $NaN;
if (IS_NULL(arr)) return NAN;
var day = MakeDay(arr[0], arr[1], arr[2]);
var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
@ -671,7 +671,7 @@ function DateGetYear() {
function DateSetYear(year) {
CHECK_DATE(this);
year = ToNumber(year);
if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN);
if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var t = LOCAL_DATE_VALUE(this);
@ -746,12 +746,12 @@ function DateToJSON(key) {
function ResetDateCache() {
// Reset the timezone cache:
timezone_cache_time = $NaN;
timezone_cache_time = NAN;
timezone_cache_timezone = undefined;
// Reset the date cache:
cache = Date_cache;
cache.time = $NaN;
cache.time = NAN;
cache.string = null;
}
@ -762,7 +762,7 @@ function SetUpDate() {
%CheckIsBootstrapping();
%SetCode($Date, DateConstructor);
%FunctionSetPrototype($Date, new $Date($NaN));
%FunctionSetPrototype($Date, new $Date(NAN));
// Set up non-enumerable properties of the Date object itself.
InstallFunctions($Date, DONT_ENUM, $Array(

4
deps/v8/src/debug-debugger.js

@@ -448,7 +448,7 @@ ScriptBreakPoint.prototype.set = function (script) {
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
if (position === null) return;
if (IS_NULL(position)) return;
// Create a break point object and set the break point.
break_point = MakeBreakPoint(position, this);
@@ -2064,7 +2064,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
} else if ("value" in value_description) {
return value_description.value;
} else if (value_description.type == UNDEFINED_TYPE) {
return void 0;
return UNDEFINED;
} else if (value_description.type == NULL_TYPE) {
return null;
} else {

14
deps/v8/src/debug.cc

@@ -1793,10 +1793,14 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
// function.
if (!holder.is_null() && holder->IsJSFunction() &&
!JSFunction::cast(*holder)->IsBuiltin()) {
if (!holder.is_null() && holder->IsJSFunction()) {
Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
Debug::FloodWithOneShot(js_function);
if (!js_function->IsBuiltin()) {
Debug::FloodWithOneShot(js_function);
} else if (js_function->shared()->bound()) {
// Handle Function.prototype.bind
Debug::FloodBoundFunctionWithOneShot(js_function);
}
}
} else {
Debug::FloodWithOneShot(function);
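The restructured branch above lets the debugger step into the target of a Function.prototype.bind wrapper instead of skipping it as a builtin. A standalone sketch of the same decision logic, using hypothetical stand-in types (the real code works on V8-internal JSFunction handles):

    #include <cstdio>

    // Hypothetical stand-in for V8's internal JSFunction state.
    struct Function {
      bool is_builtin;
      bool is_bound;  // produced by Function.prototype.bind
    };

    void FloodWithOneShot(const Function&) { std::puts("flood function"); }
    void FloodBoundFunctionWithOneShot(const Function&) {
      std::puts("flood bound target");
    }

    // Mirrors the new branch: ordinary functions get one-shot break points;
    // builtins are skipped unless they are bound-function wrappers, whose
    // bound targets are flooded instead.
    void FloodStepInTarget(const Function& fn) {
      if (!fn.is_builtin) {
        FloodWithOneShot(fn);
      } else if (fn.is_bound) {
        FloodBoundFunctionWithOneShot(fn);
      }
    }

    int main() {
      FloodStepInTarget({false, false});  // plain JS function
      FloodStepInTarget({true, true});    // result of Function.prototype.bind
      return 0;
    }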
@@ -2102,6 +2106,7 @@ void Debug::PrepareForBreakPoints() {
if (!shared->allows_lazy_compilation()) continue;
if (!shared->script()->IsScript()) continue;
if (function->IsBuiltin()) continue;
if (shared->code()->gc_metadata() == active_code_marker) continue;
Code::Kind kind = function->code()->kind();
@@ -3131,8 +3136,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name));
v8::Handle<v8::Boolean> running =
auto_continue ? v8::True() : v8::False();
v8::Handle<v8::Boolean> running = v8::Boolean::New(auto_continue);
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { running };
cmd_processor = v8::Local<v8::Object>::Cast(

1
deps/v8/src/debug.h

@@ -38,6 +38,7 @@
#include "frames-inl.h"
#include "hashmap.h"
#include "platform.h"
#include "platform/socket.h"
#include "string-stream.h"
#include "v8threads.h"

79
deps/v8/src/marking-thread.cc → deps/v8/src/defaults.cc

@@ -25,65 +25,46 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "marking-thread.h"
// The GYP based build ends up defining USING_V8_SHARED when compiling this
// file.
#undef USING_V8_SHARED
#include "../include/v8-defaults.h"
#include "platform.h"
#include "globals.h"
#include "v8.h"
#include "isolate.h"
#include "v8threads.h"
namespace v8 {
namespace internal {
MarkingThread::MarkingThread(Isolate* isolate)
: Thread("MarkingThread"),
isolate_(isolate),
heap_(isolate->heap()),
start_marking_semaphore_(0),
end_marking_semaphore_(0),
stop_semaphore_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
}
Atomic32 MarkingThread::id_counter_ = -1;
void MarkingThread::Run() {
Isolate::SetIsolateThreadLocals(isolate_, NULL);
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
while (true) {
start_marking_semaphore_.Wait();
if (Acquire_Load(&stop_thread_)) {
stop_semaphore_.Signal();
return;
}
end_marking_semaphore_.Signal();
bool ConfigureResourceConstraintsForCurrentPlatform(
ResourceConstraints* constraints) {
if (constraints == NULL) {
return false;
}
}
void MarkingThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
start_marking_semaphore_.Signal();
stop_semaphore_.Wait();
Join();
}
void MarkingThread::StartMarking() {
start_marking_semaphore_.Signal();
int lump_of_memory = (i::kPointerSize / 4) * i::MB;
// The young_space_size should be a power of 2 and old_generation_size should
// be a multiple of Page::kPageSize.
#if V8_OS_ANDROID
constraints->set_max_young_space_size(8 * lump_of_memory);
constraints->set_max_old_space_size(256 * lump_of_memory);
constraints->set_max_executable_size(192 * lump_of_memory);
#else
constraints->set_max_young_space_size(16 * lump_of_memory);
constraints->set_max_old_space_size(700 * lump_of_memory);
constraints->set_max_executable_size(256 * lump_of_memory);
#endif
return true;
}
void MarkingThread::WaitForMarkingThread() {
end_marking_semaphore_.Wait();
bool SetDefaultResourceConstraintsForCurrentPlatform() {
ResourceConstraints constraints;
if (!ConfigureResourceConstraintsForCurrentPlatform(&constraints))
return false;
return SetResourceConstraints(&constraints);
}
} } // namespace v8::internal
} // namespace v8
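The new defaults.cc (replacing the deleted marking-thread.cc in the file list) derives heap limits from the pointer width: lump_of_memory is 1 MB on 32-bit targets and 2 MB on 64-bit ones, so a 64-bit desktop build gets, for example, a 1400 MB max old space. A hypothetical embedder-side sketch of how the new entry point composes with the pre-existing SetResourceConstraints() API:

    // Hypothetical embedder snippet; both functions below are declared in
    // the include/v8-defaults.h header added by this commit.
    #include "v8.h"
    #include "v8-defaults.h"

    bool ApplyPlatformHeapDefaults() {
      // One call configures young/old/executable limits for the current
      // platform and installs them via SetResourceConstraints().
      if (v8::SetDefaultResourceConstraintsForCurrentPlatform()) return true;

      // Fallback: set the pieces by hand (byte values are illustrative).
      v8::ResourceConstraints constraints;
      constraints.set_max_young_space_size(16 * 1024 * 1024);
      constraints.set_max_old_space_size(700 * 1024 * 1024);
      return v8::SetResourceConstraints(&constraints);
    }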

175
deps/v8/src/deoptimizer.cc

@@ -1494,7 +1494,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
intptr_t caller_arg_count = 0;
bool arg_count_known = descriptor->stack_parameter_count_ == NULL;
bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
output_frame_offset -= kPointerSize;
@@ -1614,12 +1614,16 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
}
} else {
// Dispatch on the instance type of the object to be materialized.
Handle<Map> map = Handle<Map>::cast(MaterializeNextValue());
// We also need to make sure that the representations of all fields
// in the given object are general enough to hold a tagged value.
Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
switch (map->instance_type()) {
case HEAP_NUMBER_TYPE: {
Handle<HeapNumber> number =
Handle<HeapNumber>::cast(MaterializeNextValue());
materialized_objects_->Add(number);
Handle<HeapNumber> object = isolate_->factory()->NewHeapNumber(0.0);
materialized_objects_->Add(object);
Handle<Object> number = MaterializeNextValue();
object->set_value(number->Number());
materialization_value_index_ += kDoubleSize / kPointerSize - 1;
break;
}
@@ -1693,29 +1697,35 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// output frames are used to materialize arguments objects later on they need
// to already contain valid heap numbers.
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
if (trace_) {
PrintF("Materialized a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address());
d.destination());
}
Memory::Object_at(d.slot_address()) = *num;
Memory::Object_at(d.destination()) = *num;
}
// Materialize all heap numbers required for arguments/captured objects.
for (int i = 0; i < values.length(); i++) {
if (!values.at(i)->IsTheHole()) continue;
double double_value = deferred_objects_double_values_[i];
Handle<Object> num = isolate_->factory()->NewNumber(double_value);
for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
HeapNumberMaterializationDescriptor<int> d =
deferred_objects_double_values_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
if (trace_) {
PrintF("Materialized a new heap number %p [%e] for object\n",
reinterpret_cast<void*>(*num), double_value);
PrintF("Materialized a new heap number %p [%e] for object at %d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.destination());
}
values.Set(i, num);
ASSERT(values.at(d.destination())->IsTheHole());
values.Set(d.destination(), num);
}
// Play it safe and clear all object double values before we continue.
deferred_objects_double_values_.Clear();
// Materialize arguments/captured objects.
if (!deferred_objects_.is_empty()) {
List<Handle<Object> > materialized_objects(deferred_objects_.length());
@@ -1765,11 +1775,11 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_bottom = parameters_top + parameters_size;
Address expressions_bottom = expressions_top + expressions_size;
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
// Check if the heap number to materialize actually belongs to the frame
// being extracted.
Address slot = d.slot_address();
Address slot = d.destination();
if (parameters_top <= slot && slot < parameters_bottom) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@@ -1781,7 +1791,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
"for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address(),
d.destination(),
index);
}
@@ -1797,7 +1807,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
"for expression slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address(),
d.destination(),
index);
}
@@ -2337,85 +2347,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
void Deoptimizer::PatchInterruptCode(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* replacement_code =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()));
PatchInterruptCodeAt(unoptimized,
back_edges.pc(),
replacement_code);
}
}
unoptimized->set_back_edges_patched_for_osr(true);
ASSERT(Deoptimizer::VerifyInterruptCode(
isolate, unoptimized, loop_nesting_level));
}
void Deoptimizer::RevertInterruptCode(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* interrupt_code =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
// Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()));
RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
}
}
unoptimized->set_back_edges_patched_for_osr(false);
unoptimized->set_allow_osr_at_loop_nesting_level(0);
// Assert that none of the back edges are patched anymore.
ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
}
#ifdef DEBUG
bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
Code* unoptimized,
int loop_nesting_level) {
DisallowHeapAllocation no_gc;
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
uint32_t loop_depth = back_edges.loop_depth();
CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()) != NOT_PATCHED);
}
return true;
}
#endif // DEBUG
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
// The fp-to-sp delta already takes the context and the function
@@ -2484,18 +2415,19 @@ void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
deferred_objects_double_values_.Add(isolate()->heap()->nan_value()->value());
}
void Deoptimizer::AddObjectDoubleValue(double value) {
deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
deferred_objects_double_values_.Add(value);
HeapNumberMaterializationDescriptor<int> value_desc(
deferred_objects_tagged_values_.length() - 1, value);
deferred_objects_double_values_.Add(value_desc);
}
void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
HeapNumberMaterializationDescriptor value_desc(
HeapNumberMaterializationDescriptor<Address> value_desc(
reinterpret_cast<Address>(slot_address), value);
deferred_heap_numbers_.Add(value_desc);
}
@@ -2814,46 +2746,11 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Translation::StringFor(Opcode opcode) {
#define TRANSLATION_OPCODE_CASE(item) case item: return #item;
switch (opcode) {
case BEGIN:
return "BEGIN";
case JS_FRAME:
return "JS_FRAME";
case ARGUMENTS_ADAPTOR_FRAME:
return "ARGUMENTS_ADAPTOR_FRAME";
case CONSTRUCT_STUB_FRAME:
return "CONSTRUCT_STUB_FRAME";
case GETTER_STUB_FRAME:
return "GETTER_STUB_FRAME";
case SETTER_STUB_FRAME:
return "SETTER_STUB_FRAME";
case COMPILED_STUB_FRAME:
return "COMPILED_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
return "INT32_REGISTER";
case UINT32_REGISTER:
return "UINT32_REGISTER";
case DOUBLE_REGISTER:
return "DOUBLE_REGISTER";
case STACK_SLOT:
return "STACK_SLOT";
case INT32_STACK_SLOT:
return "INT32_STACK_SLOT";
case UINT32_STACK_SLOT:
return "UINT32_STACK_SLOT";
case DOUBLE_STACK_SLOT:
return "DOUBLE_STACK_SLOT";
case LITERAL:
return "LITERAL";
case DUPLICATED_OBJECT:
return "DUPLICATED_OBJECT";
case ARGUMENTS_OBJECT:
return "ARGUMENTS_OBJECT";
case CAPTURED_OBJECT:
return "CAPTURED_OBJECT";
TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
}
#undef TRANSLATION_OPCODE_CASE
UNREACHABLE();
return "";
}
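The hand-written switch becomes an X-macro expansion of TRANSLATION_OPCODE_LIST (defined in deoptimizer.h, further down), so adding an opcode updates the enum and its string table from a single list. A minimal self-contained sketch of the pattern:

    #include <cstdio>

    // One list macro drives every generated artifact, so the enum and the
    // string function can never drift out of sync.
    #define OPCODE_LIST(V) \
      V(BEGIN)             \
      V(JS_FRAME)          \
      V(LITERAL)

    #define DECLARE_ENUM(item) item,
    enum Opcode { OPCODE_LIST(DECLARE_ENUM) LAST = LITERAL };
    #undef DECLARE_ENUM

    #define OPCODE_CASE(item) case item: return #item;
    const char* StringFor(Opcode opcode) {
      switch (opcode) { OPCODE_LIST(OPCODE_CASE) }
      return "";
    }
    #undef OPCODE_CASE

    int main() {
      std::printf("%s\n", StringFor(JS_FRAME));  // prints "JS_FRAME"
      return 0;
    }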

111
deps/v8/src/deoptimizer.h

@@ -60,17 +60,18 @@ class FrameDescription;
class TranslationIterator;
class DeoptimizedFrameInfo;
template<typename T>
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
HeapNumberMaterializationDescriptor(Address slot_address, double val)
: slot_address_(slot_address), val_(val) { }
HeapNumberMaterializationDescriptor(T destination, double value)
: destination_(destination), value_(value) { }
Address slot_address() const { return slot_address_; }
double value() const { return val_; }
T destination() const { return destination_; }
double value() const { return value_; }
private:
Address slot_address_;
double val_;
T destination_;
double value_;
};
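Parameterizing the descriptor over its destination type lets one class serve both uses in deoptimizer.cc: writing a materialized heap number to a frame slot (Address) and recording which field of a deferred object a double belongs to (int). A compilable sketch of the same pattern outside V8:

    #include <cstdio>

    typedef unsigned char* Address;  // stand-in for V8's Address typedef

    // Same shape as the templated descriptor above: the destination can be
    // a raw slot address or an index, picked per instantiation.
    template <typename T>
    class MaterializationDescriptor {
     public:
      MaterializationDescriptor(T destination, double value)
          : destination_(destination), value_(value) {}
      T destination() const { return destination_; }
      double value() const { return value_; }
     private:
      T destination_;
      double value_;
    };

    int main() {
      unsigned char slot[8] = {0};
      MaterializationDescriptor<Address> by_slot(slot, 2.5);  // frame slot
      MaterializationDescriptor<int> by_index(3, 1.5);        // object field
      std::printf("slot %p gets %f; field %d gets %f\n",
                  static_cast<void*>(by_slot.destination()), by_slot.value(),
                  by_index.destination(), by_index.value());
      return 0;
    }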
@@ -131,11 +132,6 @@ class Deoptimizer : public Malloced {
DEBUGGER
};
enum InterruptPatchState {
NOT_PATCHED,
PATCHED_FOR_OSR
};
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -213,39 +209,6 @@ class Deoptimizer : public Malloced {
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
static void PatchInterruptCode(Isolate* isolate,
Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code);
// Change all interrupts patched in the unoptimized code
// back to normal interrupts.
static void RevertInterruptCode(Isolate* isolate,
Code* unoptimized_code);
// Change a patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code);
#ifdef DEBUG
static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
Code* unoptimized_code,
Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
static bool VerifyInterruptCode(Isolate* isolate,
Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
@@ -469,9 +432,10 @@ class Deoptimizer : public Malloced {
// Deferred values to be materialized.
List<Object*> deferred_objects_tagged_values_;
List<double> deferred_objects_double_values_;
List<HeapNumberMaterializationDescriptor<int> >
deferred_objects_double_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
// Output frame information. Only used during heap object materialization.
List<Handle<JSFunction> > jsframe_functions_;
@@ -542,7 +506,15 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
ASSERT(n < ARRAY_SIZE(registers_));
#if DEBUG
// This convoluted ASSERT is needed to work around a gcc problem that
// improperly detects an array bounds overflow in optimized debug builds
// when using a plain ASSERT.
if (n >= ARRAY_SIZE(registers_)) {
ASSERT(false);
return 0;
}
#endif
return registers_[n];
}
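The #if DEBUG block works around gcc's spurious array-bounds warning in optimized debug builds by branching away from the access entirely when the index is out of range, rather than relying on a plain ASSERT before the indexing expression. The same shape in isolation (plain assert standing in for V8's ASSERT; compile with -DDEBUG to enable the check):

    #include <cassert>
    #include <cstddef>

    const size_t kNumRegisters = 16;
    long registers_[kNumRegisters] = {0};

    long GetRegister(size_t n) {
    #ifdef DEBUG
      // Explicit early return: the compiler can no longer see a path on
      // which n is out of range at the indexing expression below.
      if (n >= kNumRegisters) {
        assert(false);
        return 0;
      }
    #endif
      return registers_[n];
    }

    int main() { return static_cast<int>(GetRegister(0)); }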
@@ -717,29 +689,36 @@ class TranslationIterator BASE_EMBEDDED {
};
#define TRANSLATION_OPCODE_LIST(V) \
V(BEGIN) \
V(JS_FRAME) \
V(CONSTRUCT_STUB_FRAME) \
V(GETTER_STUB_FRAME) \
V(SETTER_STUB_FRAME) \
V(ARGUMENTS_ADAPTOR_FRAME) \
V(COMPILED_STUB_FRAME) \
V(DUPLICATED_OBJECT) \
V(ARGUMENTS_OBJECT) \
V(CAPTURED_OBJECT) \
V(REGISTER) \
V(INT32_REGISTER) \
V(UINT32_REGISTER) \
V(DOUBLE_REGISTER) \
V(STACK_SLOT) \
V(INT32_STACK_SLOT) \
V(UINT32_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
V(LITERAL)
class Translation BASE_EMBEDDED {
public:
#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
enum Opcode {
BEGIN,
JS_FRAME,
CONSTRUCT_STUB_FRAME,
GETTER_STUB_FRAME,
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
COMPILED_STUB_FRAME,
DUPLICATED_OBJECT,
ARGUMENTS_OBJECT,
CAPTURED_OBJECT,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
DOUBLE_REGISTER,
STACK_SLOT,
INT32_STACK_SLOT,
UINT32_STACK_SLOT,
DOUBLE_STACK_SLOT,
LITERAL
TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM)
LAST = LITERAL
};
#undef DECLARE_TRANSLATION_OPCODE_ENUM
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
Zone* zone)

2
deps/v8/src/disassembler.cc

@@ -250,7 +250,7 @@ static int DecodeIt(Isolate* isolate,
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}
} else if (kind == Code::STUB) {
} else if (kind == Code::STUB || kind == Code::HANDLER) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
Object* obj = heap->code_stubs()->SlowReverseLookup(code);

2
deps/v8/src/elements.cc

@@ -792,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
#ifdef DEBUG
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());
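This hunk narrows an O(n) verification loop from every DEBUG build down to builds compiled with ENABLE_SLOW_ASSERTS that also enable the runtime flag. A minimal sketch of that compile-time plus runtime double gate (build with -DENABLE_SLOW_ASSERTS to include the check):

    #include <cassert>

    static bool FLAG_enable_slow_asserts = false;  // runtime switch

    void CopyChecked(const int* from, int* to, int len) {
    #ifdef ENABLE_SLOW_ASSERTS
      // Compiled in only when slow asserts are enabled at build time, and
      // executed only when the runtime flag is switched on as well.
      if (FLAG_enable_slow_asserts) {
        for (int i = 0; i < len; i++) assert(from[i] >= 0);
      }
    #endif
      for (int i = 0; i < len; i++) to[i] = from[i];
    }

    int main() {
      int src[3] = {1, 2, 3}, dst[3];
      CopyChecked(src, dst, 3);
      return dst[0] - 1;
    }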

Some files were not shown because too many files changed in this diff
