deps: upgrade v8 to 3.25.30

v0.11.13-release
Author: Ben Noordhuis
Committed by: Fedor Indutny
Parent commit: 67e078094b
Changed files (lines changed per file):

    3  deps/v8/.gitignore
    4  deps/v8/AUTHORS
  399  deps/v8/ChangeLog
    2  deps/v8/DEPS
    2  deps/v8/LICENSE
   44  deps/v8/Makefile
   60  deps/v8/Makefile.android
    9  deps/v8/PRESUBMIT.py
    1  deps/v8/build/all.gyp
   34  deps/v8/build/android.gypi
    2  deps/v8/build/features.gypi
   30  deps/v8/build/standalone.gypi
   19  deps/v8/build/toolchain.gypi
    0  deps/v8/include/v8-debug.h
   68  deps/v8/include/v8-profiler.h
  355  deps/v8/include/v8-util.h
  573  deps/v8/include/v8.h
    3  deps/v8/samples/lineprocessor.cc
    3  deps/v8/samples/shell.cc
   41  deps/v8/src/accessors.cc
    2  deps/v8/src/accessors.h
  173  deps/v8/src/allocation-tracker.cc
   56  deps/v8/src/allocation-tracker.h
  806  deps/v8/src/api.cc
    3  deps/v8/src/api.h
    1  deps/v8/src/arm/OWNERS
   82  deps/v8/src/arm/assembler-arm-inl.h
  438  deps/v8/src/arm/assembler-arm.cc
  120  deps/v8/src/arm/assembler-arm.h
  178  deps/v8/src/arm/builtins-arm.cc
  516  deps/v8/src/arm/code-stubs-arm.cc
    2  deps/v8/src/arm/code-stubs-arm.h
    2  deps/v8/src/arm/constants-arm.h
   12  deps/v8/src/arm/debug-arm.cc
   33  deps/v8/src/arm/deoptimizer-arm.cc
   12  deps/v8/src/arm/disasm-arm.cc
  494  deps/v8/src/arm/full-codegen-arm.cc
   26  deps/v8/src/arm/ic-arm.cc
  278  deps/v8/src/arm/lithium-arm.cc
  234  deps/v8/src/arm/lithium-arm.h
  740  deps/v8/src/arm/lithium-codegen-arm.cc
   23  deps/v8/src/arm/lithium-codegen-arm.h
  167  deps/v8/src/arm/macro-assembler-arm.cc
   87  deps/v8/src/arm/macro-assembler-arm.h
   10  deps/v8/src/arm/simulator-arm.cc
    4  deps/v8/src/arm/simulator-arm.h
  190  deps/v8/src/arm/stub-cache-arm.cc
    1  deps/v8/src/arm64/OWNERS
 1229  deps/v8/src/arm64/assembler-arm64-inl.h
 2813  deps/v8/src/arm64/assembler-arm64.cc
 2233  deps/v8/src/arm64/assembler-arm64.h
 1562  deps/v8/src/arm64/builtins-arm64.cc
 5743  deps/v8/src/arm64/code-stubs-arm64.cc
  500  deps/v8/src/arm64/code-stubs-arm64.h
  615  deps/v8/src/arm64/codegen-arm64.cc
   71  deps/v8/src/arm64/codegen-arm64.h
 1271  deps/v8/src/arm64/constants-arm64.h
  199  deps/v8/src/arm64/cpu-arm64.cc
  107  deps/v8/src/arm64/cpu-arm64.h
  393  deps/v8/src/arm64/debug-arm64.cc
  671  deps/v8/src/arm64/decoder-arm64-inl.h
  109  deps/v8/src/arm64/decoder-arm64.cc
  210  deps/v8/src/arm64/decoder-arm64.h
  388  deps/v8/src/arm64/deoptimizer-arm64.cc
 1856  deps/v8/src/arm64/disasm-arm64.cc
  115  deps/v8/src/arm64/disasm-arm64.h
   65  deps/v8/src/arm64/frames-arm64.cc
  133  deps/v8/src/arm64/frames-arm64.h
 5015  deps/v8/src/arm64/full-codegen-arm64.cc
 1407  deps/v8/src/arm64/ic-arm64.cc
  333  deps/v8/src/arm64/instructions-arm64.cc
  501  deps/v8/src/arm64/instructions-arm64.h
  618  deps/v8/src/arm64/instrument-arm64.cc
  107  deps/v8/src/arm64/instrument-arm64.h
 2576  deps/v8/src/arm64/lithium-arm64.cc
 3100  deps/v8/src/arm64/lithium-arm64.h
 5901  deps/v8/src/arm64/lithium-codegen-arm64.cc
  490  deps/v8/src/arm64/lithium-codegen-arm64.h
  334  deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
   90  deps/v8/src/arm64/lithium-gap-resolver-arm64.h
 1677  deps/v8/src/arm64/macro-assembler-arm64-inl.h
 5184  deps/v8/src/arm64/macro-assembler-arm64.cc
 2310  deps/v8/src/arm64/macro-assembler-arm64.h
 1728  deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
  315  deps/v8/src/arm64/regexp-macro-assembler-arm64.h
 3645  deps/v8/src/arm64/simulator-arm64.cc
  908  deps/v8/src/arm64/simulator-arm64.h
 1496  deps/v8/src/arm64/stub-cache-arm64.cc
  112  deps/v8/src/arm64/utils-arm64.cc
  135  deps/v8/src/arm64/utils-arm64.h
   10  deps/v8/src/array-iterator.js
   14  deps/v8/src/array.js
   95  deps/v8/src/assembler.cc
   87  deps/v8/src/assembler.h
   21  deps/v8/src/assert-scope.cc
  129  deps/v8/src/assert-scope.h
   49  deps/v8/src/ast.cc
   90  deps/v8/src/ast.h
   33  deps/v8/src/atomicops.h
  372  deps/v8/src/atomicops_internals_arm64_gcc.h

deps/v8/.gitignore  (3 lines changed)

@@ -22,6 +22,7 @@
*~
.cpplint-cache
.d8_history
.*.sw?
bsuite
d8
d8_g
@@ -46,7 +47,7 @@ shell_g
/test/mozilla/data
/test/mozilla/downloaded_*
/test/test262/data
/test/test262/test262-*
/test/test262/tc39-test262-*
/third_party
/tools/jsfunfuzz
/tools/jsfunfuzz.zip

deps/v8/AUTHORS  (4 lines changed)

@@ -17,8 +17,10 @@ Opera Software ASA
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Rames <alexandre.rames@arm.com>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Baptiste Afsa <baptiste.afsa@arm.com>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
@@ -31,6 +33,7 @@ Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Haitao Feng <haitao.feng@intel.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Jacob Bramley <jacob.bramley@arm.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
@@ -59,6 +62,7 @@ Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
Vincent Belliard <vincent.belliard@arm.com>
Vlad Burlik <vladbph@gmail.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>

deps/v8/ChangeLog  (399 lines changed)

@@ -1,3 +1,402 @@
2014-03-28: Version 3.25.30

        NativeContext::map_cache reference should be strong in heap snapshots
        (Chromium issue 357060).
        Performance and stability improvements on all platforms.

2014-03-27: Version 3.25.29

        Performance and stability improvements on all platforms.

2014-03-27: Version 3.25.28

        Performance and stability improvements on all platforms.

2014-03-26: Version 3.25.27

        Promise constructor should not be enumerable (Chromium issue 352597).
        Performance and stability improvements on all platforms.

2014-03-26: Version 3.25.26

        Performance and stability improvements on all platforms.

2014-03-25: Version 3.25.25

        Roll ICU 239289:258359 and add support for external ICU data tables
        (issue 3142, Chromium issue 72633).
        Performance and stability improvements on all platforms.

2014-03-25: Version 3.25.24

        Add support for per-isolate private symbols.
        No longer OOM on invalid string length (issue 3060).
        Remove Failure::OutOfMemory propagation and
        V8::IgnoreOutOfMemoryException (issue 3060).
        Tentative Windows dll build fix: Don't V8_EXPORT ScriptCompiler::Source
        (issue 3228).
        Performance and stability improvements on all platforms.

2014-03-24: Version 3.25.23

        Rename A64 port to ARM64 port (Chromium issue 354405).
        Fix missing access check in Runtime_SetPrototype (Chromium issue
        354123).
        Fix polymorphic hydrogen handling of SLOPPY_ARGUMENTS_ELEMENTS (Chromium
        issue 354391).
        Performance and stability improvements on all platforms.

2014-03-20: Version 3.25.22

        Increase the "local variables in a function" limit (issue 3205).
        Implement ES6 symbol registry and predefined symbols.
        Throw exception on invalid string length instead of OOM (Chromium issue
        349329).
        Performance and stability improvements on all platforms.

2014-03-20: Version 3.25.21

        Performance and stability improvements on all platforms.

2014-03-20: Version 3.25.20

        Fix polymorphic keyed loads for SLOPPY_ARGUMENTS_ELEMENTS (Chromium
        issue 350867).
        Performance and stability improvements on all platforms.

2014-03-19: Version 3.25.19

        Performance and stability improvements on all platforms.

2014-03-19: Version 3.25.18

        Performance and stability improvements on all platforms.

2014-03-19: Version 3.25.17

        Performance and stability improvements on all platforms.

2014-03-18: Version 3.25.16

        Apply numeric casts correctly in typed arrays and related code (Chromium
        issue 353004).
        Performance and stability improvements on all platforms.

2014-03-18: Version 3.25.15

        Don't generate keyed store ICs for global proxies (Chromium issue
        352983).
        MIPS: Make invalid LHSs a parse-time (reference) error (Chromium issue
        351658).
        Make invalid LHSs a parse-time (reference) error (Chromium issue
        351658).
        Add Promises/A+ Compliance Test Suite (Chromium issue 347095).
        Split Promise API into Promise/Resolver.
        Performance and stability improvements on all platforms.

2014-03-17: Version 3.25.14

        Performance and stability improvements on all platforms.

2014-03-17: Version 3.25.13

        Move profiler callback interfaces from v8.h to v8-profiler.h.
        Performance and stability improvements on all platforms.

2014-03-14: Version 3.25.12

        PromiseCoerce should deal with an error during accessing "then"
        (Chromium issue 347095).
        Propagate updated offsets in BoundsCheckBbData (Chromium issue 350863).
        Add regression test for range analysis bug (issue 3204).
        Continued fix for 351257. Reusing the feedback vector is too complex
        (Chromium issue 351257).
        StopCpuProfiling should return non-const CpuProfile (issue 3213).
        Allow for compiling with xcode 5.1 (which doesn't have gcc anymore).
        Performance and stability improvements on all platforms.

2014-03-14: Version 3.25.11

        MIPS: Remove uses of CanBeNegative() in HMod (issue 3204).
        MIPS: Remove uses of RangeCanInclude() in flooring division by power of
        2 (issue 3204).
        MIPS: Fix uses of range analysis results in HChange (issue 3204).
        Make translation of modulus operation '--stress-opt'-proof (Chromium
        issue 352059).
        Remove uses of CanBeNegative() in HMod (issue 3204).
        Remove uses of RangeCanInclude() in flooring division by power of 2
        (issue 3204).
        Fix uses of range analysis results in HChange (issue 3204).
        Performance and stability improvements on all platforms.

2014-03-14: Version 3.25.10

        This version was not committed due to script failures.

2014-03-13: Version 3.25.9

        Reland "Enable Object.observe by default" again (issue 2409).
        Use intrinsics for builtin ArrayBuffer property accesses (Chromium issue
        351787).
        Performance and stability improvements on all platforms.

2014-03-12: Version 3.25.8

        Fix HIsSmiAndBranch::KnownSuccessorBlock() by deleting it (Chromium
        issue 351320).
        Fix handling of polymorphic array accesses with constant index (Chromium
        issue 351319).
        Fix lazy deopt after tagged binary ops (Chromium issue 350434).
        MIPS: Cleanup some of the range uses in ModI/DivI (issue 3204).
        Fix issue with getOwnPropertySymbols and hidden properties (Chromium
        issue 350864).
        Cleanup some of the range uses in ModI/DivI (issue 3204).
        PromiseCoerce should ignore primitive values (Chromium issue 347095).
        Use a per-isolate cache for the date object JS bits (Chromium issue
        348856).
        Performance and stability improvements on all platforms.

2014-03-11: Version 3.25.7

        Promise.all and Promise.race should reject non-array parameter (Chromium
        issue 347453).
        Promise.all and Promise race should use "then" rather than "chain"
        (Chromium issue 347427).
        Merge the "Compute Minus Zero Checks" phase into the range analysis
        (issue 3204).
        Performance and stability improvements on all platforms.

2014-03-10: Version 3.25.6

        Replace the recursion in PropagateMinusZeroChecks() with a loop and a
        worklist (issue 3204).
        Reland "Enable Object.observe by default" (issue 2409).
        Enable Object.observe by default (issue 2409).
        AllocationTracker now maintains a map from address range to stack trace
        that allocated the range. When snapshot is generated the map is used to
        find construction stack trace for an object using its address (Chromium
        issue 277984).
        Introduce Runtime_GetAllScopesDetails to get all scopes at once for a
        frame (Chromium issue 340285).
        Reduce heavy runtime calls from debug mirrors (Chromium issue 340285).
        Check and clear date cache in DateCurrentTime, DateLocalTimezone and
        getTimezoneOffset (Chromium issue 142141).
        Performance and stability improvements on all platforms.

2014-03-06: Version 3.25.5

        Fix HConstants with Smi-ranged HeapNumber values (Chromium issue
        349878).
        Fix issues with JSON stringify replacer array (issues 3200, 3201).
        Performance and stability improvements on all platforms.

2014-03-05: Version 3.25.4

        x64: Fix LMathMinMax for constant Smi right-hand operands (Chromium
        issue 349079).
        Performance and stability improvements on all platforms.

2014-03-04: Version 3.25.3

        Clear optimized code cache in shared function info when code gets
        deoptimized (Chromium issue 343609).
        Fixed constant folding for Math.clz32 (Chromium issue 347906).
        Fix JSObject::PrintTransitions (Chromium issue 347912).
        Fix handling of constant global variable assignments (Chromium issue
        347904).
        Removed bogus ASSERT (Chromium issue 347542).
        Mark HCompareMap as having Tagged representation (Chromium issue
        346636).
        Fix crasher in Object.getOwnPropertySymbols (Chromium issue 346141).
        Fix the bit massaging code in CompleteParserRecorder::WriteNumber
        (Chromium issue 346221).
        Don't eliminate loads with incompatible types or representations
        (Chromium issue 346343).
        Check that after a weak callback, the handle is either dead or strong
        (Chromium issue 346061).
        Lazy preparsing vs. lazy parsing fix (Chromium issue 346207).
        Performance and stability improvements on all platforms.

2014-02-25: Version 3.25.2

        Fix the bit massaging code in CompleteParserRecorder::WriteNumber
        (Chromium issue 346221).
        Revert r19455 "Load target types and handlers before IC computation."
        (Chromium issue 346149).
        Don't eliminate loads with incompatible types or representations
        (Chromium issue 346343).
        Fix for a smi stores optimization on x64 with a regression test
        (Chromium issue 345715).
        Check that after a weak callback, the handle is either dead or strong
        (Chromium issue 346061).
        negative bounds checking on realm calls (Chromium issue 344285).
        Lazy preparsing vs. lazy parsing fix (Chromium issue 346207).
        Fix optimistic BCE to back off after deopt (issue 3176).
        Performance and stability improvements on all platforms.

2014-02-21: Version 3.25.1

        Performance and stability improvements on all platforms.

2014-02-19: Version 3.25.0

        ES6: Tighten up Object.prototype.__proto__ (issue 3064).
        Fix Hydrogen bounds check elimination (Chromium issue 344186).
        Performance and stability improvements on all platforms.

2014-02-19: Version 3.24.40

        A64: Let the MacroAssembler resolve branches to distant targets (issue
        3148).
        Fixed and improved code for integral division. Fixed and extended tests
        (issue 3151).
        MIPS: Fix assignment of function name constant (issue 3138).
        Fix assignment of function name constant (issue 3138).
        Performance and stability improvements on all platforms.

2014-02-14: Version 3.24.39

        Introduce --job-based-sweeping flag and use individual jobs for sweeping
        if set (issue 3104).
        Performance and stability improvements on all platforms.

2014-02-13: Version 3.24.38

        Merge experimental/a64 to bleeding_edge (issue 3113).
        Performance and stability improvements on all platforms.

2014-02-12: Version 3.24.37

        Fix spec violations in JSON.stringify wrt replacer array (issue 3135).
        Performance and stability improvements on all platforms.

2014-02-11: Version 3.24.36

        Fix inconsistencies wrt whitespaces (issue 3109).
        Performance and stability improvements on all platforms.

2014-02-10: Version 3.24.35

        Fix inconsistencies wrt whitespaces (issue 3109).

deps/v8/DEPS  (2 lines changed)

@@ -8,7 +8,7 @@ deps = {
"http://gyp.googlecode.com/svn/trunk@1831",
"v8/third_party/icu":
"https://src.chromium.org/chrome/trunk/deps/third_party/icu46@239289",
"https://src.chromium.org/chrome/trunk/deps/third_party/icu46@258359",
}
deps_os = {

deps/v8/LICENSE  (2 lines changed)

@@ -26,7 +26,7 @@ are:
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
Copyright 2006-2012, the V8 project authors. All rights reserved.
Copyright 2014, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

deps/v8/Makefile  (44 lines changed)

@@ -136,7 +136,16 @@ endif
# deprecation_warnings=on
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
endif
# asan=/path/to/clang++
ifneq ($(strip $(asan)),)
GYPFLAGS += -Dasan=1
export CXX="$(asan)"
export CXX_host="$(asan)"
export LINK="$(asan)"
export ASAN_SYMBOLIZER_PATH="$(dir $(asan))llvm-symbolizer"
endif
# arm specific flags.
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
@@ -223,11 +232,11 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm mipsel
ARCHES = ia32 x64 arm arm64 mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
@@ -247,13 +256,15 @@ NACL_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(NACL_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
QUICKCHECKS = $(addsuffix .quickcheck,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
qc quickcheck \
qc quickcheck $(QUICKCHECKS) \
$(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
@@ -332,6 +343,18 @@ $(CHECKS): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS)
$(addsuffix .quickcheck,$(MODES)): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--mode=$(basename $@) $(TESTFLAGS) --quickcheck
$(addsuffix .quickcheck,$(ARCHES)): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(basename $@) $(TESTFLAGS) --quickcheck
$(QUICKCHECKS): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS) --quickcheck
$(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
@tools/android-sync.sh $(basename $@) $(OUTDIR) \
$(shell pwd) $(ANDROID_V8)
@@ -358,12 +381,17 @@ native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
FASTTESTMODES = ia32.release,x64.release,ia32.optdebug,x64.optdebug,arm.optdebug
SUPERFASTTESTMODES = ia32.release
FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,arm64.release
FASTCOMPILEMODES = $(FASTTESTMODES),arm64.optdebug
COMMA = ,
EMPTY =
SPACE = $(EMPTY) $(EMPTY)
quickcheck: $(subst $(COMMA),$(SPACE),$(FASTTESTMODES))
quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) --quickcheck \
--download-data mozilla webkit
tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
qc: quickcheck
@@ -392,7 +420,7 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-Dv8_target_arch=$(subst .,,$(suffix $(basename $@))) \
-Dv8_optimized_debug=$(if $(findstring optdebug,$@),2,0) \
$(if $(findstring optdebug,$@),-Dv8_optimized_debug=2,) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
@@ -446,4 +474,4 @@ dependencies:
--revision 1831
svn checkout --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
third_party/icu --revision 239289
third_party/icu --revision 258359

deps/v8/Makefile.android  (60 lines changed)

@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@@ -49,24 +49,40 @@ endif
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
DEFINES += arm_neon=0 arm_version=7
TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
TOOLCHAIN_ARCH = arm-linux-androideabi
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6
else
ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
DEFINES += mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android-4.6
ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
TOOLCHAIN_ARCH = x86-4.6
ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6
else
$(error Target architecture "${ARCH}" is not supported)
ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.6
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
endif
endif
TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
TOOLCHAIN_PATH = \
${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
@@ -79,23 +95,23 @@ DEFINES += host_os=${HOST_OS}
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# Android GYP file generation targets.
ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \

deps/v8/PRESUBMIT.py  (9 lines changed)

@@ -98,3 +98,12 @@ def CheckChangeOnCommit(input_api, output_api):
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
def GetPreferredTryMasters(project, change):
return {
'tryserver.v8': {
'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
},
}

deps/v8/build/all.gyp  (1 line changed)

@@ -16,6 +16,7 @@
['component!="shared_library"', {
'dependencies': [
'../tools/lexer-shell.gyp:lexer-shell',
'../tools/lexer-shell.gyp:parser-shell',
],
}],
]

deps/v8/build/android.gypi  (34 lines changed)

@@ -184,6 +184,16 @@
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
],
}],
['target_arch=="ia32"', {
@@ -208,10 +218,19 @@
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-dynamic-linker,/system/bin/linker',
'-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
@@ -238,8 +257,15 @@
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'cflags': [ '-m32', '-pthread' ],
'ldflags': [ '-m32', '-pthread' ],
'conditions': [
['target_arch=="x64"', {
'cflags': [ '-m64', '-pthread' ],
'ldflags': [ '-m64', '-pthread' ],
}, {
'cflags': [ '-m32', '-pthread' ],
'ldflags': [ '-m32', '-pthread' ],
}],
],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',

deps/v8/build/features.gypi  (2 lines changed)

@@ -115,7 +115,7 @@
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {

deps/v8/build/standalone.gypi  (30 lines changed)

@@ -34,6 +34,7 @@
'variables': {
'component%': 'static_library',
'clang%': 0,
'asan%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
@@ -52,7 +53,11 @@
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
s/x86_64/x64/;\
s/amd64/x64/;\
s/aarch64/arm64/;\
s/arm.*/arm/;\
s/mips.*/mipsel/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
@@ -97,6 +102,7 @@
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
@@ -164,6 +170,22 @@
],
},
'conditions': [
['asan==1', {
'target_defaults': {
'cflags_cc+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=address',
'-w', # http://crbug.com/162783
],
'cflags_cc!': [
'-fomit-frame-pointer',
],
'ldflags': [
'-fsanitize=address',
],
},
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'target_defaults': {
@@ -322,6 +344,12 @@
}, {
'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES'},
}],
['clang==1', {
'xcode_settings': {
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++11', # -std=gnu++11
},
}],
],
'target_conditions': [
['_type!="static_library"', {

deps/v8/build/toolchain.gypi  (19 lines changed)

@@ -268,6 +268,11 @@
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
['v8_target_arch=="arm64"', {
'defines': [
'V8_TARGET_ARCH_ARM64',
],
}],
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
@@ -407,7 +412,8 @@
}],
],
}],
['(OS=="linux") and (v8_target_arch=="x64")', {
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="arm64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [
@@ -422,8 +428,12 @@
'variables': {
'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
'conditions': [
['((OS!="android" and OS!="qnx") or clang==1)', {
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
}],
],
}]
],
}],
@@ -513,7 +523,8 @@
OS=="qnx"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
'<(wno_array_bounds)' ],
'<(wno_array_bounds)',
],
'conditions': [
['v8_optimized_debug==0', {
'cflags!': [

deps/v8/include/v8-debug.h  (0 lines changed)

deps/v8/include/v8-profiler.h  (68 lines changed)

@@ -35,6 +35,9 @@
*/
namespace v8 {
class HeapGraphNode;
struct HeapStatsUpdate;
typedef uint32_t SnapshotObjectId;
/**
@@ -158,12 +161,18 @@ class V8_EXPORT CpuProfiler {
* |record_samples| parameter controls whether individual samples should
* be recorded in addition to the aggregated tree.
*/
void StartProfiling(Handle<String> title, bool record_samples = false);
/** Deprecated. Use StartProfiling instead. */
void StartCpuProfiling(Handle<String> title, bool record_samples = false);
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
CpuProfile* StopProfiling(Handle<String> title);
/** Deprecated. Use StopProfiling instead. */
const CpuProfile* StopCpuProfiling(Handle<String> title);
/**
@@ -179,9 +188,6 @@ class V8_EXPORT CpuProfiler {
};
class HeapGraphNode;
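
For illustration (not part of this diff), a minimal sketch of the renamed profiling calls as an embedder might use them; the function name and profile title are assumptions, and error handling is omitted:

    #include "v8.h"
    #include "v8-profiler.h"

    void ProfileOneRun(v8::Isolate* isolate) {
      v8::HandleScope handle_scope(isolate);
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      v8::Handle<v8::String> title = v8::String::NewFromUtf8(isolate, "run");
      // Renamed from StartCpuProfiling; also record individual samples.
      profiler->StartProfiling(title, true);
      // ... execute the JavaScript to be profiled ...
      // Renamed from StopCpuProfiling; now returns a non-const CpuProfile*,
      // so the caller can dispose of it when done.
      v8::CpuProfile* profile = profiler->StopProfiling(title);
      if (profile != NULL) profile->Delete();
    }
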
/**
* HeapSnapshotEdge represents a directed connection between heap
* graph nodes: from retainers to retained nodes.
@@ -257,7 +263,11 @@ class V8_EXPORT HeapGraphNode {
SnapshotObjectId GetId() const;
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
V8_DEPRECATED("Use GetShallowSize instead",
int GetSelfSize() const);
/** Returns node's own size, in bytes. */
size_t GetShallowSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
@@ -267,6 +277,37 @@
};
/**
* An interface for exporting data from V8, using a "push" model.
*/
class V8_EXPORT OutputStream { // NOLINT
public:
enum WriteResult {
kContinue = 0,
kAbort = 1
};
virtual ~OutputStream() {}
/** Notify about the end of stream. */
virtual void EndOfStream() = 0;
/** Get preferred output chunk size. Called only once. */
virtual int GetChunkSize() { return 1024; }
/**
* Writes the next chunk of snapshot data into the stream. Writing
* can be stopped by returning kAbort as function result. EndOfStream
* will not be called in case writing was aborted.
*/
virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
/**
* Writes the next chunk of heap stats data into the stream. Writing
* can be stopped by returning kAbort as function result. EndOfStream
* will not be called in case writing was aborted.
*/
virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
return kAbort;
};
};
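
For illustration (not part of this diff), a minimal OutputStream implementation that gathers serialized snapshot chunks into a std::string; the class name is an assumption:

    #include <string>
    #include "v8-profiler.h"

    class StringOutputStream : public v8::OutputStream {
     public:
      virtual void EndOfStream() {}                // nothing left to flush
      virtual int GetChunkSize() { return 4096; }  // preferred chunk size
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        buffer_.append(data, size);
        return kContinue;  // returning kAbort would stop serialization
      }
      const std::string& buffer() const { return buffer_; }
     private:
      std::string buffer_;
    };

A stream like this would typically be handed to HeapSnapshot::Serialize().
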
/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
@@ -334,7 +375,24 @@ class V8_EXPORT HeapSnapshot {
};
class RetainedObjectInfo;
/**
* An interface for reporting progress and controlling long-running
* activities.
*/
class V8_EXPORT ActivityControl { // NOLINT
public:
enum ControlOption {
kContinue = 0,
kAbort = 1
};
virtual ~ActivityControl() {}
/**
* Notify about current progress. The activity can be stopped by
* returning kAbort as the callback result.
*/
virtual ControlOption ReportProgressValue(int done, int total) = 0;
};
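
For illustration (not part of this diff), an ActivityControl that gives a long-running activity a fixed progress budget before aborting it; the class name is an assumption:

    #include "v8-profiler.h"

    class BudgetedActivityControl : public v8::ActivityControl {
     public:
      explicit BudgetedActivityControl(int budget) : budget_(budget) {}
      virtual ControlOption ReportProgressValue(int done, int total) {
        // Abort once the budget of progress callbacks is used up.
        return --budget_ < 0 ? kAbort : kContinue;
      }
     private:
      int budget_;
    };
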
/**
* Interface for controlling heap profiling. Instance of the

deps/v8/include/v8-util.h  (355 lines changed, new file)

@@ -0,0 +1,355 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_UTIL_H_
#define V8_UTIL_H_
#include "v8.h"
#include <map>
/**
* Support for Persistent containers.
*
* C++11 embedders can use STL containers with UniquePersistent values,
* but pre-C++11 compilers lack the required move semantics, so such
* embedders may want these container classes.
*/
namespace v8 {
typedef uintptr_t PersistentContainerValue;
static const uintptr_t kPersistentContainerNotFound = 0;
/**
* A default trait implementation for PersistentValueMap which uses std::map
* as a backing map.
*
* Users will have to implement their own weak callbacks & dispose traits.
*/
template<typename K, typename V>
class StdMapTraits {
public:
// STL map & related:
typedef std::map<K, PersistentContainerValue> Impl;
typedef typename Impl::iterator Iterator;
static bool Empty(Impl* impl) { return impl->empty(); }
static size_t Size(Impl* impl) { return impl->size(); }
static void Swap(Impl& a, Impl& b) { std::swap(a, b); } // NOLINT
static Iterator Begin(Impl* impl) { return impl->begin(); }
static Iterator End(Impl* impl) { return impl->end(); }
static K Key(Iterator it) { return it->first; }
static PersistentContainerValue Value(Iterator it) { return it->second; }
static PersistentContainerValue Set(Impl* impl, K key,
PersistentContainerValue value) {
std::pair<Iterator, bool> res = impl->insert(std::make_pair(key, value));
PersistentContainerValue old_value = kPersistentContainerNotFound;
if (!res.second) {
old_value = res.first->second;
res.first->second = value;
}
return old_value;
}
static PersistentContainerValue Get(Impl* impl, K key) {
Iterator it = impl->find(key);
if (it == impl->end()) return kPersistentContainerNotFound;
return it->second;
}
static PersistentContainerValue Remove(Impl* impl, K key) {
Iterator it = impl->find(key);
if (it == impl->end()) return kPersistentContainerNotFound;
PersistentContainerValue value = it->second;
impl->erase(it);
return value;
}
};
/**
* A default trait implementation for PersistentValueMap, which inherits
* a std::map backing map from StdMapTraits and holds non-weak persistent
* objects.
*
* Users have to implement their own dispose trait.
*/
template<typename K, typename V>
class StrongMapTraits : public StdMapTraits<K, V> {
public:
// Weak callback & friends:
static const bool kIsWeak = false;
typedef typename StdMapTraits<K, V>::Impl Impl;
typedef void WeakCallbackDataType;
static WeakCallbackDataType* WeakCallbackParameter(
Impl* impl, const K& key, Local<V> value);
static Impl* ImplFromWeakCallbackData(
const WeakCallbackData<V, WeakCallbackDataType>& data);
static K KeyFromWeakCallbackData(
const WeakCallbackData<V, WeakCallbackDataType>& data);
static void DisposeCallbackData(WeakCallbackDataType* data);
};
/**
* A default trait implementation for PersistentValueMap, with a std::map
* backing map, non-weak persistents as values, and no special dispose
* handling. Can be used as-is.
*/
template<typename K, typename V>
class DefaultPersistentValueMapTraits : public StrongMapTraits<K, V> {
public:
typedef typename StrongMapTraits<K, V>::Impl Impl;
static void Dispose(Isolate* isolate, UniquePersistent<V> value,
Impl* impl, K key) { }
};
/**
* A map wrapper that allows using UniquePersistent as a mapped value.
* C++11 embedders don't need this class, as they can use UniquePersistent
* directly in std containers.
*
* The map relies on a backing map, whose type and accessors are described
* by the Traits class. The backing map will handle values of type
* PersistentContainerValue, with all conversion into and out of V8
* handles being transparently handled by this class.
*/
template<typename K, typename V, typename Traits>
class PersistentValueMap {
public:
V8_INLINE explicit PersistentValueMap(Isolate* isolate) : isolate_(isolate) {}
V8_INLINE ~PersistentValueMap() { Clear(); }
V8_INLINE Isolate* GetIsolate() { return isolate_; }
/**
* Return size of the map.
*/
V8_INLINE size_t Size() { return Traits::Size(&impl_); }
/**
* Return whether the map holds weak persistents.
*/
V8_INLINE bool IsWeak() { return Traits::kIsWeak; }
/**
* Get value stored in map.
*/
V8_INLINE Local<V> Get(const K& key) {
return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, key)));
}
/**
* Check whether a value is contained in the map.
*/
V8_INLINE bool Contains(const K& key) {
return Traits::Get(&impl_, key) != 0;
}
/**
* Get value stored in map and set it in returnValue.
* Return true if a value was found.
*/
V8_INLINE bool SetReturnValue(const K& key,
ReturnValue<Value>& returnValue) {
PersistentContainerValue value = Traits::Get(&impl_, key);
bool hasValue = value != 0;
if (hasValue) {
returnValue.SetInternal(
*reinterpret_cast<internal::Object**>(FromVal(value)));
}
return hasValue;
}
/**
* Call Isolate::SetReference with the given parent and the map value.
*/
V8_INLINE void SetReference(const K& key,
const Persistent<Object>& parent) {
GetIsolate()->SetReference(
reinterpret_cast<internal::Object**>(parent.val_),
reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))));
}
/**
* Put value into map. Depending on Traits::kIsWeak, the value will be held
* by the map strongly or weakly.
* Returns old value as UniquePersistent.
*/
UniquePersistent<V> Set(const K& key, Local<V> value) {
UniquePersistent<V> persistent(isolate_, value);
return SetUnique(key, &persistent);
}
/**
* Put value into map, like Set(const K&, Local<V>).
*/
UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
return SetUnique(key, &value);
}
/**
* Return value for key and remove it from the map.
*/
V8_INLINE UniquePersistent<V> Remove(const K& key) {
return Release(Traits::Remove(&impl_, key)).Pass();
}
/**
* Traverses the map repeatedly,
* in case side effects of disposal cause insertions.
**/
void Clear() {
typedef typename Traits::Iterator It;
HandleScope handle_scope(isolate_);
// TODO(dcarney): figure out if this swap and loop is necessary.
while (!Traits::Empty(&impl_)) {
typename Traits::Impl impl;
Traits::Swap(impl_, impl);
for (It i = Traits::Begin(&impl); i != Traits::End(&impl); ++i) {
Traits::Dispose(isolate_, Release(Traits::Value(i)).Pass(), &impl,
Traits::Key(i));
}
}
}
private:
PersistentValueMap(PersistentValueMap&);
void operator=(PersistentValueMap&);
/**
* Put the value into the map, and set the 'weak' callback when demanded
* by the Traits class.
*/
UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
if (Traits::kIsWeak) {
Local<V> value(Local<V>::New(isolate_, *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
Traits::WeakCallbackParameter(&impl_, key, value), WeakCallback);
}
PersistentContainerValue old_value =
Traits::Set(&impl_, key, ClearAndLeak(persistent));
return Release(old_value).Pass();
}
static void WeakCallback(
const WeakCallbackData<V, typename Traits::WeakCallbackDataType>& data) {
if (Traits::kIsWeak) {
typename Traits::Impl* impl = Traits::ImplFromWeakCallbackData(data);
K key = Traits::KeyFromWeakCallbackData(data);
PersistentContainerValue value = Traits::Remove(impl, key);
Traits::Dispose(data.GetIsolate(), Release(value).Pass(), impl, key);
}
}
V8_INLINE static V* FromVal(PersistentContainerValue v) {
return reinterpret_cast<V*>(v);
}
V8_INLINE static PersistentContainerValue ClearAndLeak(
UniquePersistent<V>* persistent) {
V* v = persistent->val_;
persistent->val_ = 0;
return reinterpret_cast<PersistentContainerValue>(v);
}
/**
* Return a container value as UniquePersistent and make sure the weak
* callback is properly disposed of. All remove functionality should go
* through this.
*/
V8_INLINE static UniquePersistent<V> Release(PersistentContainerValue v) {
UniquePersistent<V> p;
p.val_ = FromVal(v);
if (Traits::kIsWeak && !p.IsEmpty()) {
Traits::DisposeCallbackData(
p.template ClearWeak<typename Traits::WeakCallbackDataType>());
}
return p.Pass();
}
Isolate* isolate_;
typename Traits::Impl impl_;
};
/**
* A map that uses UniquePersistent as value and std::map as the backing
* implementation. Persistents are held non-weak.
*
* C++11 embedders don't need this class, as they can use
* UniquePersistent directly in std containers.
*/
template<typename K, typename V,
typename Traits = DefaultPersistentValueMapTraits<K, V> >
class StdPersistentValueMap : public PersistentValueMap<K, V, Traits> {
public:
explicit StdPersistentValueMap(Isolate* isolate)
: PersistentValueMap<K, V, Traits>(isolate) {}
};
/**
* Empty default implementations for StrongTraits methods.
*
* These should not be necessary, since they're only used in code that
* is surrounded by if(Traits::kIsWeak), which for StrongMapTraits is
* compile-time false. Most compilers can live without them; however
* the compiler we use from 64-bit Win differs.
*
* TODO(vogelheim): Remove these once they're no longer necessary.
*/
template<typename K, typename V>
typename StrongMapTraits<K, V>::WeakCallbackDataType*
StrongMapTraits<K, V>::WeakCallbackParameter(
Impl* impl, const K& key, Local<V> value) {
return NULL;
}
template<typename K, typename V>
typename StrongMapTraits<K, V>::Impl*
StrongMapTraits<K, V>::ImplFromWeakCallbackData(
const WeakCallbackData<V, WeakCallbackDataType>& data) {
return NULL;
}
template<typename K, typename V>
K StrongMapTraits<K, V>::KeyFromWeakCallbackData(
const WeakCallbackData<V, WeakCallbackDataType>& data) {
return K();
}
template<typename K, typename V>
void StrongMapTraits<K, V>::DisposeCallbackData(WeakCallbackDataType* data) {
}
} // namespace v8
#endif // V8_UTIL_H_
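
For illustration (not part of this diff), a minimal sketch of how an embedder might use the new container with its default (strong, std::map-backed) traits; the key string and function name are assumptions:

    #include <string>
    #include "v8.h"
    #include "v8-util.h"

    void RememberObject(v8::Isolate* isolate,
                        v8::StdPersistentValueMap<std::string, v8::Object>* map,
                        v8::Local<v8::Object> obj) {
      // Set() wraps obj in a UniquePersistent<Object> owned by the map.
      map->Set("answer", obj);
      if (map->Contains("answer")) {
        // Get() returns a fresh Local in the current HandleScope.
        v8::Local<v8::Object> again = map->Get("answer");
        (void)again;
      }
    }
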

deps/v8/include/v8.h  (573 lines changed)

@@ -108,6 +108,7 @@ class ObjectTemplate;
class Platform;
class Primitive;
class RawOperationDescriptor;
class Script;
class Signature;
class StackFrame;
class StackTrace;
@@ -127,10 +128,12 @@ template<class T> class PersistentBase;
template<class T,
class M = NonCopyablePersistentTraits<T> > class Persistent;
template<class T> class UniquePersistent;
template<class K, class V, class T> class PersistentValueMap;
template<class T, class P> class WeakCallbackObject;
class FunctionTemplate;
class ObjectTemplate;
class Data;
template<typename T> class FunctionCallbackInfo;
template<typename T> class PropertyCallbackInfo;
class StackTrace;
class StackFrame;
@@ -140,6 +143,7 @@ class ObjectOperationDescriptor;
class RawOperationDescriptor;
class CallHandlerHelper;
class EscapableHandleScope;
template<typename T> class ReturnValue;
namespace internal {
class Arguments;
@@ -412,6 +416,7 @@ template <class T> class Local : public Handle<T> {
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
friend class EscapableHandleScope;
template<class F1, class F2, class F3> friend class PersistentValueMap;
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
};
@@ -527,7 +532,11 @@ template <class T> class PersistentBase {
P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
V8_INLINE void ClearWeak();
template<typename P>
V8_INLINE P* ClearWeak();
// TODO(dcarney): remove this.
V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -576,6 +585,8 @@ template <class T> class PersistentBase {
template<class F> friend class UniquePersistent;
template<class F> friend class PersistentBase;
template<class F> friend class ReturnValue;
template<class F1, class F2, class F3> friend class PersistentValueMap;
friend class Object;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
PersistentBase(PersistentBase& other); // NOLINT
@@ -743,7 +754,7 @@ class UniquePersistent : public PersistentBase<T> {
};
public:
/**
/**
* A UniquePersistent with no storage cell.
*/
V8_INLINE UniquePersistent() : PersistentBase<T>(0) { }
@@ -781,6 +792,7 @@ class UniquePersistent : public PersistentBase<T> {
template<class S>
V8_INLINE UniquePersistent& operator=(UniquePersistent<S> rhs) {
TYPE_CHECK(T, S);
this->Reset();
this->val_ = rhs.val_;
rhs.val_ = 0;
return *this;
@@ -998,114 +1010,188 @@ class ScriptOrigin {
/**
* A compiled JavaScript script.
* A compiled JavaScript script, not yet tied to a Context.
*/
class V8_EXPORT Script {
class V8_EXPORT UnboundScript {
public:
/**
* Compiles the specified script (context-independent).
*
* \param source Script source code.
* \param origin Script origin, owned by caller, no references are kept
* when New() returns
* \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
* using pre_data speeds compilation if it's done multiple times.
* Owned by caller, no references are kept when New() returns.
* \param script_data Arbitrary data associated with script. Using
* this has same effect as calling SetData(), but allows data to be
* available to compile event handlers.
* \return Compiled script object (context independent; when run it
* will use the currently entered context).
* Binds the script to the currently entered context.
*/
static Local<Script> New(Handle<String> source,
ScriptOrigin* origin = NULL,
ScriptData* pre_data = NULL,
Handle<String> script_data = Handle<String>());
Local<Script> BindToCurrentContext();
int GetId();
Handle<Value> GetScriptName();
/**
* Compiles the specified script using the specified file name
* object (typically a string) as the script's origin.
*
* \param source Script source code.
* \param file_name file name object (typically a string) to be used
* as the script's origin.
* \return Compiled script object (context independent; when run it
* will use the currently entered context).
* Returns the zero-based line number of the code_pos location in the script.
* -1 will be returned if no information is available.
*/
static Local<Script> New(Handle<String> source,
Handle<Value> file_name);
int GetLineNumber(int code_pos);
static const int kNoScriptId = 0;
};
/**
* A compiled JavaScript script, tied to a Context which was active when the
* script was compiled.
*/
class V8_EXPORT Script {
public:
/**
* Compiles the specified script (bound to current context).
*
* \param source Script source code.
* \param origin Script origin, owned by caller, no references are kept
* when Compile() returns
* \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
* using pre_data speeds compilation if it's done multiple times.
* Owned by caller, no references are kept when Compile() returns.
* \param script_data Arbitrary data associated with script. Using
* this has same effect as calling SetData(), but makes data available
* earlier (i.e. to compile event handlers).
* \return Compiled script object, bound to the context that was active
* when this function was called. When run it will always use this
* context.
* A shorthand for ScriptCompiler::Compile().
* The ScriptData parameter will be deprecated; use ScriptCompiler::Compile if
* you want to pass it.
*/
static Local<Script> Compile(Handle<String> source,
ScriptOrigin* origin = NULL,
ScriptData* pre_data = NULL,
Handle<String> script_data = Handle<String>());
ScriptData* script_data = NULL);
/**
* Compiles the specified script using the specified file name
* object (typically a string) as the script's origin.
*
* \param source Script source code.
* \param file_name File name to use as script's origin
* \param script_data Arbitrary data associated with script. Using
* this has same effect as calling SetData(), but makes data available
* earlier (i.e. to compile event handlers).
* \return Compiled script object, bound to the context that was active
* when this function was called. When run it will always use this
* context.
*/
// To be deprecated; use the Compile above.
static Local<Script> Compile(Handle<String> source,
Handle<Value> file_name,
Handle<String> script_data = Handle<String>());
Handle<String> file_name);
/**
* Runs the script returning the resulting value. If the script is
* context independent (created using ::New) it will be run in the
* currently entered context. If it is context specific (created
* using ::Compile) it will be run in the context in which it was
* compiled.
* Runs the script returning the resulting value. It will be run in the
* context in which it was created (ScriptCompiler::Compile() or
* UnboundScript::BindToCurrentContext()).
*/
Local<Value> Run();
/**
* Returns the script id.
* Returns the corresponding context-unbound script.
*/
int GetId();
Local<UnboundScript> GetUnboundScript();
// To be deprecated; use GetUnboundScript()->GetId();
int GetId() {
return GetUnboundScript()->GetId();
}
// Use GetUnboundScript()->GetScriptName();
V8_DEPRECATED("Use GetUnboundScript()->GetScriptName()",
Handle<Value> GetScriptName()) {
return GetUnboundScript()->GetScriptName();
}
/**
* Associate an additional data object with the script. This is mainly used
* with the debugger as this data object is only available through the
* debugger API.
* Returns the zero-based line number of the code_pos location in the script.
* -1 will be returned if no information is available.
*/
void SetData(Handle<String> data);
V8_DEPRECATED("Use GetUnboundScript()->GetLineNumber()",
int GetLineNumber(int code_pos)) {
return GetUnboundScript()->GetLineNumber(code_pos);
}
};
/**
* For compiling scripts.
*/
class V8_EXPORT ScriptCompiler {
public:
/**
* Returns the name value of one Script.
* Compilation data that the embedder can cache and pass back to speed up
* future compilations. The data is produced if the CompilerOptions passed to
* the compilation functions in ScriptCompiler contains produce_data_to_cache
* = true. The data to cache can then be retrieved from
* UnboundScript.
*/
struct V8_EXPORT CachedData {
enum BufferPolicy {
BufferNotOwned,
BufferOwned
};
CachedData() : data(NULL), length(0), buffer_policy(BufferNotOwned) {}
// If buffer_policy is BufferNotOwned, the caller keeps the ownership of
// data and guarantees that it stays alive until the CachedData object is
// destroyed. If the policy is BufferOwned, the given data will be deleted
// (with delete[]) when the CachedData object is destroyed.
CachedData(const uint8_t* data, int length,
BufferPolicy buffer_policy = BufferNotOwned);
~CachedData();
// TODO(marja): Async compilation; add constructors which take a callback
// which will be called when V8 no longer needs the data.
const uint8_t* data;
int length;
BufferPolicy buffer_policy;
private:
// Prevent copying. Not implemented.
CachedData(const CachedData&);
CachedData& operator=(const CachedData&);
};
/**
* Source code which can then be compiled to an UnboundScript or
* BoundScript.
*/
Handle<Value> GetScriptName();
class Source {
public:
// Source takes ownership of CachedData.
V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
CachedData* cached_data = NULL);
V8_INLINE Source(Local<String> source_string,
CachedData* cached_data = NULL);
V8_INLINE ~Source();
// Ownership of the CachedData or its buffers is *not* transferred to the
// caller. The CachedData object is alive as long as the Source object is
// alive.
V8_INLINE const CachedData* GetCachedData() const;
private:
friend class ScriptCompiler;
// Prevent copying. Not implemented.
Source(const Source&);
Source& operator=(const Source&);
Local<String> source_string;
// Origin information
Handle<Value> resource_name;
Handle<Integer> resource_line_offset;
Handle<Integer> resource_column_offset;
Handle<Boolean> resource_is_shared_cross_origin;
// Cached data from previous compilation (if any), or generated during
// compilation (if the generate_cached_data flag is passed to
// ScriptCompiler).
CachedData* cached_data;
};
enum CompileOptions {
kNoCompileOptions,
kProduceDataToCache = 1 << 0
};
/**
* Returns the zero-based line number of the code_pos location in the script.
* -1 will be returned if no information is available.
* Compiles the specified script (context-independent).
*
* \param source Script source code.
* \return Compiled script object (context independent; for running it must be
* bound to a context).
*/
int GetLineNumber(int code_pos);
static Local<UnboundScript> CompileUnbound(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
static const int kNoScriptId = 0;
/**
* Compiles the specified script (bound to current context).
*
* \param source Script source code.
* \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
* using pre_data speeds compilation if it's done multiple times.
* Owned by caller, no references are kept when this function returns.
* \return Compiled script object, bound to the context that was active
* when this function was called. When run it will always use this
* context.
*/
static Local<Script> Compile(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
};
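
For illustration (not part of this diff), a sketch of the new two-step compile flow that replaces Script::New; names are assumptions and error checks are omitted:

    #include "v8.h"

    v8::Local<v8::Value> CompileAndRun(v8::Isolate* isolate,
                                       v8::Local<v8::String> code) {
      // Context-independent compilation (the old Script::New path).
      v8::ScriptCompiler::Source source(code);
      v8::Local<v8::UnboundScript> unbound =
          v8::ScriptCompiler::CompileUnbound(isolate, &source);
      // Bind to the currently entered context, then run in it.
      v8::Local<v8::Script> script = unbound->BindToCurrentContext();
      return script->Run();
    }

Passing kProduceDataToCache instead of the default options asks V8 to fill in the Source's CachedData, which the embedder can persist and supply to later compilations.
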
@@ -1430,6 +1516,11 @@ class V8_EXPORT Value : public Data {
*/
bool IsRegExp() const;
/**
* Returns true if this value is a Promise.
* This is an experimental feature.
*/
bool IsPromise() const;
/**
* Returns true if this value is an ArrayBuffer.
@@ -1911,9 +2002,20 @@ class V8_EXPORT Symbol : public Primitive {
// Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const;
// Create a symbol. If data is not NULL, it will be used as a print name.
// Create a symbol. If name is not empty, it will be used as the description.
static Local<Symbol> New(
Isolate *isolate, const char* data = NULL, int length = -1);
Isolate *isolate, Local<String> name = Local<String>());
// Access global symbol registry.
// Note that symbols created this way are never collected, so
// they should only be used for statically fixed properties.
// Also, there is only one global name space for the names used as keys.
// To minimize the potential for clashes, use qualified names as keys.
static Local<Symbol> For(Isolate *isolate, Local<String> name);
// Retrieve a global symbol. Similar to |For|, but using a separate
// registry that is not accessible by (and cannot clash with) JavaScript code.
static Local<Symbol> ForApi(Isolate *isolate, Local<String> name);
V8_INLINE static Symbol* Cast(v8::Value* obj);
private:
@@ -1932,9 +2034,18 @@ class V8_EXPORT Private : public Data {
// Returns the print name string of the private symbol, or undefined if none.
Local<Value> Name() const;
// Create a private symbol. If data is not NULL, it will be the print name.
// Create a private symbol. If name is not empty, it will be the description.
static Local<Private> New(
Isolate *isolate, const char* data = NULL, int length = -1);
Isolate *isolate, Local<String> name = Local<String>());
// Retrieve a global private symbol. If a symbol with this name has not
// been retrieved in the same isolate before, it is created.
// Note that private symbols created this way are never collected, so
// they should only be used for statically fixed properties.
// Also, there is only one global name space for the names used as keys.
// To minimize the potential for clashes, use qualified names as keys,
// e.g., "Class#property".
static Local<Private> ForApi(Isolate *isolate, Local<String> name);
private:
Private();
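
For illustration (not part of this diff), a sketch of the two per-isolate registries added here; the qualified key "mylib.Widget#state" is an assumption:

    #include "v8.h"

    void UseSymbolRegistries(v8::Isolate* isolate) {
      v8::Local<v8::String> key =
          v8::String::NewFromUtf8(isolate, "mylib.Widget#state");
      // Global registry: the same name always yields the same symbol, and it
      // is shared with JavaScript code.
      v8::Local<v8::Symbol> shared = v8::Symbol::For(isolate, key);
      // API-only registry: not reachable from (and cannot clash with) script.
      v8::Local<v8::Private> hidden = v8::Private::ForApi(isolate, key);
      (void)shared;
      (void)hidden;
    }
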
@@ -2118,6 +2229,12 @@ class V8_EXPORT Object : public Value {
PropertyAttribute attribute = None,
AccessControl settings = DEFAULT);
void SetAccessorProperty(Local<String> name,
Local<Function> getter,
Handle<Function> setter = Handle<Function>(),
PropertyAttribute attribute = None,
AccessControl settings = DEFAULT);
/**
* Functionality for private properties.
* This is an experimental feature, use at your own risk.
@@ -2185,6 +2302,12 @@ class V8_EXPORT Object : public Value {
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
/** Same as above, but works for Persistents */
V8_INLINE static int InternalFieldCount(
const PersistentBase<Object>& object) {
return object.val_->InternalFieldCount();
}
/** Gets the value from an internal field. */
V8_INLINE Local<Value> GetInternalField(int index);
@ -2198,6 +2321,12 @@ class V8_EXPORT Object : public Value {
*/
V8_INLINE void* GetAlignedPointerFromInternalField(int index);
/** Same as above, but works for Persistents */
V8_INLINE static void* GetAlignedPointerFromInternalField(
const PersistentBase<Object>& object, int index) {
return object.val_->GetAlignedPointerFromInternalField(index);
}
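// The static overloads above let, e.g., weak-handle callbacks inspect
// internal fields without first materializing a Local. A sketch, assuming
// persistent_obj is a Persistent<Object> with at least one field:
int fields = v8::Object::InternalFieldCount(persistent_obj);
void* native =
    v8::Object::GetAlignedPointerFromInternalField(persistent_obj, 0);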
/**
* Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
* a field, GetAlignedPointerFromInternalField must be used, everything else
@ -2389,6 +2518,8 @@ class ReturnValue {
template<class F> friend class ReturnValue;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
template<class F, class G, class H> friend class PersistentValueMap;
V8_INLINE void SetInternal(internal::Object* value) { *value_ = value; }
V8_INLINE internal::Object* GetDefaultValue();
V8_INLINE explicit ReturnValue(internal::Object** slot);
internal::Object** value_;
@ -2542,6 +2673,56 @@ class V8_EXPORT Function : public Object {
static void CheckCast(Value* obj);
};
/**
* An instance of the built-in Promise constructor (ES6 draft).
* This API is experimental. Only works with the --harmony flag.
*/
class V8_EXPORT Promise : public Object {
public:
class V8_EXPORT Resolver : public Object {
public:
/**
* Create a new resolver, along with an associated promise in pending state.
*/
static Local<Resolver> New(Isolate* isolate);
/**
* Extract the associated promise.
*/
Local<Promise> GetPromise();
/**
* Resolve/reject the associated promise with a given value.
* Ignored if the promise is no longer pending.
*/
void Resolve(Handle<Value> value);
void Reject(Handle<Value> value);
V8_INLINE static Resolver* Cast(Value* obj);
private:
Resolver();
static void CheckCast(Value* obj);
};
/**
* Register a resolution/rejection handler with a promise.
* The handler is given the respective resolution/rejection value as
* an argument. If the promise is already resolved/rejected, the handler is
* invoked at the end of the turn.
*/
Local<Promise> Chain(Handle<Function> handler);
Local<Promise> Catch(Handle<Function> handler);
V8_INLINE static Promise* Cast(Value* obj);
private:
Promise();
static void CheckCast(Value* obj);
};
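// Hedged sketch of the experimental API above (requires --harmony);
// on_fulfilled and on_rejected are assumed Handle<Function> values.
v8::Local<v8::Promise::Resolver> resolver =
    v8::Promise::Resolver::New(isolate);
v8::Local<v8::Promise> promise = resolver->GetPromise();
promise->Chain(on_fulfilled);  // receives the resolution value
promise->Catch(on_rejected);   // receives the rejection value
resolver->Resolve(v8::Integer::New(isolate, 42));  // handlers fire end of turn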
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by the embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
@ -3805,6 +3986,9 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
// --- Tracing ---
typedef void (*LogEventCallback)(const char* name, int event);
/**
* Create new error objects by calling the corresponding error object
@ -3959,6 +4143,46 @@ class V8_EXPORT Isolate {
Scope& operator=(const Scope&);
};
/**
* Assert that no JavaScript code is invoked.
*/
class DisallowJavascriptExecutionScope {
public:
enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE };
DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
~DisallowJavascriptExecutionScope();
private:
bool on_failure_;
void* internal_;
// Prevent copying of Scope objects.
DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&);
DisallowJavascriptExecutionScope& operator=(
const DisallowJavascriptExecutionScope&);
};
/**
* Introduce an exception to DisallowJavascriptExecutionScope.
*/
class AllowJavascriptExecutionScope {
public:
explicit AllowJavascriptExecutionScope(Isolate* isolate);
~AllowJavascriptExecutionScope();
private:
void* internal_throws_;
void* internal_assert_;
// Prevent copying of Scope objects.
AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&);
AllowJavascriptExecutionScope& operator=(
const AllowJavascriptExecutionScope&);
};
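// Hedged sketch of how the two scopes above nest:
{
  v8::Isolate::DisallowJavascriptExecutionScope no_js(
      isolate,
      v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
  // Entering JS here throws rather than crashing the process.
  {
    v8::Isolate::AllowJavascriptExecutionScope allow_js(isolate);
    // JS execution is permitted again inside this nested scope.
  }
}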
/**
* Types of garbage collections that can be requested via
* RequestGarbageCollectionForTesting.
@ -4127,13 +4351,12 @@ class V8_EXPORT Isolate {
/**
* Enables the host application to receive a notification before a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects. It is possible
* to specify the GCType filter for your callback. But it is not possible to
* register the same callback function two times with different
* GCType filters.
* garbage collection. Allocations are allowed in the callback function,
* but the callback is not re-entrant: if an allocation inside it triggers
* a garbage collection, the callback won't be called again.
* It is possible to specify the GCType filter for your callback, but it is
* not possible to register the same callback function two times with
* different GCType filters.
*/
void AddGCPrologueCallback(
GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
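// A sketch of a prologue callback under the revised contract above; the
// signature is assumed from the GCPrologueCallback typedef of this era.
static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                         v8::GCCallbackFlags flags) {
  // May allocate, but is not re-invoked for a collection it triggers.
}
// Registration, filtered to scavenges only:
isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeScavenge);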
@ -4146,13 +4369,12 @@ class V8_EXPORT Isolate {
/**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects. It is possible
* to specify the GCType filter for your callback. But it is not possible to
* register the same callback function two times with different
* GCType filters.
* garbage collection. Allocations are allowed in the callback function,
* but the callback is not re-entrant: if an allocation inside it triggers
* a garbage collection, the callback won't be called again.
* It is possible to specify the GCType filter for your callback, but it is
* not possible to register the same callback function two times with
* different GCType filters.
*/
void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
@ -4191,7 +4413,14 @@ class V8_EXPORT Isolate {
*/
void RequestGarbageCollectionForTesting(GarbageCollectionType type);
/**
* Set the callback to invoke for logging events.
*/
void SetEventLogger(LogEventCallback that);
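// Hedged sketch wiring up the LogEventCallback typedef declared earlier
// in this header; the fprintf sink is purely illustrative (assumes <cstdio>).
static void LogV8Event(const char* name, int event) {
  fprintf(stderr, "v8 event: %s (%d)\n", name, event);
}
isolate->SetEventLogger(LogV8Event);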
private:
template<class K, class V, class Traits> friend class PersistentValueMap;
Isolate();
Isolate(const Isolate&);
~Isolate();
@ -4410,20 +4639,6 @@ class V8_EXPORT V8 {
*/
static void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator);
/**
* Ignore out-of-memory exceptions.
*
* V8 running out of memory is treated as a fatal error by default.
* This means that the fatal error handler is called and that V8 is
* terminated.
*
* IgnoreOutOfMemoryException can be used to not treat an
* out-of-memory situation as a fatal error. This way, the contexts
* that did not cause the out of memory problem might be able to
* continue execution.
*/
static void IgnoreOutOfMemoryException();
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
@ -4578,6 +4793,22 @@ class V8_EXPORT V8 {
*/
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
/**
* Experimental: Runs the Microtask Work Queue until empty
*/
static void RunMicrotasks(Isolate* isolate);
/**
* Experimental: Enqueues the callback onto the Microtask Work Queue
*/
static void EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask);
/**
* Experimental: Controls whether the Microtask Work Queue is automatically
* run when the script call depth decrements to zero.
*/
static void SetAutorunMicrotasks(Isolate *source, bool autorun);
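// Hedged sketch of manual microtask control with the three calls above;
// microtask_fn is assumed to be a Handle<Function>.
v8::V8::SetAutorunMicrotasks(isolate, false);     // take manual control
v8::V8::EnqueueMicrotask(isolate, microtask_fn);
v8::V8::RunMicrotasks(isolate);                   // drain the queue explicitly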
/**
* Initializes from snapshot if possible. Otherwise, attempts to
* initialize from scratch. This function is called implicitly if
@ -4744,8 +4975,11 @@ class V8_EXPORT V8 {
/**
* Initialize the ICU library bundled with V8. The embedder should only
* invoke this method when using the bundled ICU. Returns true on success.
*
* If V8 was compiled with the ICU data in an external file, the location
* of the data file has to be provided.
*/
static bool InitializeICU();
static bool InitializeICU(const char* icu_data_file = NULL);
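// Sketch of both initialization modes; the external data path is
// hypothetical and depends on how the embedder built V8.
bool ok = v8::V8::InitializeICU();                          // bundled ICU data
bool ok_ext = v8::V8::InitializeICU("/opt/v8/icudtl.dat");  // external data file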
/**
* Sets the v8::Platform to use. This should be invoked before V8 is
@ -4770,7 +5004,7 @@ class V8_EXPORT V8 {
static void MakeWeak(internal::Object** global_handle,
void* data,
WeakCallback weak_callback);
static void ClearWeak(internal::Object** global_handle);
static void* ClearWeak(internal::Object** global_handle);
static void Eternalize(Isolate* isolate,
Value* handle,
int* index);
@ -5013,7 +5247,7 @@ class V8_EXPORT Context {
void Exit();
/** Returns true if the context has experienced an out of memory situation. */
bool HasOutOfMemoryException();
bool HasOutOfMemoryException() { return false; }
/** Returns an isolate associated with a current context. */
v8::Isolate* GetIsolate();
@ -5227,67 +5461,6 @@ class V8_EXPORT Locker {
};
/**
* A struct for exporting HeapStats data from V8, using "push" model.
*/
struct HeapStatsUpdate;
/**
* An interface for exporting data from V8, using "push" model.
*/
class V8_EXPORT OutputStream { // NOLINT
public:
enum OutputEncoding {
kAscii = 0 // 7-bit ASCII.
};
enum WriteResult {
kContinue = 0,
kAbort = 1
};
virtual ~OutputStream() {}
/** Notify about the end of stream. */
virtual void EndOfStream() = 0;
/** Get preferred output chunk size. Called only once. */
virtual int GetChunkSize() { return 1024; }
/** Get preferred output encoding. Called only once. */
virtual OutputEncoding GetOutputEncoding() { return kAscii; }
/**
* Writes the next chunk of snapshot data into the stream. Writing
* can be stopped by returning kAbort as function result. EndOfStream
* will not be called in case writing was aborted.
*/
virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
/**
* Writes the next chunk of heap stats data into the stream. Writing
* can be stopped by returning kAbort as function result. EndOfStream
* will not be called in case writing was aborted.
*/
virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
return kAbort;
};
};
/**
* An interface for reporting progress and controlling long-running
* activities.
*/
class V8_EXPORT ActivityControl { // NOLINT
public:
enum ControlOption {
kContinue = 0,
kAbort = 1
};
virtual ~ActivityControl() {}
/**
* Notify about current progress. The activity can be stopped by
* returning kAbort as the callback result.
*/
virtual ControlOption ReportProgressValue(int done, int total) = 0;
};
// --- Implementation ---
@ -5398,7 +5571,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptyStringRootIndex = 147;
static const int kEmptyStringRootIndex = 154;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@ -5683,8 +5856,10 @@ void PersistentBase<T>::SetWeak(
template <class T>
void PersistentBase<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_));
template<typename P>
P* PersistentBase<T>::ClearWeak() {
return reinterpret_cast<P*>(
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
}
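// The templated ClearWeak now hands the SetWeak parameter back to the
// embedder. A hedged sketch, with WrapperState as a hypothetical type
// previously registered via SetWeak:
WrapperState* state = persistent.ClearWeak<WrapperState>();
delete state;  // the embedder owns the parameter again; the handle is strong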
@ -5925,6 +6100,32 @@ Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
}
ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
CachedData* data)
: source_string(string),
resource_name(origin.ResourceName()),
resource_line_offset(origin.ResourceLineOffset()),
resource_column_offset(origin.ResourceColumnOffset()),
resource_is_shared_cross_origin(origin.ResourceIsSharedCrossOrigin()),
cached_data(data) {}
ScriptCompiler::Source::Source(Local<String> string,
CachedData* data)
: source_string(string), cached_data(data) {}
ScriptCompiler::Source::~Source() {
delete cached_data;
}
const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
const {
return cached_data;
}
Handle<Boolean> Boolean::New(Isolate* isolate, bool value) {
return value ? True(isolate) : False(isolate);
}
@ -6171,6 +6372,22 @@ Array* Array::Cast(v8::Value* value) {
}
Promise* Promise::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
#endif
return static_cast<Promise*>(value);
}
Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
#endif
return static_cast<Promise::Resolver*>(value);
}
ArrayBuffer* ArrayBuffer::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);

3
deps/v8/samples/lineprocessor.cc

@ -238,7 +238,8 @@ int RunMain(int argc, char* argv[]) {
{
// Compile script in try/catch context.
v8::TryCatch try_catch;
script = v8::Script::Compile(script_source, script_name);
v8::ScriptOrigin origin(script_name);
script = v8::Script::Compile(script_source, &origin);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions)

3
deps/v8/samples/shell.cc

@ -304,7 +304,8 @@ bool ExecuteString(v8::Isolate* isolate,
bool report_exceptions) {
v8::HandleScope handle_scope(isolate);
v8::TryCatch try_catch;
v8::Handle<v8::Script> script = v8::Script::Compile(source, name);
v8::ScriptOrigin origin(name);
v8::Handle<v8::Script> script = v8::Script::Compile(source, &origin);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions)

41
deps/v8/src/accessors.cc

@ -119,9 +119,7 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
CheckForName(name, isolate->heap()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
JSTypedArray::kByteOffsetOffset, object_offset) ||
CheckForName(name, isolate->heap()->buffer_string(),
JSTypedArray::kBufferOffset, object_offset);
JSTypedArray::kByteOffsetOffset, object_offset);
case JS_ARRAY_BUFFER_TYPE:
return
CheckForName(name, isolate->heap()->byte_length_string(),
@ -131,9 +129,7 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
CheckForName(name, isolate->heap()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
JSDataView::kByteOffsetOffset, object_offset) ||
CheckForName(name, isolate->heap()->buffer_string(),
JSDataView::kBufferOffset, object_offset);
JSDataView::kByteOffsetOffset, object_offset);
default:
return false;
}
@ -213,7 +209,9 @@ MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
return array_handle->SetElementsLength(*uint32_v);
Handle<Object> result = JSArray::SetElementsLength(array_handle, uint32_v);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@ -350,26 +348,6 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
};
//
// Accessors::ScriptData
//
MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
Object* object,
void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->data();
}
const AccessorDescriptor Accessors::ScriptData = {
ScriptGetData,
IllegalSetter,
0
};
//
// Accessors::ScriptType
//
@ -620,10 +598,7 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
}
Handle<Object> old_value;
bool is_observed =
FLAG_harmony_observation &&
*function == *object &&
function->map()->is_observed();
bool is_observed = *function == *object && function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
@ -911,10 +886,10 @@ MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
// Censor if the caller is not a classic mode function.
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
if (!caller->shared()->is_classic_mode()) {
if (caller->shared()->strict_mode() == STRICT) {
return isolate->heap()->null_value();
}

2
deps/v8/src/accessors.h

@ -49,7 +49,6 @@ namespace internal {
V(ScriptId) \
V(ScriptLineOffset) \
V(ScriptColumnOffset) \
V(ScriptData) \
V(ScriptType) \
V(ScriptCompilationType) \
V(ScriptLineEnds) \
@ -128,7 +127,6 @@ class Accessors : public AllStatic {
static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
Object* object,

173
deps/v8/src/allocation-tracker.cc

@ -36,9 +36,9 @@ namespace v8 {
namespace internal {
AllocationTraceNode::AllocationTraceNode(
AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
AllocationTraceTree* tree, unsigned function_info_index)
: tree_(tree),
function_id_(shared_function_info_id),
function_info_index_(function_info_index),
total_size_(0),
allocation_count_(0),
id_(tree->next_node_id()) {
@ -50,19 +50,21 @@ AllocationTraceNode::~AllocationTraceNode() {
}
AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
AllocationTraceNode* AllocationTraceNode::FindChild(
unsigned function_info_index) {
for (int i = 0; i < children_.length(); i++) {
AllocationTraceNode* node = children_[i];
if (node->function_id() == id) return node;
if (node->function_info_index() == function_info_index) return node;
}
return NULL;
}
AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
AllocationTraceNode* child = FindChild(id);
AllocationTraceNode* AllocationTraceNode::FindOrAddChild(
unsigned function_info_index) {
AllocationTraceNode* child = FindChild(function_info_index);
if (child == NULL) {
child = new AllocationTraceNode(tree_, id);
child = new AllocationTraceNode(tree_, function_info_index);
children_.Add(child);
}
return child;
@ -78,17 +80,11 @@ void AllocationTraceNode::AddAllocation(unsigned size) {
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
if (tracker != NULL) {
const char* name = "<unknown function>";
if (function_id_ != 0) {
AllocationTracker::FunctionInfo* info =
tracker->GetFunctionInfo(function_id_);
if (info != NULL) {
name = info->name;
}
}
OS::Print("%s #%u", name, id_);
AllocationTracker::FunctionInfo* info =
tracker->function_info_list()[function_info_index_];
OS::Print("%s #%u", info->name, id_);
} else {
OS::Print("%u #%u", function_id_, id_);
OS::Print("%u #%u", function_info_index_, id_);
}
OS::Print("\n");
indent += 2;
@ -109,9 +105,9 @@ AllocationTraceTree::~AllocationTraceTree() {
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
const Vector<SnapshotObjectId>& path) {
const Vector<unsigned>& path) {
AllocationTraceNode* node = root();
for (SnapshotObjectId* entry = path.start() + path.length() - 1;
for (unsigned* entry = path.start() + path.length() - 1;
entry != path.start() - 1;
--entry) {
node = node->FindOrAddChild(*entry);
@ -126,6 +122,7 @@ void AllocationTraceTree::Print(AllocationTracker* tracker) {
root()->Print(0, tracker);
}
void AllocationTracker::DeleteUnresolvedLocation(
UnresolvedLocation** location) {
delete *location;
@ -134,6 +131,7 @@ void AllocationTracker::DeleteUnresolvedLocation(
AllocationTracker::FunctionInfo::FunctionInfo()
: name(""),
function_id(0),
script_name(""),
script_id(0),
line(-1),
@ -141,26 +139,103 @@ AllocationTracker::FunctionInfo::FunctionInfo()
}
void AddressToTraceMap::AddRange(Address start, int size,
unsigned trace_node_id) {
Address end = start + size;
RemoveRange(start, end);
RangeStack new_range(start, trace_node_id);
ranges_.insert(RangeMap::value_type(end, new_range));
}
unsigned AddressToTraceMap::GetTraceNodeId(Address addr) {
RangeMap::const_iterator it = ranges_.upper_bound(addr);
if (it == ranges_.end()) return 0;
if (it->second.start <= addr) {
return it->second.trace_node_id;
}
return 0;
}
void AddressToTraceMap::MoveObject(Address from, Address to, int size) {
unsigned trace_node_id = GetTraceNodeId(from);
if (trace_node_id == 0) return;
RemoveRange(from, from + size);
AddRange(to, size, trace_node_id);
}
void AddressToTraceMap::Clear() {
ranges_.clear();
}
void AddressToTraceMap::Print() {
PrintF("[AddressToTraceMap (%" V8PRIuPTR "): \n", ranges_.size());
for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
PrintF("[%p - %p] => %u\n", it->second.start, it->first,
it->second.trace_node_id);
}
PrintF("]\n");
}
void AddressToTraceMap::RemoveRange(Address start, Address end) {
RangeMap::iterator it = ranges_.upper_bound(start);
if (it == ranges_.end()) return;
RangeStack prev_range(0, 0);
RangeMap::iterator to_remove_begin = it;
if (it->second.start < start) {
prev_range = it->second;
}
do {
if (it->first > end) {
if (it->second.start < end) {
it->second.start = end;
}
break;
}
++it;
}
while (it != ranges_.end());
ranges_.erase(to_remove_begin, it);
if (prev_range.start != 0) {
ranges_.insert(RangeMap::value_type(start, prev_range));
}
}
static bool AddressesMatch(void* key1, void* key2) {
return key1 == key2;
}
void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
delete *info;
}
AllocationTracker::AllocationTracker(
HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
id_to_function_info_(AddressesMatch) {
id_to_function_info_index_(AddressesMatch),
info_index_for_other_state_(0) {
FunctionInfo* info = new FunctionInfo();
info->name = "(root)";
function_info_list_.Add(info);
}
AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
for (HashMap::Entry* p = id_to_function_info_.Start();
p != NULL;
p = id_to_function_info_.Next(p)) {
delete reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
}
function_info_list_.Iterate(&DeleteFunctionInfo);
}
@ -193,13 +268,20 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
SharedFunctionInfo* shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindOrAddEntry(
shared->address(), shared->Size(), false);
allocation_trace_buffer_[length++] = id;
AddFunctionInfo(shared, id);
allocation_trace_buffer_[length++] = AddFunctionInfo(shared, id);
it.Advance();
}
if (length == 0) {
unsigned index = functionInfoIndexForVMState(isolate->current_vm_state());
if (index != 0) {
allocation_trace_buffer_[length++] = index;
}
}
AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
Vector<unsigned>(allocation_trace_buffer_, length));
top_node->AddAllocation(size);
address_to_trace_.AddRange(addr, size, top_node->id());
}
@ -209,24 +291,14 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
}
AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
SnapshotObjectId id) {
HashMap::Entry* entry = id_to_function_info_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
if (entry == NULL) {
return NULL;
}
return reinterpret_cast<FunctionInfo*>(entry->value);
}
void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
SnapshotObjectId id) {
HashMap::Entry* entry = id_to_function_info_.Lookup(
unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
SnapshotObjectId id) {
HashMap::Entry* entry = id_to_function_info_index_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
info->name = names_->GetFunctionName(shared->DebugName());
info->function_id = id;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
if (script->name()->IsName()) {
@ -241,8 +313,22 @@ void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
shared->start_position(),
info));
}
entry->value = info;
entry->value = reinterpret_cast<void*>(function_info_list_.length());
function_info_list_.Add(info);
}
return static_cast<unsigned>(reinterpret_cast<intptr_t>((entry->value)));
}
unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
if (state != OTHER) return 0;
if (info_index_for_other_state_ == 0) {
FunctionInfo* info = new FunctionInfo();
info->name = "(V8 API)";
info_index_for_other_state_ = function_info_list_.length();
function_info_list_.Add(info);
}
return info_index_for_other_state_;
}
@ -267,6 +353,7 @@ AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
HandleScope scope(script_->GetIsolate());
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}

56
deps/v8/src/allocation-tracker.h

@ -28,6 +28,8 @@
#ifndef V8_ALLOCATION_TRACKER_H_
#define V8_ALLOCATION_TRACKER_H_
#include <map>
namespace v8 {
namespace internal {
@ -38,13 +40,13 @@ class AllocationTraceTree;
class AllocationTraceNode {
public:
AllocationTraceNode(AllocationTraceTree* tree,
SnapshotObjectId shared_function_info_id);
unsigned function_info_index);
~AllocationTraceNode();
AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
AllocationTraceNode* FindChild(unsigned function_info_index);
AllocationTraceNode* FindOrAddChild(unsigned function_info_index);
void AddAllocation(unsigned size);
SnapshotObjectId function_id() const { return function_id_; }
unsigned function_info_index() const { return function_info_index_; }
unsigned allocation_size() const { return total_size_; }
unsigned allocation_count() const { return allocation_count_; }
unsigned id() const { return id_; }
@ -54,7 +56,7 @@ class AllocationTraceNode {
private:
AllocationTraceTree* tree_;
SnapshotObjectId function_id_;
unsigned function_info_index_;
unsigned total_size_;
unsigned allocation_count_;
unsigned id_;
@ -68,7 +70,7 @@ class AllocationTraceTree {
public:
AllocationTraceTree();
~AllocationTraceTree();
AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
AllocationTraceNode* AddPathFromEnd(const Vector<unsigned>& path);
AllocationTraceNode* root() { return &root_; }
unsigned next_node_id() { return next_node_id_++; }
void Print(AllocationTracker* tracker);
@ -81,11 +83,36 @@ class AllocationTraceTree {
};
class AddressToTraceMap {
public:
void AddRange(Address addr, int size, unsigned node_id);
unsigned GetTraceNodeId(Address addr);
void MoveObject(Address from, Address to, int size);
void Clear();
size_t size() { return ranges_.size(); }
void Print();
private:
struct RangeStack {
RangeStack(Address start, unsigned node_id)
: start(start), trace_node_id(node_id) {}
Address start;
unsigned trace_node_id;
};
// [start, end) -> trace
typedef std::map<Address, RangeStack> RangeMap;
void RemoveRange(Address start, Address end);
RangeMap ranges_;
};
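// Hedged sketch of the bookkeeping this map performs for the tracker
// (start, new_start, size, and node_id are placeholders assumed in scope):
AddressToTraceMap ranges;
ranges.AddRange(start, size, node_id);        // [start, start + size) -> node_id
unsigned id = ranges.GetTraceNodeId(start);   // node_id while the range is live
ranges.MoveObject(start, new_start, size);    // preserve mapping across GC moves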
class AllocationTracker {
public:
struct FunctionInfo {
FunctionInfo();
const char* name;
SnapshotObjectId function_id;
const char* script_name;
int script_id;
int line;
@ -99,11 +126,15 @@ class AllocationTracker {
void AllocationEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
HashMap* id_to_function_info() { return &id_to_function_info_; }
FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
const List<FunctionInfo*>& function_info_list() const {
return function_info_list_;
}
AddressToTraceMap* address_to_trace() { return &address_to_trace_; }
private:
void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
unsigned AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
static void DeleteFunctionInfo(FunctionInfo** info);
unsigned functionInfoIndexForVMState(StateTag state);
class UnresolvedLocation {
public:
@ -125,9 +156,12 @@ class AllocationTracker {
HeapObjectsMap* ids_;
StringsStorage* names_;
AllocationTraceTree trace_tree_;
SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
HashMap id_to_function_info_;
unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
List<FunctionInfo*> function_info_list_;
HashMap id_to_function_info_index_;
List<UnresolvedLocation*> unresolved_locations_;
unsigned info_index_for_other_state_;
AddressToTraceMap address_to_trace_;
DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};

806
deps/v8/src/api.cc

File diff suppressed because it is too large

3
deps/v8/src/api.h

@ -183,7 +183,8 @@ class RegisteredExtension {
V(DataView, JSDataView) \
V(String, String) \
V(Symbol, Symbol) \
V(Script, Object) \
V(Script, JSFunction) \
V(UnboundScript, SharedFunctionInfo) \
V(Function, JSFunction) \
V(Message, JSObject) \
V(Context, Context) \

1
deps/v8/src/arm/OWNERS

@ -0,0 +1 @@
rmcilroy@chromium.org

82
deps/v8/src/arm/assembler-arm-inl.h

@ -101,7 +101,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_);
return Assembler::target_address_at(pc_, host_);
}
@ -109,7 +109,28 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_pointer_address_at(pc_);
if (FLAG_enable_ool_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
// We return the PC for the ool constant pool since this function is used by
// the serializer, which expects the address to reside within the code object.
return reinterpret_cast<Address>(pc_);
} else {
ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
return Assembler::target_pointer_address_at(pc_);
}
}
Address RelocInfo::constant_pool_entry_address() {
ASSERT(IsInConstantPool());
if (FLAG_enable_ool_constant_pool) {
ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
return Assembler::target_constant_pool_address_at(pc_,
host_->constant_pool());
} else {
ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
return Assembler::target_pointer_address_at(pc_);
}
}
@ -120,7 +141,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, target);
Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@ -131,21 +152,22 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_)));
Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
Assembler::set_target_address_at(pc_, host_,
reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@ -157,7 +179,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_);
return Assembler::target_address_at(pc_, host_);
}
@ -268,7 +290,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, NULL);
Assembler::set_target_address_at(pc_, host_, NULL);
}
@ -402,7 +424,18 @@ Address Assembler::target_pointer_address_at(Address pc) {
}
Address Assembler::target_address_at(Address pc) {
Address Assembler::target_constant_pool_address_at(
Address pc, ConstantPoolArray* constant_pool) {
ASSERT(constant_pool != NULL);
ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
return reinterpret_cast<Address>(constant_pool) +
GetLdrRegisterImmediateOffset(instr);
}
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@ -410,9 +443,14 @@ Address Assembler::target_address_at(Address pc) {
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
} else if (FLAG_enable_ool_constant_pool) {
ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(
target_constant_pool_address_at(pc, constant_pool));
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(target_pointer_address_at(pc));
}
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(target_pointer_address_at(pc));
}
@ -430,7 +468,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
if (IsLdrPcImmediateOffset(candidate_instr)) {
if (IsLdrPcImmediateOffset(candidate_instr) |
IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
}
candidate = pc - 3 * Assembler::kInstrSize;
@ -441,7 +480,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address Assembler::return_address_from_call_start(Address pc) {
if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
@ -452,8 +492,12 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Address target) {
Memory::Address_at(constant_pool_entry) = target;
Address constant_pool_entry, Code* code, Address target) {
if (FLAG_enable_ool_constant_pool) {
set_target_address_at(constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
}
}
@ -463,7 +507,9 @@ static Instr EncodeMovwImmediate(uint32_t immediate) {
}
void Assembler::set_target_address_at(Address pc, Address target) {
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@ -479,6 +525,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
CPU::FlushICache(pc, 2 * kInstrSize);
} else if (FLAG_enable_ool_constant_pool) {
ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(
target_constant_pool_address_at(pc, constant_pool)) = target;
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;

438
deps/v8/src/arm/assembler-arm.cc

@ -293,10 +293,20 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on ARM means that it is a movw/movt instruction. We don't
// generate those yet.
return false;
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on ARM means that it is a movw/movt instruction, or is an
// out-of-line constant pool entry. These only occur if
// FLAG_enable_ool_constant_pool is true.
return FLAG_enable_ool_constant_pool;
}
bool RelocInfo::IsInConstantPool() {
if (FLAG_enable_ool_constant_pool) {
return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
} else {
return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
}
}
@ -344,12 +354,17 @@ Operand::Operand(Handle<Object> handle) {
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
ASSERT(is_uint5(shift_imm));
ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
shift_imm_ = shift_imm & 31;
if (shift_op == RRX) {
if ((shift_op == ROR) && (shift_imm == 0)) {
// ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
// RRX as ROR #0 (see below).
shift_op = LSL;
} else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
ASSERT(shift_imm == 0);
shift_op_ = ROR;
@ -475,9 +490,15 @@ const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
// ldr rd, [pp, #offset]
const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@ -515,6 +536,7 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
constant_pool_builder_(),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
@ -525,6 +547,8 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
constant_pool_available_ = !FLAG_enable_ool_constant_pool;
constant_pool_full_ = false;
ClearRecordedAstId();
}
@ -535,11 +559,12 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
if (!FLAG_enable_ool_constant_pool) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
}
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@ -722,6 +747,13 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pp +/- offset_12].
return (instr & kLdrPpMask) == kLdrPpPattern;
}
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pc +/- offset_10].
@ -729,6 +761,13 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
}
bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pp +/- offset_10].
return (instr & kVldrDPpMask) == kVldrDPpPattern;
}
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
@ -1054,14 +1093,24 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
}
static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
if (assembler != NULL && !assembler->can_use_constant_pool()) {
// If there is no constant pool available, we must use a mov immediate.
// TODO(rmcilroy): enable ARMv6 support.
ASSERT(CpuFeatures::IsSupported(ARMv7));
return true;
}
if (x.must_output_reloc_info(assembler)) {
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
// Prefer movw / movt to constant pool if it is more efficient on the CPU.
return true;
} else if (x.must_output_reloc_info(assembler)) {
// Prefer constant pool if data is likely to be patched.
return false;
} else {
// Otherwise, use immediate load if movw / movt is available.
return CpuFeatures::IsSupported(ARMv7);
}
return CpuFeatures::IsSupported(ARMv7);
}
@ -1075,7 +1124,7 @@ bool Operand::is_single_instruction(const Assembler* assembler,
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
return !use_movw_movt(*this, assembler);
return !use_mov_immediate_load(*this, assembler);
} else {
// If this is not a mov or mvn instruction there will always be an additional
// instruction - either a mov or an ldr. The mov might actually be two
@ -1091,26 +1140,33 @@ bool Operand::is_single_instruction(const Assembler* assembler,
}
void Assembler::move_32_bit_immediate(Condition cond,
Register rd,
SBit s,
const Operand& x) {
if (rd.code() != pc.code() && s == LeaveCC) {
if (use_movw_movt(x, this)) {
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
// Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2);
}
emit(cond | 0x30*B20 | rd.code()*B12 |
EncodeMovwImmediate(x.imm32_ & 0xffff));
movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
return;
}
void Assembler::move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond) {
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo);
}
RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
ldr(rd, MemOperand(pc, 0), cond);
if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
// TODO(rmcilroy): add ARMv6 support for immediate loads.
ASSERT(CpuFeatures::IsSupported(ARMv7));
if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
// Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2);
}
emit(cond | 0x30*B20 | target.code()*B12 |
EncodeMovwImmediate(x.imm32_ & 0xffff));
movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond);
}
} else {
ASSERT(can_use_constant_pool());
ConstantPoolAddEntry(rinfo);
ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
}
}
@ -1133,20 +1189,9 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
move_32_bit_immediate(cond, rd, LeaveCC, x);
move_32_bit_immediate(rd, x, cond);
} else {
if ((instr & kMovMvnMask) == kMovMvnPattern) {
// Moves need to use a constant pool entry.
RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
ldr(ip, MemOperand(pc, 0), cond);
} else if (x.must_output_reloc_info(this)) {
// Otherwise, use most efficient form of fetching from constant pool.
move_32_bit_immediate(cond, ip, LeaveCC, x);
} else {
// If this is not a mov or mvn instruction we may still be able to
// avoid a constant pool entry by using mvn or movw.
mov(ip, x, LeaveCC, cond);
}
mov(ip, x, LeaveCC, cond);
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@ -1748,7 +1793,9 @@ void Assembler::uxtb(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
ASSERT(src.shift_op() == ROR);
// Operand maps ROR #0 to LSL #0.
ASSERT((src.shift_op() == ROR) ||
((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@ -1770,7 +1817,9 @@ void Assembler::uxtab(Register dst,
(src2.shift_imm_ == 8) ||
(src2.shift_imm_ == 16) ||
(src2.shift_imm_ == 24));
ASSERT(src2.shift_op() == ROR);
// Operand maps ROR #0 to LSL #0.
ASSERT((src2.shift_op() == ROR) ||
((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
@ -1790,7 +1839,9 @@ void Assembler::uxtb16(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
ASSERT(src.shift_op() == ROR);
// Operand maps ROR #0 to LSL #0.
ASSERT((src.shift_op() == ROR) ||
((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@ -1814,8 +1865,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
ldr(ip, MemOperand(pc, 0), cond);
move_32_bit_immediate(ip, src);
msr(fields, Operand(ip), cond);
return;
}
@ -2422,7 +2472,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
} else if (FLAG_enable_vldr_imm) {
} else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@ -2438,8 +2488,9 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
RecordRelocInfo(imm);
vldr(dst, MemOperand(pc, 0));
RelocInfo rinfo(pc_, imm);
ConstantPoolAddEntry(rinfo);
vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@ -3169,6 +3220,7 @@ void Assembler::GrowBuffer() {
ASSERT(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
constant_pool_builder_.Relocate(pc_delta);
}
@ -3204,28 +3256,16 @@ void Assembler::emit_code_stub_address(Code* stub) {
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
UseConstantPoolMode mode) {
// We do not try to reuse pool constants.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data, NULL);
if (((rmode >= RelocInfo::JS_RETURN) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
(rmode == RelocInfo::CONST_POOL) ||
mode == DONT_USE_CONSTANT_POOL) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode)
|| RelocInfo::IsConstPool(rmode)
|| mode == DONT_USE_CONSTANT_POOL);
// These modes do not need an entry in the constant pool.
} else {
RecordRelocInfoConstantPoolEntryHelper(rinfo);
}
RecordRelocInfo(rinfo);
}
void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@ -3236,9 +3276,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(pc_,
rmode,
if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(rinfo.pc(),
rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
@ -3250,34 +3290,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
void Assembler::RecordRelocInfo(double data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, data);
RecordRelocInfoConstantPoolEntryHelper(rinfo);
}
void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
if (rinfo.rmode() == RelocInfo::NONE64) {
ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
if (num_pending_64_bit_reloc_info_ == 0) {
first_const_pool_64_use_ = pc_offset();
}
pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
if (FLAG_enable_ool_constant_pool) {
constant_pool_builder_.AddEntry(this, rinfo);
} else {
ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
if (num_pending_32_bit_reloc_info_ == 0) {
first_const_pool_32_use_ = pc_offset();
if (rinfo.rmode() == RelocInfo::NONE64) {
ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
if (num_pending_64_bit_reloc_info_ == 0) {
first_const_pool_64_use_ = pc_offset();
}
pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
} else {
ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
if (num_pending_32_bit_reloc_info_ == 0) {
first_const_pool_32_use_ = pc_offset();
}
pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
}
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
}
void Assembler::BlockConstPoolFor(int instructions) {
if (FLAG_enable_ool_constant_pool) {
// Should be a no-op if using an out-of-line constant pool.
ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
return;
}
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
@ -3299,6 +3343,13 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (FLAG_enable_ool_constant_pool) {
// Should be a no-op if using an out-of-line constant pool.
ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
return;
}
// Some short sequences of instructions mustn't be broken up by constant pool
// emission; such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
@ -3496,6 +3547,195 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
ASSERT(FLAG_enable_ool_constant_pool);
return constant_pool_builder_.Allocate(heap);
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
ASSERT(FLAG_enable_ool_constant_pool);
constant_pool_builder_.Populate(this, constant_pool);
}
ConstantPoolBuilder::ConstantPoolBuilder()
: entries_(),
merged_indexes_(),
count_of_64bit_(0),
count_of_code_ptr_(0),
count_of_heap_ptr_(0),
count_of_32bit_(0) { }
bool ConstantPoolBuilder::IsEmpty() {
return entries_.size() == 0;
}
bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
return rmode == RelocInfo::NONE64;
}
bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
}
bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
return RelocInfo::IsCodeTarget(rmode);
}
bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
}
void ConstantPoolBuilder::AddEntry(Assembler* assm,
const RelocInfo& rinfo) {
RelocInfo::Mode rmode = rinfo.rmode();
ASSERT(rmode != RelocInfo::COMMENT &&
rmode != RelocInfo::POSITION &&
rmode != RelocInfo::STATEMENT_POSITION &&
rmode != RelocInfo::CONST_POOL);
// Try to merge entries which won't be patched.
int merged_index = -1;
if (RelocInfo::IsNone(rmode) ||
(!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
size_t i;
std::vector<RelocInfo>::const_iterator it;
for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
if (RelocInfo::IsEqual(rinfo, *it)) {
merged_index = i;
break;
}
}
}
entries_.push_back(rinfo);
merged_indexes_.push_back(merged_index);
if (merged_index == -1) {
// Not merged, so update the appropriate count.
if (Is64BitEntry(rmode)) {
count_of_64bit_++;
} else if (Is32BitEntry(rmode)) {
count_of_32bit_++;
} else if (IsCodePtrEntry(rmode)) {
count_of_code_ptr_++;
} else {
ASSERT(IsHeapPtrEntry(rmode));
count_of_heap_ptr_++;
}
}
// Check if we still have room for another entry given Arm's ldr and vldr
// immediate offset range.
if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
count_of_code_ptr_,
count_of_heap_ptr_,
count_of_32bit_))) &&
is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0))) {
assm->set_constant_pool_full();
}
}
void ConstantPoolBuilder::Relocate(int pc_delta) {
for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
rinfo != entries_.end(); rinfo++) {
ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
rinfo->set_pc(rinfo->pc() + pc_delta);
}
}
MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
if (IsEmpty()) {
return heap->empty_constant_pool_array();
} else {
return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
count_of_heap_ptr_, count_of_32bit_);
}
}
void ConstantPoolBuilder::Populate(Assembler* assm,
ConstantPoolArray* constant_pool) {
ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
ASSERT(entries_.size() == merged_indexes_.size());
int index_64bit = 0;
int index_code_ptr = count_of_64bit_;
int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
size_t i;
std::vector<RelocInfo>::const_iterator rinfo;
for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
RelocInfo::Mode rmode = rinfo->rmode();
// Update constant pool if necessary and get the entry's offset.
int offset;
if (merged_indexes_[i] == -1) {
if (Is64BitEntry(rmode)) {
offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
constant_pool->set(index_64bit++, rinfo->data64());
} else if (Is32BitEntry(rmode)) {
offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
} else if (IsCodePtrEntry(rmode)) {
offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
kHeapObjectTag;
constant_pool->set(index_code_ptr++,
reinterpret_cast<Object *>(rinfo->data()));
} else {
ASSERT(IsHeapPtrEntry(rmode));
offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
kHeapObjectTag;
constant_pool->set(index_heap_ptr++,
reinterpret_cast<Object *>(rinfo->data()));
}
merged_indexes_[i] = offset; // Stash offset for merged entries.
} else {
size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
offset = merged_indexes_[merged_index];
}
// Patch vldr/ldr instruction with correct offset.
Instr instr = assm->instr_at(rinfo->pc());
if (Is64BitEntry(rmode)) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
ASSERT(is_uint10(offset));
assm->instr_at_put(rinfo->pc(),
Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
} else {
// Instruction to patch must be 'ldr rd, [pp, #0]'.
ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
ASSERT(is_uint12(offset));
assm->instr_at_put(rinfo->pc(),
Assembler::SetLdrRegisterImmediateOffset(instr, offset));
}
}
ASSERT((index_64bit == count_of_64bit_) &&
(index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
(index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
(index_32bit == (index_heap_ptr + count_of_32bit_)));
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
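A hedged sketch of the merged-entry bookkeeping in Populate above: merged_indexes_ holds -1 for a unique entry, or the index of an identical earlier entry; unique entries stash their pool offset in their own slot so later duplicates can reuse it without consuming another pool element. The function name is made up for illustration.

  #include <vector>
  // Returns the pool offset for entry i, reusing an earlier duplicate's.
  int OffsetForEntry(std::vector<int>& merged_indexes, size_t i,
                     int fresh_offset) {
    if (merged_indexes[i] == -1) {
      merged_indexes[i] = fresh_offset;  // stash for later duplicates
      return fresh_offset;
    }
    return merged_indexes[merged_indexes[i]];  // earlier entry's offset
  }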

120
deps/v8/src/arm/assembler-arm.h

@ -39,7 +39,10 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include <vector>
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
@ -376,8 +379,9 @@ struct QwNeonRegister {
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
*m = (code_ & 0x10) >> 4;
*vm = code_ & 0x0F;
int encoded_code = code_ << 1;
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
int code_;
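The fix reflects that Qn aliases the D-register pair D2n..D2n+1, so the instruction field encodes the doubled code. A standalone worked version, illustrative only:

  void SplitQCode(int q_code, int* vm, int* m) {
    int encoded = q_code << 1;    // Qn is encoded as D(2n): Q7 -> 14
    *m = (encoded & 0x10) >> 4;   // high bit of the 5-bit register field
    *vm = encoded & 0x0F;         // low four bits
  }
  // Q7: encoded 14 -> m = 0, vm = 14;  Q8: encoded 16 -> m = 1, vm = 0.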
@ -702,9 +706,42 @@ class NeonListOperand BASE_EMBEDDED {
NeonListType type_;
};
// Class used to build a constant pool.
class ConstantPoolBuilder BASE_EMBEDDED {
public:
explicit ConstantPoolBuilder();
void AddEntry(Assembler* assm, const RelocInfo& rinfo);
void Relocate(int pc_delta);
bool IsEmpty();
MaybeObject* Allocate(Heap* heap);
void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
inline int count_of_64bit() const { return count_of_64bit_; }
inline int count_of_code_ptr() const { return count_of_code_ptr_; }
inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
inline int count_of_32bit() const { return count_of_32bit_; }
private:
bool Is64BitEntry(RelocInfo::Mode rmode);
bool Is32BitEntry(RelocInfo::Mode rmode);
bool IsCodePtrEntry(RelocInfo::Mode rmode);
bool IsHeapPtrEntry(RelocInfo::Mode rmode);
std::vector<RelocInfo> entries_;
std::vector<int> merged_indexes_;
int count_of_64bit_;
int count_of_code_ptr_;
int count_of_heap_ptr_;
int count_of_32bit_;
};
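A hedged sketch of the intended calling sequence, inferred from this declaration; the real call sites are in assembler-arm.cc and may differ:

  // ConstantPoolBuilder builder;
  // builder.AddEntry(assm, rinfo);     // while emitting instructions
  // builder.Relocate(pc_delta);        // whenever the code buffer moves
  // if (!builder.IsEmpty()) {
  //   MaybeObject* pool = builder.Allocate(heap);
  //   builder.Populate(assm, constant_pool);  // patch ldr/vldr offsets
  // }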
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
extern const Instr kLdrPpMask;
extern const Instr kLdrPpPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
@ -780,9 +817,27 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_constant_pool_address_at(
Address pc, ConstantPoolArray* constant_pool));
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
INLINE(static Address target_address_at(Address pc,
ConstantPoolArray* constant_pool));
INLINE(static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target));
INLINE(static Address target_address_at(Address pc, Code* code)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
Code* code,
Address target)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target);
}
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@ -795,7 +850,7 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Address target);
Address constant_pool_entry, Code* code, Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
@ -1292,12 +1347,6 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
static bool use_immediate_embedded_pointer_loads(
const Assembler* assembler) {
return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size());
}
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@ -1401,6 +1450,8 @@ class Assembler : public AssemblerBase {
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
static bool IsLdrPpImmediateOffset(Instr instr);
static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
@ -1446,6 +1497,20 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
// Allocate a constant pool of the correct size for the generated code.
MaybeObject* AllocateConstantPool(Heap* heap);
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
bool can_use_constant_pool() const {
return is_constant_pool_available() && !constant_pool_full_;
}
void set_constant_pool_full() {
constant_pool_full_ = true;
}
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@ -1499,6 +1564,14 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
bool is_constant_pool_available() const {
return constant_pool_available_;
}
void set_constant_pool_available(bool available) {
constant_pool_available_ = available;
}
private:
int next_buffer_check_; // pc offset of next buffer check
@ -1556,19 +1629,27 @@ class Assembler : public AssemblerBase {
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
ConstantPoolBuilder constant_pool_builder_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Indicates whether the constant pool can be accessed, which is only possible
// if the pp register points to the current code object's constant pool.
bool constant_pool_available_;
// Indicates whether the constant pool is too full to accept new entries due
// to the ldr instruction's limited immediate offset range.
bool constant_pool_full_;
// Code emission
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// 32-bit immediate values
void move_32_bit_immediate(Condition cond,
Register rd,
SBit s,
const Operand& x);
void move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond = al);
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
@ -1588,14 +1669,15 @@ class Assembler : public AssemblerBase {
};
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
UseConstantPoolMode mode = USE_CONSTANT_POOL);
void RecordRelocInfo(double data);
void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo);
void ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;

178
deps/v8/src/arm/builtins-arm.cc

@ -155,10 +155,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ mov(r2, Operand(undefined_sentinel));
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@ -262,7 +259,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
{
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@ -282,7 +279,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
{
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@ -292,7 +289,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@ -329,7 +326,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@ -339,10 +336,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
bool count_constructions,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r2 : allocation site or undefined
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@ -350,11 +349,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
// Should never create mementos for api functions.
ASSERT(!is_api_function || !create_memento);
// Should never create mementos before slack tracking is finished.
ASSERT(!count_constructions || !create_memento);
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
__ AssertUndefinedOrAllocationSite(r2, r3);
__ push(r2);
}
// Preserve the two incoming parameters on the stack.
__ SmiTag(r0);
@ -405,7 +415,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(r2);
__ pop(r1);
@ -417,13 +427,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r2: initial map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
if (create_memento) {
__ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
}
__ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
// r3: object size
// r3: object size (not including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@ -437,12 +451,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
// r3: object size (in words)
// r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
@ -456,9 +471,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
__ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
__ InitializeFieldsWithFiller(r5, r0, r6);
} else if (create_memento) {
__ sub(r6, r3, Operand(AllocationMemento::kSize / kPointerSize));
__ add(r0, r4, Operand(r6, LSL, kPointerSizeLog2)); // End of object.
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ InitializeFieldsWithFiller(r5, r0, r6);
// Fill in memento fields.
// r5: points to the allocated but uninitialized memento.
__ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Load the AllocationSite
__ ldr(r6, MemOperand(sp, 2 * kPointerSize));
ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
} else {
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
__ InitializeFieldsWithFiller(r5, r0, r6);
}
__ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
__ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@ -556,13 +590,47 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r1: constructor function
__ bind(&rt_call);
if (create_memento) {
// Get the cell or allocation site.
__ ldr(r2, MemOperand(sp, 2 * kPointerSize));
__ push(r2);
}
__ push(r1); // argument for Runtime_NewObject
__ CallRuntime(Runtime::kNewObject, 1);
if (create_memento) {
__ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
} else {
__ CallRuntime(Runtime::kHiddenNewObject, 1);
}
__ mov(r4, r0);
// If we ended up using the runtime, and we want a memento, then the
// runtime call made it for us, and we shouldn't increment the memento
// create count.
Label count_incremented;
if (create_memento) {
__ jmp(&count_incremented);
}
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
if (create_memento) {
__ ldr(r2, MemOperand(sp, kPointerSize * 2));
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r5);
__ b(eq, &count_incremented);
// r2 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count.
__ ldr(r3, FieldMemOperand(r2,
AllocationSite::kPretenureCreateCountOffset));
__ add(r3, r3, Operand(Smi::FromInt(1)));
__ str(r3, FieldMemOperand(r2,
AllocationSite::kPretenureCreateCountOffset));
__ bind(&count_incremented);
}
__ push(r4);
__ push(r4);
@ -665,17 +733,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true);
Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false);
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false);
Generate_JSConstructStubHelper(masm, true, false, false);
}
@ -738,9 +806,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r0, Operand(r3));
if (is_construct) {
// No type feedback cell is available
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(), masm->isolate());
__ mov(r2, Operand(undefined_sentinel));
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@ -768,13 +834,13 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@ -782,7 +848,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
__ CallRuntime(Runtime::kCompileOptimized, 2);
__ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ pop(r1);
}
@ -870,14 +936,14 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
__ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@ -899,11 +965,11 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> r6.
@ -947,7 +1013,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
@ -963,20 +1029,26 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
__ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
if (FLAG_enable_ool_constant_pool) {
__ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
}
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ add(r0, r0, Operand::SmiUntag(r1));
__ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex)));
// And "return" to the OSR entry point of the function.
__ Ret();
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ add(r0, r0, Operand::SmiUntag(r1));
__ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// And "return" to the OSR entry point of the function.
__ Ret();
}
}
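The address arithmetic above, condensed into one expression (a sketch; kHeapObjectTag is assumed to be the usual 1 for tagged pointers):

  uintptr_t OsrEntry(uintptr_t tagged_code, int header_size, int osr_offset) {
    const int kHeapObjectTag = 1;
    return tagged_code - kHeapObjectTag + header_size + osr_offset;
  }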
@ -987,8 +1059,8 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard, 0);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@ -1039,7 +1111,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
// Compute the receiver in non-strict mode.
// Compute the receiver in sloppy mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@ -1062,7 +1134,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r0);
__ push(r0);
@ -1189,7 +1261,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@ -1247,7 +1319,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
// Compute the receiver in non-strict mode.
// Compute the receiver in sloppy mode.
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
@ -1354,8 +1426,14 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// then tear down the parameters.
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
if (FLAG_enable_ool_constant_pool) {
__ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
__ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
} else {
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
}
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}

516
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

2
deps/v8/src/arm/code-stubs-arm.h

@ -324,7 +324,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }

2
deps/v8/src/arm/constants-arm.h

@ -343,7 +343,7 @@ enum NeonSize {
Neon8 = 0x0,
Neon16 = 0x1,
Neon32 = 0x2,
Neon64 = 0x4
Neon64 = 0x3
};
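The correction matters because the NEON size selector is a two-bit field: 0x4 cannot be represented and would have encoded like Neon8 with a bit leaking into a neighboring field. A sketch with the field position left as a parameter:

  unsigned EncodeNeonSize(unsigned size_bits, int field_shift) {
    // Field is two bits wide; 0x4 & 0x3 == 0, which is why the old value
    // silently encoded like Neon8.
    return (size_bits & 0x3) << field_shift;
  }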
// -----------------------------------------------------------------------------

12
deps/v8/src/arm/debug-arm.cc

@ -117,7 +117,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@ -265,9 +265,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
// -- r2 : cache cell for call target
// -- r2 : feedback array
// -- r3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
}
@ -286,9 +287,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
// -- r2 : cache cell for call target
// -- r2 : feedback array
// -- r3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}

33
deps/v8/src/arm/deoptimizer-arm.cc

@ -50,13 +50,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
if (FLAG_zap_code_space) {
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
osr_patcher.masm()->bkpt(0);
}
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
SharedFunctionInfo* shared =
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@ -350,6 +373,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
ASSERT(FLAG_enable_ool_constant_pool);
SetFrameSlot(offset, value);
}
#undef __
} } // namespace v8::internal

12
deps/v8/src/arm/disasm-arm.cc

@ -1061,7 +1061,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
Format(instr, "uxtb16'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
@ -1085,7 +1085,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
Format(instr, "uxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
@ -1100,7 +1100,7 @@ void Decoder::DecodeType3(Instruction* instr) {
} else {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
@ -1566,7 +1566,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
if ((instr->VdValue() & 1) != 0) Unknown(instr);
int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@ -1579,7 +1580,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
if ((instr->VdValue() & 1) != 0) Unknown(instr);
int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
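The disassembler fix above encodes that vmovl writes a Q register: the raw Vd field is a D-register number, so it must be even, and the Q index is half of it with bit 22 weighted by 8 rather than 16. A standalone, illustrative version:

  // Returns the q-register index, or -1 for an invalid (odd) encoding.
  int DecodeVmovlVd(int bit22, int vd_field) {
    if (vd_field & 1) return -1;             // mirrors the new Unknown(instr)
    return (bit22 << 3) | (vd_field >> 1);   // e.g. bit22 = 1, vd = 6 -> q11
  }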

494
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

26
deps/v8/src/arm/ic-arm.cc

@ -333,8 +333,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
ExtraICState extra_state) {
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@ -342,9 +341,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::HANDLER, MONOMORPHIC, extra_state,
Code::NORMAL, Code::LOAD_IC);
Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@ -430,7 +427,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ b(ne, slow_case);
// Load the elements into scratch1 and check its map.
Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@ -492,7 +489,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@ -518,7 +515,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@ -879,7 +876,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@ -1063,7 +1060,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@ -1162,8 +1159,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
ExtraICState extra_ic_state) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@ -1172,9 +1168,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::HANDLER, MONOMORPHIC, extra_ic_state,
Code::NORMAL, Code::STORE_IC);
Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@ -1225,7 +1219,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver

278
deps/v8/src/arm/lithium-arm.cc

@ -831,7 +831,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
@ -1110,6 +1109,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@ -1151,6 +1151,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
LMathClz32* result = new(zone()) LMathClz32(input);
return DefineAsRegister(result);
}
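Background for the new node, as a portable sketch: Math.clz32 counts the leading zero bits of its operand viewed as a uint32, which ARM implements directly with the clz instruction.

  #include <cstdint>
  int Clz32(uint32_t x) {
    if (x == 0) return 32;
    int n = 0;
    while ((x & 0x80000000u) == 0) { x <<= 1; ++n; }
    return n;
  }
  // Clz32(1) == 31, Clz32(0x80000000u) == 0, Clz32(0) == 32.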
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@ -1242,21 +1249,62 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
dividend, divisor));
if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
(instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
(!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1)) {
result = AssignEnvironment(result);
}
return result;
}
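What the DivByPowerOf2I path computes, as a hedged sketch: truncating signed division by 2^k is an arithmetic shift plus a bias for negative dividends; the conditions above assign an environment so the minus-zero and kMinInt / -1 cases can deopt.

  #include <cstdint>
  int32_t DivByPowerOf2(int32_t dividend, int k) {
    int32_t bias = (dividend >> 31) & ((1 << k) - 1);  // 2^k - 1 if negative
    return (dividend + bias) >> k;
  }
  // DivByPowerOf2(-7, 1) == -3, matching C's -7 / 2; a bare -7 >> 1 gives -4.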
LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
dividend, divisor));
if (divisor == 0 ||
(instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
result = AssignEnvironment(result);
}
return result;
}
LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
return AssignEnvironment(DefineAsRegister(div));
return DoDivByPowerOf2I(instr);
} else if (instr->right()->IsConstant()) {
return DoDivByConstI(instr);
} else {
return DoDivI(instr);
}
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@ -1265,97 +1313,106 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
uint32_t divisor_abs = abs(divisor);
// Dividing by 0, 1, and powers of 2 is easy.
// Note that IsPowerOf2(0) returns true;
ASSERT(IsPowerOf2(0) == true);
if (IsPowerOf2(divisor_abs)) return true;
LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
LOperand* dividend = UseRegisterAtStart(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
dividend, divisor));
if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
(instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
result = AssignEnvironment(result);
}
return result;
}
// We have magic numbers for a few specific divisors.
// Details and proofs can be found in:
// - Hacker's Delight, Henry S. Warren, Jr.
// - The PowerPC Compiler Writer’s Guide
// and probably many others.
//
// We handle
// <divisor with magic numbers> * <power of 2>
// but not
// <divisor with magic numbers> * <other divisor with magic numbers>
int32_t power_of_2_factor =
CompilerIntrinsics::CountTrailingZeros(divisor_abs);
DivMagicNumbers magic_numbers =
DivMagicNumberFor(divisor_abs >> power_of_2_factor);
if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
return false;
LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LOperand* temp =
((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
(divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
NULL : TempRegister();
LInstruction* result = DefineAsRegister(
new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
if (divisor == 0 ||
(instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
result = AssignEnvironment(result);
}
return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
// LMathFloorOfDiv can only handle a subset of divisors, so fall
// back to a flooring division in all other cases.
HValue* right = instr->right();
if (!right->IsInteger32Constant() ||
(!CpuFeatures::IsSupported(SUDIV) &&
!HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()))) {
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(right);
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
if (instr->RightIsPowerOf2()) {
return DoFlooringDivByPowerOf2I(instr);
} else if (instr->right()->IsConstant()) {
return DoFlooringDivByConstI(instr);
} else {
return DoDivI(instr);
}
}
LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegisterAtStart(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
dividend, divisor));
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
result = AssignEnvironment(result);
}
return result;
}
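What ModByPowerOf2I boils down to, as a sketch: for divisor 2^k the remainder is the low k bits of the magnitude with the dividend's sign restored, which is why only the minus-zero case needs an environment.

  #include <cstdint>
  int32_t ModByPowerOf2(int32_t dividend, int k) {
    uint32_t mask = (1u << k) - 1;
    if (dividend >= 0) return dividend & mask;
    uint32_t mag = 0u - static_cast<uint32_t>(dividend);  // safe for kMinInt
    return -static_cast<int32_t>(mag & mask);
  }
  // ModByPowerOf2(-7, 2) == -3, matching C's -7 % 4.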
LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
? UseRegister(right)
: UseOrConstant(right);
LOperand* remainder = TempRegister();
return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
dividend, divisor));
if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
result = AssignEnvironment(result);
}
return result;
}
LInstruction* LChunkBuilder::DoModI(HMod* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d10);
LOperand* temp2 = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d11);
LInstruction* result = DefineAsRegister(new(zone()) LModI(
dividend, divisor, temp, temp2));
if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
result = AssignEnvironment(result);
}
return result;
}
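And the general LModI path on SUDIV hardware, sketched: ARM has no remainder instruction, so the lowering is sdiv followed by mls, with the checks above deopting on a zero divisor or a minus-zero result.

  int32_t ModViaSdiv(int32_t lhs, int32_t rhs) {
    int32_t quotient = lhs / rhs;   // sdiv; rhs == 0 is deopted beforehand
    return lhs - quotient * rhs;    // mls
  }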
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseConstant(right));
LInstruction* result = DefineAsRegister(mod);
return (left->CanBeNegative() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
} else if (CpuFeatures::IsSupported(SUDIV)) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right));
LInstruction* result = DefineAsRegister(mod);
return (right->CanBeZero() ||
(left->RangeCanInclude(kMinInt) &&
right->RangeCanInclude(-1) &&
instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
(left->CanBeNegative() &&
instr->CanBeZero() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero)))
? AssignEnvironment(result)
: result;
return DoModByPowerOf2I(instr);
} else if (instr->right()->IsConstant()) {
return DoModByConstI(instr);
} else {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right),
FixedTemp(d10),
FixedTemp(d11));
LInstruction* result = DefineAsRegister(mod);
return (right->CanBeZero() ||
(left->CanBeNegative() &&
instr->CanBeZero() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero)))
? AssignEnvironment(result)
: result;
return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@ -1846,25 +1903,27 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
if (val->CheckFlag(HInstruction::kUint32)) {
LNumberTagU* result = new(zone()) LNumberTagU(value);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else if (val->HasRange() && val->range()->IsInSmiRange()) {
if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
? DefineAsRegister(new(zone()) LUint32ToSmi(value))
: DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return AssignEnvironment(result);
return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
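Context for the SmiTag/kCanOverflow rework above (a sketch, assuming 32-bit smis as on ARM): tagging shifts the value left by one, so it overflows exactly when the value does not survive the round trip.

  #include <cstdint>
  bool SmiTagOverflows(int32_t value) {
    int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    return (tagged >> 1) != value;  // true iff the tagging check must deopt
  }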
@ -1939,6 +1998,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
HValue* value = instr->value();
ASSERT(value->representation().IsDouble());
return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
}
LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
LOperand* lo = UseRegister(instr->lo());
LOperand* hi = UseRegister(instr->hi());
return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
}
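What the new DoubleBits/ConstructDouble nodes expose, sketched: pure bit reinterpretation between a double and its high/low 32-bit halves, with no numeric conversion involved.

  #include <cstdint>
  #include <cstring>
  uint64_t DoubleBitsOf(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits;
  }
  double ConstructDoubleFrom(uint32_t hi, uint32_t lo) {
    uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
  }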
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@ -2195,11 +2268,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
if (needs_write_barrier ||
(FLAG_track_fields && instr->field_representation().IsSmi())) {
if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
} else if (FLAG_track_double_fields &&
instr->field_representation().IsDouble()) {
} else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@ -2209,8 +2280,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
if (FLAG_track_heap_object_fields &&
instr->field_representation().IsHeapObject()) {
if (instr->field_representation().IsHeapObject()) {
if (!instr->value()->type().IsHeapObject()) {
return AssignEnvironment(result);
}

234
deps/v8/src/arm/lithium-arm.h

@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@ -124,14 +129,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
V(MathFloor) \
V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@ -173,7 +180,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@ -614,12 +620,45 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
divisor_ = divisor;
}
LOperand* dividend() { return inputs_[0]; }
int32_t divisor() const { return divisor_; }
DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
private:
int32_t divisor_;
};
class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
divisor_ = divisor;
}
LOperand* dividend() { return inputs_[0]; }
int32_t divisor() const { return divisor_; }
DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
private:
int32_t divisor_;
};
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LModI(LOperand* left,
LOperand* right,
LOperand* temp = NULL,
LOperand* temp2 = NULL) {
LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@ -636,6 +675,42 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
divisor_ = divisor;
}
LOperand* dividend() { return inputs_[0]; }
int32_t divisor() const { return divisor_; }
DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
private:
int32_t divisor_;
};
class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
divisor_ = divisor;
}
LOperand* dividend() { return inputs_[0]; }
int32_t divisor() const { return divisor_; }
DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
private:
int32_t divisor_;
};
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@ -648,29 +723,47 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
LOperand* temp = NULL) {
inputs_[0] = left;
inputs_[1] = right;
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
divisor_ = divisor;
}
LOperand* dividend() { return inputs_[0]; }
int32_t divisor() { return divisor_; }
DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
"flooring-div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
private:
int32_t divisor_;
};
class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
divisor_ = divisor;
temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* dividend() { return inputs_[0]; }
int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
private:
int32_t divisor_;
};
@ -809,6 +902,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@ -1885,19 +1990,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@ -1910,38 +2002,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToSmi(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
explicit LNumberTagI(LOperand* value) {
LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
explicit LNumberTagU(LOperand* value) {
LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@ -2026,6 +2113,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
@ -2101,7 +2189,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@ -2164,7 +2252,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@ -2365,6 +2453,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
};
class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
inputs_[1] = lo;
}
LOperand* hi() { return inputs_[0]; }
LOperand* lo() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
};
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@ -2579,10 +2694,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
@ -2607,6 +2719,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
LInstruction* DoMathClz32(HUnaryMathOperation* instr);
LInstruction* DoDivByPowerOf2I(HDiv* instr);
LInstruction* DoDivByConstI(HDiv* instr);
LInstruction* DoDivI(HBinaryOperation* instr);
LInstruction* DoModByPowerOf2I(HMod* instr);
LInstruction* DoModByConstI(HMod* instr);
LInstruction* DoModI(HMod* instr);
LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@ -2717,9 +2838,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};

740
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

23
deps/v8/src/arm/lithium-codegen-arm.h

@ -126,9 +126,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness);
void DoDeferredNumberTagIU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@ -162,9 +164,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@ -348,17 +348,6 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
// Emit optimized code for integer division.
// Inputs are signed.
// All registers are clobbered.
// If 'remainder' is no_reg, it is not computed.
void EmitSignedIntegerDivisionByConstant(Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);

167
deps/v8/src/arm/macro-assembler-arm.cc

@ -133,6 +133,12 @@ void MacroAssembler::Call(Address target,
set_predictable_code_size(true);
}
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether the constant pool is full or not).
int expected_size = CallSize(target, rmode, cond);
#endif
// Call sequence on V7 or later may be:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@ -153,7 +159,7 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
@ -888,6 +894,16 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
pc_offset() - Instruction::kPCReadOffset;
ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
ldr(pp, MemOperand(pc, constant_pool_offset));
}
}
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
PushFixedFrame();
@ -912,22 +928,20 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
}
void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
int constant_pool_offset =
Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8;
ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
ldr(pp, MemOperand(pc, constant_pool_offset));
LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool) {
// r0-r3: preserved
PushFixedFrame();
if (FLAG_enable_ool_constant_pool && load_constant_pool) {
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
@ -975,6 +989,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
}
if (FLAG_enable_ool_constant_pool) {
str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@ -1045,6 +1060,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@ -1059,7 +1076,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
@ -1366,6 +1382,11 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
ConstantPoolUnavailableScope constant_pool_unavailable(this);
if (FLAG_enable_ool_constant_pool) {
ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
}
ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
@ -2411,7 +2432,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException, isolate()),
ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@ -2806,16 +2827,8 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@ -2827,25 +2840,24 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
CallRuntime(Runtime::kAbort, 2);
CallRuntime(Runtime::kAbort, 1);
} else {
CallRuntime(Runtime::kAbort, 2);
CallRuntime(Runtime::kAbort, 1);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
static const int kExpectedAbortInstructions = 10;
static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@ -2899,31 +2911,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
}
bind(&done);
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function,
@ -2936,19 +2923,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
void MacroAssembler::LoadArrayFunction(Register function) {
// Load the global or builtins object from the current context.
ldr(function,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the global context from the global or builtins object.
ldr(function,
FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
// Load the array function from the native context.
ldr(function,
MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@ -3070,6 +3044,20 @@ void MacroAssembler::AssertName(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, Heap::kUndefinedValueRootIndex);
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Assert(eq, kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
@ -3579,22 +3567,31 @@ void MacroAssembler::CallCFunctionHelper(Register function,
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Register result) {
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
if (emit_debug_code()) {
// Check that the instruction is a ldr reg, [pc + offset].
and_(result, result, Operand(kLdrPCPattern));
cmp(result, Operand(kLdrPCPattern));
Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
// Check that the instruction is a ldr reg, [<pc or pp> + offset].
if (FLAG_enable_ool_constant_pool) {
and_(result, result, Operand(kLdrPpPattern));
cmp(result, Operand(kLdrPpPattern));
Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
} else {
and_(result, result, Operand(kLdrPCPattern));
cmp(result, Operand(kLdrPCPattern));
Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
}
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
// Get the address of the constant.
and_(result, result, Operand(kLdrOffsetMask));
add(result, ldr_location, Operand(result));
add(result, result, Operand(kPCRegOffset));
if (FLAG_enable_ool_constant_pool) {
add(result, pp, Operand(result));
} else {
add(result, ldr_location, Operand(result));
add(result, result, Operand(Instruction::kPCReadOffset));
}
}
@ -3849,9 +3846,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
CallRuntime(Runtime::kThrowMessage, 1);
CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
CallRuntime(Runtime::kThrowMessage, 1);
CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
if (is_const_pool_blocked()) {
@ -4079,6 +4076,26 @@ void CodePatcher::EmitCondition(Condition cond) {
}
void MacroAssembler::TruncatingDiv(Register result,
Register dividend,
int32_t divisor) {
ASSERT(!dividend.is(result));
ASSERT(!dividend.is(ip));
ASSERT(!result.is(ip));
MultiplierAndShift ms(divisor);
mov(ip, Operand(ms.multiplier()));
smull(ip, result, dividend, ip);
if (divisor > 0 && ms.multiplier() < 0) {
add(result, result, Operand(dividend));
}
if (divisor < 0 && ms.multiplier() > 0) {
sub(result, result, Operand(dividend));
}
if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
add(result, result, Operand(dividend, LSR, 31));
}
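// A scalar model (a sketch, not emitted code) of the sequence above:
// MultiplierAndShift precomputes a Granlund/Montgomery-style magic
// multiplier m and shift s for the constant divisor, so the division
// needs only a smull, an optional add/sub, a shift and a sign fixup.
static int32_t TruncatingDivModel(int32_t dividend, int32_t divisor,
                                  int32_t m, int s) {
  // smull ip, result, dividend, ip: keep the high 32 bits of the product.
  int32_t result =
      static_cast<int32_t>((static_cast<int64_t>(dividend) * m) >> 32);
  if (divisor > 0 && m < 0) result += dividend;  // matches the add above
  if (divisor < 0 && m > 0) result -= dividend;  // matches the sub above
  if (s > 0) result >>= s;  // arithmetic shift, as ASR does on ARM
  // LSR #31 adds 1 exactly when the dividend is negative, converting the
  // floor result into truncation toward zero.
  return result + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
}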
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

87
deps/v8/src/arm/macro-assembler-arm.h

@ -540,9 +540,6 @@ class MacroAssembler: public Assembler {
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegister();
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@ -570,14 +567,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out,
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@ -1162,6 +1152,10 @@ class MacroAssembler: public Assembler {
}
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
// ---------------------------------------------------------------------------
// StatsCounter support
@ -1296,6 +1290,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
@ -1390,7 +1388,7 @@ class MacroAssembler: public Assembler {
}
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
@ -1467,6 +1465,9 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegister();
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
@ -1516,6 +1517,70 @@ class CodePatcher {
};
class FrameAndConstantPoolScope {
public:
FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
: masm_(masm),
type_(type),
old_has_frame_(masm->has_frame()),
old_constant_pool_available_(masm->is_constant_pool_available()) {
masm->set_has_frame(true);
masm->set_constant_pool_available(true);
if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
masm->EnterFrame(type, !old_constant_pool_available_);
}
}
~FrameAndConstantPoolScope() {
masm_->LeaveFrame(type_);
masm_->set_has_frame(old_has_frame_);
masm_->set_constant_pool_available(old_constant_pool_available_);
}
// Normally we generate the leave-frame code when this object goes
// out of scope. Sometimes we may need to generate the code somewhere else
// in addition. Calling this will achieve that, but the object stays in
// scope, the MacroAssembler is still marked as being in a frame scope, and
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
}
private:
MacroAssembler* masm_;
StackFrame::Type type_;
bool old_has_frame_;
bool old_constant_pool_available_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
};
// Class for scoping the unavailability of constant pool access.
class ConstantPoolUnavailableScope {
public:
explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
: masm_(masm),
old_constant_pool_available_(masm->is_constant_pool_available()) {
if (FLAG_enable_ool_constant_pool) {
masm_->set_constant_pool_available(false);
}
}
~ConstantPoolUnavailableScope() {
if (FLAG_enable_ool_constant_pool) {
masm_->set_constant_pool_available(old_constant_pool_available_);
}
}
private:
MacroAssembler* masm_;
bool old_constant_pool_available_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
};
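// A usage sketch (GenerateFoo is hypothetical, for illustration only):
// the two scopes above nest, restoring the saved flags on exit.
static void GenerateFoo(MacroAssembler* masm) {
  FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
  // ... code that may load constants through the pp register ...
  {
    ConstantPoolUnavailableScope no_pool(masm);
    // ... code that must not assume pp holds a valid constant pool
    // pointer, e.g. around an exit-frame transition ...
  }
}  // LeaveFrame is emitted here and both saved flags are restored.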
// -----------------------------------------------------------------------------
// Static helper functions.

10
deps/v8/src/arm/simulator-arm.cc

@ -796,6 +796,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
Simulator::~Simulator() {
}
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@ -3466,7 +3470,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
@ -3489,7 +3494,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();

4
deps/v8/src/arm/simulator-arm.h

@ -207,6 +207,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
Address get_sp() {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;

190
deps/v8/src/arm/stub-cache-arm.cc

@ -322,7 +322,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@ -351,60 +351,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
// Generate code to check if an object is a string. If the object is a
// heap object, its map's instance type is left in the scratch1 register.
// If this is not needed, scratch1 and scratch2 may be the same register.
static void GenerateStringCheck(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* smi,
Label* non_string_object) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ and_(scratch2, scratch1, Operand(kIsNotStringMask));
// The cast is to resolve the overload for the argument of 0x0.
__ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
__ b(ne, non_string_object);
}
// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ cmp(scratch1, Operand(JS_VALUE_TYPE));
__ b(ne, miss);
// Unwrap the value and check if the wrapped value is a string.
__ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
__ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
__ Ret();
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@ -481,11 +427,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
@ -559,15 +505,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
if (FLAG_track_double_fields && representation.IsDouble()) {
if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@ -585,15 +531,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (FLAG_track_double_fields && representation.IsDouble()) {
if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ str(value_reg, FieldMemOperand(scratch1, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@ -643,11 +589,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
} else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@ -688,7 +634,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@ -712,7 +658,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@ -783,13 +729,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
Handle<Map> receiver_map,
Register receiver,
Register scratch_in,
int argc,
Register* values) {
void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
Handle<Map> receiver_map,
Register receiver,
Register scratch_in,
bool is_store,
int argc,
Register* values) {
ASSERT(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
@ -854,7 +801,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Operand(ref));
// Jump to stub.
CallApiFunctionStub stub(true, call_data_undefined, argc);
CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@ -878,9 +825,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ mov(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@ -1075,15 +1019,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
}
void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization,
Handle<Map> receiver_map) {
GenerateFastApiCall(
masm(), call_optimization, receiver_map,
receiver(), scratch3(), 0, NULL);
}
void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
@ -1173,7 +1108,7 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@ -1260,24 +1195,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
masm(), call_optimization, handle(object->map()),
receiver(), scratch3(), 1, values);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
#undef __
#define __ ACCESS_MASM(masm)
@ -1285,20 +1202,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
Register receiver = r1;
Register value = r0;
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(value);
__ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@ -1308,7 +1221,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
FieldMemOperand(
receiver, JSGlobalObject::kGlobalReceiverOffset));
}
__ Push(receiver, value);
__ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@ -1336,21 +1249,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
Label miss;
// Check that the map of the object hasn't changed.
__ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
}
// Stub is never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@ -1358,10 +1256,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@ -1396,16 +1290,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
Register StoreStubCompiler::value() {
return r0;
}
Register* StoreStubCompiler::registers() {
// receiver, name, value, scratch1, scratch2, scratch3.
static Register registers[] = { r1, r2, r0, r3, r4, r5 };
// receiver, name, scratch1, scratch2, scratch3.
static Register registers[] = { r1, r2, r3, r4, r5 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
// receiver, name, value, scratch1, scratch2, scratch3.
static Register registers[] = { r2, r1, r0, r3, r4, r5 };
// receiver, name, scratch1, scratch2, scratch3.
static Register registers[] = { r2, r1, r3, r4, r5 };
return registers;
}
@ -1424,7 +1323,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
// -- lr : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
@ -1537,6 +1436,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
void StoreStubCompiler::GenerateStoreArrayLength() {
// Prepare tail call to StoreIC_ArrayLength.
__ Push(receiver(), value());
ExternalReference ref =
ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
masm()->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,

1
deps/v8/src/arm64/OWNERS

@ -0,0 +1 @@
rmcilroy@chromium.org

1229
deps/v8/src/arm64/assembler-arm64-inl.h

File diff suppressed because it is too large

2813
deps/v8/src/arm64/assembler-arm64.cc

File diff suppressed because it is too large

2233
deps/v8/src/arm64/assembler-arm64.h

File diff suppressed because it is too large

1562
deps/v8/src/arm64/builtins-arm64.cc

File diff suppressed because it is too large

5743
deps/v8/src/arm64/code-stubs-arm64.cc

File diff suppressed because it is too large

500
deps/v8/src/arm64/code-stubs-arm64.h

@ -0,0 +1,500 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class StringHelper : public AllStatic {
public:
// TODO(all): These don't seem to be used any more. Delete them.
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class StoreRegistersStateStub: public PlatformCodeStub {
public:
explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
: save_doubles_(with_fp) {}
static Register to_be_pushed_lr() { return ip0; }
static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() { return StoreRegistersState; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
SaveFPRegsMode save_doubles_;
void Generate(MacroAssembler* masm);
};
class RestoreRegistersStateStub: public PlatformCodeStub {
public:
explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
: save_doubles_(with_fp) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() { return RestoreRegistersState; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
SaveFPRegsMode save_doubles_;
void Generate(MacroAssembler* masm);
};
class RecordWriteStub: public PlatformCodeStub {
public:
// Stub to record the write of 'value' at 'address' in 'object'.
// Typically 'address' = 'object' + <some offset>.
// See MacroAssembler::RecordWriteField() for example.
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool SometimesSetsUpAFrame() { return false; }
static Mode GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
Instruction* instr1 =
reinterpret_cast<Instruction*>(stub->instruction_start());
Instruction* instr2 = instr1->following();
if (instr1->IsUncondBranchImm()) {
ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
ASSERT(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
// We patch the first two instructions of the stub back and forth between an
// adr and branch when we start and stop incremental heap marking.
// The branch is
// b label
// The adr is
// adr xzr label
// so effectively a nop.
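// Summarizing the three patched states (a sketch, matching GetMode below):
//   STORE_BUFFER_ONLY:      adr xzr, <noncompacting> ; adr xzr, <compacting>
//   INCREMENTAL:            b <noncompacting>        ; adr xzr, <compacting>
//   INCREMENTAL_COMPACTION: adr xzr, <noncompacting> ; b <compacting>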
static void Patch(Code* stub, Mode mode) {
// We are going to patch the first two instructions of the stub.
PatchingAssembler patcher(
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
ASSERT(GetMode(stub) == mode);
}
private:
// This is a helper class to manage the registers associated with the stub.
// The 'object' and 'address' registers must be preserved.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved) {
ASSERT(!AreAliased(scratch, object, address));
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = Register(pool_available.PopLowestIndex());
scratch2_ = Register(pool_available.PopLowestIndex());
// SaveCallerSaveRegisters needs to save the caller-saved registers; however,
// we don't bother saving ip0 and ip1 because they are used as scratch
// registers by the MacroAssembler.
saved_regs_.Remove(ip0);
saved_regs_.Remove(ip1);
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
void Save(MacroAssembler* masm) {
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->Push(scratch1_, scratch2_);
}
void Restore(MacroAssembler* masm) {
masm->Pop(scratch2_, scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
masm->PushCPURegList(saved_regs_);
if (mode == kSaveFPRegs) {
masm->PushCPURegList(kCallerSavedFP);
}
}
void RestoreCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
if (mode == kSaveFPRegs) {
masm->PopCPURegList(kCallerSavedFP);
}
masm->PopCPURegList(saved_regs_);
}
Register object() { return object_; }
Register address() { return address_; }
Register scratch0() { return scratch0_; }
Register scratch1() { return scratch1_; }
Register scratch2() { return scratch2_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
Register scratch2_;
CPURegList saved_regs_;
// TODO(all): We should consider moving this somewhere else.
static CPURegList GetValidRegistersForAllocation() {
// The list of valid registers for allocation is defined as all the
// registers without those with a special meaning.
//
// The default list excludes registers x26 to x31 because they are
// reserved for the following purpose:
// - x26 root register
// - x27 context pointer register
// - x28 jssp
// - x29 frame pointer
// - x30 link register (lr)
// - x31 xzr/stack pointer
CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
// We also remove MacroAssembler's scratch registers.
list.Remove(ip0);
list.Remove(ip1);
list.Remove(x8);
list.Remove(x9);
return list;
}
friend class RecordWriteStub;
};
// A list of stub variants which are pregenerated.
// The variants are stored in the same format as the minor key, so
// MinorKeyFor() can be used to populate and check this list.
static const int kAheadOfTime[];
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return MinorKeyFor(object_, value_, address_, remembered_set_action_,
save_fp_regs_mode_);
}
static int MinorKeyFor(Register object,
Register value,
Register address,
RememberedSetAction action,
SaveFPRegsMode fp_mode) {
ASSERT(object.Is64Bits());
ASSERT(value.Is64Bits());
ASSERT(address.Is64Bits());
return ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(action) |
SaveFPRegsModeBits::encode(fp_mode);
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 5> {};
class ValueBits: public BitField<int, 5, 5> {};
class AddressBits: public BitField<int, 10, 5> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
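// A minimal sketch of the BitField packing used by MinorKeyFor above
// (BitField is V8's utils.h template; the register codes are illustrative,
// and the action/fp-mode fields pack the same way at higher bit offsets):
static int SketchRecordWriteMinorKey() {
  int key = BitField<int, 0, 5>::encode(2)     // object register code
          | BitField<int, 5, 5>::encode(7)     // value register code
          | BitField<int, 10, 5>::encode(1);   // address register code
  ASSERT(BitField<int, 5, 5>::decode(key) == 7);  // fields decode independently
  return key;
}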
// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
};
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
}
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
LookupMode mode_;
};
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
// Compares two flat ASCII strings and returns the result in x0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
// Compares two flat ASCII strings for equality and returns the result
// in x0.
static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
virtual Major MajorKey() { return StringCompare; }
virtual int MinorKey() { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
Register left,
Register right,
Register length,
Register scratch1,
Register scratch2,
Label* chars_not_equal);
};
struct PlatformCallInterfaceDescriptor {
explicit PlatformCallInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} } // namespace v8::internal
#endif // V8_ARM64_CODE_STUBS_ARM64_H_

615
deps/v8/src/arm64/codegen-arm64.cc

@ -0,0 +1,615 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-arm64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
byte* fast_exp_arm64_machine_code = NULL;
double fast_exp_simulator(double x) {
Simulator* simulator = Simulator::current(Isolate::Current());
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
};
return simulator->CallDouble(fast_exp_arm64_machine_code, args);
}
#endif
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
// Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.SetStackPointer(csp);
// The argument will be in d0 on entry.
DoubleRegister input = d0;
// Use other caller-saved registers for all other values.
DoubleRegister result = d1;
DoubleRegister double_temp1 = d2;
DoubleRegister double_temp2 = d3;
Register temp1 = x10;
Register temp2 = x11;
Register temp3 = x12;
MathExpGenerator::EmitMathExp(&masm, input, result,
double_temp1, double_temp2,
temp1, temp2, temp3);
// Move the result to the return register.
masm.Fmov(d0, result);
masm.Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
fast_exp_arm64_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
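// Usage sketch: UnaryMathFunction is V8's typedef double (*)(double), so the
// generated (or simulated) code is called like any C function pointer:
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double e = fast_exp(1.0);  // ~2.71828; faster but possibly less
//                              // accurate than std::exp.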
UnaryMathFunction CreateSqrtFunction() {
return &std::sqrt;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
masm->set_has_frame(false);
}
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- x2 : receiver
// -- x3 : target map
// -----------------------------------
Register receiver = x2;
Register map = x3;
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
// Set transitioned map.
__ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
// ----------- S t a t e -------------
// -- lr : return address
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- x3 : target map, scratch for subsequent call
// -----------------------------------
Register receiver = x2;
Register target_map = x3;
Label gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
Register array_size = x6;
Register array = x7;
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
// Set the destination FixedDoubleArray's length and map.
Register map_root = x6;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Add(x10, array, kHeapObjectTag);
__ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
x6, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
__ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
FPRegister nan_d = d1;
__ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
Label entry, done;
__ B(&entry);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(lr);
__ B(fail);
// Iterate over the array, copying and converting smis to doubles. If an
// element is non-smi, write a hole to the destination.
{
Label loop;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
__ Tst(x13, kSmiTagMask);
__ Fcsel(d0, d0, nan_d, eq);
__ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(lr);
__ Bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
// -- x3 : target map, scratch for subsequent call
// -- x4 : scratch (elements)
// -----------------------------------
Register value = x0;
Register key = x1;
Register receiver = x2;
Register target_map = x3;
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether it's needed.
__ Push(target_map, receiver, key, value);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedArray.
Register array_size = x6;
Register array = x7;
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
Register map_root = x6;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
__ Add(array, array, kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
Register the_hole = x14;
Register heap_num_map = x15;
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
Label entry;
__ B(&entry);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(value, key, receiver, target_map);
__ Pop(lr);
__ B(fail);
{
Label loop, convert_hole;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ Cmp(x13, kHoleNanInt64);
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_num = x5;
__ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
__ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
__ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&entry);
// Replace the-hole NaN with the-hole pointer.
__ Bind(&convert_hole);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(value, key, receiver, target_map);
// Replace receiver's backing store with newly created and filled FixedArray.
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Pop(lr);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
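// Note on the conversion loop in GenerateDoubleToObject above: in a
// FixedDoubleArray the hole is stored as a specific quiet-NaN bit pattern
// (kHoleNanInt64), so a single 64-bit integer compare distinguishes holes
// from ordinary doubles. A plain C++ sketch of the same test (hypothetical
// helper, for illustration only):
//
//   static inline bool IsHoleNan(uint64_t bits) {
//     // Compare the raw bits; comparing as doubles would fail, since
//     // NaN != NaN under IEEE-754 comparison rules.
//     return bits == kHoleNanInt64;
//   }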
bool Code::IsYoungSequence(byte* sequence) {
return MacroAssembler::IsYoungSequence(sequence);
}
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime) {
ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
// Handle slices.
Label indirect_string_loaded;
__ Ldr(result.W(),
UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
__ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ Add(index, index, result.W());
__ B(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ Bind(&cons_string);
__ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
// Get the first of the two strings and load its instance type.
__ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ Bind(&indirect_string_loaded);
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ Bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
// Prepare sequential strings.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ B(&check_encoding);
// Handle external strings.
__ Bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ Tst(result, kIsIndirectStringMask);
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
// TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
// can be bound far away in deferred code.
__ Tst(result, kShortExternalStringMask);
__ B(ne, call_runtime);
__ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label ascii, done;
__ Bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
// Two-byte string.
__ Ldrh(result, MemOperand(string, index, SXTW, 1));
__ B(&done);
__ Bind(&ascii);
// Ascii string.
__ Ldrb(result, MemOperand(string, index, SXTW));
__ Bind(&done);
}
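// The string-shape dispatch above can be summarised in plain C++ (sketch
// only; the mask constants are the real ones used by the generated code):
//
//   if (type & kIsIndirectStringMask) {
//     if (type & kSlicedNotConsMask) {
//       // Slice: add the slice offset to the index, continue on the parent.
//     } else {
//       // Cons: only flat cons strings (empty second part) are handled
//       // here; anything else goes to the runtime to be flattened first.
//     }
//   }
//   // Then: sequential strings are read directly; external strings are read
//   // through their resource data pointer, one or two bytes per character.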
static MemOperand ExpConstant(Register base, int index) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_temp1,
DoubleRegister double_temp2,
Register temp1,
Register temp2,
Register temp3) {
// TODO(jbramley): There are several instances where fnmsub could be used
// instead of fmul and fsub. Doing this changes the result, but since this is
// an estimation anyway, does it matter?
ASSERT(!AreAliased(input, result,
double_temp1, double_temp2,
temp1, temp2, temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
Label done;
DoubleRegister double_temp3 = result;
Register constants = temp3;
// The algorithm used relies on some magic constants which are initialized in
// ExternalReference::InitializeMathExpData().
// Load the address of the start of the array.
__ Mov(constants, ExternalReference::math_exp_constants(0));
// We have to do a four-way split here:
// - If input <= about -708.4, the output always rounds to zero.
// - If input >= about 709.8, the output always rounds to +infinity.
// - If the input is NaN, the output is NaN.
// - Otherwise, the result needs to be calculated.
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
__ Fcmp(input, double_temp1);
__ Fccmp(input, double_temp2, NoFlag, hi);
// At this point, the condition flags can be in one of five states:
// NZCV
// 1000 -708.4 < input < 709.8 result = exp(input)
// 0110 input == 709.8 result = +infinity
// 0010 input > 709.8 result = +infinity
// 0011 input is NaN result = input
// 0000 input <= -708.4 result = +0.0
// Continue the common case first. 'mi' tests N == 1.
__ B(&result_is_finite_non_zero, mi);
// TODO(jbramley): Consider adding a +infinity register for ARM64.
__ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
// Select between +0.0 and +infinity. 'lo' tests C == 0.
__ Fcsel(result, fp_zero, double_temp2, lo);
// Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
__ Fcsel(result, result, input, vc);
__ B(&done);
// The rest is magic, as described in InitializeMathExpData().
__ Bind(&result_is_finite_non_zero);
// Assert that we can load offset 3 and offset 4 with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
__ Fmov(temp2.W(), double_temp1.S());
__ Fsub(double_temp1, double_temp1, double_temp3);
// Assert that we can load offset 5 and offset 6 with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp1, double_temp1, double_temp2);
__ Fsub(double_temp1, double_temp1, input);
__ Fmul(double_temp2, double_temp1, double_temp1);
__ Fsub(double_temp3, double_temp3, double_temp1);
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
__ Ldr(double_temp2, ExpConstant(constants, 7));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Fsub(double_temp3, double_temp3, double_temp1);
// The 8th constant is 1.0, so use an immediate move rather than a load.
// We can't generate a runtime assertion here as we would need to call Abort
// in the runtime and we don't have an Isolate when we generate this code.
__ Fmov(double_temp2, 1.0);
__ Fadd(double_temp3, double_temp3, double_temp2);
__ And(temp2, temp2, 0x7ff);
__ Add(temp1, temp1, 0x3ff);
// Do the final table lookup.
__ Mov(temp3, ExternalReference::math_exp_log_table());
__ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
__ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
__ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
__ Bfi(temp2, temp1, 32, 32);
__ Fmov(double_temp1, temp2);
__ Fmul(result, double_temp3, double_temp1);
__ Bind(&done);
}
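// Scalar equivalent of the four-way split above (illustrative sketch only;
// the threshold names and TableBasedExp are hypothetical):
//
//   double ExpSketch(double input) {
//     if (input <= kSmallThreshold) return 0.0;          // about -708.4
//     if (input >= kLargeThreshold) return V8_INFINITY;  // about 709.8
//     if (input != input) return input;                  // NaN in, NaN out
//     return TableBasedExp(input);  // the table-driven path above
//   }
//
// The Fcmp/Fccmp pair folds all four conditions into one flag state, so a
// single branch plus two Fcsel selections replace the chain of comparisons.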
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

71
deps/v8/src/arm64/codegen-arm64.h

@@ -0,0 +1,71 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
#include "ast.h"
#include "ic-inl.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output. Register index is asserted to be a 32-bit W
// register.
static void Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} } // namespace v8::internal
#endif // V8_ARM64_CODEGEN_ARM64_H_

1271
deps/v8/src/arm64/constants-arm64.h

File diff suppressed because it is too large

199
deps/v8/src/arm64/cpu-arm64.cc

@@ -0,0 +1,199 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for ARM64 independent of OS goes here.
#include "v8.h"
#if V8_TARGET_ARCH_ARM64
#include "arm64/cpu-arm64.h"
#include "arm64/utils-arm64.h"
namespace v8 {
namespace internal {
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
// Initialise to smallest possible cache size.
unsigned CpuFeatures::dcache_line_size_ = 1;
unsigned CpuFeatures::icache_line_size_ = 1;
void CPU::SetUp() {
CpuFeatures::Probe();
}
bool CPU::SupportsCrankshaft() {
return true;
}
void CPU::FlushICache(void* address, size_t length) {
if (length == 0) {
return;
}
#ifdef USE_SIMULATOR
// TODO(all): consider doing some cache simulation to ensure every address
// run has been synced.
USE(address);
USE(length);
#else
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
// side of the CPU.
uintptr_t start = reinterpret_cast<uintptr_t>(address);
// Sizes will be used to generate a mask big enough to cover a pointer.
uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
// Cache line sizes are always a power of 2.
ASSERT(CountSetBits(dsize, 64) == 1);
ASSERT(CountSetBits(isize, 64) == 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
__asm__ __volatile__ ( // NOLINT
// Clean every line of the D cache containing the target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
// va : by (Virtual) Address
// u : to the point of Unification
// The point of unification for a processor is the point by which the
// instruction and data caches are guaranteed to see the same copy of a
// memory location. See ARM DDI 0406B page B2-12 for more information.
"dc cvau, %[dline] \n\t"
"add %[dline], %[dline], %[dsize] \n\t"
"cmp %[dline], %[end] \n\t"
"b.lt 0b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
// dsb : Data Synchronisation Barrier
// ish : Inner SHareable domain
// The point of unification for an Inner Shareable shareability domain is
// the point by which the instruction and data caches of all the processors
// in that Inner Shareable shareability domain are guaranteed to see the
// same copy of a memory location. See ARM DDI 0406B page B2-12 for more
// information.
"dsb ish \n\t"
// Invalidate every line of the I cache containing the target data.
"1: \n\t"
// ic : instruction cache maintenance
// i : invalidate
// va : by address
// u : to the point of unification
"ic ivau, %[iline] \n\t"
"add %[iline], %[iline], %[isize] \n\t"
"cmp %[iline], %[end] \n\t"
"b.lt 1b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
"dsb ish \n\t"
// Barrier to ensure any prefetching which happened before this code is
// discarded.
// isb : Instruction Synchronisation Barrier
"isb \n\t"
: [dline] "+r" (dstart),
[iline] "+r" (istart)
: [dsize] "r" (dsize),
[isize] "r" (isize),
[end] "r" (end)
// This code does not write to memory but without the dependency gcc might
// move this code before the code is generated.
: "cc", "memory"
); // NOLINT
#endif
}
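// The alignment above is plain power-of-two rounding. For a 64-byte D-cache
// line, for example:
//
//   uintptr_t dsize  = 64;
//   uintptr_t dstart = start & ~(dsize - 1);  // round start down to a line
//
// so the loop cleans every cache line overlapping [start, start + length),
// each exactly once.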
void CpuFeatures::Probe() {
// Compute I and D cache line size. The cache type register holds
// information about the caches.
uint32_t cache_type_register = GetCacheType();
static const int kDCacheLineSizeShift = 16;
static const int kICacheLineSizeShift = 0;
static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
// The cache type register holds the size of the I and D caches as a power of
// two.
uint32_t dcache_line_size_power_of_two =
(cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
uint32_t icache_line_size_power_of_two =
(cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
dcache_line_size_ = 1 << dcache_line_size_power_of_two;
icache_line_size_ = 1 << icache_line_size_power_of_two;
// AArch64 has no configuration options, no further probing is required.
supported_ = 0;
#ifdef DEBUG
initialized_ = true;
#endif
}
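// Worked example of the decode above, for a hypothetical register value
// 0x00060006: the line-size fields hold log2 values, so
//   (0x00060006 & kDCacheLineSizeMask) >> 16 == 6 -> dcache_line_size_ = 64
//   (0x00060006 & kICacheLineSizeMask) >> 0  == 6 -> icache_line_size_ = 64
// i.e. a field value of 6 yields 1 << 6 = 64.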
unsigned CpuFeatures::dcache_line_size() {
ASSERT(initialized_);
return dcache_line_size_;
}
unsigned CpuFeatures::icache_line_size() {
ASSERT(initialized_);
return icache_line_size_;
}
uint32_t CpuFeatures::GetCacheType() {
#ifdef USE_SIMULATOR
// This will lead to a cache with 1 byte long lines, which is fine since the
// simulator will not need this information.
return 0;
#else
uint32_t cache_type_register;
// Copy the content of the cache type register to a core register.
__asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
: [ctr] "=r" (cache_type_register));
return cache_type_register;
#endif
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

107
deps/v8/src/arm64/cpu-arm64.h

@@ -0,0 +1,107 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_CPU_ARM64_H_
#define V8_ARM64_CPU_ARM64_H_
#include <stdio.h>
#include "serialize.h"
#include "cpu.h"
namespace v8 {
namespace internal {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for ARM64.
return false;
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for ARM64.
return false;
}
static bool IsSafeForSnapshot(CpuFeature f) {
return (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
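// For example: a feature found only by runtime probing is rejected whenever
// the serializer is enabled, because the snapshot must stay portable:
//
//   IsSupported | Serializer enabled | probing-only | IsSafeForSnapshot
//   ------------+--------------------+--------------+------------------
//   true        | false              | any          | true
//   true        | true               | false        | true
//   true        | true               | true         | false
//   false       | any                | any          | false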
// I and D cache line size in bytes.
static unsigned dcache_line_size();
static unsigned icache_line_size();
static unsigned supported_;
static bool VerifyCrossCompiling() {
// There are no optional features for ARM64.
ASSERT(cross_compile_ == 0);
return true;
}
static bool VerifyCrossCompiling(CpuFeature f) {
// There are no optional features for ARM64.
USE(f);
ASSERT(cross_compile_ == 0);
return true;
}
private:
// Return the content of the cache type register.
static uint32_t GetCacheType();
// I and D cache line size in bytes.
static unsigned icache_line_size_;
static unsigned dcache_line_size_;
#ifdef DEBUG
static bool initialized_;
#endif
// This isn't used (and is always 0), but it is required by V8.
static unsigned found_by_runtime_probing_only_;
static unsigned cross_compile_;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
} } // namespace v8::internal
#endif // V8_ARM64_CPU_ARM64_H_

393
deps/v8/src/arm64/debug-arm64.cc

@@ -0,0 +1,393 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "debug.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
// Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
// the return from JS function sequence from
// mov sp, fp
// ldp fp, lr, [sp] #16
// ldr ip0, [pc, #(3 * kInstructionSize)]
// add sp, sp, ip0
// ret
// <number of parameters ...
// ... plus one (64 bits)>
// to a call to the debug break return code.
// ldr ip0, [pc, #(3 * kInstructionSize)]
// blr ip0
// hlt kHltBadCode @ code should not return, catch if it does.
// <debug break return code ...
// ... entry point address (64 bits)>
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
// The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code.
patcher.LoadLiteral(ip0, 3 * kInstructionSize);
// TODO(all): check the following is correct.
// The debug break return code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break return code.
patcher.blr(ip0);
patcher.hlt(kHltBadCode);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
void BreakLocationIterator::ClearDebugBreakAtReturn() {
// Reset the code emitted by EmitReturnSequence to its original state.
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kJSRetSequenceInstructions);
}
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
// Patch the code emitted by Debug::GenerateSlots, changing the debug break
// slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// to a call to the debug slot code.
// ldr ip0, [pc, #(2 * kInstructionSize)]
// blr ip0
// <debug break slot code ...
// ... entry point address (64 bits)>
// TODO(all): consider adding a hlt instruction after the blr as we don't
// expect control to return here. This implies increasing
// kDebugBreakSlotInstructions to 5 instructions.
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
// The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code.
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
// TODO(all): check the following is correct.
// The debug break slot code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break slot code.
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,
Register scratch) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Any live values (object_regs and non_object_regs) in caller-saved
// registers (or lr) need to be stored on the stack so that their values are
// safely preserved for a call into C code.
//
// Also:
// * object_regs may be modified during the C code by the garbage
// collector. Every object register must be a valid tagged pointer or
// SMI.
//
// * non_object_regs will be converted to SMIs so that the garbage
// collector doesn't try to interpret them as pointers.
//
// TODO(jbramley): Why can't this handle callee-saved registers?
ASSERT((~kCallerSaved.list() & object_regs) == 0);
ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
ASSERT((object_regs & non_object_regs) == 0);
ASSERT((scratch.Bit() & object_regs) == 0);
ASSERT((scratch.Bit() & non_object_regs) == 0);
ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
STATIC_ASSERT(kSmiValueSize == 32);
CPURegList non_object_list =
CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Store each non-object register as two SMIs.
Register reg = Register(non_object_list.PopLowestIndex());
__ Push(reg);
__ Poke(wzr, 0);
__ Push(reg.W(), wzr);
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
}
if (object_regs != 0) {
__ PushXRegList(object_regs);
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Mov(x0, 0); // No arguments.
__ Mov(x1, ExternalReference::debug_break(masm->isolate()));
CEntryStub stub(1);
__ CallStub(&stub);
// Restore the register values from the expression stack.
if (object_regs != 0) {
__ PopXRegList(object_regs);
}
non_object_list =
CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Load each non-object register from two SMIs.
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
Register reg = Register(non_object_list.PopHighestIndex());
__ Pop(scratch, reg);
__ Bfxil(reg, scratch, 32, 32);
}
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
masm->isolate());
__ Mov(scratch, after_break_target);
__ Ldr(scratch, MemOperand(scratch));
__ Br(scratch);
}
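// Sketch of the non-object save/restore format used above: each 64-bit value
// is split into two 32-bit halves, and each half is stored shifted into the
// upper word of its 8-byte stack slot, which is exactly the SMI encoding when
// kSmiShift == 32, so the GC reads every slot as a harmless SMI:
//
//   uint64_t lo_slot = (value & 0xffffffff) << 32;  // at jssp[0]
//   uint64_t hi_slot = value & 0xffffffff00000000;  // at jssp[8]
//   // Restore: Pop gives scratch = lo_slot and reg = hi_slot, then
//   // Bfxil(reg, scratch, 32, 32) copies scratch[63:32] into reg[31:0]:
//   uint64_t restored = hi_slot | (lo_slot >> 32);  // == value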
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm64.cc).
// ----------- S t a t e -------------
// -- x2 : name
// -- lr : return address
// -- x0 : receiver
// -- [sp] : receiver
// -----------------------------------
// Registers x0 and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : receiver
// -- x2 : name
// -- lr : return address
// -----------------------------------
// Registers x0, x1, and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- x0 : key
// -- x1 : receiver
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- x0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm64.cc)
// ----------- S t a t e -------------
// -- x2 : name
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that x0 is TOS, which
// is an object. This is not generally the case, so this should be used with
// care.
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
}
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -- x2 : feedback array
// -- x3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
}
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
}
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -- x2 : feedback array
// -- x3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(
masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
}
void Debug::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(Assembler::DEBUG_BREAK_NOP);
}
}
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted, no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
}
const bool Debug::kFrameDropperSupported = false;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

671
deps/v8/src/arm64/decoder-arm64-inl.h

@@ -0,0 +1,671 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_DECODER_ARM64_INL_H_
#define V8_ARM64_DECODER_ARM64_INL_H_
#include "arm64/decoder-arm64.h"
#include "globals.h"
#include "utils.h"
namespace v8 {
namespace internal {
// Top-level instruction decode function.
template<typename V>
void Decoder<V>::Decode(Instruction* instr) {
if (instr->Bits(28, 27) == 0) {
V::VisitUnallocated(instr);
} else {
switch (instr->Bits(27, 24)) {
// 0: PC relative addressing.
case 0x0: DecodePCRelAddressing(instr); break;
// 1: Add/sub immediate.
case 0x1: DecodeAddSubImmediate(instr); break;
// A: Logical shifted register.
// Add/sub with carry.
// Conditional compare register.
// Conditional compare immediate.
// Conditional select.
// Data processing 1 source.
// Data processing 2 source.
// B: Add/sub shifted register.
// Add/sub extended register.
// Data processing 3 source.
case 0xA:
case 0xB: DecodeDataProcessing(instr); break;
// 2: Logical immediate.
// Move wide immediate.
case 0x2: DecodeLogical(instr); break;
// 3: Bitfield.
// Extract.
case 0x3: DecodeBitfieldExtract(instr); break;
// 4: Unconditional branch immediate.
// Exception generation.
// Compare and branch immediate.
// 5: Compare and branch immediate.
// Conditional branch.
// System.
// 6,7: Unconditional branch.
// Test and branch immediate.
case 0x4:
case 0x5:
case 0x6:
case 0x7: DecodeBranchSystemException(instr); break;
// 8,9: Load/store register pair post-index.
// Load register literal.
// Load/store register unscaled immediate.
// Load/store register immediate post-index.
// Load/store register immediate pre-index.
// Load/store register offset.
// C,D: Load/store register pair offset.
// Load/store register pair pre-index.
// Load/store register unsigned immediate.
// Advanced SIMD.
case 0x8:
case 0x9:
case 0xC:
case 0xD: DecodeLoadStore(instr); break;
// E: FP fixed point conversion.
// FP integer conversion.
// FP data processing 1 source.
// FP compare.
// FP immediate.
// FP data processing 2 source.
// FP conditional compare.
// FP conditional select.
// Advanced SIMD.
// F: FP data processing 3 source.
// Advanced SIMD.
case 0xE:
case 0xF: DecodeFP(instr); break;
}
}
}
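// Bits(27, 24) selects the top-level row of the A64 encoding table. As a
// worked example, take the instruction word 0x91000422 (add x2, x1, #1):
//   Bits(28, 27) = 2, so it is not filtered as unallocated, and
//   Bits(27, 24) = (0x91000422 >> 24) & 0xF = 0x1 -> DecodeAddSubImmediate.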
template<typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
ASSERT(instr->Bit(28) == 0x1);
V::VisitPCRelAddressing(instr);
}
template<typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) );
switch (instr->Bits(31, 29)) {
case 0:
case 4: {
V::VisitUnconditionalBranch(instr);
break;
}
case 1:
case 5: {
if (instr->Bit(25) == 0) {
V::VisitCompareBranch(instr);
} else {
V::VisitTestBranch(instr);
}
break;
}
case 2: {
if (instr->Bit(25) == 0) {
if ((instr->Bit(24) == 0x1) ||
(instr->Mask(0x01000010) == 0x00000010)) {
V::VisitUnallocated(instr);
} else {
V::VisitConditionalBranch(instr);
}
} else {
V::VisitUnallocated(instr);
}
break;
}
case 6: {
if (instr->Bit(25) == 0) {
if (instr->Bit(24) == 0) {
if ((instr->Bits(4, 2) != 0) ||
(instr->Mask(0x00E0001D) == 0x00200001) ||
(instr->Mask(0x00E0001D) == 0x00400001) ||
(instr->Mask(0x00E0001E) == 0x00200002) ||
(instr->Mask(0x00E0001E) == 0x00400002) ||
(instr->Mask(0x00E0001C) == 0x00600000) ||
(instr->Mask(0x00E0001C) == 0x00800000) ||
(instr->Mask(0x00E0001F) == 0x00A00000) ||
(instr->Mask(0x00C0001C) == 0x00C00000)) {
V::VisitUnallocated(instr);
} else {
V::VisitException(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
if ((instr->Bits(21, 19) == 0x4) ||
(masked_003FF0E0 == 0x00033000) ||
(masked_003FF0E0 == 0x003FF020) ||
(masked_003FF0E0 == 0x003FF060) ||
(masked_003FF0E0 == 0x003FF0E0) ||
(instr->Mask(0x00388000) == 0x00008000) ||
(instr->Mask(0x0038E000) == 0x00000000) ||
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
(instr->Mask(0x003FFFC0) == 0x000320C0) ||
(instr->Mask(0x003FF100) == 0x00032100) ||
(instr->Mask(0x003FF200) == 0x00032200) ||
(instr->Mask(0x003FF400) == 0x00032400) ||
(instr->Mask(0x003FF800) == 0x00032800) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
V::VisitUnallocated(instr);
} else {
V::VisitSystem(instr);
}
} else {
V::VisitUnallocated(instr);
}
}
} else {
if ((instr->Bit(24) == 0x1) ||
(instr->Bits(20, 16) != 0x1F) ||
(instr->Bits(15, 10) != 0) ||
(instr->Bits(4, 0) != 0) ||
(instr->Bits(24, 21) == 0x3) ||
(instr->Bits(24, 22) == 0x3)) {
V::VisitUnallocated(instr);
} else {
V::VisitUnconditionalBranchToRegister(instr);
}
}
break;
}
case 3:
case 7: {
V::VisitUnallocated(instr);
break;
}
}
}
template<typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(26) == 0) {
// TODO(all): VisitLoadStoreExclusive.
V::VisitUnimplemented(instr);
} else {
DecodeAdvSIMDLoadStore(instr);
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadStorePairNonTemporal(instr);
}
} else {
V::VisitLoadStorePairPostIndex(instr);
}
}
}
} else {
if (instr->Bit(29) == 0) {
if (instr->Mask(0xC4000000) == 0xC4000000) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadLiteral(instr);
}
} else {
if ((instr->Mask(0x84C00000) == 0x80C00000) ||
(instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(21) == 0) {
switch (instr->Bits(11, 10)) {
case 0: {
V::VisitLoadStoreUnscaledOffset(instr);
break;
}
case 1: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadStorePostIndex(instr);
}
break;
}
case 2: {
// TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
V::VisitUnimplemented(instr);
break;
}
case 3: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadStorePreIndex(instr);
}
break;
}
}
} else {
if (instr->Bits(11, 10) == 0x2) {
if (instr->Bit(14) == 0) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadStoreRegisterOffset(instr);
}
} else {
V::VisitUnallocated(instr);
}
}
}
}
}
} else {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
V::VisitUnallocated(instr);
} else {
if ((instr->Bits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
V::VisitLoadStorePairOffset(instr);
} else {
V::VisitLoadStorePairPreIndex(instr);
}
}
}
} else {
if (instr->Bit(29) == 0) {
V::VisitUnallocated(instr);
} else {
if ((instr->Mask(0x84C00000) == 0x80C00000) ||
(instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadStoreUnsignedOffset(instr);
}
}
}
}
}
template<typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x2);
if (instr->Mask(0x80400000) == 0x00400000) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
V::VisitLogicalImmediate(instr);
} else {
if (instr->Bits(30, 29) == 0x1) {
V::VisitUnallocated(instr);
} else {
V::VisitMoveWideImmediate(instr);
}
}
}
}
template<typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x3);
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
(instr->Mask(0x80008000) == 0x00008000)) {
V::VisitUnallocated(instr);
} else if (instr->Bit(23) == 0) {
if ((instr->Mask(0x80200000) == 0x00200000) ||
(instr->Mask(0x60000000) == 0x60000000)) {
V::VisitUnallocated(instr);
} else {
V::VisitBitfield(instr);
}
} else {
if ((instr->Mask(0x60200000) == 0x00200000) ||
(instr->Mask(0x60000000) != 0x00000000)) {
V::VisitUnallocated(instr);
} else {
V::VisitExtract(instr);
}
}
}
template<typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x1);
if (instr->Bit(23) == 1) {
V::VisitUnallocated(instr);
} else {
V::VisitAddSubImmediate(instr);
}
}
template<typename V>
void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xA) ||
(instr->Bits(27, 24) == 0xB) );
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Mask(0x80008000) == 0x00008000) {
V::VisitUnallocated(instr);
} else {
V::VisitLogicalShifted(instr);
}
} else {
switch (instr->Bits(23, 21)) {
case 0: {
if (instr->Mask(0x0000FC00) != 0) {
V::VisitUnallocated(instr);
} else {
V::VisitAddSubWithCarry(instr);
}
break;
}
case 2: {
if ((instr->Bit(29) == 0) ||
(instr->Mask(0x00000410) != 0)) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(11) == 0) {
V::VisitConditionalCompareRegister(instr);
} else {
V::VisitConditionalCompareImmediate(instr);
}
}
break;
}
case 4: {
if (instr->Mask(0x20000800) != 0x00000000) {
V::VisitUnallocated(instr);
} else {
V::VisitConditionalSelect(instr);
}
break;
}
case 6: {
if (instr->Bit(29) == 0x1) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(30) == 0) {
if ((instr->Bit(15) == 0x1) ||
(instr->Bits(15, 11) == 0) ||
(instr->Bits(15, 12) == 0x1) ||
(instr->Bits(15, 12) == 0x3) ||
(instr->Bits(15, 13) == 0x3) ||
(instr->Mask(0x8000EC00) == 0x00004C00) ||
(instr->Mask(0x8000E800) == 0x80004000) ||
(instr->Mask(0x8000E400) == 0x80004000)) {
V::VisitUnallocated(instr);
} else {
V::VisitDataProcessing2Source(instr);
}
} else {
if ((instr->Bit(13) == 1) ||
(instr->Bits(20, 16) != 0) ||
(instr->Bits(15, 14) != 0) ||
(instr->Mask(0xA01FFC00) == 0x00000C00) ||
(instr->Mask(0x201FF800) == 0x00001800)) {
V::VisitUnallocated(instr);
} else {
V::VisitDataProcessing1Source(instr);
}
}
break;
}
}
case 1:
case 3:
case 5:
case 7: V::VisitUnallocated(instr); break;
}
}
} else {
if (instr->Bit(28) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x80008000) == 0x00008000)) {
V::VisitUnallocated(instr);
} else {
V::VisitAddSubShifted(instr);
}
} else {
if ((instr->Mask(0x00C00000) != 0x00000000) ||
(instr->Mask(0x00001400) == 0x00001400) ||
(instr->Mask(0x00001800) == 0x00001800)) {
V::VisitUnallocated(instr);
} else {
V::VisitAddSubExtended(instr);
}
}
} else {
if ((instr->Bit(30) == 0x1) ||
(instr->Bits(30, 29) == 0x1) ||
(instr->Mask(0xE0600000) == 0x00200000) ||
(instr->Mask(0xE0608000) == 0x00400000) ||
(instr->Mask(0x60608000) == 0x00408000) ||
(instr->Mask(0x60E00000) == 0x00E00000) ||
(instr->Mask(0x60E00000) == 0x00800000) ||
(instr->Mask(0x60E00000) == 0x00600000)) {
V::VisitUnallocated(instr);
} else {
V::VisitDataProcessing3Source(instr);
}
}
}
}
template<typename V>
void Decoder<V>::DecodeFP(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xE) ||
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(29) == 1) {
V::VisitUnallocated(instr);
} else {
if (instr->Bits(31, 30) == 0x3) {
V::VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
(instr->Bit(18) == 1) ||
(instr->Mask(0x80008000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x00160000) == 0x00000000) ||
(instr->Mask(0x00160000) == 0x00120000)) {
V::VisitUnallocated(instr);
} else {
V::VisitFPFixedPointConvert(instr);
}
} else {
if (instr->Bits(15, 10) == 32) {
V::VisitUnallocated(instr);
} else if (instr->Bits(15, 10) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x000E0000) == 0x000C0000) ||
(instr->Mask(0x00160000) == 0x00120000) ||
(instr->Mask(0x00160000) == 0x00140000) ||
(instr->Mask(0x20C40000) == 0x00800000) ||
(instr->Mask(0x20C60000) == 0x00840000) ||
(instr->Mask(0xA0C60000) == 0x80060000) ||
(instr->Mask(0xA0C60000) == 0x00860000) ||
(instr->Mask(0xA0C60000) == 0x00460000) ||
(instr->Mask(0xA0CE0000) == 0x80860000) ||
(instr->Mask(0xA0CE0000) == 0x804E0000) ||
(instr->Mask(0xA0CE0000) == 0x000E0000) ||
(instr->Mask(0xA0D60000) == 0x00160000) ||
(instr->Mask(0xA0D60000) == 0x80560000) ||
(instr->Mask(0xA0D60000) == 0x80960000)) {
V::VisitUnallocated(instr);
} else {
V::VisitFPIntegerConvert(instr);
}
} else if (instr->Bits(14, 10) == 16) {
const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
if ((instr->Mask(0x80180000) != 0) ||
(masked_A0DF8000 == 0x00020000) ||
(masked_A0DF8000 == 0x00030000) ||
(masked_A0DF8000 == 0x00068000) ||
(masked_A0DF8000 == 0x00428000) ||
(masked_A0DF8000 == 0x00430000) ||
(masked_A0DF8000 == 0x00468000) ||
(instr->Mask(0xA0D80000) == 0x00800000) ||
(instr->Mask(0xA0DE0000) == 0x00C00000) ||
(instr->Mask(0xA0DF0000) == 0x00C30000) ||
(instr->Mask(0xA0DC0000) == 0x00C40000)) {
V::VisitUnallocated(instr);
} else {
V::VisitFPDataProcessing1Source(instr);
}
} else if (instr->Bits(13, 10) == 8) {
if ((instr->Bits(15, 14) != 0) ||
(instr->Bits(2, 0) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
V::VisitUnallocated(instr);
} else {
V::VisitFPCompare(instr);
}
} else if (instr->Bits(12, 10) == 4) {
if ((instr->Bits(9, 5) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
V::VisitUnallocated(instr);
} else {
V::VisitFPImmediate(instr);
}
} else {
if (instr->Mask(0x80800000) != 0x00000000) {
V::VisitUnallocated(instr);
} else {
switch (instr->Bits(11, 10)) {
case 1: {
V::VisitFPConditionalCompare(instr);
break;
}
case 2: {
if ((instr->Bits(15, 14) == 0x3) ||
(instr->Mask(0x00009000) == 0x00009000) ||
(instr->Mask(0x0000A000) == 0x0000A000)) {
V::VisitUnallocated(instr);
} else {
V::VisitFPDataProcessing2Source(instr);
}
break;
}
case 3: {
V::VisitFPConditionalSelect(instr);
break;
}
default: UNREACHABLE();
}
}
}
}
} else {
// Bit 30 == 1 has been handled earlier.
ASSERT(instr->Bit(30) == 0);
if (instr->Mask(0xA0800000) != 0) {
V::VisitUnallocated(instr);
} else {
V::VisitFPDataProcessing3Source(instr);
}
}
}
}
}
}
template<typename V>
void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
ASSERT(instr->Bits(29, 25) == 0x6);
V::VisitUnimplemented(instr);
}
template<typename V>
void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
ASSERT(instr->Bits(27, 25) == 0x7);
V::VisitUnimplemented(instr);
}
} } // namespace v8::internal
#endif // V8_ARM64_DECODER_ARM64_INL_H_

109
deps/v8/src/arm64/decoder-arm64.cc

@@ -0,0 +1,109 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_ARM64
#include "globals.h"
#include "utils.h"
#include "arm64/decoder-arm64.h"
namespace v8 {
namespace internal {
void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_back(new_visitor);
}
void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_front(new_visitor);
}
void DispatchingDecoderVisitor::InsertVisitorBefore(
DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}
void DispatchingDecoderVisitor::InsertVisitorAfter(
DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
it++;
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
visitors_.push_back(new_visitor);
}
void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
visitors_.remove(visitor);
}
#define DEFINE_VISITOR_CALLERS(A) \
void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
ASSERT(instr->Mask(A##FMask) == A##Fixed); \
} \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
(*it)->Visit##A(instr); \
} \
}
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

210
deps/v8/src/arm64/decoder-arm64.h

@@ -0,0 +1,210 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_DECODER_ARM64_H_
#define V8_ARM64_DECODER_ARM64_H_
#include <list>
#include "globals.h"
#include "arm64/instructions-arm64.h"
namespace v8 {
namespace internal {
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadStorePairNonTemporal) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(Unallocated) \
V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
public:
virtual ~DecoderVisitor() {}
#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
#undef DECLARE
};
// A visitor that dispatches to a list of visitors.
class DispatchingDecoderVisitor : public DecoderVisitor {
public:
DispatchingDecoderVisitor() {}
virtual ~DispatchingDecoderVisitor() {}
// Register a new visitor class with the decoder.
// Decode() will call the corresponding visitor method from all registered
// visitor classes when decoding reaches the leaf node of the instruction
// decode tree.
// Visitors are called in the order they appear in the list.
// A visitor can only be registered once.
// Registering an already registered visitor will update its position.
//
// d.AppendVisitor(V1);
// d.AppendVisitor(V2);
// d.PrependVisitor(V2); // Move V2 to the start of the list.
// d.InsertVisitorBefore(V3, V2);
// d.AppendVisitor(V4);
// d.AppendVisitor(V4); // No effect.
//
// d.Decode(i);
//
// will call the visitor methods in V3, V2, V1 and V4, in that order.
void AppendVisitor(DecoderVisitor* visitor);
void PrependVisitor(DecoderVisitor* visitor);
void InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
void InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
// Remove a previously registered visitor class from the list of visitors
// stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
};
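// For illustration only (this sketch is not part of the decoder): a minimal
// DecoderVisitor that counts decoded instructions. The VISITOR_LIST macro
// generates an override for every leaf class, keeping the sketch exhaustive.
//
//   class CountingVisitor : public DecoderVisitor {
//    public:
//     CountingVisitor() : count_(0) {}
//     uint64_t count() const { return count_; }
//   #define DECLARE(A) virtual void Visit##A(Instruction*) { count_++; }
//     VISITOR_LIST(DECLARE)
//   #undef DECLARE
//    private:
//     uint64_t count_;
//   };
//
// Registered with dispatcher.AppendVisitor(&counting_visitor), it is then
// invoked once per decoded instruction, alongside the other visitors.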
template<typename V>
class Decoder : public V {
public:
Decoder() {}
virtual ~Decoder() {}
// Top-level instruction decoder function. Decodes an instruction and calls
// the visitor functions registered with the Decoder class.
virtual void Decode(Instruction *instr);
private:
// Decode the PC relative addressing instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x0.
void DecodePCRelAddressing(Instruction* instr);
// Decode the add/subtract immediate instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x1.
void DecodeAddSubImmediate(Instruction* instr);
// Decode the branch, system command, and exception generation parts of
// the instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
void DecodeBranchSystemException(Instruction* instr);
// Decode the load and store parts of the instruction tree, and call
// the corresponding visitors.
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
void DecodeLoadStore(Instruction* instr);
// Decode the logical immediate and move wide immediate parts of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x2.
void DecodeLogical(Instruction* instr);
// Decode the bitfield and extraction parts of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x3.
void DecodeBitfieldExtract(Instruction* instr);
// Decode the data processing parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
void DecodeDataProcessing(Instruction* instr);
// Decode the floating point parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0xE, 0xF}.
void DecodeFP(Instruction* instr);
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeAdvSIMDLoadStore(Instruction* instr);
// Decode the Advanced SIMD (NEON) data processing part of the instruction
// tree, and call the corresponding visitors.
// On entry, instruction bits 27:25 = 0x7.
void DecodeAdvSIMDDataProcessing(Instruction* instr);
};
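// A sketch (inferred from the per-function comments above, not the
// definitive routing) of how Decode() fans out on instruction bits 27:24;
// the actual implementation in decoder-arm64-inl.h also checks further bits,
// for example to separate the cases sharing 0x1, 0xA and 0xB:
//
//   switch (instr->Bits(27, 24)) {
//     case 0x0: DecodePCRelAddressing(instr); break;
//     case 0x2: DecodeLogical(instr); break;
//     case 0x3: DecodeBitfieldExtract(instr); break;
//     case 0x4: case 0x5: case 0x6: case 0x7:
//       DecodeBranchSystemException(instr); break;
//     case 0x8: case 0x9: case 0xC: case 0xD:
//       DecodeLoadStore(instr); break;
//     case 0xE: case 0xF: DecodeFP(instr); break;
//     // 0x1, 0xA and 0xB need further discrimination, as noted above.
//   }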
} } // namespace v8::internal
#endif // V8_ARM64_DECODER_ARM64_H_

388
deps/v8/src/arm64/deoptimizer-arm64.cc

@@ -0,0 +1,388 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
int Deoptimizer::patch_size() {
// Size of the code used to patch lazy bailout points.
// Patching is done by Deoptimizer::DeoptimizeFunction.
return 4 * kInstructionSize;
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
// entry sequence unusable (see other architectures).
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
SharedFunctionInfo* shared =
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
Address code_start_address = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
ASSERT((prev_call_address == NULL) ||
(call_address >= prev_call_address + patch_size()));
ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
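// For reference, each patched lazy bailout site therefore occupies
// patch_size() == 4 * kInstructionSize bytes, laid out as:
//   ldr  ip0, #+8        ; LoadLiteral: fetch the entry address stored below
//   blr  ip0             ; call the lazy deoptimization entry
//   dc64 <deopt_entry>   ; 8-byte literal, occupying two instruction slots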
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
for (int i = 0; i < Register::NumRegisters(); i++) {
input_->SetRegister(i, 0);
}
// TODO(all): Do we also need to set a value to csp?
input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
}
}
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// There is no dynamic alignment padding on ARM64 in the input frame.
return false;
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(x0.code(), params);
output_frame->SetRegister(x1.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
Code* Deoptimizer::NotifyStubFailureBuiltin() {
return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
// TODO(all): This code needs to be revisited. We probably only need to save
// caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame.
// Save all allocatable floating point registers.
CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
FPRegister::kAllocatableFPRegisters);
__ PushCPURegList(saved_fp_registers);
// We save all the registers except jssp, sp and lr.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) +
(saved_fp_registers.Count() * kDRegSize);
// Floating point registers are saved on the stack above core registers.
const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
// Get the bailout id from the stack.
Register bailout_id = x2;
__ Peek(bailout_id, kSavedRegistersAreaSize);
Register code_object = x3;
Register fp_to_sp = x4;
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, and correct one word for bailout id.
__ Add(fp_to_sp, masm()->StackPointer(),
kSavedRegistersAreaSize + (1 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
// - x4: fp-to-sp delta
__ Mov(x5, ExternalReference::isolate_address(isolate()));
{
// Call Deoptimizer::New().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register x0.
Register deoptimizer = x0;
// Get the input frame descriptor pointer.
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
CPURegList copy_to_input = saved_registers;
for (int i = 0; i < saved_registers.Count(); i++) {
// TODO(all): Look for opportunities to optimize this by using ldp/stp.
__ Peek(x2, i * kPointerSize);
CPURegister current_reg = copy_to_input.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Str(x2, MemOperand(x1, offset));
}
// Copy FP registers to the input frame.
for (int i = 0; i < saved_fp_registers.Count(); i++) {
// TODO(all): Look for opportunities to optimize this by using ldp/stp.
int dst_offset = FrameDescription::double_registers_offset() +
(i * kDoubleSize);
int src_offset = kFPRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
__ Str(x2, MemOperand(x1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
__ Add(unwind_limit, unwind_limit, __ StackPointer());
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
Label pop_loop;
Label pop_loop_header;
__ B(&pop_loop_header);
__ Bind(&pop_loop);
__ Pop(x4);
__ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
__ Bind(&pop_loop_header);
__ Cmp(unwind_limit, __ StackPointer());
__ B(ne, &pop_loop);
// Compute the output frame in the deoptimizer.
__ Push(x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
__ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
__ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
__ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
__ B(&outer_loop_header);
__ Bind(&outer_push_loop);
Register current_frame = x2;
__ Ldr(current_frame, MemOperand(x0, 0));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
__ B(&inner_loop_header);
__ Bind(&inner_push_loop);
__ Sub(x3, x3, kPointerSize);
__ Add(x6, current_frame, x3);
__ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
__ Push(x7);
__ Bind(&inner_loop_header);
__ Cbnz(x3, &inner_push_loop);
__ Add(x0, x0, kPointerSize);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
int src_offset = FrameDescription::double_registers_offset();
while (!saved_fp_registers.IsEmpty()) {
const CPURegister reg = saved_fp_registers.PopLowestIndex();
__ Ldr(reg, MemOperand(x1, src_offset));
src_offset += kDoubleSize;
}
// Push state from the last output frame.
__ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
__ Push(x6);
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
// TODO(all): This code needs to be revisited. We probably don't need to
// restore all the registers as fullcodegen does not keep live values in
// registers (note that at least fp must be restored though).
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
ASSERT(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
// We don't need to restore x7 as it will be clobbered later to hold the
// continuation address.
Register continuation = x7;
saved_registers.Remove(continuation);
while (!saved_registers.IsEmpty()) {
// TODO(all): Look for opportunities to optimize this by using ldp.
CPURegister current_reg = saved_registers.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Ldr(current_reg, MemOperand(last_output_frame, offset));
}
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
__ InitializeRootRegister();
__ Br(continuation);
}
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
Register entry_id = temps.AcquireX();
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
Label done;
{
InstructionAccurateScope scope(masm());
// The number of entries will never exceed kMaxNumberOfEntries.
// As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
// a movz instruction to load the entry id.
ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movz(entry_id, i);
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
}
__ Bind(&done);
__ Push(entry_id);
}
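// The generated table is thus a dense array of two-instruction entries
// (table_entry_size_ bytes each) that all branch to a common tail:
//   entry 0:  movz <entry_id>, #0 ;  b done
//   entry 1:  movz <entry_id>, #1 ;  b done
//   ...
//   done:     push <entry_id>     ;  the entry id is now on top of the stack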
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No out-of-line constant pool support.
UNREACHABLE();
}
#undef __
} } // namespace v8::internal

1856
deps/v8/src/arm64/disasm-arm64.cc

File diff suppressed because it is too large

115
deps/v8/src/arm64/disasm-arm64.h

@@ -0,0 +1,115 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#include "v8.h"
#include "globals.h"
#include "utils.h"
#include "instructions-arm64.h"
#include "decoder-arm64.h"
namespace v8 {
namespace internal {
class Disassembler: public DecoderVisitor {
public:
Disassembler();
Disassembler(char* text_buffer, int buffer_size);
virtual ~Disassembler();
char* GetOutput();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
protected:
virtual void ProcessOutput(Instruction* instr);
void Format(Instruction* instr, const char* mnemonic, const char* format);
void Substitute(Instruction* instr, const char* string);
int SubstituteField(Instruction* instr, const char* format);
int SubstituteRegisterField(Instruction* instr, const char* format);
int SubstituteImmediateField(Instruction* instr, const char* format);
int SubstituteLiteralField(Instruction* instr, const char* format);
int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
int SubstituteShiftField(Instruction* instr, const char* format);
int SubstituteExtendField(Instruction* instr, const char* format);
int SubstituteConditionField(Instruction* instr, const char* format);
int SubstitutePCRelAddressField(Instruction* instr, const char* format);
int SubstituteBranchTargetField(Instruction* instr, const char* format);
int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
int SubstitutePrefetchField(Instruction* instr, const char* format);
int SubstituteBarrierField(Instruction* instr, const char* format);
bool RdIsZROrSP(Instruction* instr) const {
return (instr->Rd() == kZeroRegCode);
}
bool RnIsZROrSP(Instruction* instr) const {
return (instr->Rn() == kZeroRegCode);
}
bool RmIsZROrSP(Instruction* instr) const {
return (instr->Rm() == kZeroRegCode);
}
bool RaIsZROrSP(Instruction* instr) const {
return (instr->Ra() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
void ResetOutput();
void AppendToOutput(const char* string, ...);
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
};
class PrintDisassembler: public Disassembler {
public:
explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
~PrintDisassembler() { }
virtual void ProcessOutput(Instruction* instr);
private:
FILE *stream_;
};
} } // namespace v8::internal
#endif // V8_ARM64_DISASM_ARM64_H

65
deps/v8/src/arm64/frames-arm64.cc

@@ -0,0 +1,65 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_ARM64
#include "assembler.h"
#include "assembler-arm64.h"
#include "assembler-arm64-inl.h"
#include "frames.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Object*& ExitFrame::constant_pool_slot() const {
UNREACHABLE();
return Memory::Object_at(NULL);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

133
deps/v8/src/arm64/frames-arm64.h

@@ -0,0 +1,133 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm64/constants-arm64.h"
#include "arm64/assembler-arm64.h"
#ifndef V8_ARM64_FRAMES_ARM64_H_
#define V8_ARM64_FRAMES_ARM64_H_
namespace v8 {
namespace internal {
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
// TODO(all): Refine this number.
const int kNumSafepointRegisters = 32;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
#define kNumSafepointSavedRegisters \
CPURegList::GetSafepointSavedRegisters().Count();
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kCallerSPDisplacement = 2 * kPointerSize;
static const int kCallerPCOffset = 1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
static const int kSPOffset = -1 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
static const int kLastExitFrameField = kCodeOffset;
static const int kConstantPoolOffset = 0; // Not used
};
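// In terms of fp, an exit frame therefore looks like this (with 64-bit
// pointers):
//   fp + 16 : caller's sp (kCallerSPDisplacement)
//   fp +  8 : return address (kCallerPCOffset)
//   fp +  0 : caller's fp (kCallerFPOffset)  <- fp
//   fp -  8 : sp (kSPOffset)
//   fp - 16 : code object (kCodeOffset == kLastExitFrameField)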
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
// There are two words on the stack (saved fp and saved lr) between fp and
// the arguments.
static const int kLastParameterOffset = 2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
};
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kLengthOffset = -4 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InternalFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
inline void StackHandler::SetFp(Address slot, Address fp) {
Memory::Address_at(slot) = fp;
}
} } // namespace v8::internal
#endif // V8_ARM64_FRAMES_ARM64_H_

5015
deps/v8/src/arm64/full-codegen-arm64.cc

File diff suppressed because it is too large

1407
deps/v8/src/arm64/ic-arm64.cc

File diff suppressed because it is too large

333
deps/v8/src/arm64/instructions-arm64.cc

@@ -0,0 +1,333 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_FP_STATICS
#include "arm64/instructions-arm64.h"
#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_s:
case LDR_d: return true;
default: return false;
}
}
}
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_s:
case STR_d: return true;
default: return false;
}
}
}
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
ASSERT(width <= 64);
rotate &= 63;
// Avoid an undefined shift by 'width' when the rotation is zero.
if (rotate == 0) return value;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
}
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
uint64_t result = value & ((1UL << width) - 1UL);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
return result;
}
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case, specifically where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
int64_t n = BitN();
int64_t imm_s = ImmSetBits();
int64_t imm_r = ImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
//
// N imms immr size S R
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
// A pattern is constructed of size bits, where the least significant S+1
// bits are set. The pattern is rotated right by R, and repeated across a
// 32 or 64-bit value, depending on destination register width.
//
if (n == 1) {
if (imm_s == 0x3F) {
return 0;
}
uint64_t bits = (1UL << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1F) {
return 0;
}
for (int width = 0x20; width >= 0x2; width >>= 1) {
if ((imm_s & width) == 0) {
int mask = width - 1;
if ((imm_s & mask) == mask) {
return 0;
}
uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
}
}
}
UNREACHABLE();
return 0;
}
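// Worked example: n=0, imm_s=0b000011, imm_r=0b000001 matches the '0sssss'
// row of the table: size=32, S=3, R=1. The pattern is the low S+1=4 bits set
// (0xf) rotated right by 1, giving 0x80000007, which is then replicated
// across the destination: 0x80000007 for a W register and
// 0x8000000780000007 for an X register.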
float Instruction::ImmFP32() {
// ImmFP: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return rawbits_to_float(result);
}
double Instruction::ImmFP64() {
// ImmFP: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return rawbits_to_double(result);
}
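// Worked example: ImmFP() == 0x70 gives bit7=0, bit6=1, bit5_to_0=0x30, so
// ImmFP32() assembles 0x3f800000 (1.0f) and ImmFP64() assembles
// 0x3ff0000000000000 (1.0).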
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
switch (op) {
case STP_x:
case LDP_x:
case STP_d:
case LDP_d: return LSDoubleWord;
default: return LSWord;
}
}
ptrdiff_t Instruction::ImmPCOffset() {
ptrdiff_t offset;
if (IsPCRelAddressing()) {
// PC-relative addressing. Only ADR is supported.
offset = ImmPCRel();
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
} else {
// Load literal (offset from PC).
ASSERT(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bit
// registers.
offset = ImmLLiteral() << kInstructionSizeLog2;
}
return offset;
}
Instruction* Instruction::ImmPCOffsetTarget() {
return InstructionAtOffset(ImmPCOffset());
}
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int32_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}
void Instruction::SetImmPCOffsetTarget(Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else {
SetImmLLiteral(target);
}
}
void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
ASSERT(Mask(PCRelAddressingMask) == ADR);
Instr imm = Assembler::ImmPCRelAddress(DistanceTo(target));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
void Instruction::SetBranchImmTarget(Instruction* target) {
ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
Instr branch_imm = 0;
uint32_t imm_mask = 0;
ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
switch (BranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
imm_mask = ImmCondBranch_mask;
break;
}
case UncondBranchType: {
branch_imm = Assembler::ImmUncondBranch(offset);
imm_mask = ImmUncondBranch_mask;
break;
}
case CompareBranchType: {
branch_imm = Assembler::ImmCmpBranch(offset);
imm_mask = ImmCmpBranch_mask;
break;
}
case TestBranchType: {
branch_imm = Assembler::ImmTestBranch(offset);
imm_mask = ImmTestBranch_mask;
break;
}
default: UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
void Instruction::SetImmLLiteral(Instruction* source) {
ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
ASSERT(IsInlineData());
uint64_t payload = ImmMoveWide();
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
return payload;
}
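// Concretely, a payload of 0x1234 would be encoded as a single
// 'movz xzr, #0x1234' (see IsInlineData() above), and InlineData() recovers
// 0x1234 from the ImmMoveWide field.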
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

501
deps/v8/src/arm64/instructions-arm64.h

@@ -0,0 +1,501 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_ARM64_INSTRUCTIONS_ARM64_H_
#include "globals.h"
#include "utils.h"
#include "arm64/constants-arm64.h"
#include "arm64/utils-arm64.h"
namespace v8 {
namespace internal {
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
// The following macros initialize a float/double variable with a bit pattern
// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(ARM64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(ARM64_DEFINE_FP_STATICS)
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
// A similar value, but as a quiet NaN.
DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
// The default NaN values (for FPCR.DN=1).
DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
LSWord = 2,
LSDoubleWord = 3
};
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
CondBranchType = 1,
UncondBranchType = 2,
CompareBranchType = 3,
TestBranchType = 4
};
enum AddrMode {
Offset,
PreIndex,
PostIndex
};
enum FPRounding {
// The first four values are encodable directly by FPCR<RMode>.
FPTieEven = 0x0,
FPPositiveInfinity = 0x1,
FPNegativeInfinity = 0x2,
FPZero = 0x3,
// The final rounding mode is only available when explicitly specified by the
// instruction (such as with fcvta). It cannot be set in FPCR.
FPTieAway
};
enum Reg31Mode {
Reg31IsStackPointer,
Reg31IsZeroRegister
};
// Instructions. ---------------------------------------------------------------
class Instruction {
public:
V8_INLINE Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
}
V8_INLINE void SetInstructionBits(Instr new_instr) {
*reinterpret_cast<Instr*>(this) = new_instr;
}
int Bit(int pos) const {
return (InstructionBits() >> pos) & 1;
}
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
}
int32_t SignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return signed_bitextract_32(msb, lsb, bits);
}
Instr Mask(uint32_t mask) const {
return InstructionBits() & mask;
}
V8_INLINE Instruction* following(int count = 1) {
return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
}
V8_INLINE Instruction* preceding(int count = 1) {
return following(-count);
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int64_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width-1, 0, offset);
}
uint64_t ImmLogical();
float ImmFP32();
double ImmFP64();
LSDataSize SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
bool IsTestBranch() const {
return Mask(TestBranchFMask) == TestBranchFixed;
}
bool IsLdrLiteral() const {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsLdrLiteralX() const {
return Mask(LoadLiteralMask) == LDR_x_lit;
}
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
// Match any loads or stores, including pairs.
bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
// Match any loads, including pairs.
bool IsLoad() const;
// Match any stores, including pairs.
bool IsStore() const;
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
// The following instructions use csp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
// Otherwise, r31 is the zero register.
if (IsAddSubImmediate() || IsAddSubExtended()) {
if (Mask(AddSubSetFlagsBit)) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into csp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
return Reg31IsZeroRegister;
}
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
// The following instructions use csp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
// Otherwise, r31 is the zero register.
if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
return Reg31IsStackPointer;
}
return Reg31IsZeroRegister;
}
ImmBranchType BranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
return UncondBranchType;
} else if (IsCompareBranch()) {
return CompareBranchType;
} else if (IsTestBranch()) {
return TestBranchType;
} else {
return UnknownBranchType;
}
}
static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
switch (branch_type) {
case UncondBranchType:
return ImmUncondBranch_width;
case CondBranchType:
return ImmCondBranch_width;
case CompareBranchType:
return ImmCmpBranch_width;
case TestBranchType:
return ImmTestBranch_width;
default:
UNREACHABLE();
return 0;
}
}
// The range of the branch instruction, expressed as 'instr +- range'.
static int32_t ImmBranchRange(ImmBranchType branch_type) {
return
(1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
kInstructionSize;
}
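// For example, with the architectural field widths (imm19 for conditional
// and compare-and-branch, imm26 for unconditional, imm14 for test-and-branch)
// this gives ranges of +/-1MB - 4 bytes, +/-128MB - 4 bytes and
// +/-32KB - 4 bytes respectively.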
int ImmBranch() const {
switch (BranchType()) {
case CondBranchType: return ImmCondBranch();
case UncondBranchType: return ImmUncondBranch();
case CompareBranchType: return ImmCmpBranch();
case TestBranchType: return ImmTestBranch();
default: UNREACHABLE();
}
return 0;
}
bool IsBranchAndLinkToRegister() const {
return Mask(UnconditionalBranchToRegisterMask) == BLR;
}
bool IsMovz() const {
return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
(Mask(MoveWideImmediateMask) == MOVZ_w);
}
bool IsMovk() const {
return (Mask(MoveWideImmediateMask) == MOVK_x) ||
(Mask(MoveWideImmediateMask) == MOVK_w);
}
bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
bool IsNop(int n) {
// A marking nop is an instruction
// mov r<n>, r<n>
// which is encoded as
// orr r<n>, xzr, r<n>
return (Mask(LogicalShiftedMask) == ORR_x) &&
(Rd() == Rm()) &&
(Rd() == n);
}
// Find the PC offset encoded in this instruction. 'this' may be a branch or
// a PC-relative addressing instruction.
// The offset returned is unscaled.
ptrdiff_t ImmPCOffset();
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
uint8_t* LiteralAddress() {
int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
return reinterpret_cast<uint8_t*>(this) + offset;
}
enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
V8_INLINE Instruction* InstructionAtOffset(
int64_t offset,
CheckAlignment check = CHECK_ALIGNMENT) {
Address addr = reinterpret_cast<Address>(this) + offset;
// The FUZZ_disasm test relies on no check being done.
ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
return Cast(addr);
}
template<typename T> V8_INLINE static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
}
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instructions sequences generated by the
// MacroAssembler.
class InstructionSequence : public Instruction {
public:
static InstructionSequence* At(Address address) {
return reinterpret_cast<InstructionSequence*>(address);
}
// Sequences generated by MacroAssembler::InlineData().
bool IsInlineData() const;
uint64_t InlineData() const;
};
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.
// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;
// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;
// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Parameters are stored in ARM64 registers as if the printf pseudo-instruction
// was a call to the real printf method:
//
// x0: The format string, then either of:
// x1-x7: Optional arguments.
// d0-d7: Optional arguments.
//
// Floating-point and integer arguments are passed in separate sets of
// registers in AAPCS64 (even for varargs functions), so it is not possible to
// determine the type of location of each arguments without some information
// about the values that were passed in. This information could be retrieved
// from the printf format string, but the format string is not trivial to
// parse so we encode the relevant information with the HLT instruction.
// - Type
// Either kRegister or kFPRegister, but stored as a uint32_t because there's
// no way to guarantee the size of the CPURegister::RegisterType enum.
const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
const unsigned kPrintfLength = 2 * kInstructionSize;
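// A printf call site is therefore expected to span kPrintfLength bytes:
//   hlt  #kImmExceptionIsPrintf
//   dc32 <type>   ; at kPrintfTypeOffset: kRegister or kFPRegister
// with the format string in x0 and any arguments already in x1-x7 or d0-d7.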
// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a NULL-terminated ASCII string, padded to
// kInstructionSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
const unsigned kDebugCodeOffset = 1 * kInstructionSize;
const unsigned kDebugParamsOffset = 2 * kInstructionSize;
const unsigned kDebugMessageOffset = 3 * kInstructionSize;
// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
// before every instruction for the specified LOG_ parameters.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
NO_PARAM = 0,
BREAK = 1 << 0,
LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
LOG_REGS = 1 << 2, // Log general purpose registers.
LOG_FP_REGS = 1 << 3, // Log floating-point registers.
LOG_SYS_REGS = 1 << 4, // Log the status flags.
LOG_WRITE = 1 << 5, // Log any memory write.
LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
// Trace control.
TRACE_ENABLE = 1 << 6,
TRACE_DISABLE = 2 << 6,
TRACE_OVERRIDE = 3 << 6
};
} } // namespace v8::internal
#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_

618
deps/v8/src/arm64/instrument-arm64.cc

@@ -0,0 +1,618 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm64/instrument-arm64.h"
namespace v8 {
namespace internal {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
ASSERT(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
}
void Counter::Enable() {
enabled_ = true;
}
void Counter::Disable() {
enabled_ = false;
}
bool Counter::IsEnabled() {
return enabled_;
}
void Counter::Increment() {
if (enabled_) {
count_++;
}
}
uint64_t Counter::count() {
uint64_t result = count_;
if (type_ == Gauge) {
// If the counter is a Gauge, reset the count after reading.
count_ = 0;
}
return result;
}
const char* Counter::name() {
return name_;
}
CounterType Counter::type() {
return type_;
}
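// A minimal usage sketch of the reset-on-read semantics above (illustrative
// only, not part of this file):
//
//   Counter loads("Load Integer", Gauge);
//   loads.Enable();
//   loads.Increment();
//   loads.Increment();
//   uint64_t n = loads.count();  // n == 2; reading a Gauge resets it to 0.
//   n = loads.count();           // n == 0 until the next Increment().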
typedef struct {
const char* name;
CounterType type;
} CounterDescriptor;
static const CounterDescriptor kCounterList[] = {
{"Instruction", Cumulative},
{"Move Immediate", Gauge},
{"Add/Sub DP", Gauge},
{"Logical DP", Gauge},
{"Other Int DP", Gauge},
{"FP DP", Gauge},
{"Conditional Select", Gauge},
{"Conditional Compare", Gauge},
{"Unconditional Branch", Gauge},
{"Compare and Branch", Gauge},
{"Test and Branch", Gauge},
{"Conditional Branch", Gauge},
{"Load Integer", Gauge},
{"Load FP", Gauge},
{"Load Pair", Gauge},
{"Load Literal", Gauge},
{"Store Integer", Gauge},
{"Store FP", Gauge},
{"Store Pair", Gauge},
{"PC Addressing", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) {
// Set up the output stream. If datafile is non-NULL, use that file. If it
// can't be opened, or datafile is NULL, use stderr.
if (datafile != NULL) {
output_stream_ = fopen(datafile, "w");
if (output_stream_ == NULL) {
fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
output_stream_ = stderr;
}
}
static const int num_counters =
sizeof(kCounterList) / sizeof(CounterDescriptor);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
// Construct Counter objects from counter description array.
for (int i = 0; i < num_counters; i++) {
Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
counters_.push_back(counter);
}
DumpCounterNames();
}
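// For reference, the data file produced above starts with a comment header
// and a CSV row of counter names, followed by one CSV row of counts per
// sample period. An illustrative (not verbatim) example, using the default
// sample period of 1 << 22 and the 22 counters in kCounterList:
//
//   # counters=22
//   # sample_period=4194304
//   Instruction,Move Immediate,Add/Sub DP,...,SP Adjust,
//   4194304,1052,8947,...,312,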
Instrument::~Instrument() {
// Dump any remaining instruction data to the output file.
DumpCounters();
// Free all the counter objects.
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
delete *it;
}
if (output_stream_ != stderr) {
fclose(output_stream_);
}
}
void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
ASSERT(counter->type() == Cumulative);
counter->Increment();
if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
DumpCounters();
}
}
void Instrument::DumpCounters() {
// Iterate through the counter objects, dumping their values to the output
// stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::DumpCounterNames() {
// Iterate through the counter objects, dumping the counter names to the
// output stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%s,", (*it)->name());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::HandleInstrumentationEvent(unsigned event) {
switch (event) {
case InstrumentStateEnable: Enable(); break;
case InstrumentStateDisable: Disable(); break;
default: DumpEventMarker(event);
}
}
void Instrument::DumpEventMarker(unsigned marker) {
// Dump an event marker to the output stream as a specially formatted comment
// line.
static Counter* counter = GetCounter("Instruction");
fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
(marker >> 8) & 0xff, counter->count());
}
Counter* Instrument::GetCounter(const char* name) {
// Get a Counter object by name from the counter list.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
if (strcmp((*it)->name(), name) == 0) {
return *it;
}
}
// A Counter by that name does not exist: print an error message to stderr
// and the output file, and exit.
static const char* error_message =
"# Error: Unknown counter \"%s\". Exiting.\n";
fprintf(stderr, error_message, name);
fprintf(output_stream_, error_message, name);
exit(1);
}
void Instrument::Enable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Enable();
}
}
void Instrument::Disable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Disable();
}
}
void Instrument::VisitPCRelAddressing(Instruction* instr) {
Update();
static Counter* counter = GetCounter("PC Addressing");
counter->Increment();
}
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
}
void Instrument::VisitLogicalImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitMoveWideImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Move Immediate");
if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
unsigned imm = instr->ImmMoveWide();
HandleInstrumentationEvent(imm);
} else {
counter->Increment();
}
}
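// In other words, a MOVN to the zero register is repurposed here as an
// instrumentation event channel: the move-wide immediate carries either an
// InstrumentState value or a two-character marker for DumpEventMarker().
// A hedged example (the marker value is hypothetical):
//
//   __ movn(xzr, 0x4744);  // Immediate 0x4744: low byte 'D', high byte 'G'.
//                          // DumpEventMarker() prints "# DG @ <count>".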
void Instrument::VisitBitfield(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitExtract(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitUnconditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitCompareBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Compare and Branch");
counter->Increment();
}
void Instrument::VisitTestBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Test and Branch");
counter->Increment();
}
void Instrument::VisitConditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Branch");
counter->Increment();
}
void Instrument::VisitSystem(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitException(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::InstrumentLoadStorePair(Instruction* instr) {
static Counter* load_pair_counter = GetCounter("Load Pair");
static Counter* store_pair_counter = GetCounter("Store Pair");
if (instr->Mask(LoadStorePairLBit) != 0) {
load_pair_counter->Increment();
} else {
store_pair_counter->Increment();
}
}
void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
counter->Increment();
}
void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_int_counter = GetCounter("Load Integer");
static Counter* store_int_counter = GetCounter("Store Integer");
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreOpMask)) {
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
case STR_x: store_int_counter->Increment(); break;
case STR_s: // Fall through.
case STR_d: store_fp_counter->Increment(); break;
case LDRB_w: // Fall through.
case LDRH_w: // Fall through.
case LDR_w: // Fall through.
case LDR_x: // Fall through.
case LDRSB_x: // Fall through.
case LDRSH_x: // Fall through.
case LDRSW_x: // Fall through.
case LDRSB_w: // Fall through.
case LDRSH_w: load_int_counter->Increment(); break;
case LDR_s: // Fall through.
case LDR_d: load_fp_counter->Increment(); break;
default: UNREACHABLE();
}
}
void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLogicalShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitAddSubShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
}
void Instrument::VisitAddSubWithCarry(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitFPCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPConditionalCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitFPConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitFPImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPIntegerConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitUnallocated(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitUnimplemented(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
} } // namespace v8::internal

107
deps/v8/src/arm64/instrument-arm64.h

@@ -0,0 +1,107 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
#define V8_ARM64_INSTRUMENT_ARM64_H_
#include "globals.h"
#include "utils.h"
#include "arm64/decoder-arm64.h"
#include "arm64/constants-arm64.h"
namespace v8 {
namespace internal {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
enum InstrumentState {
InstrumentStateDisable = 0,
InstrumentStateEnable = 1
};
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
class Counter {
public:
Counter(const char* name, CounterType type = Gauge);
void Increment();
void Enable();
void Disable();
bool IsEnabled();
uint64_t count();
const char* name();
CounterType type();
private:
char name_[kCounterNameMaxLength];
uint64_t count_;
bool enabled_;
CounterType type_;
};
class Instrument: public DecoderVisitor {
public:
explicit Instrument(const char* datafile = NULL,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
void Update();
void Enable();
void Disable();
void DumpCounters();
void DumpCounterNames();
void DumpEventMarker(unsigned marker);
void HandleInstrumentationEvent(unsigned event);
Counter* GetCounter(const char* name);
void InstrumentLoadStore(Instruction* instr);
void InstrumentLoadStorePair(Instruction* instr);
std::list<Counter*> counters_;
FILE *output_stream_;
uint64_t sample_period_;
};
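// A minimal usage sketch (assuming the Decoder declared in decoder-arm64.h
// exposes an AppendVisitor() hook, as the simulator's decoder does; the file
// name below is hypothetical):
//
//   Decoder decoder;
//   Instrument instrument("instrument.csv", 1 << 20);
//   decoder.AppendVisitor(&instrument);
//   // Every instruction decoded from now on updates the counters, and a CSV
//   // row is dumped to instrument.csv once per 2^20 instructions.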
} } // namespace v8::internal
#endif // V8_ARM64_INSTRUMENT_ARM64_H_

2576
deps/v8/src/arm64/lithium-arm64.cc

File diff suppressed because it is too large

3100
deps/v8/src/arm64/lithium-arm64.h

File diff suppressed because it is too large

5901
deps/v8/src/arm64/lithium-codegen-arm64.cc

File diff suppressed because it is too large

490
deps/v8/src/arm64/lithium-codegen-arm64.h

@@ -0,0 +1,490 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#include "arm64/lithium-arm64.h"
#include "arm64/lithium-gap-resolver-arm64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
Scope* scope() const { return scope_; }
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub() ||
info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
LinkRegisterStatus GetLinkRegisterState() const {
return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
}
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32I(LOperand* op);
Operand ToOperand32U(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// TODO(jbramley): Examine these helpers and check that they make sense.
// IsInteger32Constant returns true for smi constants, for example.
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
DoubleRegister ToDoubleRegister(LOperand* op) const;
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
// Return a double scratch register which can be used locally
// when generating code for a lithium instruction.
DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
Label* exit,
Label* allocation_entry);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredTaggedToI(LTaggedToI* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void DoGap(LGap* instr);
// Generic version of EmitBranch. It contains some code to avoid emitting a
// branch on the next emitted basic block where we could just fall through.
// You shouldn't use it directly; consider one of the helpers such as
// LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
template<class InstrType>
void EmitBranchGeneric(InstrType instr,
const BranchGenerator& branch);
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
void EmitCompareAndBranch(InstrType instr,
Condition condition,
const Register& lhs,
const Operand& rhs);
template<class InstrType>
void EmitTestAndBranch(InstrType instr,
Condition condition,
const Register& value,
uint64_t mask);
template<class InstrType>
void EmitBranchIfNonZeroNumber(InstrType instr,
const FPRegister& value,
const FPRegister& scratch);
template<class InstrType>
void EmitBranchIfHeapNumber(InstrType instr,
const Register& value);
template<class InstrType>
void EmitBranchIfRoot(InstrType instr,
const Register& value,
Heap::RootListIndex index);
// Emits optimized code to deep-copy the contents of statically known object
// graphs (e.g. object literal boilerplate). Expects a pointer to the
// allocated destination object in the result register, and a pointer to the
// source object in the source register.
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
Register scratch,
int* offset,
AllocationSiteMode mode);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
MemOperand BuildSeqStringOperand(Register string,
Register temp,
LOperand* index,
String::Encoding encoding);
void DeoptimizeBranch(
LEnvironment* environment,
BranchType branch_type, Register reg = NoReg, int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL);
void Deoptimize(LEnvironment* environment,
Deoptimizer::BailoutType* override_bailout_type = NULL);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void DeoptimizeIfZero(Register rt, LEnvironment* environment);
void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
void DeoptimizeIfRoot(Register rt,
Heap::RootListIndex index,
LEnvironment* environment);
void DeoptimizeIfNotRoot(Register rt,
Heap::RootListIndex index,
LEnvironment* environment);
void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
Register scratch,
bool key_is_smi,
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
int additional_index);
void CalcKeyedArrayBaseRegister(Register base,
Register elements,
Register key,
bool key_is_tagged,
ElementsKind elements_kind);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
// Generate a direct call to a known function.
// If the function is already loaded into x1 by the caller, function_reg may
// be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
// automatically load it.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
Register function_reg = NoReg);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) V8_OVERRIDE;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table itself is
// emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
UseScratchRegisterScope temps(codegen_->masm_);
// Preserve the value of lr which must be saved on the stack (the call to
// the stub will clobber it).
Register to_be_pushed_lr =
temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
codegen_->masm_->Mov(to_be_pushed_lr, lr);
switch (codegen_->expected_safepoint_kind_) {
case Safepoint::kWithRegisters: {
StoreRegistersStateStub stub(kDontSaveFPRegs);
codegen_->masm_->CallStub(&stub);
break;
}
case Safepoint::kWithRegistersAndDoubles: {
StoreRegistersStateStub stub(kSaveFPRegs);
codegen_->masm_->CallStub(&stub);
break;
}
default:
UNREACHABLE();
}
}
~PushSafepointRegistersScope() {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
case Safepoint::kWithRegisters: {
RestoreRegistersStateStub stub(kDontSaveFPRegs);
codegen_->masm_->CallStub(&stub);
break;
}
case Safepoint::kWithRegistersAndDoubles: {
RestoreRegistersStateStub stub(kSaveFPRegs);
codegen_->masm_->CallStub(&stub);
break;
}
default:
UNREACHABLE();
}
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
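// A hedged sketch of how the codegen uses this class: each Do* function in
// lithium-codegen-arm64.cc defines a local subclass (local classes share the
// enclosing member function's access to LCodeGen's private helpers), roughly:
//
//   class DeferredNumberTagD: public LDeferredCode {
//    public:
//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };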
// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds, and EmitInverted() emits
// the branch when the inverted condition holds.
//
// For actual examples of conditions, see the concrete implementations in
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
public:
explicit BranchGenerator(LCodeGen* codegen)
: codegen_(codegen) { }
virtual ~BranchGenerator() { }
virtual void Emit(Label* label) const = 0;
virtual void EmitInverted(Label* label) const = 0;
protected:
MacroAssembler* masm() const { return codegen_->masm(); }
LCodeGen* codegen_;
};
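// A hedged sketch of a concrete generator (the real implementations live in
// lithium-codegen-arm64.cc; this condition-based variant is illustrative):
//
//   class BranchOnCondition: public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const {
//       __ B(cond_, label);  // Branch when the condition holds.
//     }
//     virtual void EmitInverted(Label* label) const {
//       if (cond_ != al) __ B(InvertCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };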
} } // namespace v8::internal
#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

334
deps/v8/src/arm64/lithium-gap-resolver-arm64.cc

@@ -0,0 +1,334 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "arm64/lithium-gap-resolver-arm64.h"
#include "arm64/lithium-codegen-arm64.h"
namespace v8 {
namespace internal {
// We use the root register to spill a value while breaking a cycle in parallel
// moves. We don't need access to roots while resolving the move list and using
// the root register has two advantages:
// - It is not in crankshaft allocatable registers list, so it can't interfere
// with any of the moves we are resolving.
// - We don't need to push it on the stack, as we can reload it with its value
// once we have resolved a cycle.
#define kSavedValue root
// We use the MacroAssembler floating-point scratch register to break a cycle
// involving double values as the MacroAssembler will not need it for the
// operations performed by the gap resolver.
#define kSavedDoubleValue fp_scratch
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL), need_to_restore_root_(false) { }
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found when we reach this move again.
PerformMove(i);
if (in_cycle_) RestoreValue();
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
if (!move.IsEliminated()) {
ASSERT(move.source()->IsConstantOperand());
EmitMove(i);
}
}
if (need_to_restore_root_) {
ASSERT(kSavedValue.Is(root));
__ InitializeRootRegister();
need_to_restore_root_ = false;
}
moves_.Rewind(0);
}
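// Worked example (a sketch): resolving the two-move cycle
// { x0 -> x1, x1 -> x0 } proceeds as follows. PerformMove(0) recurses into
// move 1, finds it blocked by the pending root move, and breaks the cycle;
// RestoreValue() then completes it:
//
//   Mov(root, x1)  // BreakCycle: spill x1 (kSavedValue is the root register).
//   Mov(x1, x0)    // EmitMove: the now-unblocked root move.
//   Mov(x0, root)  // RestoreValue: finish the eliminated move.
//
// Resolve() then ends with InitializeRootRegister(), since
// need_to_restore_root_ was set.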
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
LMoveOperands& current_move = moves_[index];
ASSERT(!current_move.IsPending());
ASSERT(!current_move.IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
LOperand* destination = current_move.destination();
current_move.set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
current_move.set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
void LGapResolver::BreakCycle(int index) {
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
// We use registers which are not allocatable by crankshaft to break the cycle
// to be sure they don't interfere with the moves we are resolving.
ASSERT(!kSavedValue.IsAllocatable());
ASSERT(!kSavedDoubleValue.IsAllocatable());
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
// broken and we can perform the other moves.
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
need_to_restore_root_ = true;
__ Mov(kSavedValue, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
need_to_restore_root_ = true;
__ Ldr(kSavedValue, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
__ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
__ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// Mark this move as resolved.
// This move will be actually performed by moving the saved value to this
// move's destination in LGapResolver::RestoreValue().
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
if (saved_destination_->IsRegister()) {
__ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
} else if (saved_destination_->IsStackSlot()) {
__ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue);
cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_));
cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ Mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
__ Str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ Ldr(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
EmitStackSlotMove(index);
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsSmi(constant_source)) {
__ Mov(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ Mov(dst, cgen_->ToInteger32(constant_source));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
__ Fmov(result, cgen_->ToDouble(constant_source));
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
need_to_restore_root_ = true;
if (cgen_->IsSmi(constant_source)) {
__ Mov(kSavedValue, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
} else {
__ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
}
__ Str(kSavedValue, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
DoubleRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ Str(src, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
MemOperand src = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ Ldr(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
EmitStackSlotMove(index);
}
} else {
UNREACHABLE();
}
// The move has been emitted, we can eliminate it.
moves_[index].Eliminate();
}
void LGapResolver::EmitStackSlotMove(int index) {
// We need a temp register to perform a stack slot to stack slot move, and
// the register must not be involved in breaking cycles.
// Use the Crankshaft double scratch register as the temporary.
DoubleRegister temp = crankshaft_fp_scratch;
LOperand* src = moves_[index].source();
LOperand* dst = moves_[index].destination();
ASSERT(src->IsStackSlot());
ASSERT(dst->IsStackSlot());
__ Ldr(temp, cgen_->ToMemOperand(src));
__ Str(temp, cgen_->ToMemOperand(dst));
}
} } // namespace v8::internal

90
deps/v8/src/arm64/lithium-gap-resolver-arm64.h

@@ -0,0 +1,90 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#include "v8.h"
#include "lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Emit a move from one stack slot to another.
void EmitStackSlotMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
// We use the root register as a scratch in a few places. When that happens,
// this flag is set to indicate that it needs to be restored.
bool need_to_restore_root_;
};
} } // namespace v8::internal
#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_

1677
deps/v8/src/arm64/macro-assembler-arm64-inl.h

File diff suppressed because it is too large

5184
deps/v8/src/arm64/macro-assembler-arm64.cc

File diff suppressed because it is too large

2310
deps/v8/src/arm64/macro-assembler-arm64.h

File diff suppressed because it is too large

1728
deps/v8/src/arm64/regexp-macro-assembler-arm64.cc

File diff suppressed because it is too large

315
deps/v8/src/arm64/regexp-macro-assembler-arm64.h

@@ -0,0 +1,315 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#include "arm64/assembler-arm64.h"
#include "arm64/assembler-arm64-inl.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerARM64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerARM64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(unsigned c, Label* on_equal);
virtual void CheckCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame,
int start_offset,
const byte** input_start,
const byte** input_end);
private:
// Above the frame pointer - Stored registers and stack passed parameters.
// Callee-saved registers x19-x29, where x29 is the old frame pointer.
static const int kCalleeSavedRegisters = 0;
// Return address.
// It is placed above the 11 callee-saved registers.
static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameter placed by caller.
static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
static const int kStackBase = kDirectCall - kPointerSize;
static const int kOutputSize = kStackBase - kPointerSize;
static const int kInput = kOutputSize - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessCounter = kInput - kPointerSize;
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSize;
// A capture is a 64 bit value holding two positions.
static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSize;
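// An illustrative fp-relative layout implied by the offsets above (higher
// addresses first; a sketch, not normative):
//
//   fp + kIsolate                 isolate (stack-passed parameter)
//   fp + kSecondaryReturnAddress  secondary return address
//   fp + kReturnAddress           return address
//   fp + kCalleeSavedRegisters    x19-x29 (11 callee-saved registers)
//   fp + kDirectCall              direct_call parameter
//   fp + kStackBase               backtrack stack base
//   fp + kOutputSize              output array size
//   fp + kInput                   input string
//   fp + kSuccessCounter          success counter
//   fp + kFirstRegisterOnStack    regexp registers, growing downwards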
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
// When initializing registers to a non-position value we can unroll
// the loop. Set the limit of registers to unroll.
static const int kNumRegistersToUnroll = 16;
// We are using x0 to x7 as a register cache. Each hardware register must
// contain one capture, that is two 32 bit registers. We can cache at most
// 16 registers.
static const int kNumCachedRegisters = 16;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
// Location of a 32 bit position register.
MemOperand register_location(int register_index);
// Location of a 64 bit capture, combining two position registers.
MemOperand capture_location(int register_index, Register scratch);
// Register holding the current input position as negative offset from
// the end of the string.
Register current_input_offset() { return w21; }
// The register containing the current character after LoadCurrentCharacter.
Register current_character() { return w22; }
// Register holding address of the end of the input string.
Register input_end() { return x25; }
// Register holding address of the start of the input string.
Register input_start() { return x26; }
// Register holding the offset from the start of the string where we should
// start matching.
Register start_offset() { return w27; }
// Pointer to the output array's first element.
Register output_array() { return x28; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
Register backtrack_stackpointer() { return x23; }
// Register holding pointer to the current code object.
Register code_pointer() { return x20; }
// Register holding the value used for clearing capture registers.
Register non_position_value() { return w24; }
// The top 32 bits of this register hold a second copy of this value,
// so that two position registers can be cleared at a time.
Register twice_non_position_value() { return x24; }
// Byte size of chars in the string to match (decided by the Mode argument)
int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Compares reg against immediate before calling BranchOrBacktrack.
// It makes use of the Cbz and Cbnz instructions.
void CompareAndBranchOrBacktrack(Register reg,
int immediate,
Condition condition,
Label* to);
inline void CallIf(Label* to, Condition condition);
// Save and restore the link register on the stack in a way that
// is GC-safe.
inline void SaveLinkRegister();
inline void RestoreLinkRegister();
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer by a word size and stores the register's value there.
inline void Push(Register source);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// and increments it by a word size.
inline void Pop(Register target);
// This state indicates where the register actually is.
enum RegisterState {
STACKED, // Resides in memory.
CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
CACHED_MSW // Most Significant Word of a 64 bit hardware register.
};
RegisterState GetRegisterState(int register_index) {
ASSERT(register_index >= 0);
if (register_index >= kNumCachedRegisters) {
return STACKED;
} else {
if ((register_index % 2) == 0) {
return CACHED_LSW;
} else {
return CACHED_MSW;
}
}
}
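// Illustrative sketch (not part of the diff): the mapping GetRegisterState
// encodes. Even position-register indices live in the low word of a cache
// register (x0-x7), odd indices in the high word, and indices at or above
// kNumCachedRegisters (16) spill to the stack.
#include <cstdio>

enum State { STACKED, CACHED_LSW, CACHED_MSW };

static State StateFor(int index) {
  if (index >= 16) return STACKED;  // kNumCachedRegisters
  return (index % 2) == 0 ? CACHED_LSW : CACHED_MSW;
}

int main() {
  const int tests[] = {0, 1, 14, 15, 16};
  for (int i : tests) {
    State s = StateFor(i);
    if (s == STACKED) {
      std::printf("position reg %2d -> STACKED\n", i);
    } else {
      std::printf("position reg %2d -> %s of x%d\n", i,
                  s == CACHED_LSW ? "CACHED_LSW" : "CACHED_MSW", i / 2);
    }
  }
  return 0;
}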
// Store helper that takes the state of the register into account.
inline void StoreRegister(int register_index, Register source);
// Returns a hardware W register that holds the value of the capture
// register.
//
// This function will try to use an existing cache register (w0-w7) for the
// result. Otherwise, it will load the value into maybe_result.
//
// If the returned register is anything other than maybe_result, calling code
// must not write to it.
inline Register GetRegister(int register_index, Register maybe_result);
// Returns the hardware register (x0-x7) holding the value of the capture
// register.
// This assumes that the state of the register is not STACKED.
inline Register GetCachedRegister(int register_index);
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).
Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
};
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_

3645
deps/v8/src/arm64/simulator-arm64.cc

File diff suppressed because it is too large

908
deps/v8/src/arm64/simulator-arm64.h

@ -0,0 +1,908 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_SIMULATOR_ARM64_H_
#define V8_ARM64_SIMULATOR_ARM64_H_
#include <stdarg.h>
#include <vector>
#include "v8.h"
#include "globals.h"
#include "utils.h"
#include "allocation.h"
#include "assembler.h"
#include "arm64/assembler-arm64.h"
#include "arm64/decoder-arm64.h"
#include "arm64/disasm-arm64.h"
#include "arm64/instrument-arm64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
namespace v8 {
namespace internal {
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native ARM64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*arm64_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
return try_catch_address;
}
static void UnregisterCTryCatch() { }
};
#else // !defined(USE_SIMULATOR)
enum ReverseByteMode {
Reverse16 = 0,
Reverse32 = 1,
Reverse64 = 2
};
// The proper way to initialize a simulated system register (such as NZCV) is as
// follows:
// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
class SimSystemRegister {
public:
// The default constructor represents a register which has no writable bits.
// It is not possible to set its value to anything other than 0.
SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
uint32_t RawValue() const {
return value_;
}
void SetRawValue(uint32_t new_value) {
value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
}
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, value_);
}
int32_t SignedBits(int msb, int lsb) const {
return signed_bitextract_32(msb, lsb, value_);
}
void SetBits(int msb, int lsb, uint32_t bits);
// Default system register values.
static SimSystemRegister DefaultValueFor(SystemRegister id);
#define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
void Set##Name(Type bits) { \
SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
}
#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
#undef DEFINE_WRITE_IGNORE_MASK
#undef DEFINE_GETTER
protected:
// Most system registers only implement a few of the bits in the word. Other
// bits are "read-as-zero, write-ignored". The write_ignore_mask argument
// describes the bits which are not modifiable.
SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
: value_(value), write_ignore_mask_(write_ignore_mask) { }
uint32_t value_;
uint32_t write_ignore_mask_;
};
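// Illustrative sketch (not part of the diff): the masking arithmetic that
// SetRawValue implements. Assuming a write-ignore mask covering bits [27:0]
// (the NZCV layout), only the four flag bits can take a new value.
#include <cstdint>
#include <cassert>

int main() {
  uint32_t value = 0;
  const uint32_t write_ignore_mask = 0x0fffffff;  // bits [27:0] read-as-zero
  uint32_t new_value = 0xffffffffu;
  value = (value & write_ignore_mask) | (new_value & ~write_ignore_mask);
  assert(value == 0xf0000000u);  // only N, Z, C and V were written
  return 0;
}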
// Represent a register (r0-r31, v0-v31).
template<int kSizeInBytes>
class SimRegisterBase {
public:
template<typename T>
void Set(T new_value, unsigned size = sizeof(T)) {
ASSERT(size <= kSizeInBytes);
ASSERT(size <= sizeof(new_value));
// All AArch64 registers are zero-extending; writing a W register clears the
// top bits of the corresponding X register.
memset(value_, 0, kSizeInBytes);
memcpy(value_, &new_value, size);
}
// Copy 'size' bytes of the register to the result, and zero-extend to fill
// the result.
template<typename T>
T Get(unsigned size = sizeof(T)) const {
ASSERT(size <= kSizeInBytes);
T result;
memset(&result, 0, sizeof(result));
memcpy(&result, value_, size);
return result;
}
protected:
uint8_t value_[kSizeInBytes];
};
typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
typedef SimRegisterBase<kDRegSize> SimFPRegister; // v0-v31
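// Illustrative sketch (not part of the diff): the zero-extending write that
// SimRegisterBase::Set models, mirroring how an AArch64 W-register write
// clears the top half of the X register. Assumes a little-endian host, as
// the simulator itself does.
#include <cstdint>
#include <cstring>
#include <cassert>

struct Reg {
  uint8_t value[8];
  template <typename T>
  void Set(T v) {
    std::memset(value, 0, sizeof(value));  // zero-extend first...
    std::memcpy(value, &v, sizeof(v));     // ...then write the low bytes
  }
  template <typename T>
  T Get() const {
    T r;
    std::memcpy(&r, value, sizeof(r));
    return r;
  }
};

int main() {
  Reg x;
  x.Set<uint64_t>(0xffffffffffffffffULL);
  x.Set<uint32_t>(0x12345678u);                // a "W" write
  assert(x.Get<uint64_t>() == 0x12345678ULL);  // top 32 bits were cleared
  return 0;
}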
class Simulator : public DecoderVisitor {
public:
explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate = NULL,
FILE* stream = stderr);
Simulator();
~Simulator();
// System functions.
static void Initialize(Isolate* isolate);
static Simulator* current(v8::internal::Isolate* isolate);
class CallArgument;
// Call an arbitrary function taking an arbitrary number of arguments. The
// varargs list must be a set of arguments with type CallArgument, and
// terminated by CallArgument::End().
void CallVoid(byte* entry, CallArgument* args);
// Like CallVoid, but expect a return value.
int64_t CallInt64(byte* entry, CallArgument* args);
double CallDouble(byte* entry, CallArgument* args);
// V8 calls into generated JS code with 5 parameters and into
// generated RegExp code with 10 parameters. These are convenience functions,
// which set up the simulator state and grab the result on return.
int64_t CallJS(byte* entry,
byte* function_entry,
JSFunction* func,
Object* recv,
int64_t argc,
Object*** argv);
int64_t CallRegExp(byte* entry,
String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
// A wrapper class that stores an argument for one of the above Call
// functions.
//
// Only arguments up to 64 bits in size are supported.
class CallArgument {
public:
template<typename T>
explicit CallArgument(T argument) {
ASSERT(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = X_ARG;
}
explicit CallArgument(double argument) {
ASSERT(sizeof(argument) == sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
explicit CallArgument(float argument) {
// TODO(all): CallArgument(float) is untested, remove this check once
// tested.
UNIMPLEMENTED();
// Make the D register a NaN to try to trap errors if the callee expects a
// double. If it expects a float, the callee should ignore the top word.
ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
// Write the float payload to the S register.
ASSERT(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
// This indicates the end of the arguments list, so that CallArgument
// objects can be passed into varargs functions.
static CallArgument End() { return CallArgument(); }
int64_t bits() const { return bits_; }
bool IsEnd() const { return type_ == NO_ARG; }
bool IsX() const { return type_ == X_ARG; }
bool IsD() const { return type_ == D_ARG; }
private:
enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
// All arguments are aligned to at least 64 bits and we don't support
// passing bigger arguments, so the payload size can be fixed at 64 bits.
int64_t bits_;
CallArgumentType type_;
CallArgument() { type_ = NO_ARG; }
};
// Start the debugging command line.
void Debug();
bool GetValue(const char* desc, int64_t* value);
bool PrintValue(const char* desc);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
void ResetState();
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
void DoRuntimeCall(Instruction* instr);
// Run the simulator.
static const Instruction* kEndOfSimAddress;
void DecodeInstruction();
void Run();
void RunFrom(Instruction* start);
// Simulation helpers.
template <typename T>
void set_pc(T new_pc) {
ASSERT(sizeof(T) == sizeof(pc_));
memcpy(&pc_, &new_pc, sizeof(T));
pc_modified_ = true;
}
Instruction* pc() { return pc_; }
void increment_pc() {
if (!pc_modified_) {
pc_ = pc_->following();
}
pc_modified_ = false;
}
virtual void Decode(Instruction* instr) {
decoder_->Decode(instr);
}
void ExecuteInstruction() {
ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext();
Decode(pc_);
LogProcessorState();
increment_pc();
CheckBreakpoints();
}
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
// Register accessors.
// Return 'size' bits of the value of an integer register, as the specified
// type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kXRegSizeInBits and
// kWRegSizeInBits.
template<typename T>
T reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
T result;
memset(&result, 0, sizeof(result));
return result;
}
return registers_[code].Get<T>(size_in_bytes);
}
// Like reg(), but infer the access size from the template type.
template<typename T>
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<T>(sizeof(T) * 8, code, r31mode);
}
// Common specialized accessors for the reg() template.
int32_t wreg(unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int32_t>(code, r31mode);
}
int64_t xreg(unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int64_t>(code, r31mode);
}
int64_t reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int64_t>(size, code, r31mode);
}
// Write 'size' bits of 'value' into an integer register. The value is
// zero-extended. This behaviour matches AArch64 register writes.
//
// The only supported values of 'size' are kXRegSizeInBits and
// kWRegSizeInBits.
template<typename T>
void set_reg(unsigned size, unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
return;
}
return registers_[code].Set(value, size_in_bytes);
}
// Like set_reg(), but infer the access size from the template type.
template<typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(sizeof(value) * 8, code, value, r31mode);
}
// Common specialized accessors for the set_reg() template.
void set_wreg(unsigned code, int32_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kWRegSizeInBits, code, value, r31mode);
}
void set_xreg(unsigned code, int64_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kXRegSizeInBits, code, value, r31mode);
}
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
ASSERT(sizeof(T) == kPointerSize);
set_reg(kLinkRegCode, value);
}
template<typename T>
void set_sp(T value) {
ASSERT(sizeof(T) == kPointerSize);
set_reg(31, value, Reg31IsStackPointer);
}
int64_t sp() { return xreg(31, Reg31IsStackPointer); }
int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
int64_t fp() {
return xreg(kFramePointerRegCode, Reg31IsStackPointer);
}
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
// Return 'size' bits of the value of a floating-point register, as the
// specified type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kDRegSizeInBits and
// kSRegSizeInBits.
template<typename T>
T fpreg(unsigned size, unsigned code) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kDRegSizeInBits) || (size == kSRegSizeInBits));
ASSERT(code < kNumberOfFPRegisters);
return fpregisters_[code].Get<T>(size_in_bytes);
}
// Like fpreg(), but infer the access size from the template type.
template<typename T>
T fpreg(unsigned code) const {
return fpreg<T>(sizeof(T) * 8, code);
}
// Common specialized accessors for the fpreg() template.
float sreg(unsigned code) const {
return fpreg<float>(code);
}
uint32_t sreg_bits(unsigned code) const {
return fpreg<uint32_t>(code);
}
double dreg(unsigned code) const {
return fpreg<double>(code);
}
uint64_t dreg_bits(unsigned code) const {
return fpreg<uint64_t>(code);
}
double fpreg(unsigned size, unsigned code) const {
switch (size) {
case kSRegSizeInBits: return sreg(code);
case kDRegSizeInBits: return dreg(code);
default:
UNREACHABLE();
return 0.0;
}
}
// Write 'value' into a floating-point register. The value is zero-extended.
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
ASSERT(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value, sizeof(value));
}
// Common specialized accessors for the set_fpreg() template.
void set_sreg(unsigned code, float value) {
set_fpreg(code, value);
}
void set_sreg_bits(unsigned code, uint32_t value) {
set_fpreg(code, value);
}
void set_dreg(unsigned code, double value) {
set_fpreg(code, value);
}
void set_dreg_bits(unsigned code, uint64_t value) {
set_fpreg(code, value);
}
SimSystemRegister& nzcv() { return nzcv_; }
SimSystemRegister& fpcr() { return fpcr_; }
// Debug helpers
// Simulator breakpoints.
struct Breakpoint {
Instruction* location;
bool enabled;
};
std::vector<Breakpoint> breakpoints_;
void SetBreakpoint(Instruction* breakpoint);
void ListBreakpoints();
void CheckBreakpoints();
// Helpers for the 'next' command.
// When this is set, the Simulator will insert a breakpoint after the next BL
// instruction it meets.
bool break_on_next_;
// Check if the Simulator should insert a break after the current instruction
// for the 'next' command.
void CheckBreakNext();
// Disassemble instruction at the given address.
void PrintInstructionsAt(Instruction* pc, uint64_t count);
void PrintSystemRegisters(bool print_all = false);
void PrintRegisters(bool print_all_regs = false);
void PrintFPRegisters(bool print_all_regs = false);
void PrintProcessorState();
void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
void LogSystemRegisters() {
if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
}
void LogRegisters() {
if (log_parameters_ & LOG_REGS) PrintRegisters();
}
void LogFPRegisters() {
if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
}
void LogProcessorState() {
LogSystemRegisters();
LogRegisters();
LogFPRegisters();
}
void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
}
int log_parameters() { return log_parameters_; }
void set_log_parameters(int new_parameters) {
log_parameters_ = new_parameters;
if (!decoder_) {
if (new_parameters & LOG_DISASM) {
PrintF("Run --debug-sim to dynamically turn on disassembler\n");
}
return;
}
if (new_parameters & LOG_DISASM) {
decoder_->InsertVisitorBefore(print_disasm_, this);
} else {
decoder_->RemoveVisitor(print_disasm_);
}
}
static inline const char* WRegNameForCode(unsigned code,
Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* XRegNameForCode(unsigned code,
Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* SRegNameForCode(unsigned code);
static inline const char* DRegNameForCode(unsigned code);
static inline const char* VRegNameForCode(unsigned code);
static inline int CodeFromName(const char* name);
protected:
// Simulation helpers ------------------------------------
bool ConditionPassed(Condition cond) {
SimSystemRegister& flags = nzcv();
switch (cond) {
case eq:
return flags.Z();
case ne:
return !flags.Z();
case hs:
return flags.C();
case lo:
return !flags.C();
case mi:
return flags.N();
case pl:
return !flags.N();
case vs:
return flags.V();
case vc:
return !flags.V();
case hi:
return flags.C() && !flags.Z();
case ls:
return !(flags.C() && !flags.Z());
case ge:
return flags.N() == flags.V();
case lt:
return flags.N() != flags.V();
case gt:
return !flags.Z() && (flags.N() == flags.V());
case le:
return !(!flags.Z() && (flags.N() == flags.V()));
case nv: // Fall through.
case al:
return true;
default:
UNREACHABLE();
return false;
}
}
bool ConditionFailed(Condition cond) {
return !ConditionPassed(cond);
}
void AddSubHelper(Instruction* instr, int64_t op2);
int64_t AddWithCarry(unsigned reg_size,
bool set_flags,
int64_t src1,
int64_t src2,
int64_t carry_in = 0);
void LogicalHelper(Instruction* instr, int64_t op2);
void ConditionalCompareHelper(Instruction* instr, int64_t op2);
void LoadStoreHelper(Instruction* instr,
int64_t offset,
AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
uint8_t* LoadStoreAddress(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
void LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
uint8_t MemoryRead8(uint8_t* address);
uint16_t MemoryRead16(uint8_t* address);
uint32_t MemoryRead32(uint8_t* address);
float MemoryReadFP32(uint8_t* address);
uint64_t MemoryRead64(uint8_t* address);
double MemoryReadFP64(uint8_t* address);
void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
void MemoryWrite32(uint8_t* address, uint32_t value);
void MemoryWriteFP32(uint8_t* address, float value);
void MemoryWrite64(uint8_t* address, uint64_t value);
void MemoryWriteFP64(uint8_t* address, double value);
int64_t ShiftOperand(unsigned reg_size,
int64_t value,
Shift shift_type,
unsigned amount);
int64_t Rotate(unsigned reg_width,
int64_t value,
Shift shift_type,
unsigned amount);
int64_t ExtendValue(unsigned reg_width,
int64_t value,
Extend extend_type,
unsigned left_shift = 0);
uint64_t ReverseBits(uint64_t value, unsigned num_bits);
uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
template <typename T>
T FPDefaultNaN() const;
void FPCompare(double val0, double val1);
double FPRoundInt(double value, FPRounding round_mode);
double FPToDouble(float value);
float FPToFloat(double value, FPRounding round_mode);
double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
int32_t FPToInt32(double value, FPRounding rmode);
int64_t FPToInt64(double value, FPRounding rmode);
uint32_t FPToUInt32(double value, FPRounding rmode);
uint64_t FPToUInt64(double value, FPRounding rmode);
template <typename T>
T FPAdd(T op1, T op2);
template <typename T>
T FPDiv(T op1, T op2);
template <typename T>
T FPMax(T a, T b);
template <typename T>
T FPMaxNM(T a, T b);
template <typename T>
T FPMin(T a, T b);
template <typename T>
T FPMinNM(T a, T b);
template <typename T>
T FPMul(T op1, T op2);
template <typename T>
T FPMulAdd(T a, T op1, T op2);
template <typename T>
T FPSqrt(T op);
template <typename T>
T FPSub(T op1, T op2);
// Standard NaN processing.
template <typename T>
T FPProcessNaN(T op);
bool FPProcessNaNs(Instruction* instr);
template <typename T>
T FPProcessNaNs(T op1, T op2);
template <typename T>
T FPProcessNaNs3(T op1, T op2, T op3);
void CheckStackAlignment();
inline void CheckPCSComplianceAndRun();
#ifdef DEBUG
// Corruption values should have their least significant byte cleared to
// allow the code of the register being corrupted to be inserted.
static const uint64_t kCallerSavedRegisterCorruptionValue =
0xca11edc0de000000UL;
// This value is a NaN in both 32-bit and 64-bit FP.
static const uint64_t kCallerSavedFPRegisterCorruptionValue =
0x7ff000007f801000UL;
// This value is a mix of a 32/64-bit NaN and a "verbose" immediate.
static const uint64_t kDefaultCPURegisterCorruptionValue =
0x7ffbad007f8bad00UL;
void CorruptRegisters(CPURegList* list,
uint64_t value = kDefaultCPURegisterCorruptionValue);
void CorruptAllCallerSavedCPURegisters();
#endif
// Processor state ---------------------------------------
// Output stream.
FILE* stream_;
PrintDisassembler* print_disasm_;
// Instrumentation.
Instrument* instrument_;
// General purpose registers. Register 31 is the stack pointer.
SimRegister registers_[kNumberOfRegisters];
// Floating point registers
SimFPRegister fpregisters_[kNumberOfFPRegisters];
// Processor state
// bits[31, 27]: Condition flags N, Z, C, and V.
// (Negative, Zero, Carry, Overflow)
SimSystemRegister nzcv_;
// Floating-Point Control Register
SimSystemRegister fpcr_;
// Only a subset of FPCR features are supported by the simulator. This helper
// checks that the FPCR settings are supported.
//
// This is checked when floating-point instructions are executed, not when
// FPCR is set. This allows generated code to modify FPCR for external
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
// The simulator does not support half-precision operations so fpcr().AHP()
// is irrelevant, and is not checked here.
}
static int CalcNFlag(uint64_t result, unsigned reg_size) {
return (result >> (reg_size - 1)) & 1;
}
static int CalcZFlag(uint64_t result) {
return result == 0;
}
static const uint32_t kConditionFlagsMask = 0xf0000000;
// Stack
byte* stack_;
static const intptr_t stack_protection_size_ = KB;
intptr_t stack_size_;
byte* stack_limit_;
Decoder<DispatchingDecoderVisitor>* decoder_;
Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
// Indicates if the pc has been modified by the instruction and should not be
// automatically incremented.
bool pc_modified_;
Instruction* pc_;
static const char* xreg_names[];
static const char* wreg_names[];
static const char* sreg_names[];
static const char* dreg_names[];
static const char* vreg_names[];
// Debugger input.
void set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
}
char* last_debugger_input() { return last_debugger_input_; }
char* last_debugger_input_;
private:
void Init(FILE* stream);
int log_parameters_;
Isolate* isolate_;
};
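// Illustrative sketch (not part of the diff): the End()-terminated argument
// convention the Call* helpers rely on, reproduced with a minimal stand-in
// type so the sentinel walk can be run on its own.
#include <cstdint>
#include <cstdio>

struct Arg {
  enum Type { X_ARG, D_ARG, NO_ARG };
  Type type;
  int64_t bits;
  static Arg End() { return Arg{NO_ARG, 0}; }
  bool IsEnd() const { return type == NO_ARG; }
};

static int CountArgs(const Arg* args) {
  int n = 0;
  while (!args[n].IsEnd()) n++;  // walk until the NO_ARG sentinel
  return n;
}

int main() {
  Arg args[] = {{Arg::X_ARG, 42}, {Arg::D_ARG, 0}, Arg::End()};
  std::printf("%d arguments before End()\n", CountArgs(args));
  return 0;
}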
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
FUNCTION_ADDR(entry), \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->CallRegExp( \
entry, \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code.
// See also 'class SimulatorStack' in arm/simulator-arm.h.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit();
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static void UnregisterCTryCatch() {
Simulator::current(Isolate::Current())->PopAddress();
}
};
#endif // !defined(USE_SIMULATOR)
} } // namespace v8::internal
#endif // V8_ARM64_SIMULATOR_ARM64_H_

1496
deps/v8/src/arm64/stub-cache-arm64.cc

File diff suppressed because it is too large

112
deps/v8/src/arm64/utils-arm64.cc

@ -0,0 +1,112 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if V8_TARGET_ARCH_ARM64
#include "arm64/utils-arm64.h"
namespace v8 {
namespace internal {
#define __ assm->
int CountLeadingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
count++;
bit_test >>= 1;
}
return count;
}
int CountLeadingSignBits(int64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
return CountLeadingZeros(~value, width) - 1;
}
}
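// Illustrative check (not part of the diff): CountLeadingSignBits counts the
// redundant copies of the sign bit, so it is a leading-zero count minus one,
// applied to the value itself when non-negative and to ~value otherwise.
#include <cstdint>
#include <cassert>

static int Clz64(uint64_t v) {
  int n = 0;
  for (uint64_t bit = 1ULL << 63; bit != 0 && (v & bit) == 0; bit >>= 1) n++;
  return n;
}

static int LeadingSignBits(int64_t v) {
  uint64_t u = static_cast<uint64_t>(v >= 0 ? v : ~v);
  return Clz64(u) - 1;
}

int main() {
  assert(LeadingSignBits(0x00ffffffffffffff) == 7);  // 8 zeros, 7 redundant
  assert(LeadingSignBits(-1) == 63);                 // every bit is the sign
  assert(LeadingSignBits(1) == 62);
  return 0;
}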
int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
}
return count;
}
int CountSetBits(uint64_t value, int width) {
// TODO(jbramley): Would it be useful to allow other widths? The
// implementation already supports them.
ASSERT((width == 32) || (width == 64));
// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
// where the size of each bit field doubles each time.
// An example for an 8-bit value:
// Bits: h g f e d c b a
// \ | \ | \ | \ |
// value = h+g f+e d+c b+a
// \ | \ |
// value = h+g+f+e d+c+b+a
// \ |
// value = h+g+f+e+d+c+b+a
value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
return value;
}
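// Illustrative check (not part of the diff): the pairwise accumulation above
// agrees with a naive bit loop. Each mask selects alternating fields whose
// width doubles at every step (1, 2, 4, ... bits).
#include <cstdint>
#include <cassert>

static int NaivePopcount(uint64_t v) {
  int n = 0;
  for (; v != 0; v >>= 1) n += static_cast<int>(v & 1);
  return n;
}

static int PairwisePopcount(uint64_t v) {
  v = ((v >> 1) & 0x5555555555555555ULL) + (v & 0x5555555555555555ULL);
  v = ((v >> 2) & 0x3333333333333333ULL) + (v & 0x3333333333333333ULL);
  v = ((v >> 4) & 0x0f0f0f0f0f0f0f0fULL) + (v & 0x0f0f0f0f0f0f0f0fULL);
  v = ((v >> 8) & 0x00ff00ff00ff00ffULL) + (v & 0x00ff00ff00ff00ffULL);
  v = ((v >> 16) & 0x0000ffff0000ffffULL) + (v & 0x0000ffff0000ffffULL);
  v = ((v >> 32) & 0x00000000ffffffffULL) + (v & 0x00000000ffffffffULL);
  return static_cast<int>(v);
}

int main() {
  const uint64_t tests[] = {0ULL, 1ULL, 0xdeadbeefULL, ~0ULL};
  for (uint64_t v : tests) assert(PairwisePopcount(v) == NaivePopcount(v));
  return 0;
}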
int MaskToBit(uint64_t mask) {
ASSERT(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

135
deps/v8/src/arm64/utils-arm64.h

@ -0,0 +1,135 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_UTILS_ARM64_H_
#define V8_ARM64_UTILS_ARM64_H_
#include <cmath>
#include "v8.h"
#include "arm64/constants-arm64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
namespace v8 {
namespace internal {
// These are global assumptions in v8.
STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
uint32_t bits = 0;
memcpy(&bits, &value, 4);
return bits;
}
static inline uint64_t double_to_rawbits(double value) {
uint64_t bits = 0;
memcpy(&bits, &value, 8);
return bits;
}
static inline float rawbits_to_float(uint32_t bits) {
float value = 0.0;
memcpy(&value, &bits, 4);
return value;
}
static inline double rawbits_to_double(uint64_t bits) {
double value = 0.0;
memcpy(&value, &bits, 8);
return value;
}
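// Illustrative check (not part of the diff): the rawbits helpers are exact
// bit-pattern round trips; memcpy is the portable form of type punning used
// throughout this header.
#include <cstdint>
#include <cstring>
#include <cassert>

int main() {
  double d = -0.0;
  uint64_t bits = 0;
  std::memcpy(&bits, &d, 8);              // double_to_rawbits
  assert(bits == 0x8000000000000000ULL);  // only the sign bit is set
  double back = 0.0;
  std::memcpy(&back, &bits, 8);           // rawbits_to_double
  uint64_t bits2 = 0;
  std::memcpy(&bits2, &back, 8);
  assert(bits2 == bits);                  // lossless round trip
  return 0;
}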
// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
int MaskToBit(uint64_t mask);
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = double_to_rawbits(num);
if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
return true;
}
return false;
}
inline bool IsSignallingNaN(float num) {
uint32_t raw = float_to_rawbits(num);
if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
return true;
}
return false;
}
template <typename T>
inline bool IsQuietNaN(T num) {
return std::isnan(num) && !IsSignallingNaN(num);
}
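// Illustrative sketch (not part of the diff): the signalling/quiet split is
// the top mantissa bit. The mask value 0x0008000000000000 assumed here is
// the conventional double quiet-NaN bit that kDQuietNanMask selects.
#include <cstdint>
#include <cstring>
#include <cassert>
#include <cmath>

int main() {
  const uint64_t kQuietBit = 0x0008000000000000ULL;  // assumed kDQuietNanMask
  uint64_t signalling = 0x7ff0000000000001ULL;       // NaN, quiet bit clear
  double num;
  std::memcpy(&num, &signalling, 8);
  assert(std::isnan(num) && (signalling & kQuietBit) == 0);
  uint64_t quiet = signalling | kQuietBit;           // what ToQuietNaN does
  double qnum;
  std::memcpy(&qnum, &quiet, 8);
  assert(std::isnan(qnum) && (quiet & kQuietBit) != 0);
  return 0;
}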
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
ASSERT(std::isnan(num));
return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
}
inline float ToQuietNaN(float num) {
ASSERT(std::isnan(num));
return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
}
// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
return fma(op1, op2, a);
}
inline float FusedMultiplyAdd(float op1, float op2, float a) {
return fmaf(op1, op2, a);
}
} } // namespace v8::internal
#endif // V8_ARM64_UTILS_ARM64_H_

10
deps/v8/src/array-iterator.js

@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
var iteratorObjectSymbol = NEW_PRIVATE("iterator_object");
var arrayIteratorNextIndexSymbol = NEW_PRIVATE("iterator_next");
var arrayIterationKindSymbol = NEW_PRIVATE("iterator_kind");
var arrayIteratorObjectSymbol = GLOBAL_PRIVATE("ArrayIterator#object");
var arrayIteratorNextIndexSymbol = GLOBAL_PRIVATE("ArrayIterator#next");
var arrayIterationKindSymbol = GLOBAL_PRIVATE("ArrayIterator#kind");
function ArrayIterator() {}
@ -46,7 +46,7 @@ function ArrayIterator() {}
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var iterator = new ArrayIterator;
SET_PRIVATE(iterator, iteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
return iterator;
@ -60,7 +60,7 @@ function CreateIteratorResultObject(value, done) {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
var array = GET_PRIVATE(iterator, iteratorObjectSymbol);
var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
if (!array) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);

14
deps/v8/src/array.js

@ -1115,8 +1115,8 @@ function ArraySort(comparefn) {
max_prototype_element = CopyFromPrototype(this, length);
}
var num_non_undefined = %IsObserved(this) ?
-1 : %RemoveArrayHoles(this, length);
// %RemoveArrayHoles returns -1 if fast removal is not supported.
var num_non_undefined = %RemoveArrayHoles(this, length);
if (num_non_undefined == -1) {
// The array is observed, or there were indexed accessors in the array.
@ -1153,7 +1153,7 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1201,7 +1201,7 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1242,7 +1242,7 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1282,7 +1282,7 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1321,7 +1321,7 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}

95
deps/v8/src/assembler.cc

@ -59,6 +59,8 @@
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
@ -73,6 +75,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@ -122,7 +126,6 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@ -283,9 +286,12 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
// 1101: constant pool. Used on ARM only for now.
// The format is: 11 1101 11
// signed int (size of the constant pool).
// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
// The format is: [2-bit sub-type] 1101 11
// signed int (size of the pool).
// The 2-bit sub-types are:
// 00: constant pool
// 01: veneer pool
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
@ -342,8 +348,9 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 3;
const int kPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 0;
const int kVeneerPoolTag = 1;
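// Illustrative sketch (not part of the diff): the byte layout described in
// the encoding comment above, assuming the usual tag widths (2 tag bits,
// 4 extra-tag bits, default tag binary 11). A pool record byte reads, MSB to
// LSB: [2-bit sub-type] 1101 11.
#include <cstdint>
#include <cassert>

int main() {
  const int kTagBits = 2;        // assumed widths
  const int kExtraTagBits = 4;
  const int kDefaultTag = 3;     // binary 11 marks an extra tag
  const int kPoolExtraTag = 13;  // binary 1101
  const int kConstPoolTag = 0;
  const int kVeneerPoolTag = 1;
  auto encode = [&](int top_tag) {
    return static_cast<uint8_t>((top_tag << (kTagBits + kExtraTagBits)) |
                                (kPoolExtraTag << kTagBits) | kDefaultTag);
  };
  assert(encode(kConstPoolTag) == 0x37);   // 00 1101 11
  assert(encode(kVeneerPoolTag) == 0x77);  // 01 1101 11
  return 0;
}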
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@ -403,8 +410,8 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
void RelocInfoWriter::WriteExtraTaggedPoolData(int data, int pool_type) {
WriteExtraTag(kPoolExtraTag, pool_type);
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
@ -476,9 +483,11 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
} else if (RelocInfo::IsConstPool(rmode)) {
} else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
RelocInfo::IsConstPool(rmode) ? kConstPoolTag
: kVeneerPoolTag);
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@ -529,7 +538,7 @@ void RelocIterator::AdvanceReadId() {
}
void RelocIterator::AdvanceReadConstPoolData() {
void RelocIterator::AdvanceReadPoolData() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
@ -671,10 +680,13 @@ void RelocIterator::next() {
}
Advance(kIntptrSize);
}
} else if ((extra_tag == kConstPoolExtraTag) &&
(GetTopTag() == kConstPoolTag)) {
if (SetMode(RelocInfo::CONST_POOL)) {
AdvanceReadConstPoolData();
} else if (extra_tag == kPoolExtraTag) {
int pool_type = GetTopTag();
ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
if (SetMode(rmode)) {
AdvanceReadPoolData();
return;
}
Advance(kIntSize);
@ -793,6 +805,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "internal reference";
case RelocInfo::CONST_POOL:
return "constant pool";
case RelocInfo::VENEER_POOL:
return "veneer pool";
case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
@ -880,6 +894,7 @@ void RelocInfo::Verify() {
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
case CONST_POOL:
case VENEER_POOL:
case DEBUG_BREAK_SLOT:
case NONE32:
case NONE64:
@ -1025,14 +1040,6 @@ ExternalReference ExternalReference::
}
ExternalReference ExternalReference::
incremental_evacuation_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
}
ExternalReference ExternalReference::
store_buffer_overflow_function(Isolate* isolate) {
return ExternalReference(Redirect(
@ -1052,6 +1059,12 @@ ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
}
ExternalReference ExternalReference::out_of_memory_function(Isolate* isolate) {
return
ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::OutOfMemory)));
}
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@ -1336,6 +1349,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM64
function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
@ -1596,4 +1611,38 @@ bool PositionsRecorder::WriteRecordedPositions() {
return written;
}
MultiplierAndShift::MultiplierAndShift(int32_t d) {
ASSERT(d <= -2 || 2 <= d);
const uint32_t two31 = 0x80000000;
uint32_t ad = Abs(d);
uint32_t t = two31 + (uint32_t(d) >> 31);
uint32_t anc = t - 1 - t % ad; // Absolute value of nc.
int32_t p = 31; // Init. p.
uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
uint32_t q2 = two31 / ad; // Init. q2 = 2**p/|d|.
uint32_t r2 = two31 - q2 * ad; // Init. r2 = rem(2**p, |d|).
uint32_t delta;
do {
p++;
q1 *= 2; // Update q1 = 2**p/|nc|.
r1 *= 2; // Update r1 = rem(2**p, |nc|).
if (r1 >= anc) { // Must be an unsigned comparison here.
q1++;
r1 = r1 - anc;
}
q2 *= 2; // Update q2 = 2**p/|d|.
r2 *= 2; // Update r2 = rem(2**p, |d|).
if (r2 >= ad) { // Must be an unsigned comparison here.
q2++;
r2 = r2 - ad;
}
delta = ad - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
int32_t mul = static_cast<int32_t>(q2 + 1);
multiplier_ = (d < 0) ? -mul : mul;
shift_ = p - 32;
}
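// Illustrative check (not part of the diff): using a multiplier/shift pair
// like the one the routine above computes in place of a signed division. The
// pair for d = 7 (multiplier 0x92492493, shift 2) is the classic example
// from Hacker's Delight; a correction term is needed because the multiplier
// is negative when reinterpreted as int32_t. Arithmetic right shift of a
// negative value is assumed, as V8 itself assumes.
#include <cstdint>
#include <cassert>

static int32_t DivByMul7(int32_t x) {
  const int32_t multiplier = static_cast<int32_t>(0x92492493u);
  const int32_t shift = 2;
  int32_t hi =
      static_cast<int32_t>((static_cast<int64_t>(multiplier) * x) >> 32);
  hi += x;             // correction: multiplier < 0
  hi >>= shift;        // arithmetic shift (assumed)
  if (x < 0) hi += 1;  // round toward zero for negative dividends
  return hi;
}

int main() {
  const int32_t tests[] = {-100, -7, -1, 0, 1, 6, 7, 100, 2147483647};
  for (int32_t x : tests) assert(DivByMul7(x) == x / 7);
  return 0;
}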
} } // namespace v8::internal

87
deps/v8/src/assembler.h

@ -82,6 +82,10 @@ class AssemblerBase: public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
// This function is called when code generation is aborted, so that
// the assembler could clean up internal data structures.
virtual void AbortedCodeGeneration() { }
static const int kMinimalBufferSize = 4*KB;
protected:
@ -210,6 +214,12 @@ class Label BASE_EMBEDDED {
friend class Assembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Copying labels would confuse the Assembler.
DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
#endif
};
@ -276,9 +286,10 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Marks a constant pool. Only used on ARM.
// It uses a custom noncompact encoding.
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
VENEER_POOL,
// add more as needed
// Pseudo-types
@ -288,7 +299,7 @@ class RelocInfo BASE_EMBEDDED {
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = CONST_POOL,
LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
@ -342,6 +353,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsConstPool(Mode mode) {
return mode == CONST_POOL;
}
static inline bool IsVeneerPool(Mode mode) {
return mode == VENEER_POOL;
}
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@ -365,6 +379,15 @@ class RelocInfo BASE_EMBEDDED {
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Returns true if the first RelocInfo has the same mode and raw data as the
// second one.
static inline bool IsEqual(RelocInfo first, RelocInfo second) {
return first.rmode() == second.rmode() &&
(first.rmode() == RelocInfo::NONE64 ?
first.raw_data64() == second.raw_data64() :
first.data() == second.data());
}
// Accessors
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
@ -375,6 +398,7 @@ class RelocInfo BASE_EMBEDDED {
return BitCast<uint64_t>(data64_);
}
Code* host() const { return host_; }
void set_host(Code* host) { host_ = host; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@ -384,6 +408,10 @@ class RelocInfo BASE_EMBEDDED {
// instructions).
bool IsCodedSpecially();
// If true, the pointer this relocation info refers to is an entry in the
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@ -406,6 +434,10 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
INLINE(Address constant_pool_entry_address());
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
// The only architecture-independent user of this function is the serializer.
@ -413,6 +445,7 @@ class RelocInfo BASE_EMBEDDED {
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
INLINE(Address target_address_address());
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
// an instruction like movw/movt where the target bits are mixed into the
@ -537,7 +570,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
inline void WriteExtraTaggedConstPoolData(int data);
inline void WriteExtraTaggedPoolData(int data, int pool_type);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
@ -588,7 +621,7 @@ class RelocIterator: public Malloced {
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
void AdvanceReadConstPoolData();
void AdvanceReadPoolData();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
@ -711,12 +744,11 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
static ExternalReference incremental_evacuation_record_write_function(
Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference out_of_memory_function(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
@ -1002,32 +1034,6 @@ class PreservePositionScope BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
inline bool is_intn(int x, int n) {
return -(1 << (n-1)) <= x && x < (1 << (n-1));
}
inline bool is_int8(int x) { return is_intn(x, 8); }
inline bool is_int16(int x) { return is_intn(x, 16); }
inline bool is_int18(int x) { return is_intn(x, 18); }
inline bool is_int24(int x) { return is_intn(x, 24); }
inline bool is_uintn(int x, int n) {
return (x & -(1 << n)) == 0;
}
inline bool is_uint2(int x) { return is_uintn(x, 2); }
inline bool is_uint3(int x) { return is_uintn(x, 3); }
inline bool is_uint4(int x) { return is_uintn(x, 4); }
inline bool is_uint5(int x) { return is_uintn(x, 5); }
inline bool is_uint6(int x) { return is_uintn(x, 6); }
inline bool is_uint8(int x) { return is_uintn(x, 8); }
inline bool is_uint10(int x) { return is_uintn(x, 10); }
inline bool is_uint12(int x) { return is_uintn(x, 12); }
inline bool is_uint16(int x) { return is_uintn(x, 16); }
inline bool is_uint24(int x) { return is_uintn(x, 24); }
inline bool is_uint26(int x) { return is_uintn(x, 26); }
inline bool is_uint28(int x) { return is_uintn(x, 28); }
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
@ -1065,6 +1071,21 @@ class NullCallWrapper : public CallWrapper {
virtual void AfterCall() const { }
};
// The multiplier and shift for signed division via multiplication, see Warren's
// "Hacker's Delight", chapter 10.
class MultiplierAndShift {
public:
explicit MultiplierAndShift(int32_t d);
int32_t multiplier() const { return multiplier_; }
int32_t shift() const { return shift_; }
private:
int32_t multiplier_;
int32_t shift_;
};
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_

21
deps/v8/src/assert-scope.cc

@ -0,0 +1,21 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "assert-scope.h"
#include "v8.h"
namespace v8 {
namespace internal {
uint32_t PerIsolateAssertBase::GetData(Isolate* isolate) {
return isolate->per_isolate_assert_data();
}
void PerIsolateAssertBase::SetData(Isolate* isolate, uint32_t data) {
isolate->set_per_isolate_assert_data(data);
}
} } // namespace v8::internal

129
deps/v8/src/assert-scope.h

@ -30,6 +30,7 @@
#include "allocation.h"
#include "platform.h"
#include "utils.h"
namespace v8 {
namespace internal {
@ -46,7 +47,13 @@ enum PerThreadAssertType {
};
#ifdef DEBUG
enum PerIsolateAssertType {
JAVASCRIPT_EXECUTION_ASSERT,
JAVASCRIPT_EXECUTION_THROWS,
ALLOCATION_FAILURE_ASSERT
};
class PerThreadAssertData {
public:
PerThreadAssertData() : nesting_level_(0) {
@ -72,12 +79,9 @@ class PerThreadAssertData {
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
#endif // DEBUG
class PerThreadAssertScopeBase {
#ifdef DEBUG
protected:
PerThreadAssertScopeBase() {
data_ = GetAssertData();
@ -110,18 +114,12 @@ class PerThreadAssertScopeBase {
static void SetThreadLocalData(PerThreadAssertData* data) {
Thread::SetThreadLocal(thread_local_key, data);
}
#endif // DEBUG
};
template <PerThreadAssertType type, bool allow>
class PerThreadAssertScope : public PerThreadAssertScopeBase {
public:
#ifndef DEBUG
PerThreadAssertScope() { }
static void SetIsAllowed(bool is_allowed) { }
#else
PerThreadAssertScope() {
old_state_ = data_->get(type);
data_->set(type, allow);
@ -136,49 +134,140 @@ class PerThreadAssertScope : public PerThreadAssertScopeBase {
private:
bool old_state_;
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
};
class PerIsolateAssertBase {
protected:
static uint32_t GetData(Isolate* isolate);
static void SetData(Isolate* isolate, uint32_t data);
};
template <PerIsolateAssertType type, bool allow>
class PerIsolateAssertScope : public PerIsolateAssertBase {
public:
explicit PerIsolateAssertScope(Isolate* isolate) : isolate_(isolate) {
STATIC_ASSERT(type < 32);
old_data_ = GetData(isolate_);
SetData(isolate_, DataBit::update(old_data_, allow));
}
~PerIsolateAssertScope() {
SetData(isolate_, old_data_);
}
static bool IsAllowed(Isolate* isolate) {
return DataBit::decode(GetData(isolate));
}
private:
typedef BitField<bool, type, 1> DataBit;
uint32_t old_data_;
Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope);
};
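// Illustrative sketch (not part of the diff): the one-bit-per-assert-type
// packing that DataBit (a BitField<bool, type, 1>) provides, with the
// save/restore discipline of the scope's constructor and destructor.
#include <cstdint>
#include <cassert>

static uint32_t g_data = 0;  // stand-in for the isolate's assert word

struct ScopedBit {
  uint32_t old_data_;
  ScopedBit(int type, bool allow) : old_data_(g_data) {
    g_data = (g_data & ~(1u << type)) | (static_cast<uint32_t>(allow) << type);
  }
  ~ScopedBit() { g_data = old_data_; }  // restore on scope exit
  static bool IsAllowed(int type) { return (g_data >> type) & 1; }
};

int main() {
  {
    ScopedBit allow(/*type=*/2, true);
    assert(ScopedBit::IsAllowed(2));
    {
      ScopedBit disallow(2, false);  // nested scopes override...
      assert(!ScopedBit::IsAllowed(2));
    }                                // ...and restore on destruction
    assert(ScopedBit::IsAllowed(2));
  }
  assert(!ScopedBit::IsAllowed(2));
  return 0;
}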
template <PerThreadAssertType type, bool allow>
#ifdef DEBUG
class PerThreadAssertScopeDebugOnly : public
PerThreadAssertScope<type, allow> {
#else
class PerThreadAssertScopeDebugOnly {
public:
PerThreadAssertScopeDebugOnly() { }
#endif
};
template <PerIsolateAssertType type, bool allow>
#ifdef DEBUG
class PerIsolateAssertScopeDebugOnly : public
PerIsolateAssertScope<type, allow> {
public:
explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate)
: PerIsolateAssertScope<type, allow>(isolate) { }
#else
class PerIsolateAssertScopeDebugOnly {
public:
explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate) { }
#endif
};
// Per-thread assert scopes.
// Scope to document where we do not expect handles to be created.
typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, false>
DisallowHandleAllocation;
// Scope to introduce an exception to DisallowHandleAllocation.
typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>
AllowHandleAllocation;
// Scope to document where we do not expect any allocation and GC.
typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
DisallowHeapAllocation;
// Scope to introduce an exception to DisallowHeapAllocation.
typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, true>
AllowHeapAllocation;
// Scope to document where we do not expect any handle dereferences.
typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, false>
DisallowHandleDereference;
// Scope to introduce an exception to DisallowHandleDereference.
typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>
AllowHandleDereference;
// Scope to document where we do not expect deferred handles to be dereferenced.
typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
DisallowDeferredHandleDereference;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
AllowDeferredHandleDereference;
// Scope to document where we do not expect code dependencies to change.
typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>
typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>
DisallowCodeDependencyChange;
// Scope to introduce an exception to DisallowCodeDependencyChange.
typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>
typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>
AllowCodeDependencyChange;
// Per-isolate assert scopes.
// Scope to document where we do not expect JavaScript execution.
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
DisallowJavascriptExecution;
// Scope to introduce an exception to DisallowJavascriptExecution.
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
AllowJavascriptExecution;
// Scope in which JavaScript execution leads to an exception being thrown.
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
ThrowOnJavascriptExecution;
// Scope to introduce an exception to ThrowOnJavascriptExecution.
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>
NoThrowOnJavascriptExecution;
// Scope to document where we do not expect an allocation failure.
typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, false>
DisallowAllocationFailure;
// Scope to introduce an exception to DisallowAllocationFailure.
typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, true>
AllowAllocationFailure;
} } // namespace v8::internal
#endif // V8_ASSERT_SCOPE_H_
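
To make the per-isolate mechanism concrete: each PerIsolateAssertType owns one bit of a per-isolate word, the scope's constructor overwrites that bit, and the destructor restores the saved word, so scopes nest naturally. A minimal self-contained sketch follows; Isolate here is a hypothetical stand-in for the real class, and the bit twiddling plays the role of DataBit::update/decode.

#include <cassert>
#include <cstdint>

struct Isolate {                         // Hypothetical stand-in.
  uint32_t assert_data = 0xFFFFFFFFu;    // Every assert type allowed at first.
};

enum AssertType { JS_EXECUTION = 0, ALLOCATION_FAILURE = 1 };

template <AssertType type, bool allow>
class AssertScope {
 public:
  explicit AssertScope(Isolate* isolate)
      : isolate_(isolate), old_data_(isolate->assert_data) {
    const uint32_t mask = 1u << type;    // This type's bit in the word.
    isolate_->assert_data = allow ? (old_data_ | mask) : (old_data_ & ~mask);
  }
  ~AssertScope() { isolate_->assert_data = old_data_; }  // Restore on exit.
  static bool IsAllowed(Isolate* isolate) {
    return (isolate->assert_data >> type) & 1;
  }
 private:
  Isolate* isolate_;
  uint32_t old_data_;
};

typedef AssertScope<JS_EXECUTION, false> DisallowJS;
typedef AssertScope<JS_EXECUTION, true> AllowJS;

int main() {
  Isolate isolate;
  {
    DisallowJS no_js(&isolate);          // Clear the bit for this scope...
    assert(!DisallowJS::IsAllowed(&isolate));
    {
      AllowJS js_ok(&isolate);           // ...re-set it in a nested scope.
      assert(AllowJS::IsAllowed(&isolate));
    }
    assert(!DisallowJS::IsAllowed(&isolate));  // Nested scope restored.
  }
  assert(DisallowJS::IsAllowed(&isolate));     // Outer scope restored.
}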

49
deps/v8/src/ast.cc

@ -180,8 +180,8 @@ int FunctionLiteral::end_position() const {
}
LanguageMode FunctionLiteral::language_mode() const {
return scope()->language_mode();
StrictMode FunctionLiteral::strict_mode() const {
return scope()->strict_mode();
}
@ -357,8 +357,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array =
isolate->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
isolate->factory()->SetElementsCapacityAndLength(
array, values()->length(), values()->length());
JSArray::Expand(array, values()->length());
// Fill in the literals.
bool is_simple = true;
@ -379,9 +378,9 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
} else if (boilerplate_value->IsUninitialized()) {
is_simple = false;
JSObject::SetOwnElement(
array, i, handle(Smi::FromInt(0), isolate), kNonStrictMode);
array, i, handle(Smi::FromInt(0), isolate), SLOPPY);
} else {
JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
JSObject::SetOwnElement(array, i, boilerplate_value, SLOPPY);
}
}
@ -593,6 +592,17 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
int Call::ComputeFeedbackSlotCount(Isolate* isolate) {
CallType call_type = GetCallType(isolate);
if (call_type == LOOKUP_SLOT_CALL || call_type == OTHER_CALL) {
// Call only uses a slot in some cases.
return 1;
}
return 0;
}
Call::CallType Call::GetCallType(Isolate* isolate) const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) {
@ -632,11 +642,14 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
int allocation_site_feedback_slot = FLAG_pretenuring_call_new
? AllocationSiteFeedbackSlot()
: CallNewFeedbackSlot();
allocation_site_ =
oracle->GetCallNewAllocationSite(CallNewFeedbackId());
is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackId());
oracle->GetCallNewAllocationSite(allocation_site_feedback_slot);
is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(CallNewFeedbackId());
target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
if (!allocation_site_.is_null()) {
elements_kind_ = allocation_site_->GetElementsKind();
}
@ -1039,6 +1052,11 @@ CaseClause::CaseClause(Zone* zone,
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_slot_node(node); \
}
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@ -1051,6 +1069,12 @@ CaseClause::CaseClause(Zone* zone,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_slot_node(node); \
add_flag(kDontSelfOptimize); \
}
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@ -1085,8 +1109,8 @@ REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
REGULAR_NODE(Call)
REGULAR_NODE(CallNew)
REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
@ -1111,11 +1135,12 @@ DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {

90
deps/v8/src/ast.h

@ -32,6 +32,7 @@
#include "assembler.h"
#include "factory.h"
#include "feedback-slots.h"
#include "isolate.h"
#include "jsregexp.h"
#include "list-inl.h"
@ -181,7 +182,7 @@ class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
AstProperties() : node_count_(0) { }
AstProperties() : node_count_(0) {}
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
@ -914,7 +915,8 @@ class ForEachStatement : public IterationStatement {
};
class ForInStatement V8_FINAL : public ForEachStatement {
class ForInStatement V8_FINAL : public ForEachStatement,
public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(ForInStatement)
@ -922,7 +924,16 @@ class ForInStatement V8_FINAL : public ForEachStatement {
return subject();
}
TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
// Type feedback information.
virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
int ForInFeedbackSlot() {
ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
return for_in_feedback_slot_;
}
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; }
@ -936,11 +947,13 @@ class ForInStatement V8_FINAL : public ForEachStatement {
ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
: ForEachStatement(zone, labels, pos),
for_in_type_(SLOW_FOR_IN),
for_in_feedback_slot_(kInvalidFeedbackSlot),
body_id_(GetNextId(zone)),
prepare_id_(GetNextId(zone)) {
}
ForInType for_in_type_;
int for_in_feedback_slot_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
@ -1733,7 +1746,7 @@ class Property V8_FINAL : public Expression {
};
class Call V8_FINAL : public Expression {
class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Call)
@ -1741,7 +1754,16 @@ class Call V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
virtual ComputablePhase GetComputablePhase() { return AFTER_SCOPING; }
virtual int ComputeFeedbackSlotCount(Isolate* isolate);
virtual void SetFirstFeedbackSlot(int slot) {
call_feedback_slot_ = slot;
}
bool HasCallFeedbackSlot() const {
return call_feedback_slot_ != kInvalidFeedbackSlot;
}
int CallFeedbackSlot() const { return call_feedback_slot_; }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
if (expression()->IsProperty()) {
@ -1790,6 +1812,7 @@ class Call V8_FINAL : public Expression {
: Expression(zone, pos),
expression_(expression),
arguments_(arguments),
call_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) {
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
@ -1802,12 +1825,13 @@ class Call V8_FINAL : public Expression {
Handle<JSFunction> target_;
Handle<Cell> cell_;
int call_feedback_slot_;
const BailoutId return_id_;
};
class CallNew V8_FINAL : public Expression {
class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(CallNew)
@ -1815,7 +1839,24 @@ class CallNew V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
virtual int ComputeFeedbackSlotCount(Isolate* isolate) {
return FLAG_pretenuring_call_new ? 2 : 1;
}
virtual void SetFirstFeedbackSlot(int slot) {
callnew_feedback_slot_ = slot;
}
int CallNewFeedbackSlot() {
ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
return callnew_feedback_slot_;
}
int AllocationSiteFeedbackSlot() {
ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
ASSERT(FLAG_pretenuring_call_new);
return callnew_feedback_slot_ + 1;
}
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
@ -1824,6 +1865,8 @@ class CallNew V8_FINAL : public Expression {
return allocation_site_;
}
static int feedback_slots() { return 1; }
BailoutId ReturnId() const { return return_id_; }
protected:
@ -1836,6 +1879,7 @@ class CallNew V8_FINAL : public Expression {
arguments_(arguments),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
callnew_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) { }
private:
@ -1846,6 +1890,7 @@ class CallNew V8_FINAL : public Expression {
Handle<JSFunction> target_;
ElementsKind elements_kind_;
Handle<AllocationSite> allocation_site_;
int callnew_feedback_slot_;
const BailoutId return_id_;
};
@ -2276,8 +2321,7 @@ class FunctionLiteral V8_FINAL : public Expression {
int SourceSize() const { return end_position() - start_position(); }
bool is_expression() const { return IsExpression::decode(bitfield_); }
bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
LanguageMode language_mode() const;
StrictMode strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@ -2332,7 +2376,15 @@ class FunctionLiteral V8_FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
void set_slot_processor(DeferredFeedbackSlotProcessor* slot_processor) {
slot_processor_ = *slot_processor;
}
void ProcessFeedbackSlots(Isolate* isolate) {
slot_processor_.ProcessFeedbackSlots(isolate);
}
int slot_count() {
return slot_processor_.slot_count();
}
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
void set_dont_optimize_reason(BailoutReason reason) {
@ -2382,6 +2434,7 @@ class FunctionLiteral V8_FINAL : public Expression {
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
@ -2856,10 +2909,13 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
explicit AstConstructionVisitor(Zone* zone)
: dont_optimize_reason_(kNoReason),
zone_(zone) { }
AstProperties* ast_properties() { return &properties_; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
DeferredFeedbackSlotProcessor* slot_processor() { return &slot_processor_; }
private:
template<class> friend class AstNodeFactory;
@ -2876,13 +2932,21 @@ class AstConstructionVisitor BASE_EMBEDDED {
dont_optimize_reason_ = reason;
}
void add_slot_node(FeedbackSlotInterface* slot_node) {
slot_processor_.add_slot_node(zone_, slot_node);
}
AstProperties properties_;
DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
Zone* zone_;
};
class AstNullVisitor BASE_EMBEDDED {
public:
explicit AstNullVisitor(Zone* zone) {}
// Node visitors.
#define DEF_VISIT(type) \
void Visit##type(type* node) {}
@ -2898,7 +2962,9 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
explicit AstNodeFactory(Zone* zone) : zone_(zone) { }
explicit AstNodeFactory(Zone* zone)
: zone_(zone),
visitor_(zone) { }
Visitor* visitor() { return &visitor_; }
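
The slot bookkeeping above works in two phases: nodes register with the construction visitor while the AST is built, and a deferred pass later hands each node its first slot once all counts are known (a Call may need one slot or none; a CallNew needs one, or two when FLAG_pretenuring_call_new reserves an adjacent allocation-site slot). The sketch below illustrates that scheme with pared-down interfaces; the real ones take Isolate*/Zone*, and these class names are illustrative.

#include <cstdio>
#include <vector>

static const int kInvalidFeedbackSlot = -1;
static bool FLAG_pretenuring_call_new = true;  // Assumed flag for the sketch.

// Simplified stand-in for the FeedbackSlotInterface used above.
class FeedbackSlotInterface {
 public:
  virtual ~FeedbackSlotInterface() {}
  virtual int ComputeFeedbackSlotCount() = 0;
  virtual void SetFirstFeedbackSlot(int slot) = 0;
};

// CallNew reserves one slot for the target and, when pretenuring is enabled,
// a second adjacent slot for the allocation site.
class CallNewNode : public FeedbackSlotInterface {
 public:
  int ComputeFeedbackSlotCount() override {
    return FLAG_pretenuring_call_new ? 2 : 1;
  }
  void SetFirstFeedbackSlot(int slot) override { first_slot_ = slot; }
  int CallNewFeedbackSlot() const { return first_slot_; }
  int AllocationSiteFeedbackSlot() const { return first_slot_ + 1; }
 private:
  int first_slot_ = kInvalidFeedbackSlot;
};

// Mirrors the deferred pass: nodes are collected during AST construction and
// numbered sequentially once every count is final.
class SlotProcessor {
 public:
  void add_slot_node(FeedbackSlotInterface* node) { nodes_.push_back(node); }
  void ProcessFeedbackSlots() {
    int next = 0;
    for (FeedbackSlotInterface* node : nodes_) {
      int count = node->ComputeFeedbackSlotCount();
      if (count > 0) {
        node->SetFirstFeedbackSlot(next);
        next += count;
      }
    }
    slot_count_ = next;
  }
  int slot_count() const { return slot_count_; }
 private:
  std::vector<FeedbackSlotInterface*> nodes_;
  int slot_count_ = 0;
};

int main() {
  CallNewNode a, b;
  SlotProcessor processor;
  processor.add_slot_node(&a);
  processor.add_slot_node(&b);
  processor.ProcessFeedbackSlots();
  // With pretenuring on, the nodes take slots {0,1} and {2,3}: 4 in total.
  std::printf("total slots: %d\n", processor.slot_count());
}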

33
deps/v8/src/atomicops.h

@ -51,6 +51,15 @@
#include "../include/v8.h"
#include "globals.h"
#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// x64, undef it and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif
namespace v8 {
namespace internal {
@ -58,9 +67,7 @@ typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__) || defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
@ -69,11 +76,7 @@ typedef intptr_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#if defined(__OpenBSD__) && defined(__i386__)
typedef Atomic32 AtomicWord;
#else
typedef intptr_t AtomicWord;
#endif
// Atomically execute:
// result = *ptr;
@ -155,16 +158,24 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
#elif defined(__APPLE__)
#include "atomicops_internals_mac.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
#include "atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(__APPLE__) || defined(__OpenBSD__)
#include "atomicops_internals_atomicword_compat.h"
#endif
#endif // V8_ATOMICOPS_H_

372
deps/v8/src/atomicops_internals_arm64_gcc.h

@ -0,0 +1,372 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation; use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_
namespace v8 {
namespace internal {
inline void MemoryBarrier() {
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t" // Data memory barrier.
::: "memory"
); // NOLINT
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
"clrex \n\t" // In case we didn't swap.
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
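// A C++11 analogue of the ldxr/stxr sequence above, for illustration only:
// compare_exchange_weak may fail spuriously, just as stxr may, so it sits in
// a retry loop, and a genuine value mismatch exits early like the "bne 1f"
// branch. NoBarrierCompareAndSwapSketch is a hypothetical name, not part of
// this file.
#include <atomic>
#include <cstdint>

inline int32_t NoBarrierCompareAndSwapSketch(std::atomic<int32_t>* ptr,
                                             int32_t old_value,
                                             int32_t new_value) {
  int32_t expected = old_value;
  while (!ptr->compare_exchange_weak(expected, new_value,
                                     std::memory_order_relaxed)) {
    if (expected != old_value) break;  // Real mismatch: stop and report it.
  }
  return expected;  // The previous value, whether or not the swap happened.
}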
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"add %w[result], %w[result], %w[increment]\n\t"
"stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
"cbnz %w[temp], 0b \n\t" // Retry on failure.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"r" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
MemoryBarrier();
Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"dmb ish \n\t" // Data memory barrier.
"1: \n\t"
// If the compare failed, the 'dmb' is unnecessary, but we still need a
// 'clrex'.
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
MemoryBarrier();
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
// If the compare failed, we still need a 'clrex'.
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"add %[result], %[result], %[increment] \n\t"
"stxr %w[temp], %[result], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"r" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
MemoryBarrier();
Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"dmb ish \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
MemoryBarrier();
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} } // namespace v8::internal
#endif  // V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_
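
A usage note on the barrier pairs above: Release_Store publishes writes made before it, and a matching Acquire_Load makes them visible to the observing thread. The sketch below expresses the same contract with C++11 atomics, as an analogue rather than V8 code.

#include <atomic>
#include <cassert>
#include <thread>

int payload = 0;
std::atomic<int> ready(0);

void Producer() {
  payload = 42;                               // Plain write...
  ready.store(1, std::memory_order_release);  // ...published, as Release_Store.
}

void Consumer() {
  while (ready.load(std::memory_order_acquire) == 0) {  // As Acquire_Load.
  }
  assert(payload == 42);  // Guaranteed visible once the flag is observed.
}

int main() {
  std::thread t1(Producer), t2(Consumer);
  t1.join();
  t2.join();
}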

Some files were not shown because too many files changed in this diff
