
deps: update v8 to 3.24.40

v0.11.13-release
Author: Fedor Indutny
Commit: 1c7bf245dc
Changed files:
    4  deps/v8/.clang-format
    6  deps/v8/AUTHORS
  528  deps/v8/ChangeLog
    4  deps/v8/DEPS
    2  deps/v8/LICENSE
   67  deps/v8/Makefile
   58  deps/v8/Makefile.android
    8  deps/v8/Makefile.nacl
    4  deps/v8/OWNERS
   13  deps/v8/PRESUBMIT.py
    8  deps/v8/build/all.gyp
   24  deps/v8/build/android.gypi
    8  deps/v8/build/features.gypi
   23  deps/v8/build/gyp_v8
   41  deps/v8/build/gyp_v8.py
   49  deps/v8/build/standalone.gypi
   45  deps/v8/build/toolchain.gypi
    7  deps/v8/codereview.settings
   86  deps/v8/include/v8-platform.h
   67  deps/v8/include/v8-profiler.h
 1092  deps/v8/include/v8.h
   20  deps/v8/include/v8config.h
   35  deps/v8/samples/lineprocessor.cc
   93  deps/v8/samples/process.cc
   60  deps/v8/samples/shell.cc
    1  deps/v8/src/a64/OWNERS
 1200  deps/v8/src/a64/assembler-a64-inl.h
 2606  deps/v8/src/a64/assembler-a64.cc
 2085  deps/v8/src/a64/assembler-a64.h
 1479  deps/v8/src/a64/builtins-a64.cc
 5809  deps/v8/src/a64/code-stubs-a64.cc
  469  deps/v8/src/a64/code-stubs-a64.h
  616  deps/v8/src/a64/codegen-a64.cc
   66  deps/v8/src/a64/codegen-a64.h
 1262  deps/v8/src/a64/constants-a64.h
  199  deps/v8/src/a64/cpu-a64.cc
  107  deps/v8/src/a64/cpu-a64.h
  394  deps/v8/src/a64/debug-a64.cc
  111  deps/v8/src/a64/debugger-a64.cc
   56  deps/v8/src/a64/debugger-a64.h
  726  deps/v8/src/a64/decoder-a64.cc
  202  deps/v8/src/a64/decoder-a64.h
  376  deps/v8/src/a64/deoptimizer-a64.cc
 1854  deps/v8/src/a64/disasm-a64.cc
  115  deps/v8/src/a64/disasm-a64.h
   41  deps/v8/src/a64/frames-a64.cc
  131  deps/v8/src/a64/frames-a64.h
 5010  deps/v8/src/a64/full-codegen-a64.cc
 1413  deps/v8/src/a64/ic-a64.cc
  334  deps/v8/src/a64/instructions-a64.cc
  516  deps/v8/src/a64/instructions-a64.h
  618  deps/v8/src/a64/instrument-a64.cc
  108  deps/v8/src/a64/instrument-a64.h
 2449  deps/v8/src/a64/lithium-a64.cc
 2967  deps/v8/src/a64/lithium-a64.h
 5692  deps/v8/src/a64/lithium-codegen-a64.cc
  473  deps/v8/src/a64/lithium-codegen-a64.h
  326  deps/v8/src/a64/lithium-gap-resolver-a64.cc
   90  deps/v8/src/a64/lithium-gap-resolver-a64.h
 1647  deps/v8/src/a64/macro-assembler-a64-inl.h
 4975  deps/v8/src/a64/macro-assembler-a64.cc
 2238  deps/v8/src/a64/macro-assembler-a64.h
 1730  deps/v8/src/a64/regexp-macro-assembler-a64.cc
  315  deps/v8/src/a64/regexp-macro-assembler-a64.h
 3414  deps/v8/src/a64/simulator-a64.cc
  868  deps/v8/src/a64/simulator-a64.h
 1548  deps/v8/src/a64/stub-cache-a64.cc
  112  deps/v8/src/a64/utils-a64.cc
  109  deps/v8/src/a64/utils-a64.h
  118  deps/v8/src/accessors.cc
    7  deps/v8/src/accessors.h
   32  deps/v8/src/allocation-site-scopes.cc
   53  deps/v8/src/allocation-site-scopes.h
   32  deps/v8/src/allocation-tracker.cc
    9  deps/v8/src/allocation-tracker.h
   41  deps/v8/src/allocation.cc
   29  deps/v8/src/allocation.h
 1388  deps/v8/src/api.cc
   53  deps/v8/src/api.h
    1  deps/v8/src/arm/OWNERS
   83  deps/v8/src/arm/assembler-arm-inl.h
  190  deps/v8/src/arm/assembler-arm.cc
   51  deps/v8/src/arm/assembler-arm.h
  174  deps/v8/src/arm/builtins-arm.cc
 1982  deps/v8/src/arm/code-stubs-arm.cc
   95  deps/v8/src/arm/code-stubs-arm.h
   60  deps/v8/src/arm/codegen-arm.cc
   41  deps/v8/src/arm/codegen-arm.h
    3  deps/v8/src/arm/constants-arm.h
    9  deps/v8/src/arm/cpu-arm.cc
   10  deps/v8/src/arm/debug-arm.cc
    2  deps/v8/src/arm/deoptimizer-arm.cc
    8  deps/v8/src/arm/disasm-arm.cc
   15  deps/v8/src/arm/frames-arm.cc
   12  deps/v8/src/arm/frames-arm.h
  887  deps/v8/src/arm/full-codegen-arm.cc
  414  deps/v8/src/arm/ic-arm.cc
  489  deps/v8/src/arm/lithium-arm.cc
  421  deps/v8/src/arm/lithium-arm.h
 1047  deps/v8/src/arm/lithium-codegen-arm.cc

deps/v8/.clang-format (4 changes)

@ -0,0 +1,4 @@
# Defines the Google C++ style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Google
MaxEmptyLinesToKeep: 2

deps/v8/AUTHORS (6 changes)

@ -11,12 +11,16 @@ Igalia, S.L.
Joyent, Inc.
Bloomberg Finance L.P.
NVIDIA Corporation
BlackBerry Limited
Opera Software ASA
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Rames <alexandre.rames@arm.com>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Baptiste Afsa <baptiste.afsa@arm.com>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
@ -29,6 +33,7 @@ Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Haitao Feng <haitao.feng@intel.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Jacob Bramley <jacob.bramley@arm.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
@ -57,6 +62,7 @@ Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
Vincent Belliard <vincent.belliard@arm.com>
Vlad Burlik <vladbph@gmail.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>

deps/v8/ChangeLog (528 changes)

@ -1,3 +1,531 @@
2014-02-19: Version 3.24.40
A64: Let the MacroAssembler resolve branches to distant targets (issue
3148).
Fixed and improved code for integral division. Fixed and extended tests
(issue 3151).
MIPS: Fix assignment of function name constant (issue 3138).
Fix assignment of function name constant (issue 3138).
Performance and stability improvements on all platforms.
2014-02-14: Version 3.24.39
Introduce --job-based-sweeping flag and use individual jobs for sweeping
if set (issue 3104).
Performance and stability improvements on all platforms.
2014-02-13: Version 3.24.38
Merge experimental/a64 to bleeding_edge (issue 3113).
Performance and stability improvements on all platforms.
2014-02-12: Version 3.24.37
Fix spec violations in JSON.stringify wrt replacer array (issue 3135).
Performance and stability improvements on all platforms.
2014-02-11: Version 3.24.36
Fix inconsistencies wrt whitespaces (issue 3109).
Performance and stability improvements on all platforms.
2014-02-10: Version 3.24.35
Fix inconsistencies wrt whitespaces (issue 3109).
Performance and stability improvements on all platforms.
2014-02-07: Version 3.24.34
Performance and stability improvements on all platforms.
2014-02-06: Version 3.24.33
Allow externalizing strings in old pointer space (Chromium issue
268686).
Performance and stability improvements on all platforms.
2014-02-05: Version 3.24.32
Add Box object to heap profiler.
Check the offset argument of TypedArray.set for fitting into Smi
(Chromium issue 340125).
Performance and stability improvements on all platforms.
2014-02-04: Version 3.24.31
Fix short-circuiting logical and/or in HOptimizedGraphBuilder (Chromium
issue 336148).
Elements field of newly allocated JSArray could be left uninitialized in
some cases (fast literal case) (Chromium issue 340124).
Re-enable escape analysis.
Performance and stability improvements on all platforms.
2014-02-03: Version 3.24.30
Performance and stability improvements on all platforms.
2014-02-01: Version 3.24.29
Performance and stability improvements on all platforms.
2014-01-31: Version 3.24.28
Don't crash in Array.join() if the resulting string exceeds the max
string length (Chromium issue 336820).
Implements ES6 String.prototype.normalize method (issue 2943).
Performance and stability improvements on all platforms.
2014-01-30: Version 3.24.27
Performance and stability improvements on all platforms.
2014-01-29: Version 3.24.26
ES6: Map and Set need to normalize minus zero (issue 3069).
Make `String.prototype.{starts,ends}With` throw when passing a regular
expression (issue 3070).
Performance and stability improvements on all platforms.
2014-01-28: Version 3.24.25
Performance and stability improvements on all platforms.
2014-01-27: Version 3.24.24
MIPS: Reduce the stack requirements of GetNoCodeAgeSequence (issue
3111).
Delete v8_shell target now that chrome uses d8 (Chromium issue 331585).
ARM: Reduce the stack requirements of GetNoCodeAgeSequence (issue 3111).
Performance and stability improvements on all platforms.
2014-01-24: Version 3.24.23
Performance and stability improvements on all platforms.
2014-01-23: Version 3.24.22
Fix compilation on x64 architectures (issue 3110).
Ensure we don't overwrite transitions in SetPropertyIgnoreAttributes
(Chromium issue 326155).
ES6: Implement Object.setPrototypeOf (issue 2675).
Fixed floor-of-div optimization (Chromium issue 334708).
Performance and stability improvements on all platforms.
2014-01-22: Version 3.24.21
Performance and stability improvements on all platforms.
2014-01-21: Version 3.24.20
ES6: Implement Object.setPrototypeOf (issue 2675).
Performance and stability improvements on all platforms.
2014-01-20: Version 3.24.19
Introduce a setting to control the toolset for which d8 is compiled
(issue 1775).
Performance and stability improvements on all platforms.
2014-01-17: Version 3.24.18
Performance and stability improvements on all platforms.
2014-01-16: Version 3.24.17
Make cells pointing to JSObjects weak in optimized code (issue 2073).
Performance and stability improvements on all platforms.
2014-01-15: Version 3.24.16
Annotate mapped memory regions for LeakSanitizer (Chromium issue
328552).
Turn Runtime_MigrateInstance into Runtime_TryMigrateInstance (Chromium
issue 315252).
Performance and stability improvements on all platforms.
2014-01-14: Version 3.24.15
Introduce an API mirroring the gc extension.
Performance and stability improvements on all platforms.
2014-01-10: Version 3.24.14
ES6: Add Object.getOwnPropertySymbols (issue 3049).
Performance and stability improvements on all platforms.
2014-01-09: Version 3.24.13
Add Isolate parameter to HandleScope::NumberOfHandles (Chromium issue
324225).
Removed v8::AssertNoGCScope.
Performance and stability improvements on all platforms.
2014-01-08: Version 3.24.12
Correctly handle instances without elements in polymorphic keyed
load/store (Chromium issue 331416).
Fix selection of popular pages in store buffer (Chromium issue 331444).
Prepare removal of ObjectTemplate::New without Isolate parameter
(Chromium issue 324225).
Performance and stability improvements on all platforms.
2014-01-07: Version 3.24.11
Remove generated makefiles on linux when running gyp_v8
(Chromium issue 331475)
Fix building d8 with readline support due to API changes
Performance and stability improvements on all platforms.
2014-01-03: Version 3.24.10
Reland r18383: More API cleanup (Chromium issue 324225).
MIPS: Fix loading of global object in LWrapReceiver (Chromium issue
318420).
Performance and stability improvements on all platforms.
2014-01-02: Version 3.24.9
Performance and stability improvements on all platforms.
2013-12-30: Version 3.24.8
ARM: fix loading of global object in LWrapReceiver (Chromium issue
318420).
Fix a race between concurrent recompilation and OSR (Chromium issue
330046).
Turn off concurrent sweeping (issue 3071).
Performance and stability improvements on all platforms.
2013-12-23: Version 3.24.7
Fix small spec violation in String.prototype.split (issue 3026).
Correctly resolve forcibly context allocated parameters in debug-
evaluate (Chromium issue 325676).
Introduce Function::GetBoundFunction.
Performance and stability improvements on all platforms.
2013-12-20: Version 3.24.6
Performance and stability improvements on all platforms.
2013-12-19: Version 3.24.5
Performance and stability improvements on all platforms.
2013-12-18: Version 3.24.4
Removed all stuff marked as V8_DEPRECATED.
Performance and stability improvements on all platforms.
2013-12-17: Version 3.24.3
Performance and stability improvements on all platforms.
2013-12-17: Version 3.24.2
Flush instruction cache for deserialized code objects.
Performance and stability improvements on all platforms.
2013-12-13: Version 3.24.1
Fix polymorphic inlined calls with migrating prototypes.
Fixed global object leak caused by overwriting the global receiver (the
global proxy) in the global object with the global object itself
(Chromium issue 324812).
Initialize Date parse cache with SMI instead of double to workaround
sharing mutable heap numbers in snapshot (Chromium issue 280531).
Switch armv7 setting to arm_version==7 in v8 gyp files (Chromium issue
234135).
Performance and stability improvements on all platforms.
2013-12-09: Version 3.24.0
Performance and stability improvements on all platforms.
2013-12-04: Version 3.23.18
Performance and stability improvements on all platforms.
2013-12-03: Version 3.23.17
Performance and stability improvements on all platforms.
2013-12-02: Version 3.23.16
Array builtins need to be prevented from changing frozen objects, and
changing structure on sealed objects (Chromium issue 299979).
Performance and stability improvements on all platforms.
2013-11-29: Version 3.23.15
Fix context register allocation in LTransitionElementsKind
(Chromium issue 324306).
Fix bug in inlining Function.apply (Chromium issue 323942).
Ensure that length is Smi in TypedArrayFromArrayLike constructor
(Chromium issue 324028).
Performance and stability improvements on all platforms.
2013-11-28: Version 3.23.14
Shorten autogenerated error message (issue 3019).
Performance and stability improvements on all platforms.
2013-11-27: Version 3.23.13
Increase precision for base conversion for large integers (issue 3025).
Flatten cons string for single character substrings (Chromium issue
323041).
Performance and stability improvements on all platforms.
2013-11-26: Version 3.23.12
Performance and stability improvements on all platforms.
2013-11-25: Version 3.23.11
Deprecate old versions of Isolate::SetData and GetData.
Performance and stability improvements on all platforms.
2013-11-22: Version 3.23.10
Remove preemption thread and API.
(issue 3004)
Performance and stability improvements on all platforms.
2013-11-21: Version 3.23.9
API: Change AdjustAmountOfExternalAllocatedMemory calls to use int64_t
instead of intptr_t.
Remove deprecated v8::SetResourceConstraints without Isolate parameter.
Remove deprecated v8-defaults.h and defaults.cc.
(Chromium issue 312241)
Make it possible to add more than one piece of embedder data to
isolates.
(Chromium issue 317398)
Performance and stability improvements on all platforms.
2013-11-20: Version 3.23.8
Fixed crashes exposed through fuzzing.
(Chromium issue 320948)
Deprecated v8::External::New without Isolate parameter.
Made number of available threads isolate-dependent and exposed it to
ResourceConstraints.
(issue 2991)
Performance and stability improvements on all platforms.
2013-11-19: Version 3.23.7
Bugfix: dependent code field in AllocationSite was keeping code objects
alive even after context death.
(Chromium issue 320532)
Fixed data view accessors to throw exceptions on offsets bigger than
size_t.
(issue 3013)
Performance and stability improvements on all platforms.
2013-11-18: Version 3.23.6
Limit size of dehoistable array indices.
(Chromium issues 319835, 319860)
Limit the size for typed arrays to MaxSmi.
(Chromium issue 319722)
Performance and stability improvements on all platforms.
2013-11-15: Version 3.23.5
Fixed missing type feedback check for Generic*String addition.
(Chromium issue 318671)
Fixed duplicate check in DependentCode::Insert.
(Chromium issue 318454)
Performance and stability improvements on all platforms.
2013-11-14: Version 3.23.4
Fixed overflow in TypedArray initialization function.
(Chromium issue 319120)
Performance and stability improvements on all platforms.
2013-11-13: Version 3.23.3
Fixed compilation with GCC 4.8.
(issue 2767, 2149)
Added explicit Isolate parameter to External::New.
(Chromium issue 266838)
Performance and stability improvements on all platforms.
2013-11-12: Version 3.23.2
Fixed --extra-code flag for snapshot creation.
(issue 2994)
Fixed error message wording when instanceof throws.
(Chromium issue 82797, issue 1593)
Performance and stability improvements on all platforms.
2013-11-08: Version 3.23.1
Made HCapturedObjects non-deletable for DCE. (issue 2987)
Use a fixed random seed per default. (issue 1880, 2885)
Fixed y-umlaut to uppercase. (issue 2984)
Performance and stability improvements on all platforms.
2013-11-06: Version 3.23.0
Fixed loading message from an Error object. (Chromium issue 306220)
Made Object.freeze/seal/preventExtensions observable. (issue 2975, 2941)
Made snapshots reproducible. (issue 2885)
Added missing negative dictionary lookup to NonexistentHandlerFrontend.
(issue 2980)
Performance and stability improvements on all platforms.
2013-10-31: Version 3.22.24
Fixed uint32-to-smi conversion in Lithium.

deps/v8/DEPS (4 changes)

@ -5,10 +5,10 @@
deps = {
# Remember to keep the revision in sync with the Makefile.
"v8/build/gyp":
"http://gyp.googlecode.com/svn/trunk@1685",
"http://gyp.googlecode.com/svn/trunk@1831",
"v8/third_party/icu":
"https://src.chromium.org/chrome/trunk/deps/third_party/icu46@214189",
"https://src.chromium.org/chrome/trunk/deps/third_party/icu46@239289",
}
deps_os = {

deps/v8/LICENSE (2 changes)

@ -26,7 +26,7 @@ are:
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
Copyright 2006-2012, the V8 project authors. All rights reserved.
Copyright 2014, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

deps/v8/Makefile (67 changes)

@ -94,7 +94,7 @@ ifeq ($(vtunejit), on)
endif
# optdebug=on
ifeq ($(optdebug), on)
GYPFLAGS += -Dv8_optimized_debug=1
GYPFLAGS += -Dv8_optimized_debug=2
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
@ -104,6 +104,10 @@ endif
ifeq ($(unalignedaccess), on)
GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
endif
# randomseed=12345, disable random seed via randomseed=0
ifdef randomseed
GYPFLAGS += -Dv8_random_seed=$(randomseed)
endif
# soname_version=1.2.3
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
@ -134,12 +138,17 @@ ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
# arm specific flags.
# armv7=false/true
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
GYPFLAGS += -Darm_version=$(arm_version)
else
# Deprecated (use arm_version instead): armv7=false/true
ifeq ($(armv7), false)
GYPFLAGS += -Darmv7=0
GYPFLAGS += -Darm_version=6
else
ifeq ($(armv7), true)
GYPFLAGS += -Darmv7=1
GYPFLAGS += -Darm_version=7
endif
endif
endif
# vfp2=off. Deprecated, use armfpu=
@ -214,10 +223,11 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm mipsel
ARCHES = ia32 x64 arm a64 mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
@ -243,6 +253,7 @@ NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
qc quickcheck \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
@ -251,7 +262,7 @@ ENVFILE = $(OUTDIR)/environment
must-set-NACL_SDK_ROOT
# Target definitions. "all" is the default.
all: $(MODES)
all: $(DEFAULT_MODES)
# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
# having been created before.
@ -267,14 +278,15 @@ mips mips.release mips.debug:
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
$(ARCHES): $(addprefix $$@.,$(MODES))
$(ARCHES): $(addprefix $$@.,$(DEFAULT_MODES))
# Defines how to build a particular target (e.g. ia32.release).
$(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
$(BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX="$(CXX)" LINK="$(LINK)" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
python -c "print \
raw_input().replace('opt', '').capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
native: $(OUTDIR)/Makefile.native
@ -346,32 +358,47 @@ native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
FASTTESTMODES = ia32.release,x64.release,ia32.optdebug,x64.optdebug,arm.optdebug
FASTCOMPILEMODES = $(FASTTESTMODES),a64.optdebug
COMMA = ,
EMPTY =
SPACE = $(EMPTY) $(EMPTY)
quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
qc: quickcheck
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)
rm -f $(OUTDIR)/Makefile.$(basename $@)*
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
find $(OUTDIR) -regex '.*\(host\|target\).$(basename $@)\.mk' -delete
rm -rf $(OUTDIR)/$(basename $@).optdebug
find $(OUTDIR) -regex '.*\(host\|target\)\.$(basename $@).*\.mk' -delete
native.clean:
rm -f $(OUTDIR)/Makefile.native
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-Dv8_target_arch=$(subst .,,$(suffix $@)) \
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
-Dv8_target_arch=$(subst .,,$(suffix $(basename $@))) \
-Dv8_optimized_debug=$(if $(findstring optdebug,$@),2,0) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
@ -417,7 +444,7 @@ grokdump: ia32.release
# Remember to keep these in sync with the DEPS file.
dependencies:
svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
--revision 1685
--revision 1831
svn checkout --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
third_party/icu --revision 214189
third_party/icu --revision 239289

deps/v8/Makefile.android (58 changes)

@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@ -48,25 +48,41 @@ endif
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
DEFINES += arm_neon=0 armv7=1
TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
DEFINES += arm_neon=0 arm_version=7
TOOLCHAIN_ARCH = arm-linux-androideabi
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6
else
ifeq ($(ARCH), android_a64)
DEFINES = target_arch=a64 v8_target_arch=a64 android_target_arch=arm64
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
DEFINES += mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android-4.6
DEFINES = target_arch=mipsel v8_target_arch=mipsel
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6
else
ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
TOOLCHAIN_ARCH = x86-4.6
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.6
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
endif
endif
TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
TOOLCHAIN_PATH = \
${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
@ -77,26 +93,26 @@ endif
DEFINES += host_os=${HOST_OS}
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# Android GYP file generation targets.
ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_ARCHES))
ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
@GYP_GENERATORS=make-android \
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.${ARCH} ${GYPFLAGS}
-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}

deps/v8/Makefile.nacl (8 changes)

@ -77,11 +77,11 @@ GYPENV += host_os=${HOST_OS}
# ICU doesn't support NaCl.
GYPENV += v8_enable_i18n_support=0
NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_ARCHES))
NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS))
.SECONDEXPANSION:
# For some reason the $$(basename $$@) expansion didn't work here...
$(NACL_BUILDS): $(NACL_MAKEFILES)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX=${NACL_CXX} \
LINK=${NACL_LINK} \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
@ -90,12 +90,12 @@ $(NACL_BUILDS): $(NACL_MAKEFILES)
# NACL GYP file generation targets.
$(NACL_MAKEFILES):
@GYP_GENERATORS=make \
GYP_GENERATORS=make \
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \
-Dwno_array_bounds=-Wno-array-bounds

deps/v8/OWNERS (4 changes)

@ -1,10 +1,14 @@
bmeurer@chromium.org
danno@chromium.org
dcarney@chromium.org
dslomov@chromium.org
hpayer@chromium.org
ishell@chromium.org
jarin@chromium.org
jkummerow@chromium.org
jochen@chromium.org
machenbach@chromium.org
marja@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
rossberg@chromium.org

deps/v8/PRESUBMIT.py (13 changes)

@ -69,15 +69,28 @@ def _SkipTreeCheck(input_api, output_api):
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api):
"""Checks usage of LOG= flag in the commit message."""
results = []
if input_api.change.BUG and not 'LOG' in input_api.change.tags:
results.append(output_api.PresubmitError(
'An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.'))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
if not _SkipTreeCheck(input_api, output_api):

deps/v8/build/all.gyp (8 changes)

@ -12,7 +12,13 @@
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',
],
'conditions': [
['component!="shared_library"', {
'dependencies': [
'../tools/lexer-shell.gyp:lexer-shell',
],
}],
]
}
]
}

deps/v8/build/android.gypi (24 changes)

@ -146,7 +146,7 @@
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and armv7==1', {
['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
@ -164,12 +164,12 @@
'-I<(android_stlport_include)',
],
'conditions': [
['target_arch=="arm" and armv7==1', {
['target_arch=="arm" and arm_version==7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and armv7==0', {
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
@ -184,6 +184,11 @@
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="a64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64',
],
}],
],
}],
['target_arch=="ia32"', {
@ -208,10 +213,19 @@
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="a64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Bdynamic',
'-Wl,-dynamic-linker,/system/bin/linker',
'-Wl,--gc-sections',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',

deps/v8/build/features.gypi (8 changes)

@ -58,6 +58,9 @@
# Enable compiler warnings when using V8_DEPRECATED apis.
'v8_deprecation_warnings%': 0,
# Use the v8 provided v8::Platform implementation.
'v8_use_default_platform%': 1,
},
'target_defaults': {
'conditions': [
@ -85,6 +88,9 @@
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
['v8_use_default_platform==1', {
'defines': ['V8_USE_DEFAULT_PLATFORM',],
}],
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
@ -109,7 +115,7 @@
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {

deps/v8/build/gyp_v8 (23 changes)

@ -36,13 +36,8 @@ import platform
import shlex
import sys
script_dir = os.path.dirname(__file__)
v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
if __name__ == '__main__':
os.chdir(v8_root)
script_dir = os.path.dirname(__file__)
v8_root = '.'
script_dir = os.path.dirname(os.path.realpath(__file__))
v8_root = os.path.abspath(os.path.join(script_dir, os.pardir))
sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
import gyp
@ -142,10 +137,7 @@ if __name__ == '__main__':
# path separators even on Windows due to the use of shlex.split().
args.extend(shlex.split(gyp_file))
else:
# Note that this must not start with "./" or things break.
# So we rely on having done os.chdir(v8_root) above and use the
# relative path.
args.append(os.path.join('build', 'all.gyp'))
args.append(os.path.join(script_dir, 'all.gyp'))
args.extend(['-I' + i for i in additional_include_files(args)])
@ -153,7 +145,7 @@ if __name__ == '__main__':
args.append('--no-circular-check')
# Set the GYP DEPTH variable to the root of the V8 project.
args.append('--depth=' + v8_root)
args.append('--depth=' + os.path.relpath(v8_root))
# If V8_GYP_SYNTAX_CHECK is set to 1, it will invoke gyp with --check
# to enforce syntax checking.
@ -167,5 +159,12 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
if platform.system() == 'Linux':
# Workaround for crbug.com/331475.
for f in glob.glob(os.path.join(v8_root, 'out', 'Makefile.*')):
os.unlink(f)
# --generator-output defines where the Makefile goes.
gyp_args.append('--generator-output=out')
# -Goutput_dir defines where the build output goes, relative to the
# Makefile. Set it to . so that the build output doesn't end up in out/out.
gyp_args.append('-Goutput_dir=.')
run_gyp(gyp_args)

deps/v8/build/gyp_v8.py (41 changes)

@ -0,0 +1,41 @@
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file is (possibly, depending on python version) imported by
# gyp_v8 when GYP_PARALLEL=1 and it creates sub-processes through the
# multiprocessing library.
# Importing in Python 2.6 (fixed in 2.7) on Windows doesn't search for imports
# that don't end in .py (and aren't directories with an __init__.py). This
# wrapper makes "import gyp_v8" work with those old versions and makes it
# possible to execute gyp_v8.py directly on Windows where the extension is
# useful.
import os
path = os.path.abspath(os.path.split(__file__)[0])
execfile(os.path.join(path, 'gyp_v8'))

deps/v8/build/standalone.gypi (49 changes)

@ -37,6 +37,7 @@
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@ -44,14 +45,18 @@
'variables': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
OS=="netbsd" or OS=="mac"', {
OS=="netbsd" or OS=="mac" or OS=="qnx"', {
# This handles the Unix platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
s/x86_64/x64/;\
s/amd64/x64/;\
s/aarch64/a64/;\
s/arm.*/arm/;\
s/mips.*/mipsel/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
@ -96,9 +101,10 @@
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="a64" and host_arch!="a64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android")', {
(OS=="android" or OS=="qnx")', {
'want_separate_host_toolset': 1,
}, {
'want_separate_host_toolset': 0,
@ -116,7 +122,7 @@
}],
],
# Default ARM variable settings.
'armv7%': 'default',
'arm_version%': 'default',
'arm_neon%': 0,
'arm_fpu%': 'vfpv3',
'arm_float_abi%': 'default',
@ -185,6 +191,32 @@
}],
# 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"
# or OS=="netbsd"'
['OS=="qnx"', {
'target_defaults': {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-fno-exceptions' ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ],
'conditions': [
[ 'visibility=="hidden"', {
'cflags': [ '-fvisibility=hidden' ],
}],
[ 'component=="shared_library"', {
'cflags': [ '-fPIC' ],
}],
],
'target_conditions': [
[ '_toolset=="host" and host_os=="linux"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'libraries': [ '-lrt' ],
}],
[ '_toolset=="target"', {
'cflags': [ '-Wno-psabi' ],
'libraries': [ '-lbacktrace', '-lsocket', '-lm' ],
}],
],
},
}], # OS=="qnx"
['OS=="win"', {
'target_defaults': {
'defines': [
@ -272,8 +304,6 @@
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES', # -Werror
'GCC_VERSION': 'com.apple.compilers.llvmgcc42',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
'GCC_WARN_NON_VIRTUAL_DESTRUCTOR': 'YES', # -Wnon-virtual-dtor
# MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min
@ -291,6 +321,13 @@
'-Wno-unused-parameter',
],
},
'conditions': [
['werror==""', {
'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'NO'},
}, {
'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES'},
}],
],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},

deps/v8/build/toolchain.gypi (45 changes)

@ -70,6 +70,11 @@
# it's handled in build/standalone.gypi.
'want_separate_host_toolset%': 1,
# Toolset the d8 binary should be compiled for. Possible values are 'host'
# and 'target'. If you want to run v8 tests, it needs to be set to 'target'.
# The setting is ignored if want_separate_host_toolset is 0.
'v8_toolset_for_d8%': 'target',
'host_os%': '<(OS)',
'werror%': '-Werror',
# For a shared library build, results in "libv8-<(soname_version).so".
@ -92,10 +97,10 @@
'conditions': [
['armcompiler=="yes"', {
'conditions': [
[ 'armv7==1', {
[ 'arm_version==7', {
'cflags': ['-march=armv7-a',],
}],
[ 'armv7==1 or armv7=="default"', {
[ 'arm_version==7 or arm_version=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
@ -127,7 +132,7 @@
}, {
# armcompiler=="no"
'conditions': [
[ 'armv7==1 or armv7=="default"', {
[ 'arm_version==7 or arm_version=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
@ -180,10 +185,10 @@
'conditions': [
['armcompiler=="yes"', {
'conditions': [
[ 'armv7==1', {
[ 'arm_version==7', {
'cflags': ['-march=armv7-a',],
}],
[ 'armv7==1 or armv7=="default"', {
[ 'arm_version==7 or arm_version=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
@ -215,7 +220,7 @@
}, {
# armcompiler=="no"
'conditions': [
[ 'armv7==1 or armv7=="default"', {
[ 'arm_version==7 or arm_version=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
@ -263,6 +268,11 @@
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
['v8_target_arch=="a64"', {
'defines': [
'V8_TARGET_ARCH_A64',
],
}],
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
@ -357,7 +367,7 @@
},
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
or OS=="netbsd" or OS=="qnx"', {
'conditions': [
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
@ -368,7 +378,7 @@
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android") and \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mipsel")', {
# Check whether the host compiler and target compiler support the
@ -376,7 +386,7 @@
'target_conditions': [
['_toolset=="host"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
'm32flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
@ -386,11 +396,11 @@
}],
['_toolset=="target"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
'm32flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
'clang%': 0,
},
'conditions': [
['(OS!="android" or clang==1) and \
['((OS!="android" and OS!="qnx") or clang==1) and \
nacl_target_arch!="nacl_x64"', {
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
@ -402,20 +412,21 @@
}],
],
}],
['(OS=="linux") and (v8_target_arch=="x64")', {
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="a64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [
['_toolset=="host"', {
'variables': {
'm64flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
'm64flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
}],
['_toolset=="target"', {
'variables': {
'm64flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
@ -504,10 +515,12 @@
},
},
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
OS=="qnx"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
'<(wno_array_bounds)' ],
'<(wno_array_bounds)',
],
'conditions': [
['v8_optimized_debug==0', {
'cflags!': [

deps/v8/codereview.settings (7 changes)

@ -0,0 +1,7 @@
CODE_REVIEW_SERVER: https://codereview.chromium.org
CC_LIST: v8-dev@googlegroups.com
VIEW_VC: https://code.google.com/p/v8/source/detail?r=
STATUS: http://v8-status.appspot.com/status
TRY_ON_UPLOAD: False
TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
TRYSERVER_ROOT: v8

deps/v8/include/v8-platform.h (86 changes)

@ -0,0 +1,86 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
#include "v8.h"
namespace v8 {
/**
* A Task represents a unit of work.
*/
class Task {
public:
virtual ~Task() {}
virtual void Run() = 0;
};
/**
* V8 Platform abstraction layer.
*
* The embedder has to provide an implementation of this interface before
* initializing the rest of V8.
*/
class Platform {
public:
/**
* This enum is used to indicate whether a task is potentially long running,
* or causes a long wait. The embedder might want to use this hint to decide
* whether to execute the task on a dedicated thread.
*/
enum ExpectedRuntime {
kShortRunningTask,
kLongRunningTask
};
/**
* Schedules a task to be invoked on a background thread. |expected_runtime|
* indicates that the task will run a long time. The Platform implementation
* takes ownership of |task|. There is no guarantee about order of execution
* of tasks wrt order of scheduling, nor is there a guarantee about the
* thread the task will be run on.
*/
virtual void CallOnBackgroundThread(Task* task,
ExpectedRuntime expected_runtime) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
* |isolate|. Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
protected:
virtual ~Platform() {}
};
} // namespace v8
#endif // V8_V8_PLATFORM_H_
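
The Task and Platform interfaces above are the whole contract: an embedder that does not build with V8_USE_DEFAULT_PLATFORM (the new features.gypi flag in this commit) must supply both scheduling hooks itself. Below is a minimal sketch of such an implementation; the TrivialPlatform name, the detached std::thread, and the PumpForegroundTasks() helper are illustrative assumptions, not anything this header prescribes.

// Hypothetical embedder-side v8::Platform implementation (C++11 sketch).
// Only v8::Task and v8::Platform come from v8-platform.h.
#include <queue>
#include <mutex>
#include <thread>
#include "v8-platform.h"

class TrivialPlatform : public v8::Platform {
 public:
  // Background tasks have no ordering or thread guarantees, so a detached
  // thread per task satisfies the contract. Ownership of |task| is taken.
  virtual void CallOnBackgroundThread(v8::Task* task,
                                      ExpectedRuntime expected_runtime) {
    std::thread(&TrivialPlatform::RunAndDelete, task).detach();
  }

  // Foreground tasks for the same isolate should run in scheduling order;
  // a single FIFO drained by the embedder's main loop is enough for a
  // one-isolate embedder (a real implementation would key queues by isolate).
  virtual void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) {
    std::lock_guard<std::mutex> lock(mutex_);
    foreground_.push(task);
  }

  // Main-loop hook; not part of the v8::Platform interface.
  void PumpForegroundTasks() {
    for (;;) {
      v8::Task* task = NULL;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (foreground_.empty()) return;
        task = foreground_.front();
        foreground_.pop();
      }
      RunAndDelete(task);
    }
  }

 private:
  static void RunAndDelete(v8::Task* task) {
    task->Run();
    delete task;
  }

  std::mutex mutex_;
  std::queue<v8::Task*> foreground_;
};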

deps/v8/include/v8-profiler.h (67 changes)

@ -96,9 +96,6 @@ class V8_EXPORT CpuProfileNode {
*/
class V8_EXPORT CpuProfile {
public:
/** Returns CPU profile UID (assigned by the profiler.) */
unsigned GetUid() const;
/** Returns CPU profile title. */
Handle<String> GetTitle() const;
@ -132,10 +129,6 @@ class V8_EXPORT CpuProfile {
/**
* Deletes the profile and removes it from CpuProfiler's list.
* All pointers to nodes previously returned become invalid.
* Profiles with the same uid but obtained using different
* security token are not deleted, but become inaccessible
* using FindProfile method. It is embedder's responsibility
* to call Delete on these profiles.
*/
void Delete();
};
@ -154,15 +147,6 @@ class V8_EXPORT CpuProfiler {
*/
void SetSamplingInterval(int us);
/**
* Returns the number of profiles collected (doesn't include
* profiles that are being collected at the moment of call.)
*/
int GetProfileCount();
/** Returns a profile by index. */
const CpuProfile* GetCpuProfile(int index);
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
@ -182,13 +166,6 @@ class V8_EXPORT CpuProfiler {
*/
const CpuProfile* StopCpuProfiling(Handle<String> title);
/**
* Deletes all existing profiles, also cancelling all profiling
* activity. All previously returned pointers to profiles and their
* contents become invalid after this call.
*/
void DeleteAllCpuProfiles();
/**
* Tells the profiler whether the embedder is idle.
*/
@ -280,19 +257,17 @@ class V8_EXPORT HeapGraphNode {
SnapshotObjectId GetId() const;
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
V8_DEPRECATED("Use GetShallowSize instead",
int GetSelfSize() const);
/** Returns node's own size, in bytes. */
size_t GetShallowSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
/** Retrieves a child by index. */
const HeapGraphEdge* GetChild(int index) const;
/**
* Finds and returns a value from the heap corresponding to this node,
* if the value is still reachable.
*/
Handle<Value> GetHeapValue() const;
};
@ -392,6 +367,19 @@ class V8_EXPORT HeapProfiler {
*/
SnapshotObjectId GetObjectId(Handle<Value> value);
/**
* Returns heap object with given SnapshotObjectId if the object is alive,
* otherwise empty handle is returned.
*/
Handle<Value> FindObjectById(SnapshotObjectId id);
/**
* Clears internal map from SnapshotObjectId to heap object. The new objects
* will not be added into it unless a heap snapshot is taken or heap object
* tracking is kicked off.
*/
void ClearObjectIds();
/**
* A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
* it in case heap profiler cannot find id for the object passed as
@ -425,8 +413,12 @@ class V8_EXPORT HeapProfiler {
* Starts tracking of heap objects population statistics. After calling
* this method, all heap objects relocations done by the garbage collector
* are being registered.
*
* |track_allocations| parameter controls whether stack trace of each
* allocation in the heap will be recorded and reported as part of
* HeapSnapshot.
*/
void StartTrackingHeapObjects();
void StartTrackingHeapObjects(bool track_allocations = false);
/**
* Adds a new time interval entry to the aggregated statistics array. The
@ -475,19 +467,6 @@ class V8_EXPORT HeapProfiler {
*/
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
/**
* Starts recording JS allocations immediately as they arrive and tracking of
* heap objects population statistics.
*/
void StartRecordingHeapAllocations();
/**
* Stops recording JS allocations and tracking of heap objects population
* statistics, cleans all collected heap objects population statistics data.
*/
void StopRecordingHeapAllocations();
private:
HeapProfiler();
~HeapProfiler();
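
Taken together, the profiler changes shift lifetime management onto the embedder: GetProfileCount()/GetCpuProfile() and DeleteAllCpuProfiles() are gone, StartRecordingHeapAllocations() is folded into a track_allocations flag, and each profile must be freed individually via Delete(). A sketch of usage against the resulting API, assuming the 3.24-era Isolate::GetCpuProfiler()/GetHeapProfiler() accessors:

// Embedder-side profiler usage after this change (sketch).
#include "v8.h"
#include "v8-profiler.h"

void ProfileScriptRun(v8::Isolate* isolate) {
  v8::CpuProfiler* cpu = isolate->GetCpuProfiler();
  v8::Handle<v8::String> title =
      v8::String::NewFromUtf8(isolate, "script-run");
  cpu->StartCpuProfiling(title);
  // ... execute the code under measurement ...
  const v8::CpuProfile* profile = cpu->StopCpuProfiling(title);
  if (profile != NULL) {
    // Walk profile->GetTopDownRoot() here. The profiler no longer keeps an
    // enumerable profile list, so Delete() is the only way to release it.
    const_cast<v8::CpuProfile*>(profile)->Delete();
  }

  // Replaces the removed StartRecordingHeapAllocations():
  isolate->GetHeapProfiler()->StartTrackingHeapObjects(
      true /* track_allocations */);
}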

deps/v8/include/v8.h (1092 changes)

File diff suppressed because it is too large

deps/v8/include/v8config.h (20 changes)

@ -88,6 +88,7 @@
// V8_OS_NETBSD - NetBSD
// V8_OS_OPENBSD - OpenBSD
// V8_OS_POSIX - POSIX compatible (mostly everything except Windows)
// V8_OS_QNX - QNX Neutrino
// V8_OS_SOLARIS - Sun Solaris and OpenSolaris
// V8_OS_WIN - Microsoft Windows
@ -127,6 +128,9 @@
# define V8_OS_BSD 1
# define V8_OS_OPENBSD 1
# define V8_OS_POSIX 1
#elif defined(__QNXNTO__)
# define V8_OS_POSIX 1
# define V8_OS_QNX 1
#elif defined(_WIN32)
# define V8_OS_WIN 1
#endif
@ -135,6 +139,7 @@
// -----------------------------------------------------------------------------
// C library detection
//
// V8_LIBC_MSVCRT - MSVC libc
// V8_LIBC_BIONIC - Bionic libc
// V8_LIBC_BSD - BSD libc derivate
// V8_LIBC_GLIBC - GNU C library
@ -146,7 +151,9 @@
// ...
// #endif
#if defined(__BIONIC__)
#if defined (_MSC_VER)
# define V8_LIBC_MSVCRT 1
#elif defined(__BIONIC__)
# define V8_LIBC_BIONIC 1
# define V8_LIBC_BSD 1
#elif defined(__UCLIBC__)
@ -187,6 +194,7 @@
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
// V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported
@ -216,6 +224,7 @@
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
@ -247,6 +256,7 @@
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
@ -334,6 +344,14 @@ declarator __attribute__((deprecated))
#endif
// A macro to mark variables or types as unused, avoiding compiler warnings.
#if V8_HAS_ATTRIBUTE_UNUSED
# define V8_UNUSED __attribute__((unused))
#else
# define V8_UNUSED
#endif
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
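
A short illustration of the new V8_UNUSED annotation; the function and variable below are made up, but the pattern (suppressing -Wunused-variable for a value only read in some configurations) is the standard use of __attribute__((unused)):

// Hypothetical use of V8_UNUSED from v8config.h.
#include <stdio.h>
#include "v8config.h"

static void TraceEntry(int id) {
  V8_UNUSED int saved_id = id;  // only read when DEBUG is defined
#ifdef DEBUG
  printf("enter %d\n", saved_id);
#endif
}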

deps/v8/samples/lineprocessor.cc (35 changes)

@ -99,7 +99,7 @@ enum MainCycleType {
const char* ToCString(const v8::String::Utf8Value& value);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
v8::Handle<v8::String> ReadFile(const char* name);
v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
v8::Handle<v8::String> ReadLine();
void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
@ -174,14 +174,14 @@ int RunMain(int argc, char* argv[]) {
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
script_source = v8::String::New(argv[i + 1]);
script_name = v8::String::New("unnamed");
script_source = v8::String::NewFromUtf8(isolate, argv[i + 1]);
script_name = v8::String::NewFromUtf8(isolate, "unnamed");
i++;
script_param_counter++;
} else {
// Use argument as a name of file to load.
script_source = ReadFile(str);
script_name = v8::String::New(str);
script_source = ReadFile(isolate, str);
script_name = v8::String::NewFromUtf8(isolate, str);
if (script_source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
@ -200,15 +200,16 @@ int RunMain(int argc, char* argv[]) {
}
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
global->Set(v8::String::NewFromUtf8(isolate, "print"),
v8::FunctionTemplate::New(isolate, Print));
if (cycle_type == CycleInJs) {
// Bind the global 'read_line' function to the C++ Print callback.
global->Set(v8::String::New("read_line"),
v8::FunctionTemplate::New(ReadLine));
global->Set(v8::String::NewFromUtf8(isolate, "read_line"),
v8::FunctionTemplate::New(isolate, ReadLine));
}
// Create a new execution environment containing the built-in
@ -277,7 +278,8 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Locker lock(isolate);
#endif // ENABLE_DEBUGGER_SUPPORT
v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
v8::Handle<v8::String> fun_name =
v8::String::NewFromUtf8(isolate, "ProcessLine");
v8::Handle<v8::Value> process_val = context->Global()->Get(fun_name);
// If there is no Process function, or if it is not a function,
@ -338,7 +340,7 @@ const char* ToCString(const v8::String::Utf8Value& value) {
// Reads a file into a v8 string.
v8::Handle<v8::String> ReadFile(const char* name) {
v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
@ -353,7 +355,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
i += read;
}
fclose(file);
v8::Handle<v8::String> result = v8::String::New(chars, size);
v8::Handle<v8::String> result =
v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
delete[] chars;
return result;
}
@@ -417,7 +420,8 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
 // function is called. Reads a string from standard input and returns.
 void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
   if (args.Length() > 0) {
-    args.GetIsolate()->ThrowException(v8::String::New("Unexpected arguments"));
+    args.GetIsolate()->ThrowException(
+        v8::String::NewFromUtf8(args.GetIsolate(), "Unexpected arguments"));
     return;
   }
   args.GetReturnValue().Set(ReadLine());
@@ -435,8 +439,9 @@ v8::Handle<v8::String> ReadLine() {
 #endif  // ENABLE_DEBUGGER_SUPPORT
     res = fgets(buffer, kBufferSize, stdin);
   }
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
   if (res == NULL) {
-    v8::Handle<v8::Primitive> t = v8::Undefined(v8::Isolate::GetCurrent());
+    v8::Handle<v8::Primitive> t = v8::Undefined(isolate);
     return v8::Handle<v8::String>::Cast(t);
   }
   // Remove newline char
@@ -446,5 +451,5 @@ v8::Handle<v8::String> ReadLine() {
       break;
     }
   }
-  return v8::String::New(buffer);
+  return v8::String::NewFromUtf8(isolate, buffer);
 }
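The change running through this sample, and through process.cc and shell.cc below, is mechanical: every string allocation now names its isolate. A minimal before/after sketch, assuming a live `isolate`:

    // Before (v8 3.22 and earlier): the isolate was implicit.
    v8::Handle<v8::String> s = v8::String::New("hello");
    // After (v8 3.24): the allocating isolate is passed explicitly.
    v8::Handle<v8::String> s = v8::String::NewFromUtf8(isolate, "hello");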

93
deps/v8/samples/process.cc

@@ -160,8 +160,9 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
   // Create a template for the global object where we set the
   // built-in global functions.
-  Handle<ObjectTemplate> global = ObjectTemplate::New();
-  global->Set(String::New("log"), FunctionTemplate::New(LogCallback));
+  Handle<ObjectTemplate> global = ObjectTemplate::New(GetIsolate());
+  global->Set(String::NewFromUtf8(GetIsolate(), "log"),
+              FunctionTemplate::New(GetIsolate(), LogCallback));

   // Each processor gets its own context so different processors don't
   // affect each other. Context::New returns a persistent handle which
@@ -185,7 +186,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
   // The script compiled and ran correctly.  Now we fetch out the
   // Process function from the global object.
-  Handle<String> process_name = String::New("Process");
+  Handle<String> process_name = String::NewFromUtf8(GetIsolate(), "Process");
   Handle<Value> process_val = context->Global()->Get(process_name);

   // If there is no Process function, or if it is not a function,
@@ -244,10 +245,12 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
       v8::Local<v8::Context>::New(GetIsolate(), context_);

   // Set the options object as a property on the global object.
-  context->Global()->Set(String::New("options"), opts_obj);
+  context->Global()->Set(String::NewFromUtf8(GetIsolate(), "options"),
+                         opts_obj);

   Handle<Object> output_obj = WrapMap(output);
-  context->Global()->Set(String::New("output"), output_obj);
+  context->Global()->Set(String::NewFromUtf8(GetIsolate(), "output"),
+                         output_obj);

   return true;
 }
@@ -291,8 +294,8 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
   // Dispose the persistent handles.  When no one else has any
   // references to the objects stored in the handles they will be
   // automatically reclaimed.
-  context_.Dispose();
-  process_.Dispose();
+  context_.Reset();
+  process_.Reset();
 }
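For readers tracking the API change: `Persistent::Dispose()` is now `Reset()`. A hedged sketch of the lifecycle, assuming a live `isolate`:

    v8::Persistent<v8::Context> context_;
    context_.Reset(isolate, v8::Context::New(isolate));  // acquire a reference
    // ... use the context ...
    context_.Reset();  // release it; this is what Dispose() used to do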
@@ -308,7 +311,7 @@ Persistent<ObjectTemplate> JsHttpRequestProcessor::map_template_;
 // JavaScript object.
 Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
   // Handle scope for temporary handles.
-  HandleScope handle_scope(GetIsolate());
+  EscapableHandleScope handle_scope(GetIsolate());

   // Fetch the template for creating JavaScript map wrappers.
   // It only has to be created once, which we do on demand.
@@ -320,11 +323,11 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
       Local<ObjectTemplate>::New(GetIsolate(), map_template_);

   // Create an empty map wrapper.
-  Handle<Object> result = templ->NewInstance();
+  Local<Object> result = templ->NewInstance();

   // Wrap the raw C++ pointer in an External so it can be referenced
   // from within JavaScript.
-  Handle<External> map_ptr = External::New(obj);
+  Handle<External> map_ptr = External::New(GetIsolate(), obj);

   // Store the map pointer in the JavaScript wrapper.
   result->SetInternalField(0, map_ptr);
@@ -333,7 +336,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
   // of these handles will go away when the handle scope is deleted
   // we need to call Close to let one, the result, escape into the
   // outer handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
@@ -370,8 +373,9 @@ void JsHttpRequestProcessor::MapGet(Local<String> name,
   // Otherwise fetch the value and wrap it in a JavaScript string
   const string& value = (*iter).second;
-  info.GetReturnValue().Set(
-      String::New(value.c_str(), static_cast<int>(value.length())));
+  info.GetReturnValue().Set(String::NewFromUtf8(
+      info.GetIsolate(), value.c_str(), String::kNormalString,
+      static_cast<int>(value.length())));
 }
@@ -395,14 +399,14 @@ void JsHttpRequestProcessor::MapSet(Local<String> name,

 Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
     Isolate* isolate) {
-  HandleScope handle_scope(isolate);
+  EscapableHandleScope handle_scope(isolate);

-  Handle<ObjectTemplate> result = ObjectTemplate::New();
+  Local<ObjectTemplate> result = ObjectTemplate::New(isolate);
   result->SetInternalFieldCount(1);
   result->SetNamedPropertyHandler(MapGet, MapSet);

   // Again, return the result through the current handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
@@ -416,7 +420,7 @@ Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
  */
 Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
   // Handle scope for temporary handles.
-  HandleScope handle_scope(GetIsolate());
+  EscapableHandleScope handle_scope(GetIsolate());

   // Fetch the template for creating JavaScript http request wrappers.
   // It only has to be created once, which we do on demand.
@@ -428,11 +432,11 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
       Local<ObjectTemplate>::New(GetIsolate(), request_template_);

   // Create an empty http request wrapper.
-  Handle<Object> result = templ->NewInstance();
+  Local<Object> result = templ->NewInstance();

   // Wrap the raw C++ pointer in an External so it can be referenced
   // from within JavaScript.
-  Handle<External> request_ptr = External::New(request);
+  Handle<External> request_ptr = External::New(GetIsolate(), request);

   // Store the request pointer in the JavaScript wrapper.
   result->SetInternalField(0, request_ptr);
@@ -441,7 +445,7 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
   // of these handles will go away when the handle scope is deleted
   // we need to call Close to let one, the result, escape into the
   // outer handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
@@ -465,8 +469,9 @@ void JsHttpRequestProcessor::GetPath(Local<String> name,
   const string& path = request->Path();

   // Wrap the result in a JavaScript string and return it.
-  info.GetReturnValue().Set(
-      String::New(path.c_str(), static_cast<int>(path.length())));
+  info.GetReturnValue().Set(String::NewFromUtf8(
+      info.GetIsolate(), path.c_str(), String::kNormalString,
+      static_cast<int>(path.length())));
 }
@@ -475,8 +480,9 @@ void JsHttpRequestProcessor::GetReferrer(
     const PropertyCallbackInfo<Value>& info) {
   HttpRequest* request = UnwrapRequest(info.Holder());
   const string& path = request->Referrer();
-  info.GetReturnValue().Set(
-      String::New(path.c_str(), static_cast<int>(path.length())));
+  info.GetReturnValue().Set(String::NewFromUtf8(
+      info.GetIsolate(), path.c_str(), String::kNormalString,
+      static_cast<int>(path.length())));
 }
@@ -484,8 +490,9 @@ void JsHttpRequestProcessor::GetHost(Local<String> name,
     const PropertyCallbackInfo<Value>& info) {
   HttpRequest* request = UnwrapRequest(info.Holder());
   const string& path = request->Host();
-  info.GetReturnValue().Set(
-      String::New(path.c_str(), static_cast<int>(path.length())));
+  info.GetReturnValue().Set(String::NewFromUtf8(
+      info.GetIsolate(), path.c_str(), String::kNormalString,
+      static_cast<int>(path.length())));
 }
@@ -494,26 +501,35 @@ void JsHttpRequestProcessor::GetUserAgent(
     const PropertyCallbackInfo<Value>& info) {
   HttpRequest* request = UnwrapRequest(info.Holder());
   const string& path = request->UserAgent();
-  info.GetReturnValue().Set(
-      String::New(path.c_str(), static_cast<int>(path.length())));
+  info.GetReturnValue().Set(String::NewFromUtf8(
+      info.GetIsolate(), path.c_str(), String::kNormalString,
+      static_cast<int>(path.length())));
 }

 Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
     Isolate* isolate) {
-  HandleScope handle_scope(isolate);
+  EscapableHandleScope handle_scope(isolate);

-  Handle<ObjectTemplate> result = ObjectTemplate::New();
+  Local<ObjectTemplate> result = ObjectTemplate::New(isolate);
   result->SetInternalFieldCount(1);

   // Add accessors for each of the fields of the request.
-  result->SetAccessor(String::NewSymbol("path"), GetPath);
-  result->SetAccessor(String::NewSymbol("referrer"), GetReferrer);
-  result->SetAccessor(String::NewSymbol("host"), GetHost);
-  result->SetAccessor(String::NewSymbol("userAgent"), GetUserAgent);
+  result->SetAccessor(
+      String::NewFromUtf8(isolate, "path", String::kInternalizedString),
+      GetPath);
+  result->SetAccessor(
+      String::NewFromUtf8(isolate, "referrer", String::kInternalizedString),
+      GetReferrer);
+  result->SetAccessor(
+      String::NewFromUtf8(isolate, "host", String::kInternalizedString),
+      GetHost);
+  result->SetAccessor(
+      String::NewFromUtf8(isolate, "userAgent", String::kInternalizedString),
+      GetUserAgent);

   // Again, return the result through the current handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
@@ -575,7 +591,7 @@ void ParseOptions(int argc,

 // Reads a file into a v8 string.
-Handle<String> ReadFile(const string& name) {
+Handle<String> ReadFile(Isolate* isolate, const string& name) {
   FILE* file = fopen(name.c_str(), "rb");
   if (file == NULL) return Handle<String>();
@@ -590,7 +606,8 @@ Handle<String> ReadFile(const string& name) {
     i += read;
   }
   fclose(file);
-  Handle<String> result = String::New(chars, size);
+  Handle<String> result =
+      String::NewFromUtf8(isolate, chars, String::kNormalString, size);
   delete[] chars;
   return result;
 }
@@ -636,7 +653,7 @@ int main(int argc, char* argv[]) {
   }
   Isolate* isolate = Isolate::GetCurrent();
   HandleScope scope(isolate);
-  Handle<String> source = ReadFile(file);
+  Handle<String> source = ReadFile(isolate, file);
   if (source.IsEmpty()) {
     fprintf(stderr, "Error reading '%s'.\n", file.c_str());
     return 1;

60
deps/v8/samples/shell.cc

@@ -58,7 +58,7 @@ void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
 void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
 void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
 void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
-v8::Handle<v8::String> ReadFile(const char* name);
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
 void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
@@ -98,17 +98,22 @@ const char* ToCString(const v8::String::Utf8Value& value) {
 // functions.
 v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
   // Create a template for the global object.
-  v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+  v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
   // Bind the global 'print' function to the C++ Print callback.
-  global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+  global->Set(v8::String::NewFromUtf8(isolate, "print"),
+              v8::FunctionTemplate::New(isolate, Print));
   // Bind the global 'read' function to the C++ Read callback.
-  global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
+  global->Set(v8::String::NewFromUtf8(isolate, "read"),
+              v8::FunctionTemplate::New(isolate, Read));
   // Bind the global 'load' function to the C++ Load callback.
-  global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
+  global->Set(v8::String::NewFromUtf8(isolate, "load"),
+              v8::FunctionTemplate::New(isolate, Load));
   // Bind the 'quit' function
-  global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
+  global->Set(v8::String::NewFromUtf8(isolate, "quit"),
+              v8::FunctionTemplate::New(isolate, Quit));
   // Bind the 'version' function
-  global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
+  global->Set(v8::String::NewFromUtf8(isolate, "version"),
+              v8::FunctionTemplate::New(isolate, Version));
   return v8::Context::New(isolate, NULL, global);
 }
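A hedged sketch of how this factory is driven by the rest of shell.cc (error handling and the main loop omitted):

    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    v8::HandleScope handle_scope(isolate);
    v8::Handle<v8::Context> context = CreateShellContext(isolate);
    v8::Context::Scope context_scope(context);
    // JavaScript run here sees print(), read(), load(), quit(), version().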
@@ -141,19 +146,19 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
 void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
   if (args.Length() != 1) {
     args.GetIsolate()->ThrowException(
-        v8::String::New("Bad parameters"));
+        v8::String::NewFromUtf8(args.GetIsolate(), "Bad parameters"));
     return;
   }
   v8::String::Utf8Value file(args[0]);
   if (*file == NULL) {
     args.GetIsolate()->ThrowException(
-        v8::String::New("Error loading file"));
+        v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
     return;
   }
-  v8::Handle<v8::String> source = ReadFile(*file);
+  v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
   if (source.IsEmpty()) {
     args.GetIsolate()->ThrowException(
-        v8::String::New("Error loading file"));
+        v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
     return;
   }
   args.GetReturnValue().Set(source);
@@ -169,22 +174,22 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
     v8::String::Utf8Value file(args[i]);
     if (*file == NULL) {
       args.GetIsolate()->ThrowException(
-          v8::String::New("Error loading file"));
+          v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
       return;
     }
-    v8::Handle<v8::String> source = ReadFile(*file);
+    v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
     if (source.IsEmpty()) {
       args.GetIsolate()->ThrowException(
-          v8::String::New("Error loading file"));
+          v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
       return;
     }
     if (!ExecuteString(args.GetIsolate(),
                        source,
-                       v8::String::New(*file),
+                       v8::String::NewFromUtf8(args.GetIsolate(), *file),
                        false,
                        false)) {
       args.GetIsolate()->ThrowException(
-          v8::String::New("Error executing file"));
+          v8::String::NewFromUtf8(args.GetIsolate(), "Error executing file"));
       return;
     }
   }
@@ -204,12 +209,13 @@ void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {

 void Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  args.GetReturnValue().Set(v8::String::New(v8::V8::GetVersion()));
+  args.GetReturnValue().Set(
+      v8::String::NewFromUtf8(args.GetIsolate(), v8::V8::GetVersion()));
 }

 // Reads a file into a v8 string.
-v8::Handle<v8::String> ReadFile(const char* name) {
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
   FILE* file = fopen(name, "rb");
   if (file == NULL) return v8::Handle<v8::String>();
@@ -224,7 +230,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
     i += read;
   }
   fclose(file);
-  v8::Handle<v8::String> result = v8::String::New(chars, size);
+  v8::Handle<v8::String> result =
+      v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
   delete[] chars;
   return result;
 }
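Worth noting: passing `size` explicitly means the resulting string does not depend on a NUL terminator and preserves any embedded NUL bytes read from the file. A hedged sketch of the two call shapes:

    v8::String::NewFromUtf8(isolate, "literal");  // length found via strlen
    v8::String::NewFromUtf8(isolate, chars,
                            v8::String::kNormalString, size);  // explicit length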
@@ -245,13 +252,15 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
               "Warning: unknown flag %s.\nTry --help for options\n", str);
     } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
       // Execute argument given to -e option directly.
-      v8::Handle<v8::String> file_name = v8::String::New("unnamed");
-      v8::Handle<v8::String> source = v8::String::New(argv[++i]);
+      v8::Handle<v8::String> file_name =
+          v8::String::NewFromUtf8(isolate, "unnamed");
+      v8::Handle<v8::String> source =
+          v8::String::NewFromUtf8(isolate, argv[++i]);
       if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
     } else {
       // Use all other arguments as names of files to load and run.
-      v8::Handle<v8::String> file_name = v8::String::New(str);
-      v8::Handle<v8::String> source = ReadFile(str);
+      v8::Handle<v8::String> file_name = v8::String::NewFromUtf8(isolate, str);
+      v8::Handle<v8::String> source = ReadFile(isolate, str);
       if (source.IsEmpty()) {
         fprintf(stderr, "Error reading '%s'\n", str);
         continue;
@@ -269,7 +278,8 @@ void RunShell(v8::Handle<v8::Context> context) {
   static const int kBufferSize = 256;
   // Enter the execution environment before evaluating any code.
   v8::Context::Scope context_scope(context);
-  v8::Local<v8::String> name(v8::String::New("(shell)"));
+  v8::Local<v8::String> name(
+      v8::String::NewFromUtf8(context->GetIsolate(), "(shell)"));
   while (true) {
     char buffer[kBufferSize];
     fprintf(stderr, "> ");
@@ -277,7 +287,7 @@ void RunShell(v8::Handle<v8::Context> context) {
     if (str == NULL) break;
     v8::HandleScope handle_scope(context->GetIsolate());
     ExecuteString(context->GetIsolate(),
-                  v8::String::New(str),
+                  v8::String::NewFromUtf8(context->GetIsolate(), str),
                   name,
                   true,
                   true);

1
deps/v8/src/a64/OWNERS

@@ -0,0 +1 @@
rmcilroy@chromium.org

1200
deps/v8/src/a64/assembler-a64-inl.h

File diff suppressed because it is too large

2606
deps/v8/src/a64/assembler-a64.cc

File diff suppressed because it is too large

2085
deps/v8/src/a64/assembler-a64.h

File diff suppressed because it is too large

1479
deps/v8/src/a64/builtins-a64.cc

File diff suppressed because it is too large

5809
deps/v8/src/a64/code-stubs-a64.cc

File diff suppressed because it is too large

469
deps/v8/src/a64/code-stubs-a64.h

@@ -0,0 +1,469 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CODE_STUBS_A64_H_
#define V8_A64_CODE_STUBS_A64_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class StringHelper : public AllStatic {
public:
// TODO(all): These don't seem to be used any more. Delete them.
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class RecordWriteStub: public PlatformCodeStub {
public:
// Stub to record the write of 'value' at 'address' in 'object'.
// Typically 'address' = 'object' + <some offset>.
// See MacroAssembler::RecordWriteField() for example.
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool SometimesSetsUpAFrame() { return false; }
static Mode GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
Instruction* instr1 =
reinterpret_cast<Instruction*>(stub->instruction_start());
Instruction* instr2 = instr1->following();
if (instr1->IsUncondBranchImm()) {
ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
ASSERT(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
// We patch the first two instructions of the stub back and forth between an
// adr and a branch when we start and stop incremental heap marking.
// The branch is
// b label
// The adr is
// adr xzr label
// so effectively a nop.
static void Patch(Code* stub, Mode mode) {
// We are going to patch the first two instructions of the stub.
PatchingAssembler patcher(
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
ASSERT(GetMode(stub) == mode);
}
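// Summary (a hedged paraphrase of Patch() and GetMode() above, not part of
// the diff): the three reachable states of the stub's first two instructions
// are
//   STORE_BUFFER_ONLY:       adr xzr, <inc>  ;  adr xzr, <inc_compact>
//   INCREMENTAL:             b        <inc>  ;  adr xzr, <inc_compact>
//   INCREMENTAL_COMPACTION:  adr xzr, <inc>  ;  b        <inc_compact>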
private:
// This is a helper class to manage the registers associated with the stub.
// The 'object' and 'address' registers must be preserved.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved) {
ASSERT(!AreAliased(scratch, object, address));
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = Register(pool_available.PopLowestIndex());
scratch2_ = Register(pool_available.PopLowestIndex());
// The SaveCallerSaveRegisters method needs to save caller-saved registers;
// however, we don't bother saving ip0 and ip1 because they are used as
// scratch registers by the MacroAssembler.
saved_regs_.Remove(ip0);
saved_regs_.Remove(ip1);
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
void Save(MacroAssembler* masm) {
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->Push(scratch1_, scratch2_);
}
void Restore(MacroAssembler* masm) {
masm->Pop(scratch2_, scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
masm->PushCPURegList(saved_regs_);
if (mode == kSaveFPRegs) {
masm->PushCPURegList(kCallerSavedFP);
}
}
void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
if (mode == kSaveFPRegs) {
masm->PopCPURegList(kCallerSavedFP);
}
masm->PopCPURegList(saved_regs_);
}
Register object() { return object_; }
Register address() { return address_; }
Register scratch0() { return scratch0_; }
Register scratch1() { return scratch1_; }
Register scratch2() { return scratch2_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
Register scratch2_;
CPURegList saved_regs_;
// TODO(all): We should consider moving this somewhere else.
static CPURegList GetValidRegistersForAllocation() {
// The list of valid registers for allocation is defined as all the
// registers without those with a special meaning.
//
// The default list excludes registers x26 to x31 because they are
// reserved for the following purpose:
// - x26 root register
// - x27 context pointer register
// - x28 jssp
// - x29 frame pointer
// - x30 link register(lr)
// - x31 xzr/stack pointer
CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
// We also remove MacroAssembler's scratch registers.
list.Remove(ip0);
list.Remove(ip1);
list.Remove(x8);
list.Remove(x9);
return list;
}
friend class RecordWriteStub;
};
// A list of stub variants which are pregenerated.
// The variants are stored in the same format as the minor key, so
// MinorKeyFor() can be used to populate and check this list.
static const int kAheadOfTime[];
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return MinorKeyFor(object_, value_, address_, remembered_set_action_,
save_fp_regs_mode_);
}
static int MinorKeyFor(Register object,
Register value,
Register address,
RememberedSetAction action,
SaveFPRegsMode fp_mode) {
ASSERT(object.Is64Bits());
ASSERT(value.Is64Bits());
ASSERT(address.Is64Bits());
return ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(action) |
SaveFPRegsModeBits::encode(fp_mode);
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 5> {};
class ValueBits: public BitField<int, 5, 5> {};
class AddressBits: public BitField<int, 10, 5> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
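// A worked example of the minor key layout above, with hypothetical
// registers: object = x2, value = x0, address = x3 packs to
//   ObjectBits(2) | ValueBits(0 << 5) | AddressBits(3 << 10) = 3074
// before the action and FP-save-mode bits are ORed in at bits 15 and 16.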
// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
};
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
}
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
LookupMode mode_;
};
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
// Compares two flat ASCII strings and returns result in x0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
// Compare two flat ASCII strings for equality and returns result
// in x0.
static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
virtual Major MajorKey() { return StringCompare; }
virtual int MinorKey() { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
Register left,
Register right,
Register length,
Register scratch1,
Register scratch2,
Label* chars_not_equal);
};
struct PlatformCallInterfaceDescriptor {
explicit PlatformCallInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} } // namespace v8::internal
#endif // V8_A64_CODE_STUBS_A64_H_

616
deps/v8/src/a64/codegen-a64.cc

@@ -0,0 +1,616 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-a64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
byte* fast_exp_a64_machine_code = NULL;
double fast_exp_simulator(double x) {
Simulator * simulator = Simulator::current(Isolate::Current());
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
};
return simulator->CallDouble(fast_exp_a64_machine_code, args);
}
#endif
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
// Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.SetStackPointer(csp);
// The argument will be in d0 on entry.
DoubleRegister input = d0;
// Use other caller-saved registers for all other values.
DoubleRegister result = d1;
DoubleRegister double_temp1 = d2;
DoubleRegister double_temp2 = d3;
Register temp1 = x10;
Register temp2 = x11;
Register temp3 = x12;
MathExpGenerator::EmitMathExp(&masm, input, result,
double_temp1, double_temp2,
temp1, temp2, temp3);
// Move the result to the return register.
masm.Fmov(d0, result);
masm.Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
fast_exp_a64_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
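// Hedged usage sketch (UnaryMathFunction is V8's double (*)(double) typedef;
// under USE_SIMULATOR the returned pointer routes through
// fast_exp_simulator() above):
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.7182818, via the stub or the simulator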
UnaryMathFunction CreateSqrtFunction() {
return &std::sqrt;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
masm->set_has_frame(false);
}
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- x2 : receiver
// -- x3 : target map
// -----------------------------------
Register receiver = x2;
Register map = x3;
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
// Set transitioned map.
__ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
// ----------- S t a t e -------------
// -- lr : return address
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- x3 : target map, scratch for subsequent call
// -----------------------------------
Register receiver = x2;
Register target_map = x3;
Label gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
Register array_size = x6;
Register array = x7;
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
// Set the destination FixedDoubleArray's length and map.
Register map_root = x6;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Add(x10, array, kHeapObjectTag);
__ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
x6, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
__ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
FPRegister nan_d = d1;
__ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
Label entry, done;
__ B(&entry);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(lr);
__ B(fail);
// Iterate over the array, copying and converting smis to doubles. If an
// element is non-smi, write a hole to the destination.
{
Label loop;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
__ Tst(x13, kSmiTagMask);
__ Fcsel(d0, d0, nan_d, eq);
__ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(lr);
__ Bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
// -- x3 : target map, scratch for subsequent call
// -- x4 : scratch (elements)
// -----------------------------------
Register value = x0;
Register key = x1;
Register receiver = x2;
Register target_map = x3;
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether it's needed.
__ Push(target_map, receiver, key, value);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedArray.
Register array_size = x6;
Register array = x7;
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
Register map_root = x6;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
__ Add(array, array, kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
Register the_hole = x14;
Register heap_num_map = x15;
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
Label entry;
__ B(&entry);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(value, key, receiver, target_map);
__ Pop(lr);
__ B(fail);
{
Label loop, convert_hole;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ Cmp(x13, kHoleNanInt64);
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_num = x5;
__ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
__ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
__ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&entry);
// Replace the-hole NaN with the-hole pointer.
__ Bind(&convert_hole);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(value, key, receiver, target_map);
// Replace receiver's backing store with newly created and filled FixedArray.
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Pop(lr);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
bool Code::IsYoungSequence(byte* sequence) {
return MacroAssembler::IsYoungSequence(sequence);
}
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
Code * stub = GetCodeAgeStub(isolate, age, parity);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
// Handle slices.
Label indirect_string_loaded;
__ Ldrsw(result,
UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
__ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ Add(index, index, result);
__ B(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ Bind(&cons_string);
__ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
// Get the first of the two strings and load its instance type.
__ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ Bind(&indirect_string_loaded);
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ Bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ B(&check_encoding);
// Handle external strings.
__ Bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ Tst(result, kIsIndirectStringMask);
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
// TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
// can be bound far away in deferred code.
__ Tst(result, kShortExternalStringMask);
__ B(ne, call_runtime);
__ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label ascii, done;
__ Bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
// Two-byte string.
__ Ldrh(result, MemOperand(string, index, LSL, 1));
__ B(&done);
__ Bind(&ascii);
// Ascii string.
__ Ldrb(result, MemOperand(string, index));
__ Bind(&done);
}
static MemOperand ExpConstant(Register base, int index) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_temp1,
DoubleRegister double_temp2,
Register temp1,
Register temp2,
Register temp3) {
// TODO(jbramley): There are several instances where fnmsub could be used
// instead of fmul and fsub. Doing this changes the result, but since this is
// an estimation anyway, does it matter?
ASSERT(!AreAliased(input, result,
double_temp1, double_temp2,
temp1, temp2, temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
Label done;
DoubleRegister double_temp3 = result;
Register constants = temp3;
// The algorithm used relies on some magic constants which are initialized in
// ExternalReference::InitializeMathExpData().
// Load the address of the start of the array.
__ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));
// We have to do a four-way split here:
// - If input <= about -708.4, the output always rounds to zero.
// - If input >= about 709.8, the output always rounds to +infinity.
// - If the input is NaN, the output is NaN.
// - Otherwise, the result needs to be calculated.
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
__ Fcmp(input, double_temp1);
__ Fccmp(input, double_temp2, NoFlag, hi);
// At this point, the condition flags can be in one of five states:
// NZCV
// 1000 -708.4 < input < 709.8 result = exp(input)
// 0110 input == 709.8 result = +infinity
// 0010 input > 709.8 result = +infinity
// 0011 input is NaN result = input
// 0000 input <= -708.4 result = +0.0
// Continue the common case first. 'mi' tests N == 1.
__ B(&result_is_finite_non_zero, mi);
// TODO(jbramley): Add (and use) a zero D register for A64.
// TODO(jbramley): Consider adding a +infinity register for A64.
__ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
__ Fsub(double_temp1, double_temp1, double_temp1); // Synthesize +0.0.
// Select between +0.0 and +infinity. 'lo' tests C == 0.
__ Fcsel(result, double_temp1, double_temp2, lo);
// Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
__ Fcsel(result, result, input, vc);
__ B(&done);
// The rest is magic, as described in InitializeMathExpData().
__ Bind(&result_is_finite_non_zero);
// Assert that we can load offset 3 and offset 4 with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
__ Fmov(temp2.W(), double_temp1.S());
__ Fsub(double_temp1, double_temp1, double_temp3);
// Assert that we can load offset 5 and offset 6 with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp1, double_temp1, double_temp2);
__ Fsub(double_temp1, double_temp1, input);
__ Fmul(double_temp2, double_temp1, double_temp1);
__ Fsub(double_temp3, double_temp3, double_temp1);
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
__ Ldr(double_temp2, ExpConstant(constants, 7));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Fsub(double_temp3, double_temp3, double_temp1);
// The 8th constant is 1.0, so use an immediate move rather than a load.
// We can't generate a runtime assertion here as we would need to call Abort
// in the runtime and we don't have an Isolate when we generate this code.
__ Fmov(double_temp2, 1.0);
__ Fadd(double_temp3, double_temp3, double_temp2);
__ And(temp2, temp2, 0x7ff);
__ Add(temp1, temp1, 0x3ff);
// Do the final table lookup.
__ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
__ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
__ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
__ Bfi(temp2, temp1, 32, 32);
__ Fmov(double_temp1, temp2);
__ Fmul(result, double_temp3, double_temp1);
__ Bind(&done);
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

66
deps/v8/src/defaults.cc → deps/v8/src/a64/codegen-a64.h

@@ -25,46 +25,46 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-// The GYP based build ends up defining USING_V8_SHARED when compiling this
-// file.
-#undef USING_V8_SHARED
-#include "../include/v8-defaults.h"
+#ifndef V8_A64_CODEGEN_A64_H_
+#define V8_A64_CODEGEN_A64_H_

-#include "platform.h"
-#include "globals.h"
-#include "v8.h"
+#include "v8.h"
+#include "ast.h"
+#include "ic-inl.h"

 namespace v8 {
+namespace internal {

+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
-bool ConfigureResourceConstraintsForCurrentPlatform(
-    ResourceConstraints* constraints) {
-  if (constraints == NULL) {
-    return false;
-  }
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};

-  int lump_of_memory = (i::kPointerSize / 4) * i::MB;
-  // The young_space_size should be a power of 2 and old_generation_size should
-  // be a multiple of Page::kPageSize.
-#if V8_OS_ANDROID
-  constraints->set_max_young_space_size(8 * lump_of_memory);
-  constraints->set_max_old_space_size(256 * lump_of_memory);
-  constraints->set_max_executable_size(192 * lump_of_memory);
-#else
-  constraints->set_max_young_space_size(16 * lump_of_memory);
-  constraints->set_max_old_space_size(700 * lump_of_memory);
-  constraints->set_max_executable_size(256 * lump_of_memory);
-#endif
-  return true;
-}
+class MathExpGenerator : public AllStatic {
+ public:
+  static void EmitMathExp(MacroAssembler* masm,
+                          DoubleRegister input,
+                          DoubleRegister result,
+                          DoubleRegister double_scratch1,
+                          DoubleRegister double_scratch2,
+                          Register temp1,
+                          Register temp2,
+                          Register temp3);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};

-bool SetDefaultResourceConstraintsForCurrentPlatform() {
-  ResourceConstraints constraints;
-  if (!ConfigureResourceConstraintsForCurrentPlatform(&constraints))
-    return false;
-  return SetResourceConstraints(&constraints);
-}
-}  // namespace v8
+} }  // namespace v8::internal
+
+#endif  // V8_A64_CODEGEN_A64_H_

1262
deps/v8/src/a64/constants-a64.h

File diff suppressed because it is too large

199
deps/v8/src/a64/cpu-a64.cc

@@ -0,0 +1,199 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for A64 independent of OS goes here.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "a64/cpu-a64.h"
#include "a64/utils-a64.h"
namespace v8 {
namespace internal {
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
// Initialise to smallest possible cache size.
unsigned CpuFeatures::dcache_line_size_ = 1;
unsigned CpuFeatures::icache_line_size_ = 1;
void CPU::SetUp() {
CpuFeatures::Probe();
}
bool CPU::SupportsCrankshaft() {
return true;
}
void CPU::FlushICache(void* address, size_t length) {
if (length == 0) {
return;
}
#ifdef USE_SIMULATOR
// TODO(all): consider doing some cache simulation to ensure every address
// run has been synced.
USE(address);
USE(length);
#else
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
// side of the CPU.
uintptr_t start = reinterpret_cast<uintptr_t>(address);
// Sizes will be used to generate a mask big enough to cover a pointer.
uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
// Cache line sizes are always a power of 2.
ASSERT(CountSetBits(dsize, 64) == 1);
ASSERT(CountSetBits(isize, 64) == 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
__asm__ __volatile__ ( // NOLINT
// Clean every line of the D cache containing the target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
// va : by (Virtual) Address
// u : to the point of Unification
// The point of unification for a processor is the point by which the
// instruction and data caches are guaranteed to see the same copy of a
// memory location. See ARM DDI 0406B page B2-12 for more information.
"dc cvau, %[dline] \n\t"
"add %[dline], %[dline], %[dsize] \n\t"
"cmp %[dline], %[end] \n\t"
"b.lt 0b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
// dsb : Data Synchronisation Barrier
// ish : Inner SHareable domain
// The point of unification for an Inner Shareable shareability domain is
// the point by which the instruction and data caches of all the processors
// in that Inner Shareable shareability domain are guaranteed to see the
// same copy of a memory location. See ARM DDI 0406B page B2-12 for more
// information.
"dsb ish \n\t"
// Invalidate every line of the I cache containing the target data.
"1: \n\t"
// ic : instruction cache maintenance
// i : invalidate
// va : by address
// u : to the point of unification
"ic ivau, %[iline] \n\t"
"add %[iline], %[iline], %[isize] \n\t"
"cmp %[iline], %[end] \n\t"
"b.lt 1b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
"dsb ish \n\t"
// Barrier to ensure any prefetching which happened before this code is
// discarded.
// isb : Instruction Synchronisation Barrier
"isb \n\t"
: [dline] "+r" (dstart),
[iline] "+r" (istart)
: [dsize] "r" (dsize),
[isize] "r" (isize),
[end] "r" (end)
// This code does not write to memory, but without the "memory" clobber
// gcc might move it ahead of the stores that wrote the new code.
: "cc", "memory"
); // NOLINT
#endif
}
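// A minimal usage sketch, assuming freshly generated code in a buffer (the
// names below are illustrative only):
//
//   byte* buffer = ...;   // address where new instructions were written
//   size_t size = ...;    // number of bytes written
//   CPU::FlushICache(buffer, size);
//   // Only after the flush (DC CVAU, DSB ISH, IC IVAU, DSB ISH, ISB, as
//   // above) is it safe to branch into the new code.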
void CpuFeatures::Probe() {
// Compute I and D cache line size. The cache type register holds
// information about the caches.
uint32_t cache_type_register = GetCacheType();
static const int kDCacheLineSizeShift = 16;
static const int kICacheLineSizeShift = 0;
static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
// The cache type register holds the size of the I and D caches as a power of
// two.
uint32_t dcache_line_size_power_of_two =
(cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
uint32_t icache_line_size_power_of_two =
(cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
dcache_line_size_ = 1 << dcache_line_size_power_of_two;
icache_line_size_ = 1 << icache_line_size_power_of_two;
// AArch64 has no configuration options, so no further probing is required.
supported_ = 0;
#ifdef DEBUG
initialized_ = true;
#endif
}
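// Worked example, assuming CTR_EL0.DminLine (bits 19:16) and
// CTR_EL0.IminLine (bits 3:0) both read 4:
//   dcache_line_size_ = 1 << 4 = 16
//   icache_line_size_ = 1 << 4 = 16
// Architecturally these fields hold log2 of the line size in words, so the
// actual line size would be 4 << 4 = 64 bytes. Underestimating is safe:
// FlushICache simply steps through the range in smaller increments and
// touches some cache lines more than once.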
unsigned CpuFeatures::dcache_line_size() {
ASSERT(initialized_);
return dcache_line_size_;
}
unsigned CpuFeatures::icache_line_size() {
ASSERT(initialized_);
return icache_line_size_;
}
uint32_t CpuFeatures::GetCacheType() {
#ifdef USE_SIMULATOR
// This will lead to a cache with 1 byte long lines, which is fine since the
// simulator will not need this information.
return 0;
#else
uint32_t cache_type_register;
// Copy the content of the cache type register to a core register.
__asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
: [ctr] "=r" (cache_type_register));
return cache_type_register;
#endif
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

107
deps/v8/src/a64/cpu-a64.h

@@ -0,0 +1,107 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CPU_A64_H_
#define V8_A64_CPU_A64_H_
#include <stdio.h>
#include "serialize.h"
#include "cpu.h"
namespace v8 {
namespace internal {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for A64.
return false;
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for A64.
return false;
}
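// Note: on A64 this currently always returns false, since IsSupported() is
// unconditionally false; the generic form is kept for parity with the other
// ports.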
static bool IsSafeForSnapshot(CpuFeature f) {
return (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
// I and D cache line size in bytes.
static unsigned dcache_line_size();
static unsigned icache_line_size();
static unsigned supported_;
static bool VerifyCrossCompiling() {
// There are no optional features for A64.
ASSERT(cross_compile_ == 0);
return true;
}
static bool VerifyCrossCompiling(CpuFeature f) {
// There are no optional features for A64.
USE(f);
ASSERT(cross_compile_ == 0);
return true;
}
private:
// Return the content of the cache type register.
static uint32_t GetCacheType();
// I and D cache line size in bytes.
static unsigned icache_line_size_;
static unsigned dcache_line_size_;
#ifdef DEBUG
static bool initialized_;
#endif
// This isn't used (and is always 0), but it is required by V8.
static unsigned found_by_runtime_probing_only_;
static unsigned cross_compile_;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
} } // namespace v8::internal
#endif // V8_A64_CPU_A64_H_

394
deps/v8/src/a64/debug-a64.cc

@@ -0,0 +1,394 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "codegen.h"
#include "debug.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
// Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
// the return from JS function sequence from
// mov sp, fp
// ldp fp, lr, [sp] #16
// ldr ip0, [pc, #(3 * kInstructionSize)]
// add sp, sp, ip0
// ret
// <number of parameters ...
// ... plus one (64 bits)>
// to a call to the debug break return code.
// ldr ip0, [pc, #(3 * kInstructionSize)]
// blr ip0
// hlt kHltBadCode @ code should not return, catch if it does.
// <debug break return code ...
// ... entry point address (64 bits)>
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
// The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code.
patcher.LoadLiteral(ip0, 3 * kInstructionSize);
// TODO(all): check the following is correct.
// The debug break return code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break return code.
patcher.blr(ip0);
patcher.hlt(kHltBadCode);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
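// After patching, the return sequence occupies five instruction slots, laid
// out as follows (a sketch; kInstructionSize is 4 bytes):
//   pc + 0:   ldr ip0, [pc, #12]   // load the entry address stored below
//   pc + 4:   blr ip0              // call the debug break return code
//   pc + 8:   hlt kHltBadCode      // should not be reached
//   pc + 12:  <entry address, 64 bits, occupying two slots>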
void BreakLocationIterator::ClearDebugBreakAtReturn() {
// Reset the code emitted by EmitReturnSequence to its original state.
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kJSRetSequenceInstructions);
}
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
// Patch the code emitted by Debug::GenerateSlots, changing the debug break
// slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// to a call to the debug slot code.
// ldr ip0, [pc, #(2 * kInstructionSize)]
// blr ip0
// <debug break slot code ...
// ... entry point address (64 bits)>
// TODO(all): consider adding a hlt instruction after the blr as we don't
// expect control to return here. This implies increasing
// kDebugBreakSlotInstructions to 5 instructions.
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
// The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code.
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
// TODO(all): check the following is correct.
// The debug break slot code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break slot code.
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
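// The patched slot likewise spans four instruction slots (a sketch):
//   pc + 0:  ldr ip0, [pc, #8]    // load the entry address stored below
//   pc + 4:  blr ip0              // call the debug break slot code
//   pc + 8:  <entry address, 64 bits, occupying two slots>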
void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,
Register scratch) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Any live values (object_regs and non_object_regs) in caller-saved
// registers (or lr) need to be stored on the stack so that their values are
// safely preserved for a call into C code.
//
// Also:
// * object_regs may be modified during the C code by the garbage
// collector. Every object register must be a valid tagged pointer or
// SMI.
//
// * non_object_regs will be converted to SMIs so that the garbage
// collector doesn't try to interpret them as pointers.
//
// TODO(jbramley): Why can't this handle callee-saved registers?
ASSERT((~kCallerSaved.list() & object_regs) == 0);
ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
ASSERT((object_regs & non_object_regs) == 0);
ASSERT((scratch.Bit() & object_regs) == 0);
ASSERT((scratch.Bit() & non_object_regs) == 0);
ASSERT((ip0.Bit() & (object_regs | non_object_regs)) == 0);
ASSERT((ip1.Bit() & (object_regs | non_object_regs)) == 0);
STATIC_ASSERT(kSmiValueSize == 32);
CPURegList non_object_list =
CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Store each non-object register as two SMIs.
Register reg = Register(non_object_list.PopLowestIndex());
__ Push(reg);
__ Poke(wzr, 0);
__ Push(reg.W(), wzr);
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
}
if (object_regs != 0) {
__ PushXRegList(object_regs);
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Mov(x0, 0); // No arguments.
__ Mov(x1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub stub(1);
__ CallStub(&stub);
// Restore the register values from the expression stack.
if (object_regs != 0) {
__ PopXRegList(object_regs);
}
non_object_list =
CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Load each non-object register from two SMIs.
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
Register reg = Register(non_object_list.PopHighestIndex());
__ Pop(scratch, reg);
__ Bfxil(reg, scratch, 32, 32);
}
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address the caller originally intended, which was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
masm->isolate());
__ Mov(scratch, Operand(after_break_target));
__ Ldr(scratch, MemOperand(scratch));
__ Br(scratch);
}
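// Worked example of the non-object register round-trip above, for a register
// holding 0x0123456789ABCDEF (a sketch; kSmiShift == 32 places a SMI's
// payload in the upper word of its slot):
//   After the pushes:  jssp[8..15] = 0x01234567_00000000  (high half, SMI)
//                      jssp[0..7]  = 0x89ABCDEF_00000000  (low half, SMI)
//   After Pop(scratch, reg):  scratch = 0x89ABCDEF_00000000
//                             reg     = 0x01234567_00000000
//   Bfxil(reg, scratch, 32, 32) copies scratch[63:32] into reg[31:0],
//   reconstructing 0x0123456789ABCDEF.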
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e -------------
// -- x2 : name
// -- lr : return address
// -- x0 : receiver
// -- [sp] : receiver
// -----------------------------------
// Registers x0 and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : receiver
// -- x2 : name
// -- lr : return address
// -----------------------------------
// Registers x0, x1, and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- x0 : key
// -- x1 : receiver
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- x0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
// -- x2 : name
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that x0 is TOS, which
// is an object - this is not generally the case, so this should be used with
// care.
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
}
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -- x2 : feedback array
// -- x3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
}
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
}
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -- x2 : feedback array
// -- x3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(
masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
}
void Debug::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(Assembler::DEBUG_BREAK_NOP);
}
}
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted, no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
}
const bool Debug::kFrameDropperSupported = false;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

111
deps/v8/src/a64/debugger-a64.cc

@@ -0,0 +1,111 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if V8_TARGET_ARCH_A64
#if defined(USE_SIMULATOR)
#include "a64/debugger-a64.h"
namespace v8 {
namespace internal {
void Debugger::VisitException(Instruction* instr) {
switch (instr->Mask(ExceptionMask)) {
case HLT: {
if (instr->ImmException() == kImmExceptionIsDebug) {
// Read the arguments encoded inline in the instruction stream.
uint32_t code;
uint32_t parameters;
char const * message;
ASSERT(sizeof(*pc_) == 1);
memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
if (message[0] == '\0') {
fprintf(stream_, "Debugger hit %" PRIu32 ".\n", code);
} else {
fprintf(stream_, "Debugger hit %" PRIu32 ": %s\n", code, message);
}
// Other options.
switch (parameters & kDebuggerTracingDirectivesMask) {
case TRACE_ENABLE:
set_log_parameters(log_parameters() | parameters);
break;
case TRACE_DISABLE:
set_log_parameters(log_parameters() & ~parameters);
break;
case TRACE_OVERRIDE:
set_log_parameters(parameters);
break;
default:
// We don't support a one-shot LOG_DISASM.
ASSERT((parameters & LOG_DISASM) == 0);
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
if (parameters & LOG_REGS) PrintRegisters(true);
if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
}
// Check if the debugger should break.
if (parameters & BREAK) OS::DebugBreak();
// The stop parameters are inlined in the code. Skip them:
// - Skip to the end of the message string.
pc_ += kDebugMessageOffset + strlen(message) + 1;
// - Advance to the next aligned location.
pc_ = AlignUp(pc_, kInstructionSize);
// - Verify that the unreachable marker is present.
ASSERT(reinterpret_cast<Instruction*>(pc_)->Mask(ExceptionMask) == HLT);
ASSERT(reinterpret_cast<Instruction*>(pc_)->ImmException() ==
kImmExceptionIsUnreachable);
// - Skip past the unreachable marker.
pc_ += kInstructionSize;
pc_modified_ = true;
} else {
Simulator::VisitException(instr);
}
break;
}
default:
UNIMPLEMENTED();
}
}
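// Sketch of the inline layout that the HLT handler above assumes (the
// offsets are the kDebug*Offset constants from constants-a64.h):
//   pc_ + 0:                   hlt kImmExceptionIsDebug
//   pc_ + kDebugCodeOffset:    <32-bit debug code>
//   pc_ + kDebugParamsOffset:  <32-bit parameters>
//   pc_ + kDebugMessageOffset: <NUL-terminated message string>
//   <padding up to the next kInstructionSize boundary>
//   next instruction:          hlt kImmExceptionIsUnreachable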
} } // namespace v8::internal
#endif // USE_SIMULATOR
#endif // V8_TARGET_ARCH_A64

56
deps/v8/src/a64/debugger-a64.h

@@ -0,0 +1,56 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DEBUGGER_A64_H_
#define V8_A64_DEBUGGER_A64_H_
#if defined(USE_SIMULATOR)
#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"
#include "a64/simulator-a64.h"
namespace v8 {
namespace internal {
class Debugger : public Simulator {
public:
Debugger(Decoder* decoder, FILE* stream = stderr)
: Simulator(decoder, NULL, stream) {}
// Overridden Simulator function.
void VisitException(Instruction* instr);
};
} } // namespace v8::internal
#endif // USE_SIMULATOR
#endif // V8_A64_DEBUGGER_A64_H_

726
deps/v8/src/a64/decoder-a64.cc

@@ -0,0 +1,726 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
namespace v8 {
namespace internal {
// Top-level instruction decode function.
void Decoder::Decode(Instruction *instr) {
if (instr->Bits(28, 27) == 0) {
VisitUnallocated(instr);
} else {
switch (instr->Bits(27, 24)) {
// 0: PC relative addressing.
case 0x0: DecodePCRelAddressing(instr); break;
// 1: Add/sub immediate.
case 0x1: DecodeAddSubImmediate(instr); break;
// A: Logical shifted register.
// Add/sub with carry.
// Conditional compare register.
// Conditional compare immediate.
// Conditional select.
// Data processing 1 source.
// Data processing 2 source.
// B: Add/sub shifted register.
// Add/sub extended register.
// Data processing 3 source.
case 0xA:
case 0xB: DecodeDataProcessing(instr); break;
// 2: Logical immediate.
// Move wide immediate.
case 0x2: DecodeLogical(instr); break;
// 3: Bitfield.
// Extract.
case 0x3: DecodeBitfieldExtract(instr); break;
// 4: Unconditional branch immediate.
// Exception generation.
// Compare and branch immediate.
// 5: Compare and branch immediate.
// Conditional branch.
// System.
// 6,7: Unconditional branch.
// Test and branch immediate.
case 0x4:
case 0x5:
case 0x6:
case 0x7: DecodeBranchSystemException(instr); break;
// 8,9: Load/store register pair post-index.
// Load register literal.
// Load/store register unscaled immediate.
// Load/store register immediate post-index.
// Load/store register immediate pre-index.
// Load/store register offset.
// C,D: Load/store register pair offset.
// Load/store register pair pre-index.
// Load/store register unsigned immediate.
// Advanced SIMD.
case 0x8:
case 0x9:
case 0xC:
case 0xD: DecodeLoadStore(instr); break;
// E: FP fixed point conversion.
// FP integer conversion.
// FP data processing 1 source.
// FP compare.
// FP immediate.
// FP data processing 2 source.
// FP conditional compare.
// FP conditional select.
// Advanced SIMD.
// F: FP data processing 3 source.
// Advanced SIMD.
case 0xE:
case 0xF: DecodeFP(instr); break;
}
}
}
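// Worked example (a sketch): for 0x91001c21 (add x1, x1, #7), bits 28:27
// are not both zero and bits 27:24 are 0x1, so Decode() dispatches to
// DecodeAddSubImmediate(), which in turn calls VisitAddSubImmediate().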
void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_back(new_visitor);
}
void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_front(new_visitor);
}
void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}
void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
it++;
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
visitors_.push_back(new_visitor);
}
void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
visitors_.remove(visitor);
}
void Decoder::DecodePCRelAddressing(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
ASSERT(instr->Bit(28) == 0x1);
VisitPCRelAddressing(instr);
}
void Decoder::DecodeBranchSystemException(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) );
switch (instr->Bits(31, 29)) {
case 0:
case 4: {
VisitUnconditionalBranch(instr);
break;
}
case 1:
case 5: {
if (instr->Bit(25) == 0) {
VisitCompareBranch(instr);
} else {
VisitTestBranch(instr);
}
break;
}
case 2: {
if (instr->Bit(25) == 0) {
if ((instr->Bit(24) == 0x1) ||
(instr->Mask(0x01000010) == 0x00000010)) {
VisitUnallocated(instr);
} else {
VisitConditionalBranch(instr);
}
} else {
VisitUnallocated(instr);
}
break;
}
case 6: {
if (instr->Bit(25) == 0) {
if (instr->Bit(24) == 0) {
if ((instr->Bits(4, 2) != 0) ||
(instr->Mask(0x00E0001D) == 0x00200001) ||
(instr->Mask(0x00E0001D) == 0x00400001) ||
(instr->Mask(0x00E0001E) == 0x00200002) ||
(instr->Mask(0x00E0001E) == 0x00400002) ||
(instr->Mask(0x00E0001C) == 0x00600000) ||
(instr->Mask(0x00E0001C) == 0x00800000) ||
(instr->Mask(0x00E0001F) == 0x00A00000) ||
(instr->Mask(0x00C0001C) == 0x00C00000)) {
VisitUnallocated(instr);
} else {
VisitException(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
if ((instr->Bits(21, 19) == 0x4) ||
(masked_003FF0E0 == 0x00033000) ||
(masked_003FF0E0 == 0x003FF020) ||
(masked_003FF0E0 == 0x003FF060) ||
(masked_003FF0E0 == 0x003FF0E0) ||
(instr->Mask(0x00388000) == 0x00008000) ||
(instr->Mask(0x0038E000) == 0x00000000) ||
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
(instr->Mask(0x003FFFC0) == 0x000320C0) ||
(instr->Mask(0x003FF100) == 0x00032100) ||
(instr->Mask(0x003FF200) == 0x00032200) ||
(instr->Mask(0x003FF400) == 0x00032400) ||
(instr->Mask(0x003FF800) == 0x00032800) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
VisitUnallocated(instr);
} else {
VisitSystem(instr);
}
} else {
VisitUnallocated(instr);
}
}
} else {
if ((instr->Bit(24) == 0x1) ||
(instr->Bits(20, 16) != 0x1F) ||
(instr->Bits(15, 10) != 0) ||
(instr->Bits(4, 0) != 0) ||
(instr->Bits(24, 21) == 0x3) ||
(instr->Bits(24, 22) == 0x3)) {
VisitUnallocated(instr);
} else {
VisitUnconditionalBranchToRegister(instr);
}
}
break;
}
case 3:
case 7: {
VisitUnallocated(instr);
break;
}
}
}
void Decoder::DecodeLoadStore(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(26) == 0) {
// TODO(all): VisitLoadStoreExclusive.
VisitUnimplemented(instr);
} else {
DecodeAdvSIMDLoadStore(instr);
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePairNonTemporal(instr);
}
} else {
VisitLoadStorePairPostIndex(instr);
}
}
}
} else {
if (instr->Bit(29) == 0) {
if (instr->Mask(0xC4000000) == 0xC4000000) {
VisitUnallocated(instr);
} else {
VisitLoadLiteral(instr);
}
} else {
if ((instr->Mask(0x84C00000) == 0x80C00000) ||
(instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(21) == 0) {
switch (instr->Bits(11, 10)) {
case 0: {
VisitLoadStoreUnscaledOffset(instr);
break;
}
case 1: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePostIndex(instr);
}
break;
}
case 2: {
// TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
VisitUnimplemented(instr);
break;
}
case 3: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePreIndex(instr);
}
break;
}
}
} else {
if (instr->Bits(11, 10) == 0x2) {
if (instr->Bit(14) == 0) {
VisitUnallocated(instr);
} else {
VisitLoadStoreRegisterOffset(instr);
}
} else {
VisitUnallocated(instr);
}
}
}
}
}
} else {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
VisitUnallocated(instr);
} else {
if ((instr->Bits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
VisitLoadStorePairOffset(instr);
} else {
VisitLoadStorePairPreIndex(instr);
}
}
}
} else {
if (instr->Bit(29) == 0) {
VisitUnallocated(instr);
} else {
if ((instr->Mask(0x84C00000) == 0x80C00000) ||
(instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
VisitUnallocated(instr);
} else {
VisitLoadStoreUnsignedOffset(instr);
}
}
}
}
}
void Decoder::DecodeLogical(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x2);
if (instr->Mask(0x80400000) == 0x00400000) {
VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
VisitLogicalImmediate(instr);
} else {
if (instr->Bits(30, 29) == 0x1) {
VisitUnallocated(instr);
} else {
VisitMoveWideImmediate(instr);
}
}
}
}
void Decoder::DecodeBitfieldExtract(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x3);
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
(instr->Mask(0x80008000) == 0x00008000)) {
VisitUnallocated(instr);
} else if (instr->Bit(23) == 0) {
if ((instr->Mask(0x80200000) == 0x00200000) ||
(instr->Mask(0x60000000) == 0x60000000)) {
VisitUnallocated(instr);
} else {
VisitBitfield(instr);
}
} else {
if ((instr->Mask(0x60200000) == 0x00200000) ||
(instr->Mask(0x60000000) != 0x00000000)) {
VisitUnallocated(instr);
} else {
VisitExtract(instr);
}
}
}
void Decoder::DecodeAddSubImmediate(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x1);
if (instr->Bit(23) == 1) {
VisitUnallocated(instr);
} else {
VisitAddSubImmediate(instr);
}
}
void Decoder::DecodeDataProcessing(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xA) ||
(instr->Bits(27, 24) == 0xB) );
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Mask(0x80008000) == 0x00008000) {
VisitUnallocated(instr);
} else {
VisitLogicalShifted(instr);
}
} else {
switch (instr->Bits(23, 21)) {
case 0: {
if (instr->Mask(0x0000FC00) != 0) {
VisitUnallocated(instr);
} else {
VisitAddSubWithCarry(instr);
}
break;
}
case 2: {
if ((instr->Bit(29) == 0) ||
(instr->Mask(0x00000410) != 0)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(11) == 0) {
VisitConditionalCompareRegister(instr);
} else {
VisitConditionalCompareImmediate(instr);
}
}
break;
}
case 4: {
if (instr->Mask(0x20000800) != 0x00000000) {
VisitUnallocated(instr);
} else {
VisitConditionalSelect(instr);
}
break;
}
case 6: {
if (instr->Bit(29) == 0x1) {
VisitUnallocated(instr);
} else {
if (instr->Bit(30) == 0) {
if ((instr->Bit(15) == 0x1) ||
(instr->Bits(15, 11) == 0) ||
(instr->Bits(15, 12) == 0x1) ||
(instr->Bits(15, 12) == 0x3) ||
(instr->Bits(15, 13) == 0x3) ||
(instr->Mask(0x8000EC00) == 0x00004C00) ||
(instr->Mask(0x8000E800) == 0x80004000) ||
(instr->Mask(0x8000E400) == 0x80004000)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing2Source(instr);
}
} else {
if ((instr->Bit(13) == 1) ||
(instr->Bits(20, 16) != 0) ||
(instr->Bits(15, 14) != 0) ||
(instr->Mask(0xA01FFC00) == 0x00000C00) ||
(instr->Mask(0x201FF800) == 0x00001800)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing1Source(instr);
}
}
break;
}
}
case 1:
case 3:
case 5:
case 7: VisitUnallocated(instr); break;
}
}
} else {
if (instr->Bit(28) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x80008000) == 0x00008000)) {
VisitUnallocated(instr);
} else {
VisitAddSubShifted(instr);
}
} else {
if ((instr->Mask(0x00C00000) != 0x00000000) ||
(instr->Mask(0x00001400) == 0x00001400) ||
(instr->Mask(0x00001800) == 0x00001800)) {
VisitUnallocated(instr);
} else {
VisitAddSubExtended(instr);
}
}
} else {
if ((instr->Bit(30) == 0x1) ||
(instr->Bits(30, 29) == 0x1) ||
(instr->Mask(0xE0600000) == 0x00200000) ||
(instr->Mask(0xE0608000) == 0x00400000) ||
(instr->Mask(0x60608000) == 0x00408000) ||
(instr->Mask(0x60E00000) == 0x00E00000) ||
(instr->Mask(0x60E00000) == 0x00800000) ||
(instr->Mask(0x60E00000) == 0x00600000)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing3Source(instr);
}
}
}
}
void Decoder::DecodeFP(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xE) ||
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(29) == 1) {
VisitUnallocated(instr);
} else {
if (instr->Bits(31, 30) == 0x3) {
VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
(instr->Bit(18) == 1) ||
(instr->Mask(0x80008000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x00160000) == 0x00000000) ||
(instr->Mask(0x00160000) == 0x00120000)) {
VisitUnallocated(instr);
} else {
VisitFPFixedPointConvert(instr);
}
} else {
if (instr->Bits(15, 10) == 32) {
VisitUnallocated(instr);
} else if (instr->Bits(15, 10) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x000E0000) == 0x000C0000) ||
(instr->Mask(0x00160000) == 0x00120000) ||
(instr->Mask(0x00160000) == 0x00140000) ||
(instr->Mask(0x20C40000) == 0x00800000) ||
(instr->Mask(0x20C60000) == 0x00840000) ||
(instr->Mask(0xA0C60000) == 0x80060000) ||
(instr->Mask(0xA0C60000) == 0x00860000) ||
(instr->Mask(0xA0C60000) == 0x00460000) ||
(instr->Mask(0xA0CE0000) == 0x80860000) ||
(instr->Mask(0xA0CE0000) == 0x804E0000) ||
(instr->Mask(0xA0CE0000) == 0x000E0000) ||
(instr->Mask(0xA0D60000) == 0x00160000) ||
(instr->Mask(0xA0D60000) == 0x80560000) ||
(instr->Mask(0xA0D60000) == 0x80960000)) {
VisitUnallocated(instr);
} else {
VisitFPIntegerConvert(instr);
}
} else if (instr->Bits(14, 10) == 16) {
const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
if ((instr->Mask(0x80180000) != 0) ||
(masked_A0DF8000 == 0x00020000) ||
(masked_A0DF8000 == 0x00030000) ||
(masked_A0DF8000 == 0x00068000) ||
(masked_A0DF8000 == 0x00428000) ||
(masked_A0DF8000 == 0x00430000) ||
(masked_A0DF8000 == 0x00468000) ||
(instr->Mask(0xA0D80000) == 0x00800000) ||
(instr->Mask(0xA0DE0000) == 0x00C00000) ||
(instr->Mask(0xA0DF0000) == 0x00C30000) ||
(instr->Mask(0xA0DC0000) == 0x00C40000)) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing1Source(instr);
}
} else if (instr->Bits(13, 10) == 8) {
if ((instr->Bits(15, 14) != 0) ||
(instr->Bits(2, 0) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
VisitUnallocated(instr);
} else {
VisitFPCompare(instr);
}
} else if (instr->Bits(12, 10) == 4) {
if ((instr->Bits(9, 5) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
VisitUnallocated(instr);
} else {
VisitFPImmediate(instr);
}
} else {
if (instr->Mask(0x80800000) != 0x00000000) {
VisitUnallocated(instr);
} else {
switch (instr->Bits(11, 10)) {
case 1: {
VisitFPConditionalCompare(instr);
break;
}
case 2: {
if ((instr->Bits(15, 14) == 0x3) ||
(instr->Mask(0x00009000) == 0x00009000) ||
(instr->Mask(0x0000A000) == 0x0000A000)) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing2Source(instr);
}
break;
}
case 3: {
VisitFPConditionalSelect(instr);
break;
}
default: UNREACHABLE();
}
}
}
}
} else {
// Bit 30 == 1 has been handled earlier.
ASSERT(instr->Bit(30) == 0);
if (instr->Mask(0xA0800000) != 0) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing3Source(instr);
}
}
}
}
}
}
void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
ASSERT(instr->Bits(29, 25) == 0x6);
VisitUnimplemented(instr);
}
void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
ASSERT(instr->Bits(27, 25) == 0x7);
VisitUnimplemented(instr);
}
#define DEFINE_VISITOR_CALLERS(A) \
void Decoder::Visit##A(Instruction *instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
ASSERT(instr->Mask(A##FMask) == A##Fixed); \
} \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
(*it)->Visit##A(instr); \
} \
}
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

202
deps/v8/src/a64/decoder-a64.h

@@ -0,0 +1,202 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DECODER_A64_H_
#define V8_A64_DECODER_A64_H_
#include <list>
#include "globals.h"
#include "a64/instructions-a64.h"
namespace v8 {
namespace internal {
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadStorePairNonTemporal) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(Unallocated) \
V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
public:
#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
#undef DECLARE
virtual ~DecoderVisitor() {}
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
friend class Decoder;
};
class Decoder: public DecoderVisitor {
public:
explicit Decoder() {}
// Top-level instruction decoder function. Decodes an instruction and calls
// the visitor functions registered with the Decoder class.
void Decode(Instruction *instr);
// Register a new visitor class with the decoder.
// Decode() will call the corresponding visitor method from all registered
// visitor classes when decoding reaches the leaf node of the instruction
// decode tree.
// Visitors are called in the order they appear in the list.
// A visitor can only be registered once; registering an already registered
// visitor updates its position in the list.
//
// d.AppendVisitor(V1);
// d.AppendVisitor(V2);
// d.PrependVisitor(V2); // Move V2 to the start of the list.
// d.InsertVisitorBefore(V3, V2);
// d.AppendVisitor(V4);
// d.AppendVisitor(V4); // No effect.
//
// d.Decode(i);
//
// will call the visitor methods in the order V3, V2, V1, V4.
void AppendVisitor(DecoderVisitor* visitor);
void PrependVisitor(DecoderVisitor* visitor);
void InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
void InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
// Remove a previously registered visitor class from the list of visitors
// stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
// Decode the PC relative addressing instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x0.
void DecodePCRelAddressing(Instruction* instr);
// Decode the add/subtract immediate instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x1.
void DecodeAddSubImmediate(Instruction* instr);
// Decode the branch, system command, and exception generation parts of
// the instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
void DecodeBranchSystemException(Instruction* instr);
// Decode the load and store parts of the instruction tree, and call
// the corresponding visitors.
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
void DecodeLoadStore(Instruction* instr);
// Decode the logical immediate and move wide immediate parts of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x2.
void DecodeLogical(Instruction* instr);
// Decode the bitfield and extraction parts of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x3.
void DecodeBitfieldExtract(Instruction* instr);
// Decode the data processing parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
void DecodeDataProcessing(Instruction* instr);
// Decode the floating point parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0xE, 0xF}.
void DecodeFP(Instruction* instr);
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeAdvSIMDLoadStore(Instruction* instr);
// Decode the Advanced SIMD (NEON) data processing part of the instruction
// tree, and call the corresponding visitors.
// On entry, instruction bits 27:25 = 0x7.
void DecodeAdvSIMDDataProcessing(Instruction* instr);
};
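// A minimal visitor sketch, assuming only the declarations above: it counts
// every decoded instruction, whatever its kind. The disassembler and
// simulator follow the same pattern with real visitor bodies.
//
// class CountingVisitor : public DecoderVisitor {
//  public:
//   CountingVisitor() : count_(0) {}
// #define DECLARE(A) \
//   virtual void Visit##A(Instruction* instr) { count_++; }
//   VISITOR_LIST(DECLARE)
// #undef DECLARE
//   int count() const { return count_; }
//  private:
//   int count_;
// };
//
// Usage sketch:
//   Decoder decoder;
//   CountingVisitor counter;
//   decoder.AppendVisitor(&counter);
//   decoder.Decode(instr);  // counter.count() increments by one.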
} } // namespace v8::internal
#endif // V8_A64_DECODER_A64_H_

376
deps/v8/src/a64/deoptimizer-a64.cc

@@ -0,0 +1,376 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
int Deoptimizer::patch_size() {
// Size of the code used to patch lazy bailout points.
// Patching is done by Deoptimizer::DeoptimizeFunction.
return 4 * kInstructionSize;
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Invalidate the relocation information, as the code patching below will
// make it invalid and it is not needed any more.
code->InvalidateRelocation();
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
Address code_start_address = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
ASSERT((prev_call_address == NULL) ||
(call_address >= prev_call_address + patch_size()));
ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
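// The patch emitted above occupies exactly patch_size() bytes, laid out as
// follows (a sketch):
//   call_address + 0:  ldr ip0, [pc, #8]   // load the entry address below
//   call_address + 4:  blr ip0             // call the deoptimization entry
//   call_address + 8:  <deopt entry address, 64 bits, two slots>
// i.e. 4 * kInstructionSize in total.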
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee-saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
for (int i = 0; i < Register::NumRegisters(); i++) {
input_->SetRegister(i, 0);
}
// TODO(all): Do we also need to set a value to csp?
input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
}
}
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// There is no dynamic alignment padding on A64 in the input frame.
return false;
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(x0.code(), params);
output_frame->SetRegister(x1.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
Code* Deoptimizer::NotifyStubFailureBuiltin() {
return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
// TODO(all): This code needs to be revisited. We probably only need to save
// caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame.
// Save all allocatable floating point registers.
CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
0, FPRegister::NumAllocatableRegisters() - 1);
__ PushCPURegList(saved_fp_registers);
// We save all the registers except jssp, sp and lr.
CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSizeInBytes) +
(saved_fp_registers.Count() * kDRegSizeInBytes);
// Floating point registers are saved on the stack above core registers.
const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;
// Get the bailout id from the stack.
Register bailout_id = x2;
__ Peek(bailout_id, kSavedRegistersAreaSize);
Register code_object = x3;
Register fp_to_sp = x4;
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, and correct one word for bailout id.
__ Add(fp_to_sp, masm()->StackPointer(),
kSavedRegistersAreaSize + (1 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
// - x4: fp-to-sp delta
__ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));
{
// Call Deoptimizer::New().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register x0.
Register deoptimizer = x0;
// Get the input frame descriptor pointer.
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
CPURegList copy_to_input = saved_registers;
for (int i = 0; i < saved_registers.Count(); i++) {
// TODO(all): Look for opportunities to optimize this by using ldp/stp.
__ Peek(x2, i * kPointerSize);
CPURegister current_reg = copy_to_input.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Str(x2, MemOperand(x1, offset));
}
// Copy FP registers to the input frame.
for (int i = 0; i < saved_fp_registers.Count(); i++) {
// TODO(all): Look for opportunities to optimize this by using ldp/stp.
int dst_offset = FrameDescription::double_registers_offset() +
(i * kDoubleSize);
int src_offset = kFPRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
__ Str(x2, MemOperand(x1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
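// (X and D registers are both eight bytes wide, so dividing the combined
// save area size by kXRegSizeInBytes yields the total number of slots.)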
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
__ Add(unwind_limit, unwind_limit, __ StackPointer());
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
Label pop_loop;
Label pop_loop_header;
__ B(&pop_loop_header);
__ Bind(&pop_loop);
__ Pop(x4);
__ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
__ Bind(&pop_loop_header);
__ Cmp(unwind_limit, __ StackPointer());
__ B(ne, &pop_loop);
// Compute the output frame in the deoptimizer.
__ Push(x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
__ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
__ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
__ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
__ B(&outer_loop_header);
__ Bind(&outer_push_loop);
Register current_frame = x2;
__ Ldr(current_frame, MemOperand(x0, 0));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
__ B(&inner_loop_header);
__ Bind(&inner_push_loop);
__ Sub(x3, x3, kPointerSize);
__ Add(x6, current_frame, x3);
__ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
__ Push(x7);
__ Bind(&inner_loop_header);
__ Cbnz(x3, &inner_push_loop);
__ Add(x0, x0, kPointerSize);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
int src_offset = FrameDescription::double_registers_offset();
while (!saved_fp_registers.IsEmpty()) {
const CPURegister reg = saved_fp_registers.PopLowestIndex();
__ Ldr(reg, MemOperand(x1, src_offset));
src_offset += kDoubleSize;
}
// Push state from the last output frame.
__ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
__ Push(x6);
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
// TODO(all): This code needs to be revisited. We probably don't need to
// restore all the registers, as fullcodegen does not keep live values in
// registers (note that at least fp must be restored, though).
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
ASSERT(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
// We don't need to restore x7 as it will be clobbered later to hold the
// continuation address.
Register continuation = x7;
saved_registers.Remove(continuation);
while (!saved_registers.IsEmpty()) {
// TODO(all): Look for opportunities to optimize this by using ldp.
CPURegister current_reg = saved_registers.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Ldr(current_reg, MemOperand(last_output_frame, offset));
}
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
__ InitializeRootRegister();
__ Br(continuation);
}
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
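// (Each entry materializes its id with a movz and branches to the shared
// deoptimization code; see TableEntryGenerator::GeneratePrologue below.)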
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
Label done;
{
InstructionAccurateScope scope(masm());
// The number of entries will never exceed kMaxNumberOfEntries.
// As long as kMaxNumberOfEntries is a valid 16-bit immediate, a movz
// instruction can be used to load the entry id.
ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movz(masm()->Tmp0(), i);
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
}
__ Bind(&done);
// TODO(all): We need to add some kind of assertion to verify that Tmp0()
// is not clobbered by Push.
__ Push(masm()->Tmp0());
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
#undef __
} } // namespace v8::internal

1854
deps/v8/src/a64/disasm-a64.cc

File diff suppressed because it is too large

115
deps/v8/src/a64/disasm-a64.h

@@ -0,0 +1,115 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DISASM_A64_H
#define V8_A64_DISASM_A64_H
#include "v8.h"
#include "globals.h"
#include "utils.h"
#include "instructions-a64.h"
#include "decoder-a64.h"
namespace v8 {
namespace internal {
class Disassembler: public DecoderVisitor {
public:
Disassembler();
Disassembler(char* text_buffer, int buffer_size);
virtual ~Disassembler();
char* GetOutput();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
protected:
virtual void ProcessOutput(Instruction* instr);
void Format(Instruction* instr, const char* mnemonic, const char* format);
void Substitute(Instruction* instr, const char* string);
int SubstituteField(Instruction* instr, const char* format);
int SubstituteRegisterField(Instruction* instr, const char* format);
int SubstituteImmediateField(Instruction* instr, const char* format);
int SubstituteLiteralField(Instruction* instr, const char* format);
int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
int SubstituteShiftField(Instruction* instr, const char* format);
int SubstituteExtendField(Instruction* instr, const char* format);
int SubstituteConditionField(Instruction* instr, const char* format);
int SubstitutePCRelAddressField(Instruction* instr, const char* format);
int SubstituteBranchTargetField(Instruction* instr, const char* format);
int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
int SubstitutePrefetchField(Instruction* instr, const char* format);
int SubstituteBarrierField(Instruction* instr, const char* format);
bool RdIsZROrSP(Instruction* instr) const {
return (instr->Rd() == kZeroRegCode);
}
bool RnIsZROrSP(Instruction* instr) const {
return (instr->Rn() == kZeroRegCode);
}
bool RmIsZROrSP(Instruction* instr) const {
return (instr->Rm() == kZeroRegCode);
}
bool RaIsZROrSP(Instruction* instr) const {
return (instr->Ra() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
void ResetOutput();
void AppendToOutput(const char* string, ...);
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
};
class PrintDisassembler: public Disassembler {
public:
explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
~PrintDisassembler() { }
virtual void ProcessOutput(Instruction* instr);
private:
FILE *stream_;
};
} } // namespace v8::internal
#endif // V8_A64_DISASM_A64_H

41
deps/v8/include/v8-defaults.h → deps/v8/src/a64/frames-a64.cc

@@ -25,30 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Removed (deps/v8/include/v8-defaults.h):
#ifndef V8_V8_DEFAULTS_H_
#define V8_V8_DEFAULTS_H_
#include "v8.h"
/**
* Default configuration support for the V8 JavaScript engine.
*/
namespace v8 {
/**
* Configures the constraints with reasonable default values based on the
* capabilities of the current device the VM is running on.
*/
bool V8_EXPORT ConfigureResourceConstraintsForCurrentPlatform(
ResourceConstraints* constraints);
/**
* Convenience function which performs SetResourceConstraints with the settings
* returned by ConfigureResourceConstraintsForCurrentPlatform.
*/
bool V8_EXPORT SetDefaultResourceConstraintsForCurrentPlatform();
} // namespace v8
#endif // V8_V8_DEFAULTS_H_

Added (deps/v8/src/a64/frames-a64.cc):
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "assembler.h"
#include "assembler-a64.h"
#include "assembler-a64-inl.h"
#include "frames.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Object*& ExitFrame::constant_pool_slot() const {
UNREACHABLE();
return Memory::Object_at(NULL);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

131
deps/v8/src/a64/frames-a64.h

@@ -0,0 +1,131 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "a64/constants-a64.h"
#include "a64/assembler-a64.h"
#ifndef V8_A64_FRAMES_A64_H_
#define V8_A64_FRAMES_A64_H_
namespace v8 {
namespace internal {
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
// TODO(all): Refine this number.
const int kNumSafepointRegisters = 32;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
#define kNumSafepointSavedRegisters \
CPURegList::GetSafepointSavedRegisters().Count();
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kCallerSPDisplacement = 2 * kPointerSize;
static const int kCallerPCOffset = 1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
static const int kSPOffset = -1 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
static const int kLastExitFrameField = kCodeOffset;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
// There are two words on the stack (saved fp and saved lr) between fp and
// the arguments.
static const int kLastParameterOffset = 2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
};
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kLengthOffset = -4 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InternalFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
inline void StackHandler::SetFp(Address slot, Address fp) {
Memory::Address_at(slot) = fp;
}
} } // namespace v8::internal
#endif // V8_A64_FRAMES_A64_H_

5010
deps/v8/src/a64/full-codegen-a64.cc

File diff suppressed because it is too large

1413
deps/v8/src/a64/ic-a64.cc

File diff suppressed because it is too large

334
deps/v8/src/a64/instructions-a64.cc

@@ -0,0 +1,334 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#define A64_DEFINE_FP_STATICS
#include "a64/instructions-a64.h"
#include "a64/assembler-a64-inl.h"
namespace v8 {
namespace internal {
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_s:
case LDR_d: return true;
default: return false;
}
}
}
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_s:
case STR_d: return true;
default: return false;
}
}
}
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
ASSERT(width <= 64);
rotate &= 63;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
}
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
uint64_t result = value & ((1UL << width) - 1UL);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
return result;
}
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case, specifically where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
int64_t n = BitN();
int64_t imm_s = ImmSetBits();
int64_t imm_r = ImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
//
// N imms immr size S R
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
// A pattern is constructed of size bits, where the least significant S+1
// bits are set. The pattern is rotated right by R, and repeated across a
// 32 or 64-bit value, depending on destination register width.
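// For example (X register, illustrative values): N=0, imms=0b111000 and
// immr=0b000001 select size 4 with S=0 and R=1; the pattern 0b0001 is
// rotated right by one to 0b1000 and repeated across the register, giving
// 0x8888888888888888.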
//
if (n == 1) {
if (imm_s == 0x3F) {
return 0;
}
uint64_t bits = (1UL << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1F) {
return 0;
}
for (int width = 0x20; width >= 0x2; width >>= 1) {
if ((imm_s & width) == 0) {
int mask = width - 1;
if ((imm_s & mask) == mask) {
return 0;
}
uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
}
}
}
UNREACHABLE();
return 0;
}
float Instruction::ImmFP32() {
// ImmFP: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
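// For example, an ImmFP field of 0x70 expands to 0x3f800000, i.e. 1.0f.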
uint32_t bits = ImmFP();
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return rawbits_to_float(result);
}
double Instruction::ImmFP64() {
// ImmFP: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
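// For example, an ImmFP field of 0x70 expands to 0x3ff0000000000000, i.e. 1.0.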
uint32_t bits = ImmFP();
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return rawbits_to_double(result);
}
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
switch (op) {
case STP_x:
case LDP_x:
case STP_d:
case LDP_d: return LSDoubleWord;
default: return LSWord;
}
}
ptrdiff_t Instruction::ImmPCOffset() {
ptrdiff_t offset;
if (IsPCRelAddressing()) {
// PC-relative addressing. Only ADR is supported.
offset = ImmPCRel();
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
} else {
// Load literal (offset from PC).
ASSERT(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bit
// registers.
offset = ImmLLiteral() << kInstructionSizeLog2;
}
return offset;
}
Instruction* Instruction::ImmPCOffsetTarget() {
return this + ImmPCOffset();
}
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int32_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
int offset = target - this;
return IsValidImmPCOffset(BranchType(), offset);
}
void Instruction::SetImmPCOffsetTarget(Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else {
SetImmLLiteral(target);
}
}
void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
ASSERT(Mask(PCRelAddressingMask) == ADR);
Instr imm = Assembler::ImmPCRelAddress(target - this);
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
void Instruction::SetBranchImmTarget(Instruction* target) {
ASSERT(((target - this) & 3) == 0);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
int offset = (target - this) >> kInstructionSizeLog2;
switch (BranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
imm_mask = ImmCondBranch_mask;
break;
}
case UncondBranchType: {
branch_imm = Assembler::ImmUncondBranch(offset);
imm_mask = ImmUncondBranch_mask;
break;
}
case CompareBranchType: {
branch_imm = Assembler::ImmCmpBranch(offset);
imm_mask = ImmCmpBranch_mask;
break;
}
case TestBranchType: {
branch_imm = Assembler::ImmTestBranch(offset);
imm_mask = ImmTestBranch_mask;
break;
}
default: UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
void Instruction::SetImmLLiteral(Instruction* source) {
ASSERT(((source - this) & 3) == 0);
int offset = (source - this) >> kLiteralEntrySizeLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
ASSERT(IsInlineData());
uint64_t payload = ImmMoveWide();
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
return payload;
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

516
deps/v8/src/a64/instructions-a64.h

@@ -0,0 +1,516 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_INSTRUCTIONS_A64_H_
#define V8_A64_INSTRUCTIONS_A64_H_
#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"
#include "a64/utils-a64.h"
namespace v8 {
namespace internal {
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
// The following macros initialize a float/double variable with a bit pattern
// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(A64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(A64_DEFINE_FP_STATICS)
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
// A similar value, but as a quiet NaN.
DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE
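// LSDataSize encodes the log2 of the memory access size in bytes: byte (1),
// halfword (2), word (4) and doubleword (8).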
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
LSWord = 2,
LSDoubleWord = 3
};
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
CondBranchType = 1,
UncondBranchType = 2,
CompareBranchType = 3,
TestBranchType = 4
};
enum AddrMode {
Offset,
PreIndex,
PostIndex
};
enum FPRounding {
// The first four values are encodable directly by FPCR<RMode>.
FPTieEven = 0x0,
FPPositiveInfinity = 0x1,
FPNegativeInfinity = 0x2,
FPZero = 0x3,
// The final rounding mode is only available when explicitly specified by the
// instruction (such as with fcvta). It cannot be set in FPCR.
FPTieAway
};
enum Reg31Mode {
Reg31IsStackPointer,
Reg31IsZeroRegister
};
// Instructions. ---------------------------------------------------------------
class Instruction {
public:
Instr InstructionBits() const {
Instr bits;
memcpy(&bits, this, sizeof(bits));
return bits;
}
void SetInstructionBits(Instr new_instr) {
memcpy(this, &new_instr, sizeof(new_instr));
}
int Bit(int pos) const {
return (InstructionBits() >> pos) & 1;
}
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
}
int32_t SignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return signed_bitextract_32(msb, lsb, bits);
}
Instr Mask(uint32_t mask) const {
return InstructionBits() & mask;
}
Instruction* following(int count = 1) {
return this + count * kInstructionSize;
}
Instruction* preceding(int count = 1) {
return this - count * kInstructionSize;
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int64_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
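// (ADR splits its 21-bit signed byte offset into a 2-bit low field and a
// 19-bit high field in the instruction encoding.)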
int ImmPCRel() const {
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width-1, 0, offset);
}
uint64_t ImmLogical();
float ImmFP32();
double ImmFP64();
LSDataSize SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
bool IsTestBranch() const {
return Mask(TestBranchFMask) == TestBranchFixed;
}
bool IsLdrLiteral() const {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsLdrLiteralX() const {
return Mask(LoadLiteralMask) == LDR_x_lit;
}
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
// Match any loads or stores, including pairs.
bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
// Match any loads, including pairs.
bool IsLoad() const;
// Match any stores, including pairs.
bool IsStore() const;
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
// The following instructions use csp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
// Otherwise, r31 is the zero register.
if (IsAddSubImmediate() || IsAddSubExtended()) {
if (Mask(AddSubSetFlagsBit)) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into csp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
return Reg31IsZeroRegister;
}
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
// The following instructions use csp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
// Otherwise, r31 is the zero register.
if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
return Reg31IsStackPointer;
}
return Reg31IsZeroRegister;
}
ImmBranchType BranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
return UncondBranchType;
} else if (IsCompareBranch()) {
return CompareBranchType;
} else if (IsTestBranch()) {
return TestBranchType;
} else {
return UnknownBranchType;
}
}
static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
switch (branch_type) {
case UncondBranchType:
return ImmUncondBranch_width;
case CondBranchType:
return ImmCondBranch_width;
case CompareBranchType:
return ImmCmpBranch_width;
case TestBranchType:
return ImmTestBranch_width;
default:
UNREACHABLE();
return 0;
}
}
// The range of the branch instruction, expressed as 'instr +- range'.
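// For example, conditional branches encode a 19-bit instruction offset, so
// their range works out to +-(1MB - kInstructionSize).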
static int32_t ImmBranchRange(ImmBranchType branch_type) {
return
(1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
kInstructionSize;
}
int ImmBranch() const {
switch (BranchType()) {
case CondBranchType: return ImmCondBranch();
case UncondBranchType: return ImmUncondBranch();
case CompareBranchType: return ImmCmpBranch();
case TestBranchType: return ImmTestBranch();
default: UNREACHABLE();
}
return 0;
}
bool IsBranchAndLinkToRegister() const {
return Mask(UnconditionalBranchToRegisterMask) == BLR;
}
bool IsMovz() const {
return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
(Mask(MoveWideImmediateMask) == MOVZ_w);
}
bool IsMovk() const {
return (Mask(MoveWideImmediateMask) == MOVK_x) ||
(Mask(MoveWideImmediateMask) == MOVK_w);
}
bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
bool IsNop(int n) {
// A marking nop is an instruction
// mov r<n>, r<n>
// which is encoded as
// orr r<n>, xzr, r<n>
return (Mask(LogicalShiftedMask) == ORR_x) &&
(Rd() == Rm()) &&
(Rd() == n);
}
// Find the PC offset encoded in this instruction. 'this' may be a branch or
// a PC-relative addressing instruction.
// The offset returned is unscaled.
ptrdiff_t ImmPCOffset();
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
uint8_t* LiteralAddress() {
int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
return reinterpret_cast<uint8_t*>(this) + offset;
}
uint32_t Literal32() {
uint32_t literal;
memcpy(&literal, LiteralAddress(), sizeof(literal));
return literal;
}
uint64_t Literal64() {
uint64_t literal;
memcpy(&literal, LiteralAddress(), sizeof(literal));
return literal;
}
float LiteralFP32() {
return rawbits_to_float(Literal32());
}
double LiteralFP64() {
return rawbits_to_double(Literal64());
}
Instruction* NextInstruction() {
return this + kInstructionSize;
}
Instruction* InstructionAtOffset(int64_t offset) {
ASSERT(IsAligned(reinterpret_cast<uintptr_t>(this) + offset,
kInstructionSize));
return this + offset;
}
template<typename T> static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instructions sequences generated by the
// MacroAssembler.
class InstructionSequence : public Instruction {
public:
static InstructionSequence* At(Address address) {
return reinterpret_cast<InstructionSequence*>(address);
}
// Sequences generated by MacroAssembler::InlineData().
bool IsInlineData() const;
uint64_t InlineData() const;
};
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.
// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;
// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;
// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Parameters are stored in A64 registers as if the printf pseudo-instruction
// was a call to the real printf method:
//
// x0: The format string, then either of:
// x1-x7: Optional arguments.
// d0-d7: Optional arguments.
//
// Floating-point and integer arguments are passed in separate sets of
// registers in AAPCS64 (even for varargs functions), so it is not possible to
// determine the type or location of each argument without some information
// about the values that were passed in. This information could be retrieved
// from the printf format string, but the format string is not trivial to
// parse so we encode the relevant information with the HLT instruction.
// - Type
// Either kRegister or kFPRegister, but stored as a uint32_t because there's
// no way to guarantee the size of the CPURegister::RegisterType enum.
const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
const unsigned kPrintfLength = 2 * kInstructionSize;
// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a NULL-terminated ASCII string, padded to
// kInstructionSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
const unsigned kDebugCodeOffset = 1 * kInstructionSize;
const unsigned kDebugParamsOffset = 2 * kInstructionSize;
const unsigned kDebugMessageOffset = 3 * kInstructionSize;
// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
// before every instruction for the specified LOG_ parameters.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
NO_PARAM = 0,
BREAK = 1 << 0,
LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
LOG_REGS = 1 << 2, // Log general purpose registers.
LOG_FP_REGS = 1 << 3, // Log floating-point registers.
LOG_SYS_REGS = 1 << 4, // Log the status flags.
LOG_WRITE = 1 << 5, // Log any memory write.
LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
// Trace control.
TRACE_ENABLE = 1 << 6,
TRACE_DISABLE = 2 << 6,
TRACE_OVERRIDE = 3 << 6
};
} } // namespace v8::internal
#endif // V8_A64_INSTRUCTIONS_A64_H_

618
deps/v8/src/a64/instrument-a64.cc

@@ -0,0 +1,618 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "a64/instrument-a64.h"
namespace v8 {
namespace internal {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
ASSERT(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
}
void Counter::Enable() {
enabled_ = true;
}
void Counter::Disable() {
enabled_ = false;
}
bool Counter::IsEnabled() {
return enabled_;
}
void Counter::Increment() {
if (enabled_) {
count_++;
}
}
uint64_t Counter::count() {
uint64_t result = count_;
if (type_ == Gauge) {
// If the counter is a Gauge, reset the count after reading.
count_ = 0;
}
return result;
}
const char* Counter::name() {
return name_;
}
CounterType Counter::type() {
return type_;
}
typedef struct {
const char* name;
CounterType type;
} CounterDescriptor;
static const CounterDescriptor kCounterList[] = {
{"Instruction", Cumulative},
{"Move Immediate", Gauge},
{"Add/Sub DP", Gauge},
{"Logical DP", Gauge},
{"Other Int DP", Gauge},
{"FP DP", Gauge},
{"Conditional Select", Gauge},
{"Conditional Compare", Gauge},
{"Unconditional Branch", Gauge},
{"Compare and Branch", Gauge},
{"Test and Branch", Gauge},
{"Conditional Branch", Gauge},
{"Load Integer", Gauge},
{"Load FP", Gauge},
{"Load Pair", Gauge},
{"Load Literal", Gauge},
{"Store Integer", Gauge},
{"Store FP", Gauge},
{"Store Pair", Gauge},
{"PC Addressing", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) {
// Set up the output stream. If datafile is non-NULL, use that file. If it
// can't be opened, or datafile is NULL, use stderr.
if (datafile != NULL) {
output_stream_ = fopen(datafile, "w");
if (output_stream_ == NULL) {
fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
output_stream_ = stderr;
}
}
static const int num_counters =
sizeof(kCounterList) / sizeof(CounterDescriptor);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
// Construct Counter objects from counter description array.
for (int i = 0; i < num_counters; i++) {
Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
counters_.push_back(counter);
}
DumpCounterNames();
}
Instrument::~Instrument() {
// Dump any remaining instruction data to the output file.
DumpCounters();
// Free all the counter objects.
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
delete *it;
}
if (output_stream_ != stderr) {
fclose(output_stream_);
}
}
void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
ASSERT(counter->type() == Cumulative);
counter->Increment();
if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
DumpCounters();
}
}
void Instrument::DumpCounters() {
// Iterate through the counter objects, dumping their values to the output
// stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::DumpCounterNames() {
// Iterate through the counter objects, dumping the counter names to the
// output stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%s,", (*it)->name());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::HandleInstrumentationEvent(unsigned event) {
switch (event) {
case InstrumentStateEnable: Enable(); break;
case InstrumentStateDisable: Disable(); break;
default: DumpEventMarker(event);
}
}
void Instrument::DumpEventMarker(unsigned marker) {
// Dump an event marker to the output stream as a specially formatted comment
// line.
static Counter* counter = GetCounter("Instruction");
fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
(marker >> 8) & 0xff, counter->count());
}
Counter* Instrument::GetCounter(const char* name) {
// Get a Counter object by name from the counter list.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
if (strcmp((*it)->name(), name) == 0) {
return *it;
}
}
// A Counter by that name does not exist: print an error message to stderr
// and the output file, and exit.
static const char* error_message =
"# Error: Unknown counter \"%s\". Exiting.\n";
fprintf(stderr, error_message, name);
fprintf(output_stream_, error_message, name);
exit(1);
}
void Instrument::Enable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Enable();
}
}
void Instrument::Disable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Disable();
}
}
void Instrument::VisitPCRelAddressing(Instruction* instr) {
Update();
static Counter* counter = GetCounter("PC Addressing");
counter->Increment();
}
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
}
void Instrument::VisitLogicalImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitMoveWideImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Move Immediate");
if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
unsigned imm = instr->ImmMoveWide();
HandleInstrumentationEvent(imm);
} else {
counter->Increment();
}
}
void Instrument::VisitBitfield(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitExtract(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitUnconditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitCompareBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Compare and Branch");
counter->Increment();
}
void Instrument::VisitTestBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Test and Branch");
counter->Increment();
}
void Instrument::VisitConditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Branch");
counter->Increment();
}
void Instrument::VisitSystem(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitException(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::InstrumentLoadStorePair(Instruction* instr) {
static Counter* load_pair_counter = GetCounter("Load Pair");
static Counter* store_pair_counter = GetCounter("Store Pair");
if (instr->Mask(LoadStorePairLBit) != 0) {
load_pair_counter->Increment();
} else {
store_pair_counter->Increment();
}
}
void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
counter->Increment();
}
void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_int_counter = GetCounter("Load Integer");
static Counter* store_int_counter = GetCounter("Store Integer");
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreOpMask)) {
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
case STR_x: store_int_counter->Increment(); break;
case STR_s: // Fall through.
case STR_d: store_fp_counter->Increment(); break;
case LDRB_w: // Fall through.
case LDRH_w: // Fall through.
case LDR_w: // Fall through.
case LDR_x: // Fall through.
case LDRSB_x: // Fall through.
case LDRSH_x: // Fall through.
case LDRSW_x: // Fall through.
case LDRSB_w: // Fall through.
case LDRSH_w: load_int_counter->Increment(); break;
case LDR_s: // Fall through.
case LDR_d: load_fp_counter->Increment(); break;
default: UNREACHABLE();
}
}
void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLogicalShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitAddSubShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
}
void Instrument::VisitAddSubWithCarry(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitFPCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPConditionalCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitFPConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitFPImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPIntegerConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitUnallocated(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitUnimplemented(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
} } // namespace v8::internal

108
deps/v8/src/a64/instrument-a64.h

@@ -0,0 +1,108 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_INSTRUMENT_A64_H_
#define V8_A64_INSTRUMENT_A64_H_
#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
#include "a64/constants-a64.h"
#include "a64/instrument-a64.h"
namespace v8 {
namespace internal {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
enum InstrumentState {
InstrumentStateDisable = 0,
InstrumentStateEnable = 1
};
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
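// In other words: a Gauge counter resets after each read, so successive
// dumps report per-sample-period counts, while a Cumulative counter keeps
// its running total across the whole run.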
class Counter {
public:
explicit Counter(const char* name, CounterType type = Gauge);
void Increment();
void Enable();
void Disable();
bool IsEnabled();
uint64_t count();
const char* name();
CounterType type();
private:
char name_[kCounterNameMaxLength];
uint64_t count_;
bool enabled_;
CounterType type_;
};
class Instrument: public DecoderVisitor {
public:
explicit Instrument(const char* datafile = NULL,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
void Update();
void Enable();
void Disable();
void DumpCounters();
void DumpCounterNames();
void DumpEventMarker(unsigned marker);
void HandleInstrumentationEvent(unsigned event);
Counter* GetCounter(const char* name);
void InstrumentLoadStore(Instruction* instr);
void InstrumentLoadStorePair(Instruction* instr);
std::list<Counter*> counters_;
FILE *output_stream_;
uint64_t sample_period_;
};
} } // namespace v8::internal
#endif // V8_A64_INSTRUMENT_A64_H_
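For orientation, here is a minimal sketch of how this visitor is driven. It is not part of the diff; it assumes the Decoder::AppendVisitor and Decoder::Decode interface from a64/decoder-a64.h and Instruction::NextInstruction from a64/instructions-a64.h.

static void CountInstructions(Instruction* first, Instruction* last) {
  Decoder decoder;
  Instrument instrument("instrument.log");  // default sampling period
  decoder.AppendVisitor(&instrument);
  // Decoding an instruction invokes the matching Visit* function, which
  // bumps a counter; every sample_period instructions the counters are
  // dumped to the data file.
  for (Instruction* pc = first; pc != last; pc = pc->NextInstruction()) {
    decoder.Decode(pc);
  }
}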

2449
deps/v8/src/a64/lithium-a64.cc

File diff suppressed because it is too large

2967
deps/v8/src/a64/lithium-a64.h

File diff suppressed because it is too large

5692
deps/v8/src/a64/lithium-codegen-a64.cc

File diff suppressed because it is too large

473
deps/v8/src/a64/lithium-codegen-a64.h

@@ -0,0 +1,473 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
#define V8_A64_LITHIUM_CODEGEN_A64_H_
#include "a64/lithium-a64.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
Scope* scope() const { return scope_; }
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub() ||
info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
LinkRegisterStatus GetLinkRegisterState() const {
return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
}
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32I(LOperand* op);
Operand ToOperand32U(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// TODO(jbramley): Examine these helpers and check that they make sense.
// IsInteger32Constant returns true for smi constants, for example.
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
DoubleRegister ToDoubleRegister(LOperand* op) const;
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
// Return a double scratch register which can be used locally
// when generating code for a lithium instruction.
DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
Label* exit,
Label* allocation_entry);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredTaggedToI(LTaggedToI* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void DoGap(LGap* instr);
// Generic version of EmitBranch. It contains some code to avoid emitting a
// branch on the next emitted basic block where we could just fall through.
// You shouldn't use it directly; consider one of the helpers such as
// LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch instead.
template<class InstrType>
void EmitBranchGeneric(InstrType instr,
const BranchGenerator& branch);
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
void EmitCompareAndBranch(InstrType instr,
Condition condition,
const Register& lhs,
const Operand& rhs);
template<class InstrType>
void EmitTestAndBranch(InstrType instr,
Condition condition,
const Register& value,
uint64_t mask);
template<class InstrType>
void EmitBranchIfNonZeroNumber(InstrType instr,
const FPRegister& value,
const FPRegister& scratch);
template<class InstrType>
void EmitBranchIfHeapNumber(InstrType instr,
const Register& value);
template<class InstrType>
void EmitBranchIfRoot(InstrType instr,
const Register& value,
Heap::RootListIndex index);
// Emits optimized code to deep-copy the contents of statically known object
// graphs (e.g. object literal boilerplate). Expects a pointer to the
// allocated destination object in the result register, and a pointer to the
// source object in the source register.
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
Register scratch,
int* offset,
AllocationSiteMode mode);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split into the true and false
// labels should be made, to optimize fallthrough.
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
MemOperand BuildSeqStringOperand(Register string,
Register temp,
LOperand* index,
String::Encoding encoding);
Deoptimizer::BailoutType DeoptimizeHeader(
LEnvironment* environment,
Deoptimizer::BailoutType* override_bailout_type);
void Deoptimize(LEnvironment* environment);
void Deoptimize(LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void DeoptimizeIfZero(Register rt, LEnvironment* environment);
void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
void DeoptimizeIfRoot(Register rt,
Heap::RootListIndex index,
LEnvironment* environment);
void DeoptimizeIfNotRoot(Register rt,
Heap::RootListIndex index,
LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
Register scratch,
bool key_is_smi,
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
int additional_index);
void CalcKeyedArrayBaseRegister(Register base,
Register elements,
Register key,
bool key_is_tagged,
ElementsKind elements_kind);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
// Generate a direct call to a known function.
// If the function is already loaded into x1 by the caller, function_reg may
// be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
// automatically load it.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
Register function_reg = NoReg);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) V8_OVERRIDE;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table itself is
// emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
case Safepoint::kWithRegisters:
codegen_->masm_->PushSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PushSafepointRegisters();
codegen_->masm_->PushSafepointFPRegisters();
break;
default:
UNREACHABLE();
}
}
~PushSafepointRegistersScope() {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
case Safepoint::kWithRegisters:
codegen_->masm_->PopSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PopSafepointFPRegisters();
codegen_->masm_->PopSafepointRegisters();
break;
default:
UNREACHABLE();
}
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds and EmitInverted() emits
// the branch when the inverted condition holds.
//
// For actual examples of condition see the concrete implementation in
// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
public:
explicit BranchGenerator(LCodeGen* codegen)
: codegen_(codegen) { }
virtual ~BranchGenerator() { }
virtual void Emit(Label* label) const = 0;
virtual void EmitInverted(Label* label) const = 0;
protected:
MacroAssembler* masm() const { return codegen_->masm(); }
LCodeGen* codegen_;
};
} } // namespace v8::internal
#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
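The concrete BranchGenerator subclasses referenced above (BranchOnCondition, CompareAndBranch, ...) live in lithium-codegen-a64.cc, whose diff is suppressed above. As a rough sketch of the shape EmitBranchGeneric expects, a condition-based generator could look like this; the B and InvertCondition signatures are assumptions, not the diff's code:

class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen), cond_(cond) { }
  virtual void Emit(Label* label) const {
    // Branch when the condition holds.
    masm()->B(cond_, label);  // assumed signature
  }
  virtual void EmitInverted(Label* label) const {
    // Branch when the condition does not hold, so the caller can fall
    // through on the original condition.
    if (cond_ != al) masm()->B(InvertCondition(cond_), label);
  }
 private:
  Condition cond_;
};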

326
deps/v8/src/a64/lithium-gap-resolver-a64.cc

@@ -0,0 +1,326 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "a64/lithium-codegen-a64.h"
namespace v8 {
namespace internal {
// We use the root register to spill a value while breaking a cycle in parallel
// moves. We don't need access to roots while resolving the move list, and
// using the root register has two advantages:
//  - It is not in the crankshaft allocatable registers list, so it can't
//    interfere with any of the moves we are resolving.
//  - We don't need to push it on the stack, as we can reload it with its value
//    once we have resolved a cycle.
#define kSavedValue root
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL), need_to_restore_root_(false) { }
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found when we reach this move again.
PerformMove(i);
if (in_cycle_) RestoreValue();
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
if (!move.IsEliminated()) {
ASSERT(move.source()->IsConstantOperand());
EmitMove(i);
}
}
if (need_to_restore_root_) {
ASSERT(kSavedValue.Is(root));
__ InitializeRootRegister();
need_to_restore_root_ = false;
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
LMoveOperands& current_move = moves_[index];
ASSERT(!current_move.IsPending());
ASSERT(!current_move.IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
LOperand* destination = current_move.destination();
current_move.set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
current_move.set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
void LGapResolver::BreakCycle(int index) {
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
// We use a register that is not allocatable by crankshaft to break the cycle,
// so that it cannot interfere with the moves we are resolving.
ASSERT(!kSavedValue.IsAllocatable());
need_to_restore_root_ = true;
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
// broken and we can perform the other moves.
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ Mov(kSavedValue, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ Ldr(kSavedValue, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
// TODO(all): We should use a double register to store the value to avoid
// the penalty of the mov across register banks. We are going to reserve
// d31 to hold the 0.0 value. We could clobber this register while breaking
// the cycle and restore it afterwards, as we do with the root register.
// LGapResolver::RestoreValue() will also need to be updated when we do
// that.
__ Fmov(kSavedValue, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ Ldr(kSavedValue, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// Mark this move as resolved.
// This move will be actually performed by moving the saved value to this
// move's destination in LGapResolver::RestoreValue().
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
if (saved_destination_->IsRegister()) {
__ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
} else if (saved_destination_->IsStackSlot()) {
__ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ Mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
__ Str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ Ldr(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
EmitStackSlotMove(index);
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsSmi(constant_source)) {
__ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ Mov(dst, cgen_->ToInteger32(constant_source));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
__ Fmov(result, cgen_->ToDouble(constant_source));
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
need_to_restore_root_ = true;
if (cgen_->IsSmi(constant_source)) {
__ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
} else {
__ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
}
__ Str(kSavedValue, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
DoubleRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ Str(src, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
MemOperand src = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ Ldr(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
EmitStackSlotMove(index);
}
} else {
UNREACHABLE();
}
// The move has been emitted, we can eliminate it.
moves_[index].Eliminate();
}
void LGapResolver::EmitStackSlotMove(int index) {
// We need a temp register to perform a stack slot to stack slot move, and
// the register must not be involved in breaking cycles.
// Use the Crankshaft double scratch register as the temporary.
DoubleRegister temp = crankshaft_fp_scratch;
LOperand* src = moves_[index].source();
LOperand* dst = moves_[index].destination();
ASSERT(src->IsStackSlot());
ASSERT(dst->IsStackSlot());
__ Ldr(temp, cgen_->ToMemOperand(src));
__ Str(temp, cgen_->ToMemOperand(dst));
}
} } // namespace v8::internal
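To make the cycle-breaking path concrete, here is an illustrative trace (not part of the diff) of Resolve() on the two-move cycle {x0 -> x1, x1 -> x0}:

// PerformMove(0) marks move 0 pending and recurses into move 1. Move 1 is
// blocked by the pending root move (its destination x0 is that move's
// source), so BreakCycle(1) saves the blocked source:
//   Mov(root, x1)   // kSavedValue
// and eliminates move 1. Move 0 is then emitted normally:
//   Mov(x1, x0)
// Since in_cycle_ was set, RestoreValue() completes the eliminated move:
//   Mov(x0, root)
// and Resolve() finishes with InitializeRootRegister(), because
// need_to_restore_root_ was set.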

90
deps/v8/src/a64/lithium-gap-resolver-a64.h

@@ -0,0 +1,90 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#include "v8.h"
#include "lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Emit a move from one stack slot to another.
void EmitStackSlotMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
// We use the root register as a scratch in a few places. When that happens,
// this flag is set to indicate that it needs to be restored.
bool need_to_restore_root_;
};
} } // namespace v8::internal
#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_

1647
deps/v8/src/a64/macro-assembler-a64-inl.h

File diff suppressed because it is too large

4975
deps/v8/src/a64/macro-assembler-a64.cc

File diff suppressed because it is too large

2238
deps/v8/src/a64/macro-assembler-a64.h

File diff suppressed because it is too large

1730
deps/v8/src/a64/regexp-macro-assembler-a64.cc

File diff suppressed because it is too large

315
deps/v8/src/a64/regexp-macro-assembler-a64.h

@@ -0,0 +1,315 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#include "a64/assembler-a64.h"
#include "a64/assembler-a64-inl.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerA64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(unsigned c, Label* on_equal);
virtual void CheckCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame,
int start_offset,
const byte** input_start,
const byte** input_end);
private:
// Above the frame pointer - Stored registers and stack passed parameters.
// Callee-saved registers x19-x29, where x29 is the old frame pointer.
static const int kCalleeSavedRegisters = 0;
// Return address.
// It is placed above the 11 callee-saved registers.
static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameter placed by caller.
static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
static const int kStackBase = kDirectCall - kPointerSize;
static const int kOutputSize = kStackBase - kPointerSize;
static const int kInput = kOutputSize - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessCounter = kInput - kPointerSize;
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
// A capture is a 64 bit value holding two positions.
static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
// When initializing registers to a non-position value we can unroll
// the loop. This sets the limit on the number of registers to unroll.
static const int kNumRegistersToUnroll = 16;
// We are using x0 to x7 as a register cache. Each hardware register must
// contain one capture, that is, two 32 bit registers. We can cache at most
// 16 registers.
static const int kNumCachedRegisters = 16;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
// Location of a 32 bit position register.
MemOperand register_location(int register_index);
// Location of a 64 bit capture, combining two position registers.
MemOperand capture_location(int register_index, Register scratch);
// Register holding the current input position as negative offset from
// the end of the string.
Register current_input_offset() { return w21; }
// The register containing the current character after LoadCurrentCharacter.
Register current_character() { return w22; }
// Register holding address of the end of the input string.
Register input_end() { return x25; }
// Register holding address of the start of the input string.
Register input_start() { return x26; }
// Register holding the offset from the start of the string where we should
// start matching.
Register start_offset() { return w27; }
// Pointer to the output array's first element.
Register output_array() { return x28; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
Register backtrack_stackpointer() { return x23; }
// Register holding pointer to the current code object.
Register code_pointer() { return x20; }
// Register holding the value used for clearing capture registers.
Register non_position_value() { return w24; }
// The top 32 bits of this register repeat the clearing value, so it is
// stored twice. This is used for clearing more than one register at a time.
Register twice_non_position_value() { return x24; }
// Byte size of chars in the string to match (decided by the Mode argument)
int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Compares reg against immediate before calling BranchOrBacktrack.
// It makes use of the Cbz and Cbnz instructions.
void CompareAndBranchOrBacktrack(Register reg,
int immediate,
Condition condition,
Label* to);
inline void CallIf(Label* to, Condition condition);
// Save and restore the link register on the stack in a way that
// is GC-safe.
inline void SaveLinkRegister();
inline void RestoreLinkRegister();
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer by a word size and stores the register's value there.
inline void Push(Register source);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// and increments it by a word size.
inline void Pop(Register target);
// This state indicates where the register actually is.
enum RegisterState {
STACKED, // Resides in memory.
CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
CACHED_MSW // Most Significant Word of a 64 bit hardware register.
};
RegisterState GetRegisterState(int register_index) {
ASSERT(register_index >= 0);
if (register_index >= kNumCachedRegisters) {
return STACKED;
} else {
if ((register_index % 2) == 0) {
return CACHED_LSW;
} else {
return CACHED_MSW;
}
}
}
// Store helper that takes the state of the register into account.
inline void StoreRegister(int register_index, Register source);
// Returns a hardware W register that holds the value of the capture
// register.
//
// This function will try to use an existing cache register (w0-w7) for the
// result. Otherwise, it will load the value into maybe_result.
//
// If the returned register is anything other than maybe_result, calling code
// must not write to it.
inline Register GetRegister(int register_index, Register maybe_result);
// Returns the hardware register (x0-x7) holding the value of the capture
// register.
// This assumes that the state of the register is not STACKED.
inline Register GetCachedRegister(int register_index);
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).
Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
};
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
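A worked example of the register cache described above (illustrative, not part of the diff):

// Capture registers 0..15 are cached in x0-x7, two 32 bit halves per
// hardware register; registers 16 and up live in the frame.
//   GetRegisterState(4):  4 % 2 == 0, so CACHED_LSW (low word of x2).
//   GetRegisterState(5):  5 % 2 == 1, so CACHED_MSW (top word of x2).
//   GetRegisterState(16): 16 >= kNumCachedRegisters, so STACKED, i.e. held
//                         in the frame at kFirstRegisterOnStack and below.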

3414
deps/v8/src/a64/simulator-a64.cc

File diff suppressed because it is too large

868
deps/v8/src/a64/simulator-a64.h

@@ -0,0 +1,868 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_SIMULATOR_A64_H_
#define V8_A64_SIMULATOR_A64_H_
#include <stdarg.h>
#include <vector>
#include "v8.h"
#include "globals.h"
#include "utils.h"
#include "allocation.h"
#include "assembler.h"
#include "a64/assembler-a64.h"
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"
#include "a64/instrument-a64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
namespace v8 {
namespace internal {
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native A64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*a64_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type a64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<a64_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
return try_catch_address;
}
static void UnregisterCTryCatch() { }
};
#else // !defined(USE_SIMULATOR)
enum ReverseByteMode {
Reverse16 = 0,
Reverse32 = 1,
Reverse64 = 2
};
// The proper way to initialize a simulated system register (such as NZCV) is as
// follows:
// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
class SimSystemRegister {
public:
// The default constructor represents a register which has no writable bits.
// It is not possible to set its value to anything other than 0.
SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
uint32_t RawValue() const {
return value_;
}
void SetRawValue(uint32_t new_value) {
value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
}
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, value_);
}
int32_t SignedBits(int msb, int lsb) const {
return signed_bitextract_32(msb, lsb, value_);
}
void SetBits(int msb, int lsb, uint32_t bits);
// Default system register values.
static SimSystemRegister DefaultValueFor(SystemRegister id);
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
uint32_t Name() const { return Func(HighBit, LowBit); } \
void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
#undef DEFINE_WRITE_IGNORE_MASK
#undef DEFINE_GETTER
protected:
// Most system registers only implement a few of the bits in the word. Other
// bits are "read-as-zero, write-ignored". The write_ignore_mask argument
// describes the bits which are not modifiable.
SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
: value_(value), write_ignore_mask_(write_ignore_mask) { }
uint32_t value_;
uint32_t write_ignore_mask_;
};
// Represent a register (r0-r31, v0-v31).
template<int kSizeInBytes>
class SimRegisterBase {
public:
template<typename T>
void Set(T new_value, unsigned size = sizeof(T)) {
ASSERT(size <= kSizeInBytes);
ASSERT(size <= sizeof(new_value));
// All AArch64 registers are zero-extending: writing a W register clears
// the top bits of the corresponding X register.
memset(value_, 0, kSizeInBytes);
memcpy(value_, &new_value, size);
}
// Copy 'size' bytes of the register to the result, and zero-extend to fill
// the result.
template<typename T>
T Get(unsigned size = sizeof(T)) const {
ASSERT(size <= kSizeInBytes);
T result;
memset(&result, 0, sizeof(result));
memcpy(&result, value_, size);
return result;
}
protected:
uint8_t value_[kSizeInBytes];
};
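// Illustration of the zero-extending write modelled by Set() above (an
// assumed usage example, not part of the original file):
//   SimRegister x;
//   x.Set<uint64_t>(0xffffffffffffffffUL);  // X-sized write fills all bits
//   x.Set<uint32_t>(0x12345678);            // W-sized write clears top bits
//   x.Get<uint64_t>();                      // yields 0x0000000012345678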
typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
typedef SimRegisterBase<kDRegSizeInBytes> SimFPRegister; // v0-v31
class Simulator : public DecoderVisitor {
public:
explicit Simulator(Decoder* decoder,
Isolate* isolate = NULL,
FILE* stream = stderr);
~Simulator();
// System functions.
static void Initialize(Isolate* isolate);
static Simulator* current(v8::internal::Isolate* isolate);
class CallArgument;
// Call an arbitrary function taking an arbitrary number of arguments. The
// varargs list must be a set of arguments with type CallArgument, and
// terminated by CallArgument::End().
void CallVoid(byte* entry, CallArgument* args);
// Like CallVoid, but expect a return value.
int64_t CallInt64(byte* entry, CallArgument* args);
double CallDouble(byte* entry, CallArgument* args);
// V8 calls into generated JS code with 5 parameters and into
// generated RegExp code with 10 parameters. These are convenience functions,
// which set up the simulator state and grab the result on return.
int64_t CallJS(byte* entry,
byte* function_entry,
JSFunction* func,
Object* recv,
int64_t argc,
Object*** argv);
int64_t CallRegExp(byte* entry,
String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
// A wrapper class that stores an argument for one of the above Call
// functions.
//
// Only arguments up to 64 bits in size are supported.
class CallArgument {
public:
template<typename T>
explicit CallArgument(T argument) {
ASSERT(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = X_ARG;
}
explicit CallArgument(double argument) {
ASSERT(sizeof(argument) == sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
explicit CallArgument(float argument) {
// TODO(all): CallArgument(float) is untested, remove this check once
// tested.
UNIMPLEMENTED();
// Make the D register a NaN to try to trap errors if the callee expects a
// double. If it expects a float, the callee should ignore the top word.
ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
// Write the float payload to the S register.
ASSERT(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
// This indicates the end of the arguments list, so that CallArgument
// objects can be passed into varargs functions.
static CallArgument End() { return CallArgument(); }
int64_t bits() const { return bits_; }
bool IsEnd() const { return type_ == NO_ARG; }
bool IsX() const { return type_ == X_ARG; }
bool IsD() const { return type_ == D_ARG; }
private:
enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
// All arguments are aligned to at least 64 bits and we don't support
// passing bigger arguments, so the payload size can be fixed at 64 bits.
int64_t bits_;
CallArgumentType type_;
CallArgument() { type_ = NO_ARG; }
};
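// Assumed usage of the call interface above (illustrative, not part of the
// original file); the argument array must be terminated by
// CallArgument::End():
//   CallArgument args[] = { CallArgument(some_int64_value),
//                           CallArgument(1.0),
//                           CallArgument::End() };
//   int64_t result = simulator->CallInt64(entry, args);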
// Start the debugging command line.
void Debug();
bool GetValue(const char* desc, int64_t* value);
bool PrintValue(const char* desc);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
void ResetState();
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
// Run the simulator.
static const Instruction* kEndOfSimAddress;
void DecodeInstruction();
void Run();
void RunFrom(Instruction* start);
// Simulation helpers.
template <typename T>
void set_pc(T new_pc) {
ASSERT(sizeof(T) == sizeof(pc_));
memcpy(&pc_, &new_pc, sizeof(T));
pc_modified_ = true;
}
Instruction* pc() { return pc_; }
void increment_pc() {
if (!pc_modified_) {
pc_ = pc_->NextInstruction();
}
pc_modified_ = false;
}
void ExecuteInstruction() {
ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext();
decoder_->Decode(pc_);
LogProcessorState();
increment_pc();
CheckBreakpoints();
}
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
// Register accessors.
// Return 'size' bits of the value of an integer register, as the specified
// type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kXRegSize and kWRegSize.
template<typename T>
T reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
T result;
memset(&result, 0, sizeof(result));
return result;
}
return registers_[code].Get<T>(size_in_bytes);
}
// Like reg(), but infer the access size from the template type.
template<typename T>
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<T>(sizeof(T) * 8, code, r31mode);
}
// Common specialized accessors for the reg() template.
int32_t wreg(unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int32_t>(code, r31mode);
}
int64_t xreg(unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int64_t>(code, r31mode);
}
int64_t reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int64_t>(size, code, r31mode);
}
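// Illustration of the Reg31Mode parameter (not part of the original file):
//   xreg(31)                       // the zero register: always reads 0
//   xreg(31, Reg31IsStackPointer)  // reads the simulated stack pointer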
// Write 'size' bits of 'value' into an integer register. The value is
// zero-extended. This behaviour matches AArch64 register writes.
//
// The only supported values of 'size' are kXRegSize and kWRegSize.
template<typename T>
void set_reg(unsigned size, unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
return;
}
return registers_[code].Set(value, size_in_bytes);
}
// Like set_reg(), but infer the access size from the template type.
template<typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(sizeof(value) * 8, code, value, r31mode);
}
// Common specialized accessors for the set_reg() template.
void set_wreg(unsigned code, int32_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kWRegSize, code, value, r31mode);
}
void set_xreg(unsigned code, int64_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kXRegSize, code, value, r31mode);
}
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
ASSERT(sizeof(T) == kPointerSize);
set_reg(kLinkRegCode, value);
}
template<typename T>
void set_sp(T value) {
ASSERT(sizeof(T) == kPointerSize);
set_reg(31, value, Reg31IsStackPointer);
}
int64_t sp() { return xreg(31, Reg31IsStackPointer); }
int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
int64_t fp() {
return xreg(kFramePointerRegCode, Reg31IsStackPointer);
}
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
// Return 'size' bits of the value of a floating-point register, as the
// specified type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kDRegSize and kSRegSize.
template<typename T>
T fpreg(unsigned size, unsigned code) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kDRegSize) || (size == kSRegSize));
ASSERT(code < kNumberOfFPRegisters);
return fpregisters_[code].Get<T>(size_in_bytes);
}
// Like fpreg(), but infer the access size from the template type.
template<typename T>
T fpreg(unsigned code) const {
return fpreg<T>(sizeof(T) * 8, code);
}
// Common specialized accessors for the fpreg() template.
float sreg(unsigned code) const {
return fpreg<float>(code);
}
uint32_t sreg_bits(unsigned code) const {
return fpreg<uint32_t>(code);
}
double dreg(unsigned code) const {
return fpreg<double>(code);
}
uint64_t dreg_bits(unsigned code) const {
return fpreg<uint64_t>(code);
}
double fpreg(unsigned size, unsigned code) const {
switch (size) {
case kSRegSize: return sreg(code);
case kDRegSize: return dreg(code);
default:
UNREACHABLE();
return 0.0;
}
}
// Write 'value' into a floating-point register. The value is zero-extended.
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
ASSERT((sizeof(value) == kDRegSizeInBytes) ||
(sizeof(value) == kSRegSizeInBytes));
ASSERT(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value, sizeof(value));
}
// Common specialized accessors for the set_fpreg() template.
void set_sreg(unsigned code, float value) {
set_fpreg(code, value);
}
void set_sreg_bits(unsigned code, uint32_t value) {
set_fpreg(code, value);
}
void set_dreg(unsigned code, double value) {
set_fpreg(code, value);
}
void set_dreg_bits(unsigned code, uint64_t value) {
set_fpreg(code, value);
}
bool N() { return nzcv_.N() != 0; }
bool Z() { return nzcv_.Z() != 0; }
bool C() { return nzcv_.C() != 0; }
bool V() { return nzcv_.V() != 0; }
SimSystemRegister& nzcv() { return nzcv_; }
// TODO(jbramley): Find a way to make the fpcr_ members return the proper
// types, so this accessor is not necessary.
FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
SimSystemRegister& fpcr() { return fpcr_; }
// Debug helpers
// Simulator breakpoints.
struct Breakpoint {
Instruction* location;
bool enabled;
};
std::vector<Breakpoint> breakpoints_;
void SetBreakpoint(Instruction* breakpoint);
void ListBreakpoints();
void CheckBreakpoints();
// Helpers for the 'next' command.
// When this is set, the Simulator will insert a breakpoint after the next BL
// instruction it encounters.
bool break_on_next_;
// Check if the Simulator should insert a break after the current instruction
// for the 'next' command.
void CheckBreakNext();
// Disassemble instruction at the given address.
void PrintInstructionsAt(Instruction* pc, uint64_t count);
void PrintSystemRegisters(bool print_all = false);
void PrintRegisters(bool print_all_regs = false);
void PrintFPRegisters(bool print_all_regs = false);
void PrintProcessorState();
void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
void LogSystemRegisters() {
if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
}
void LogRegisters() {
if (log_parameters_ & LOG_REGS) PrintRegisters();
}
void LogFPRegisters() {
if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
}
void LogProcessorState() {
LogSystemRegisters();
LogRegisters();
LogFPRegisters();
}
void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
}
int log_parameters() { return log_parameters_; }
void set_log_parameters(int new_parameters) {
if (new_parameters & LOG_DISASM) {
decoder_->InsertVisitorBefore(print_disasm_, this);
} else {
decoder_->RemoveVisitor(print_disasm_);
}
log_parameters_ = new_parameters;
}
static inline const char* WRegNameForCode(unsigned code,
Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* XRegNameForCode(unsigned code,
Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* SRegNameForCode(unsigned code);
static inline const char* DRegNameForCode(unsigned code);
static inline const char* VRegNameForCode(unsigned code);
static inline int CodeFromName(const char* name);
protected:
// Simulation helpers ------------------------------------
bool ConditionPassed(Condition cond) {
switch (cond) {
case eq:
return Z();
case ne:
return !Z();
case hs:
return C();
case lo:
return !C();
case mi:
return N();
case pl:
return !N();
case vs:
return V();
case vc:
return !V();
case hi:
return C() && !Z();
case ls:
return !(C() && !Z());
case ge:
return N() == V();
case lt:
return N() != V();
case gt:
return !Z() && (N() == V());
case le:
return !(!Z() && (N() == V()));
case nv: // Fall through.
case al:
return true;
default:
UNREACHABLE();
return false;
}
}
bool ConditionFailed(Condition cond) {
return !ConditionPassed(cond);
}
void AddSubHelper(Instruction* instr, int64_t op2);
int64_t AddWithCarry(unsigned reg_size,
bool set_flags,
int64_t src1,
int64_t src2,
int64_t carry_in = 0);
void LogicalHelper(Instruction* instr, int64_t op2);
void ConditionalCompareHelper(Instruction* instr, int64_t op2);
void LoadStoreHelper(Instruction* instr,
int64_t offset,
AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
uint8_t* LoadStoreAddress(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
void LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
uint8_t MemoryRead8(uint8_t* address);
uint16_t MemoryRead16(uint8_t* address);
uint32_t MemoryRead32(uint8_t* address);
float MemoryReadFP32(uint8_t* address);
uint64_t MemoryRead64(uint8_t* address);
double MemoryReadFP64(uint8_t* address);
void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
void MemoryWrite32(uint8_t* address, uint32_t value);
void MemoryWriteFP32(uint8_t* address, float value);
void MemoryWrite64(uint8_t* address, uint64_t value);
void MemoryWriteFP64(uint8_t* address, double value);
int64_t ShiftOperand(unsigned reg_size,
int64_t value,
Shift shift_type,
unsigned amount);
int64_t Rotate(unsigned reg_width,
int64_t value,
Shift shift_type,
unsigned amount);
int64_t ExtendValue(unsigned reg_width,
int64_t value,
Extend extend_type,
unsigned left_shift = 0);
uint64_t ReverseBits(uint64_t value, unsigned num_bits);
uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
void FPCompare(double val0, double val1);
double FPRoundInt(double value, FPRounding round_mode);
double FPToDouble(float value);
float FPToFloat(double value, FPRounding round_mode);
double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
int32_t FPToInt32(double value, FPRounding rmode);
int64_t FPToInt64(double value, FPRounding rmode);
uint32_t FPToUInt32(double value, FPRounding rmode);
uint64_t FPToUInt64(double value, FPRounding rmode);
template <typename T>
T FPMax(T a, T b);
template <typename T>
T FPMin(T a, T b);
template <typename T>
T FPMaxNM(T a, T b);
template <typename T>
T FPMinNM(T a, T b);
void CheckStackAlignment();
inline void CheckPCSComplianceAndRun();
#ifdef DEBUG
// Corruption values should have their least significant byte cleared so that
// the code of the register being corrupted can be inserted there.
static const uint64_t kCallerSavedRegisterCorruptionValue =
0xca11edc0de000000UL;
// This value is a NaN in both 32-bit and 64-bit FP.
static const uint64_t kCallerSavedFPRegisterCorruptionValue =
0x7ff000007f801000UL;
// This value is a mix of a 32/64-bit NaN and a "verbose" immediate.
static const uint64_t kDefaultCPURegisterCorruptionValue =
0x7ffbad007f8bad00UL;
void CorruptRegisters(CPURegList* list,
uint64_t value = kDefaultCPURegisterCorruptionValue);
void CorruptAllCallerSavedCPURegisters();
#endif
// Processor state ---------------------------------------
// Output stream.
FILE* stream_;
PrintDisassembler* print_disasm_;
// Instrumentation.
Instrument* instrument_;
// General purpose registers. Register 31 is the stack pointer.
SimRegister registers_[kNumberOfRegisters];
// Floating point registers
SimFPRegister fpregisters_[kNumberOfFPRegisters];
// Processor state
// bits[31, 27]: Condition flags N, Z, C, and V.
// (Negative, Zero, Carry, Overflow)
SimSystemRegister nzcv_;
// Floating-Point Control Register
SimSystemRegister fpcr_;
// Only a subset of the FPCR features is supported by the simulator. This
// helper checks that the FPCR settings are supported.
//
// This is checked when floating-point instructions are executed, not when
// FPCR is set. This allows generated code to modify FPCR for external
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
ASSERT(fpcr().DN() == 0); // No default-NaN support.
ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
// The simulator does not support half-precision operations so fpcr().AHP()
// is irrelevant, and is not checked here.
}
static int CalcNFlag(uint64_t result, unsigned reg_size) {
return (result >> (reg_size - 1)) & 1;
}
static int CalcZFlag(uint64_t result) {
return result == 0;
}
static const uint32_t kConditionFlagsMask = 0xf0000000;
// Stack
byte* stack_;
static const intptr_t stack_protection_size_ = KB;
intptr_t stack_size_;
byte* stack_limit_;
// TODO(aleram): protect the stack.
Decoder* decoder_;
Decoder* disassembler_decoder_;
// Indicates if the pc has been modified by the instruction and should not be
// automatically incremented.
bool pc_modified_;
Instruction* pc_;
static const char* xreg_names[];
static const char* wreg_names[];
static const char* sreg_names[];
static const char* dreg_names[];
static const char* vreg_names[];
// Debugger input.
void set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
}
char* last_debugger_input() { return last_debugger_input_; }
char* last_debugger_input_;
private:
int log_parameters_;
Isolate* isolate_;
};
// When running with the simulator, transition into simulated execution at
// this point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
FUNCTION_ADDR(entry), \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->CallRegExp( \
entry, \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code.
// See also 'class SimulatorStack' in arm/simulator-arm.h.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit();
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static void UnregisterCTryCatch() {
Simulator::current(Isolate::Current())->PopAddress();
}
};
#endif // !defined(USE_SIMULATOR)
} } // namespace v8::internal
#endif // V8_A64_SIMULATOR_A64_H_
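The reg()/set_reg() templates above encode the AArch64 rule that writing a W register zero-extends into the full X register. The sketch below is a standalone illustration of that contract, not V8 code; SimRegisterSketch is a hypothetical stand-in for the SimRegister type the simulator uses.
#include <cassert>
#include <cstdint>
#include <cstring>
// Hypothetical stand-in for SimRegister: an 8-byte payload where every write
// clears the whole register first, mirroring AArch64 zero-extension.
struct SimRegisterSketch {
  uint8_t bytes[8];
  template <typename T>
  void Set(T value, unsigned size_in_bytes) {
    memset(bytes, 0, sizeof(bytes));  // Zero-extend, as set_reg() documents.
    memcpy(bytes, &value, size_in_bytes);
  }
  template <typename T>
  T Get(unsigned size_in_bytes) const {
    T result;
    memset(&result, 0, sizeof(result));  // Matches reg()'s zero-filled path.
    memcpy(&result, bytes, size_in_bytes);
    return result;
  }
};
int main() {
  SimRegisterSketch x0;
  x0.Set<int64_t>(-1, 8);  // x0 == 0xffffffffffffffff.
  x0.Set<int32_t>(-1, 4);  // A W-sized write clears the upper 32 bits.
  assert(x0.Get<uint64_t>(8) == 0xffffffffUL);
  return 0;
}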

1548
deps/v8/src/a64/stub-cache-a64.cc

File diff suppressed because it is too large

112
deps/v8/src/a64/utils-a64.cc

@@ -0,0 +1,112 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if V8_TARGET_ARCH_A64
#include "a64/utils-a64.h"
namespace v8 {
namespace internal {
#define __ assm->
int CountLeadingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
count++;
bit_test >>= 1;
}
return count;
}
int CountLeadingSignBits(int64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
ASSERT((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
return CountLeadingZeros(~value, width) - 1;
}
}
int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
}
return count;
}
int CountSetBits(uint64_t value, int width) {
// TODO(jbramley): Would it be useful to allow other widths? The
// implementation already supports them.
ASSERT((width == 32) || (width == 64));
// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
// where the size of each bit field doubles each time.
// An example for an 8-bit value:
// Bits: h g f e d c b a
// \ | \ | \ | \ |
// value = h+g f+e d+c b+a
// \ | \ |
// value = h+g+f+e d+c+b+a
// \ |
// value = h+g+f+e+d+c+b+a
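// For example, value 0b01101100 (bits hgfedcba = 01101100, four bits set):
//   after pass 1 (pairs):   01 01 10 00 (field sums 1, 1, 2, 0)
//   after pass 2 (nibbles): 0010 0010 (field sums 2, 2)
//   after pass 3 (bytes):   00000100 (sum 4)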
value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
return value;
}
int MaskToBit(uint64_t mask) {
ASSERT(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
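The TODO(jbramley) notes above all point at the same optimization: on hosts with a count-leading-zeros instruction, the bit-test loops collapse into a single builtin. The sketch below shows that fast path under the assumption of a GCC/Clang host; FastCountLeadingZeros is a hypothetical name, not part of this patch.
#include <cassert>
#include <cstdint>
// __builtin_clzll counts leading zeros over 64 bits, so narrower widths just
// subtract the unused high bits. The builtin is undefined for 0, hence the
// explicit zero check.
static int FastCountLeadingZeros(uint64_t value, int width) {
  assert((width == 32) || (width == 64));
  if (value == 0) return width;
  return __builtin_clzll(value) - (64 - width);
}
int main() {
  assert(FastCountLeadingZeros(1, 64) == 63);
  assert(FastCountLeadingZeros(1, 32) == 31);
  assert(FastCountLeadingZeros(0x80000000u, 32) == 0);
  assert(FastCountLeadingZeros(0, 32) == 32);
  return 0;
}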

109
deps/v8/src/a64/utils-a64.h

@@ -0,0 +1,109 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_UTILS_A64_H_
#define V8_A64_UTILS_A64_H_
#include <cmath>
#include "v8.h"
#include "a64/constants-a64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
namespace v8 {
namespace internal {
// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
uint32_t bits = 0;
memcpy(&bits, &value, 4);
return bits;
}
static inline uint64_t double_to_rawbits(double value) {
uint64_t bits = 0;
memcpy(&bits, &value, 8);
return bits;
}
static inline float rawbits_to_float(uint32_t bits) {
float value = 0.0;
memcpy(&value, &bits, 4);
return value;
}
static inline double rawbits_to_double(uint64_t bits) {
double value = 0.0;
memcpy(&value, &bits, 8);
return value;
}
// Bits counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
int MaskToBit(uint64_t mask);
// NaN tests.
inline bool IsSignallingNaN(double num) {
const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
uint64_t raw = double_to_rawbits(num);
if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
return true;
}
return false;
}
inline bool IsSignallingNaN(float num) {
const uint64_t kFP32QuietNaNMask = 0x00400000UL;
uint32_t raw = float_to_rawbits(num);
if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
return true;
}
return false;
}
template <typename T>
inline bool IsQuietNaN(T num) {
return std::isnan(num) && !IsSignallingNaN(num);
}
} } // namespace v8::internal
#endif // V8_A64_UTILS_A64_H_
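The signalling-NaN tests above rest on one IEEE-754 detail: a NaN has an all-ones exponent, and the most significant mantissa bit (the mask constants above) is set for quiet NaNs and clear for signalling ones. A small self-contained check of that bit layout, working on raw bits so no sNaN is ever materialized as a double:
#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>
int main() {
  const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
  // Signalling NaN pattern: exponent all ones, quiet bit clear, a low
  // mantissa bit set (an all-zero mantissa would be infinity, not NaN).
  const uint64_t snan_bits = 0x7ff0000000000001UL;
  // Quiet NaN: the standard library's quiet_NaN() has the quiet bit set.
  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t qnan_bits = 0;
  memcpy(&qnan_bits, &qnan, 8);
  assert((snan_bits & kFP64QuietNaNMask) == 0);  // Signalling: bit clear.
  assert((qnan_bits & kFP64QuietNaNMask) != 0);  // Quiet: bit set.
  return 0;
}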

118
deps/v8/src/accessors.cc

@@ -28,6 +28,7 @@
#include "v8.h"
#include "accessors.h"
#include "compiler.h"
#include "contexts.h"
#include "deoptimizer.h"
#include "execution.h"
@@ -90,10 +91,22 @@ static V8_INLINE bool CheckForName(Handle<String> name,
}
bool Accessors::IsJSObjectFieldAccessor(
Handle<Map> map, Handle<String> name,
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
template <class T>
bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
Handle<String> name,
int* object_offset) {
Isolate* isolate = map->GetIsolate();
Isolate* isolate = name->GetIsolate();
if (type->Is(T::String())) {
return CheckForName(name, isolate->heap()->length_string(),
String::kLengthOffset, object_offset);
}
if (!type->IsClass()) return false;
Handle<Map> map = type->AsClass();
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return
@@ -121,18 +134,24 @@ bool Accessors::IsJSObjectFieldAccessor(
JSDataView::kByteOffsetOffset, object_offset) ||
CheckForName(name, isolate->heap()->buffer_string(),
JSDataView::kBufferOffset, object_offset);
default: {
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return
CheckForName(name, isolate->heap()->length_string(),
String::kLengthOffset, object_offset);
}
default:
return false;
}
}
}
template
bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type,
Handle<String> name,
int* object_offset);
template
bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
Handle<String> name,
int* object_offset);
//
// Accessors::ArrayLength
//
@@ -148,45 +167,49 @@ MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
// The helper function will 'flatten' Number objects.
Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
Map* number_map = isolate->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
if (wrapper->map() ==
isolate->context()->native_context()->number_function()->initial_map()) {
return handle(wrapper->value(), isolate);
}
return value;
}
MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
JSObject* object,
Object* value,
JSObject* object_raw,
Object* value_raw,
void*) {
HandleScope scope(isolate);
Handle<JSObject> object(object_raw, isolate);
Handle<Object> value(value_raw, isolate);
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
return object->SetLocalPropertyIgnoreAttributesTrampoline(
isolate->heap()->length_string(), value, NONE);
Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
isolate->factory()->length_string(), value, NONE);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
value = FlattenNumber(isolate, value);
// Need to call methods that may trigger GC.
HandleScope scope(isolate);
// Protect raw pointers.
Handle<JSArray> array_handle(JSArray::cast(object), isolate);
Handle<Object> value_handle(value, isolate);
Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
bool has_exception;
Handle<Object> uint32_v =
Execution::ToUint32(isolate, value_handle, &has_exception);
Execution::ToUint32(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
Handle<Object> number_v =
Execution::ToNumber(isolate, value_handle, &has_exception);
Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
@@ -578,26 +601,28 @@ MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
JSObject* object,
JSObject* object_raw,
Object* value_raw,
void*) {
Heap* heap = isolate->heap();
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return heap->undefined_value();
if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributesTrampoline(
heap->prototype_string(), value_raw, NONE);
}
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object_raw);
if (function_raw == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSFunction> function(function_raw, isolate);
Handle<JSObject> object(object_raw, isolate);
Handle<Object> value(value_raw, isolate);
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
isolate->factory()->prototype_string(), value, NONE);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
Handle<Object> old_value;
bool is_observed =
FLAG_harmony_observation &&
*function == object &&
*function == *object &&
function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
@@ -611,7 +636,7 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
function, "updated", isolate->factory()->prototype_string(), old_value);
function, "update", isolate->factory()->prototype_string(), old_value);
}
return *function;
@@ -642,9 +667,9 @@ MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
// If the function isn't compiled yet, the length is not computed correctly
// yet. Compile it now and return the right length.
HandleScope scope(isolate);
Handle<JSFunction> handle(function);
if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
return Smi::FromInt(handle->shared()->length());
Handle<JSFunction> function_handle(function);
if (Compiler::EnsureCompiled(function_handle, KEEP_EXCEPTION)) {
return Smi::FromInt(function_handle->shared()->length());
}
return Failure::Exception();
}
@@ -699,21 +724,22 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
Vector<SlotRef> args_slots =
SlotRef::ComputeSlotMappingForArguments(
SlotRefValueBuilder slot_refs(
frame,
inlined_frame_index,
inlined_function->shared()->formal_parameter_count());
int args_count = args_slots.length();
int args_count = slot_refs.args_length();
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue(isolate);
Handle<Object> value = slot_refs.GetNext(isolate, 0);
array->set(i, *value);
}
slot_refs.Finish(isolate);
arguments->set_elements(*array);
args_slots.Dispose();
// Return the freshly allocated arguments object.
return *arguments;

7
deps/v8/src/accessors.h

@@ -88,8 +88,9 @@ class Accessors : public AllStatic {
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
static bool IsJSObjectFieldAccessor(
Handle<Map> map, Handle<String> name,
template <class T>
static bool IsJSObjectFieldAccessor(typename T::TypeHandle type,
Handle<String> name,
int* object_offset);
@@ -149,7 +150,7 @@
void*);
// Helper functions.
static Object* FlattenNumber(Isolate* isolate, Object* value);
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
static MaybeObject* IllegalSetter(Isolate* isolate,
JSObject*,
Object*,

32
deps/v8/src/allocation-site-scopes.cc

@@ -83,26 +83,20 @@ void AllocationSiteCreationContext::ExitScope(
}
Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
ASSERT(nested_site->IsAllocationSite());
update_current_site(AllocationSite::cast(nested_site));
bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
if (FLAG_allocation_site_pretenuring ||
AllocationSite::GetMode(object->GetElementsKind()) ==
TRACK_ALLOCATION_SITE) {
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating Memento for %s %p\n",
object->IsJSArray() ? "JSArray" : "JSObject",
static_cast<void*>(*object));
}
return Handle<AllocationSite>(*current(), isolate());
}
void AllocationSiteUsageContext::ExitScope(
Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
ASSERT(object.is_null() || *object == scope_site->transition_info());
return true;
}
}
return false;
}
} } // namespace v8::internal

53
deps/v8/src/allocation-site-scopes.h

@@ -41,31 +41,22 @@ namespace internal {
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
public:
AllocationSiteContext(Isolate* isolate, bool activated) {
explicit AllocationSiteContext(Isolate* isolate) {
isolate_ = isolate;
activated_ = activated;
};
virtual ~AllocationSiteContext() {}
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
// If activated, then recursively create mementos
bool activated() const { return activated_; }
bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
// Returns the AllocationSite that matches this scope.
virtual Handle<AllocationSite> EnterNewScope() = 0;
// scope_site should be the handle returned by the matching EnterNewScope()
virtual void ExitScope(Handle<AllocationSite> scope_site,
Handle<JSObject> object) = 0;
Isolate* isolate() { return isolate_; }
protected:
void update_current_site(AllocationSite* site) {
*(current_.location()) = site;
}
Isolate* isolate() { return isolate_; }
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
current_ = Handle<AllocationSite>(*top_, isolate());
@@ -75,7 +66,6 @@ class AllocationSiteContext {
Isolate* isolate_;
Handle<AllocationSite> top_;
Handle<AllocationSite> current_;
bool activated_;
};
@@ -84,11 +74,10 @@ class AllocationSiteContext {
class AllocationSiteCreationContext : public AllocationSiteContext {
public:
explicit AllocationSiteCreationContext(Isolate* isolate)
: AllocationSiteContext(isolate, true) { }
: AllocationSiteContext(isolate) { }
virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
virtual void ExitScope(Handle<AllocationSite> site,
Handle<JSObject> object) V8_OVERRIDE;
Handle<AllocationSite> EnterNewScope();
void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
};
@@ -98,15 +87,35 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
public:
AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
bool activated)
: AllocationSiteContext(isolate, activated),
top_site_(site) { }
: AllocationSiteContext(isolate),
top_site_(site),
activated_(activated) { }
inline Handle<AllocationSite> EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
ASSERT(nested_site->IsAllocationSite());
update_current_site(AllocationSite::cast(nested_site));
}
return Handle<AllocationSite>(*current(), isolate());
}
inline void ExitScope(Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
ASSERT(object.is_null() || *object == scope_site->transition_info());
}
virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
virtual void ExitScope(Handle<AllocationSite> site,
Handle<JSObject> object) V8_OVERRIDE;
bool ShouldCreateMemento(Handle<JSObject> object);
private:
Handle<AllocationSite> top_site_;
bool activated_;
};

32
deps/v8/src/allocation-tracker.cc

@@ -46,6 +46,7 @@ AllocationTraceNode::AllocationTraceNode(
AllocationTraceNode::~AllocationTraceNode() {
for (int i = 0; i < children_.length(); i++) delete children_[i];
}
@@ -155,6 +156,11 @@ AllocationTracker::AllocationTracker(
AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
for (HashMap::Entry* p = id_to_function_info_.Start();
p != NULL;
p = id_to_function_info_.Next(p)) {
delete reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
}
}
@@ -169,7 +175,7 @@ void AllocationTracker::PrepareForSerialization() {
}
void AllocationTracker::NewObjectEvent(Address addr, int size) {
void AllocationTracker::AllocationEvent(Address addr, int size) {
DisallowHeapAllocation no_allocation;
Heap* heap = ids_->heap();
@@ -185,7 +191,8 @@ void AllocationTracker::NewObjectEvent(Address addr, int size) {
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo* shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindEntry(shared->address());
SnapshotObjectId id = ids_->FindOrAddEntry(
shared->address(), shared->Size(), false);
allocation_trace_buffer_[length++] = id;
AddFunctionInfo(shared, id);
it.Advance();
@@ -245,34 +252,33 @@ AllocationTracker::UnresolvedLocation::UnresolvedLocation(
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
GlobalHandles::MakeWeak(
reinterpret_cast<Object**>(script_.location()),
this, &HandleWeakScript);
GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
this,
&HandleWeakScript);
}
AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
if (!script_.is_null()) {
script_->GetIsolate()->global_handles()->Destroy(
reinterpret_cast<Object**>(script_.location()));
GlobalHandles::Destroy(reinterpret_cast<Object**>(script_.location()));
}
}
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
HandleScope scope(script_->GetIsolate());
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* data) {
UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
location->script_ = Handle<Script>::null();
obj->Dispose();
const v8::WeakCallbackData<v8::Value, void>& data) {
UnresolvedLocation* loc =
reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
loc->script_ = Handle<Script>::null();
}

9
deps/v8/src/allocation-tracker.h

@@ -96,7 +96,7 @@ class AllocationTracker {
~AllocationTracker();
void PrepareForSerialization();
void NewObjectEvent(Address addr, int size);
void AllocationEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
HashMap* id_to_function_info() { return &id_to_function_info_; }
@@ -112,9 +112,9 @@
void Resolve();
private:
static void HandleWeakScript(v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* data);
static void HandleWeakScript(
const v8::WeakCallbackData<v8::Value, void>& data);
Handle<Script> script_;
int start_position_;
FunctionInfo* info_;
@@ -135,4 +135,3 @@
} } // namespace v8::internal
#endif // V8_ALLOCATION_TRACKER_H_

41
deps/v8/src/allocation.cc

@@ -32,6 +32,10 @@
#include "platform.h"
#include "utils.h"
#if V8_LIBC_BIONIC
#include <malloc.h> // NOLINT
#endif
namespace v8 {
namespace internal {
@@ -101,23 +105,32 @@ char* StrNDup(const char* str, int n) {
}
void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
next_ = other->next_;
other->next_->previous_ = this;
previous_ = other;
other->next_ = this;
}
void PreallocatedStorage::Unlink() {
next_->previous_ = previous_;
previous_->next_ = next_;
void* AlignedAlloc(size_t size, size_t alignment) {
ASSERT(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
// posix_memalign is not exposed in some Android versions, so we fall back to
// memalign. See http://code.google.com/p/android/issues/detail?id=35391.
ptr = memalign(alignment, size);
#else
if (posix_memalign(&ptr, alignment, size)) ptr = NULL;
#endif
if (ptr == NULL) FatalProcessOutOfMemory("AlignedAlloc");
return ptr;
}
PreallocatedStorage::PreallocatedStorage(size_t size)
: size_(size) {
previous_ = next_ = this;
void AlignedFree(void *ptr) {
#if V8_OS_WIN
_aligned_free(ptr);
#elif V8_LIBC_BIONIC
// Using free is not correct in general, but for V8_LIBC_BIONIC it is.
free(ptr);
#else
free(ptr);
#endif
}
} } // namespace v8::internal
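A minimal usage sketch for the AlignedAlloc/AlignedFree pair added above, written against plain posix_memalign for a POSIX host; the patched code additionally covers _aligned_malloc on Windows and memalign on Bionic.
#include <cassert>
#include <cstdint>
#include <cstdlib>
int main() {
  void* ptr = NULL;
  // 1 KB with 64-byte alignment; the alignment must be a power of two and at
  // least sizeof(void*), matching the ASSERT in AlignedAlloc.
  if (posix_memalign(&ptr, 64, 1024) != 0) return 1;
  assert((reinterpret_cast<uintptr_t>(ptr) & 63) == 0);
  free(ptr);  // posix_memalign memory is released with plain free().
  return 0;
}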

29
deps/v8/src/allocation.h

@@ -109,33 +109,8 @@ class FreeStoreAllocationPolicy {
};
// Allocation policy for allocating in preallocated space.
// Used as an allocation policy for ScopeInfo when generating
// stack traces.
class PreallocatedStorage {
public:
explicit PreallocatedStorage(size_t size);
size_t size() { return size_; }
private:
size_t size_;
PreallocatedStorage* previous_;
PreallocatedStorage* next_;
void LinkTo(PreallocatedStorage* other);
void Unlink();
friend class Isolate;
DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
};
struct PreallocatedStorageAllocationPolicy {
INLINE(void* New(size_t size));
INLINE(static void Delete(void* ptr));
};
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
} } // namespace v8::internal

1388
deps/v8/src/api.cc

File diff suppressed because it is too large

53
deps/v8/src/api.h

@@ -31,7 +31,6 @@
#include "v8.h"
#include "../include/v8-testing.h"
#include "apiutils.h"
#include "contexts.h"
#include "factory.h"
#include "isolate.h"
@@ -56,7 +55,7 @@ class Consts {
// env-independent JSObjects used by the api.
class NeanderObject {
public:
explicit NeanderObject(int size);
explicit NeanderObject(v8::internal::Isolate* isolate, int size);
explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
explicit inline NeanderObject(v8::internal::Object* obj);
inline v8::internal::Object* get(int index);
@@ -72,7 +71,7 @@ class NeanderObject {
// array abstraction built on neander-objects.
class NeanderArray {
public:
NeanderArray();
explicit NeanderArray(v8::internal::Isolate* isolate);
explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
inline v8::internal::Handle<v8::internal::JSObject> value() {
return obj_.value();
@@ -196,7 +195,12 @@ class RegisteredExtension {
class Utils {
public:
static bool ReportApiFailure(const char* location, const char* message);
static inline bool ApiCheck(bool condition,
const char* location,
const char* message) {
if (!condition) Utils::ReportApiFailure(location, message);
return condition;
}
static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
@@ -303,17 +307,20 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
static inline v8::internal::Handle<To> OpenHandle(v8::Local<From> handle) {
return OpenHandle(*handle);
}
private:
static void ReportApiFailure(const char* location, const char* message);
};
template <class T>
v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
v8::HandleScope* scope) {
v8::EscapableHandleScope* scope) {
v8::internal::Handle<T> handle;
if (!is_null()) {
handle = *this;
}
return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)), true);
return Utils::OpenHandle(*scope->Escape(Utils::ToLocal(handle)), true);
}
@@ -337,11 +344,11 @@ inline v8::Local<T> ToApiHandle(
}
#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
ASSERT(obj->type() == typeConst); \
return Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
ASSERT(obj->type() == kExternal##Type##Array); \
return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
}
@@ -358,15 +365,7 @@ MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint8ClampedArray, kExternalPixelArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint32Array, kExternalUnsignedIntArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int32Array, kExternalIntArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Float32Array, kExternalFloatArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Float64Array, kExternalDoubleArray)
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
@@ -543,7 +542,8 @@ class HandleScopeImplementer {
inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Context> context);
inline bool LeaveContext(Handle<Context> context);
inline void LeaveContext();
inline bool LastEnteredContextWas(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
@@ -599,7 +599,7 @@ class HandleScopeImplementer {
int call_depth_;
Object** last_handle_before_deferred_block_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
HandleScopeData handle_scope_data_;
void IterateThis(ObjectVisitor* v);
char* RestoreThreadHelper(char* from);
@@ -635,12 +635,13 @@ void HandleScopeImplementer::EnterContext(Handle<Context> context) {
}
bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
if (entered_contexts_.is_empty()) return false;
// TODO(dcarney): figure out what's wrong here
// if (entered_contexts_.last() != *context) return false;
void HandleScopeImplementer::LeaveContext() {
entered_contexts_.RemoveLast();
return true;
}
bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
return !entered_contexts_.is_empty() && entered_contexts_.last() == *context;
}

1
deps/v8/src/arm/OWNERS

@@ -0,0 +1 @@
rmcilroy@chromium.org

83
deps/v8/src/arm/assembler-arm-inl.h

@@ -57,6 +57,11 @@ int DwVfpRegister::NumRegisters() {
}
int DwVfpRegister::NumReservedRegisters() {
return kNumReservedRegisters;
}
int DwVfpRegister::NumAllocatableRegisters() {
return NumRegisters() - kNumReservedRegisters;
}
@@ -104,7 +109,7 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
return Assembler::target_pointer_address_at(pc_);
}
@@ -126,31 +131,21 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_pointer_at(pc_)));
}
Object** RelocInfo::target_object_address() {
// Provide a "natural pointer" to the embedded object,
// which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
reconstructed_obj_ptr_ =
reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
return &reconstructed_obj_ptr_;
Assembler::target_address_at(pc_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -160,10 +155,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
Address* RelocInfo::target_reference_address() {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
return &reconstructed_adr_ptr_;
return Assembler::target_address_at(pc_);
}
@@ -269,6 +263,15 @@ Object** RelocInfo::call_object_address() {
}
void RelocInfo::WipeOut() {
ASSERT(IsEmbeddedObject(rmode_) ||
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, NULL);
}
bool RelocInfo::IsPatchedReturnSequence() {
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
@@ -394,33 +397,12 @@ void Assembler::emit(Instr x) {
Address Assembler::target_pointer_address_at(Address pc) {
Address target_pc = pc;
Instr instr = Memory::int32_at(target_pc);
// If we have a bx instruction, the instruction before the bx is
// what we need to patch.
static const int32_t kBxInstMask = 0x0ffffff0;
static const int32_t kBxInstPattern = 0x012fff10;
if ((instr & kBxInstMask) == kBxInstPattern) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
// With a blx instruction, the instruction before is what needs to be patched.
if ((instr & kBlxRegMask) == kBlxRegPattern) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
ASSERT(IsLdrPcImmediateOffset(instr));
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
return target_pc + offset + 8;
Instr instr = Memory::int32_at(pc);
return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
}
Address Assembler::target_pointer_at(Address pc) {
Address Assembler::target_address_at(Address pc) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@@ -429,6 +411,7 @@ Address Assembler::target_pointer_at(Address pc) {
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
}
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(target_pointer_address_at(pc));
}
@@ -474,19 +457,13 @@ void Assembler::deserialization_set_special_target_at(
}
void Assembler::set_external_target_at(Address constant_pool_entry,
Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
static Instr EncodeMovwImmediate(uint32_t immediate) {
ASSERT(immediate < 0x10000);
return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
void Assembler::set_target_pointer_at(Address pc, Address target) {
void Assembler::set_target_address_at(Address pc, Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@ -517,16 +494,6 @@ void Assembler::set_target_pointer_at(Address pc, Address target) {
}
Address Assembler::target_address_at(Address pc) {
return target_pointer_at(pc);
}
void Assembler::set_target_address_at(Address pc, Address target) {
set_target_pointer_at(pc, target);
}
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_

190
deps/v8/src/arm/assembler-arm.cc

@@ -517,12 +517,13 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_reloc_info_ = 0;
num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_use_ = -1;
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
ClearRecordedAstId();
}
@@ -536,7 +537,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_pending_reloc_info_ == 0);
ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
// Set up code descriptor.
@@ -544,6 +545,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
}
@@ -3077,6 +3079,11 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
}
bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
return is_uint12(abs(imm32));
}
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
@@ -3149,14 +3156,19 @@ void Assembler::GrowBuffer() {
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
rinfo.set_pc(rinfo.pc() + pc_delta);
}
}
for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
ASSERT(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
}
@@ -3164,7 +3176,7 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
ASSERT(num_pending_reloc_info_ == 0);
ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
@@ -3176,7 +3188,7 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
ASSERT(num_pending_reloc_info_ == 0);
ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
@@ -3184,6 +3196,14 @@ void Assembler::dd(uint32_t data) {
}
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) =
reinterpret_cast<uint32_t>(stub->instruction_start());
pc_ += sizeof(uint32_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
UseConstantPoolMode mode) {
// We do not try to reuse pool constants.
@@ -3238,15 +3258,19 @@ void Assembler::RecordRelocInfo(double data) {
void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
if (num_pending_reloc_info_ == 0) {
first_const_pool_use_ = pc_offset();
}
pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
if (rinfo.rmode() == RelocInfo::NONE64) {
++num_pending_64_bit_reloc_info_;
ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
if (num_pending_64_bit_reloc_info_ == 0) {
first_const_pool_64_use_ = pc_offset();
}
pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
} else {
ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
if (num_pending_32_bit_reloc_info_ == 0) {
first_const_pool_32_use_ = pc_offset();
}
pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
@@ -3256,12 +3280,15 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// If there are some pending entries, the constant pool cannot be blocked
// further than constant pool instruction's reach.
ASSERT((num_pending_reloc_info_ == 0) ||
(pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
// TODO(jfb) Also check 64-bit entries are in range (requires splitting
// them up from 32-bit entries).
// Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
(start - first_const_pool_32_use_ +
num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
(start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
no_const_pool_before_ = pc_limit;
}
@@ -3282,8 +3309,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
if (num_pending_reloc_info_ == 0) {
ASSERT(num_pending_64_bit_reloc_info_ == 0);
if ((num_pending_32_bit_reloc_info_ == 0) &&
(num_pending_64_bit_reloc_info_ == 0)) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@@ -3292,24 +3319,18 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Check that the code buffer is large enough before emitting the constant
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
// Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
int size_after_marker = num_pending_reloc_info_ * kPointerSize;
int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
// 64-bit values must be 64-bit aligned.
// We'll start emitting at PC: branch+marker, then 32-bit values, then
// 64-bit values which might need to be aligned.
bool require_64_bit_align = has_fp_values &&
(((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
bool require_64_bit_align = false;
if (has_fp_values) {
require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
if (require_64_bit_align) {
size_after_marker += kInstrSize;
}
// num_pending_reloc_info_ also contains 64-bit entries, the above code
// therefore already counted half of the size for 64-bit entries. Add the
// remaining size.
STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
}
int size = size_up_to_marker + size_after_marker;
@@ -3322,19 +3343,25 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
int dist = pc_offset() + size - first_const_pool_use_;
ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
bool need_emit = false;
if (has_fp_values) {
if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
(require_jump || (dist < kMaxDistToFPPool / 2))) {
return;
int dist64 = pc_offset() +
size -
num_pending_32_bit_reloc_info_ * kPointerSize -
first_const_pool_64_use_;
if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
(!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
need_emit = true;
}
} else {
if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
(require_jump || (dist < kMaxDistToIntPool / 2))) {
return;
}
int dist32 =
pc_offset() + size - first_const_pool_32_use_;
if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
(!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
need_emit = true;
}
if (!need_emit) return;
}
int needed_space = size + kGap;
@@ -3363,15 +3390,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
if (rinfo.rmode() != RelocInfo::NONE64) {
// 32-bit values emitted later.
continue;
}
for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment.
ASSERT(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
@@ -3381,53 +3403,85 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
ASSERT(is_uint10(delta));
bool found = false;
uint64_t value = rinfo.raw_data64();
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
if (value == rinfo2.raw_data64()) {
found = true;
ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
Instr instr2 = instr_at(rinfo2.pc());
ASSERT(IsVldrDPcImmediateOffset(instr2));
delta = GetVldrDRegisterImmediateOffset(instr2);
delta += rinfo2.pc() - rinfo.pc();
break;
}
}
instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
const double double_data = rinfo.data64();
uint64_t uint_data = 0;
OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
if (!found) {
uint64_t uint_data = rinfo.raw_data64();
emit(uint_data & 0xFFFFFFFF);
emit(uint_data >> 32);
}
}
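Each distinct double is pooled once: a repeated value just patches its vldr back to the earlier entry's offset. The final two emits split the raw bits into consecutive 32-bit words, low half first; a sketch:
#include <cstdint>
#include <vector>
// Matches emit(uint_data & 0xFFFFFFFF) followed by emit(uint_data >> 32).
static void Emit64(uint64_t bits, std::vector<uint32_t>* buffer) {
  buffer->push_back(static_cast<uint32_t>(bits & 0xFFFFFFFF));  // low word
  buffer->push_back(static_cast<uint32_t>(bits >> 32));         // high word
}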
// Emit 32-bit constant pool entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
rinfo.rmode() != RelocInfo::CONST_POOL);
if (rinfo.rmode() == RelocInfo::NONE64) {
// 64-bit values emitted earlier.
continue;
}
rinfo.rmode() != RelocInfo::CONST_POOL &&
rinfo.rmode() != RelocInfo::NONE64);
Instr instr = instr_at(rinfo.pc());
// 64-bit loads shouldn't get here.
ASSERT(!IsVldrDPcImmediateOffset(instr));
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
ASSERT(is_uint12(delta));
// 0 is the smallest delta:
// ldr rd, [pc, #0]
// constant pool marker
// data
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
ASSERT(is_uint12(delta));
bool found = false;
if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
if ((rinfo2.data() == rinfo.data()) &&
(rinfo2.rmode() == rinfo.rmode())) {
Instr instr2 = instr_at(rinfo2.pc());
if (IsLdrPcImmediateOffset(instr2)) {
delta = GetLdrRegisterImmediateOffset(instr2);
delta += rinfo2.pc() - rinfo.pc();
found = true;
break;
}
}
}
}
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
if (!found) {
emit(rinfo.data());
}
} else {
ASSERT(IsMovW(instr));
emit(rinfo.data());
}
}
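The 32-bit loop deduplicates under a narrower rule than the 64-bit one; a sketch of the predicate (assuming the RelocInfo::Mode ordering in relocinfo.h, where CELL and the modes after it carry plain heap-object payloads):
// The serializer needs one pool slot per entry so it can relocate each
// independently; otherwise entries with identical data and mode may share.
static bool CanShareSlot(bool serializer_enabled, RelocInfo::Mode rmode) {
  return !serializer_enabled && rmode >= RelocInfo::CELL;
}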
num_pending_reloc_info_ = 0;
num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
first_const_pool_use_ = -1;
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
RecordComment("]");

51
deps/v8/src/arm/assembler-arm.h

@@ -164,18 +164,12 @@ struct Register {
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
if (FLAG_enable_ool_constant_pool && (reg.code() >= kRegister_r8_Code)) {
return reg.code() - 1;
}
ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
if (FLAG_enable_ool_constant_pool && (index >= 7)) {
return from_code(index + 1);
}
return from_code(index);
}
@@ -285,6 +279,7 @@ struct DwVfpRegister {
// Any code included in the snapshot must be able to run both with 16 or 32
// registers.
inline static int NumRegisters();
inline static int NumReservedRegisters();
inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg);
@@ -785,10 +780,6 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
// Read/Modify the pointer in the branch/call/move instruction at pc.
INLINE(static Address target_pointer_at(Address pc));
INLINE(static void set_target_pointer_at(Address pc, Address target));
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
@@ -806,11 +797,6 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Address target);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address constant_pool_entry,
Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
@@ -1325,6 +1311,9 @@ class Assembler : public AssemblerBase {
// Check whether an immediate fits an addressing mode 1 instruction.
bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
// Check whether an immediate fits an addressing mode 2 instruction.
bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
// Class for scoping postponing the constant pool generation.
class BlockConstPoolScope {
public:
@@ -1393,6 +1382,9 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@@ -1444,7 +1436,8 @@ class Assembler : public AssemblerBase {
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer; it therefore acts as the limit.
static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
// Postpone the generation of the constant pool for the specified number of
// instructions.
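For concreteness, with kInstrSize == 4 the two kMaxNumPending bounds above allow one pending entry per instruction over each pool's reachable range; a compile-time check of the arithmetic:
static_assert(4 * 1024 / 4 == 1024, "up to 1024 pending 32-bit entries");
static_assert(1 * 1024 / 4 == 256, "up to 256 pending 64-bit entries");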
@@ -1482,11 +1475,16 @@ class Assembler : public AssemblerBase {
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
#ifdef DEBUG
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_reloc_info_ == 0) ||
(pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
(start + num_pending_64_bit_reloc_info_ * kDoubleSize <
(first_const_pool_32_use_ + kMaxDistToIntPool)));
ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
(pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
@@ -1535,7 +1533,8 @@ class Assembler : public AssemblerBase {
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_const_pool_use_;
int first_const_pool_32_use_;
int first_const_pool_64_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
@@ -1549,12 +1548,12 @@ class Assembler : public AssemblerBase {
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
// the buffer of pending relocation info
RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
// number of pending reloc info entries in the buffer
int num_pending_reloc_info_;
// Number of pending reloc info entries included above which also happen to
// be 64-bit.
// The buffers of pending relocation info.
RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
// Number of pending reloc info entries in the 32 bits buffer.
int num_pending_32_bit_reloc_info_;
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
// The bound position, before this we cannot do instruction elimination.

174
deps/v8/src/arm/builtins-arm.cc

@@ -34,6 +34,7 @@
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -289,19 +290,15 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push call kind information.
__ push(r5);
// Function is also the parameter to the runtime call.
__ push(r1);
// Push function as parameter to the runtime call.
__ Push(r1);
__ CallRuntime(function_id, 1);
// Restore call kind information.
__ pop(r5);
// Restore receiver.
__ pop(r1);
}
@@ -315,7 +312,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r0);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -326,22 +329,14 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
// Tail call to returned code.
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r0);
CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -406,9 +401,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ strb(r4, constructor_count);
__ b(ne, &allocate);
__ Push(r1, r2);
__ push(r1);
__ push(r1); // constructor
__ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
@@ -610,13 +605,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
@@ -695,7 +687,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
// r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
// r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@@ -736,7 +728,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
if (!FLAG_enable_ool_constant_pool) {
__ mov(r7, Operand(r4));
__ mov(r8, Operand(r4));
}
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
@@ -753,8 +745,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ CallStub(&stub);
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the JS frame and remove the parameters (except function), and
// return.
@@ -776,19 +767,36 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
__ Push(r1);
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
__ pop(r1);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
CallCompileOptimized(masm, false);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
CallCompileOptimized(masm, true);
GenerateTailCallToReturnedCode(masm);
}
@@ -805,7 +813,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(1, 0, r2);
__ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -838,15 +846,15 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(1, 0, r2);
__ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(2 * kPointerSize));
__ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
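The PushFixedFrame/kFixedFrameSizeFromFp pair above replaces the hand-rolled stm/add prologue. A sketch of what the helper expands to (an assumption inferred from the adaptor-frame stm later in this diff, not quoted from macro-assembler-arm.h):
//   PushFixedFrame(r1)  ~  stm db_w, sp, {r1, cp, [pp,] fp, lr}
// where the pp slot is present only with FLAG_enable_ool_constant_pool,
// which is exactly why fp can no longer be set at a hard-coded
// 2 * kPointerSize above the stack pointer.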
@@ -940,18 +948,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Lookup and calculate pc offset.
__ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sub(r1, r1, r2);
__ SmiTag(r1);
// Pass both function and pc offset as arguments.
// Pass function as argument.
__ push(r0);
__ push(r1);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
// If the code object is null, just return to the unoptimized code.
@@ -1082,14 +1081,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
// Use the global receiver object from the called function as the
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -1150,18 +1143,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(eq, &function);
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r4, Operand(1));
__ b(ne, &non_proxy);
__ push(r1); // re-add proxy object as additional argument
__ add(r0, r0, Operand(1));
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1176,22 +1168,22 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(r2);
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET,
ne);
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(0);
__ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ InvokeCode(r3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kIndexOffset = -5 * kPointerSize;
const int kLimitOffset = -4 * kPointerSize;
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
const int kArgsOffset = 2 * kPointerSize;
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
@@ -1219,8 +1211,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ push(r1);
__ push(r0);
__ Push(r1, r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
// End of stack check.
@@ -1278,13 +1269,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ b(&push_receiver);
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
__ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -1302,8 +1288,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// r0: current argument index
__ bind(&loop);
__ ldr(r1, MemOperand(fp, kArgsOffset));
__ push(r1);
__ push(r0);
__ Push(r1, r0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
@@ -1321,27 +1306,25 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ cmp(r0, r1);
__ b(ne, &loop);
// Invoke the function.
// Call the function.
Label call_proxy;
ParameterCount actual(r0);
__ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
// Invoke the function proxy.
// Call the function proxy.
__ bind(&call_proxy);
__ push(r1); // add function proxy as last argument
__ add(r0, r0, Operand(1));
__ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1355,8 +1338,11 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(3 * kPointerSize));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1366,7 +1352,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
__ ldr(r1, MemOperand(fp, -3 * kPointerSize));
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
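The symbolic offsets keep EnterArgumentsAdaptorFrame and LeaveArgumentsAdaptorFrame in sync across both frame layouts. The resulting frame, as a sketch (offsets from the new fp, kPointerSize == 4; the pp slot exists only with FLAG_enable_ool_constant_pool):
//   fp + 4 : saved lr
//   fp + 0 : caller fp
//   fp - 4 : pp                            (out-of-line constant pool only)
//   ...    : ARGUMENTS_ADAPTOR marker (r4)
//   ...    : function (r1)
//   ...    : argc as a smi (r0)  <- fp - (kFixedFrameSizeFromFp + kPointerSize)
// so the argc reload above lands on the same slot in either layout.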
@@ -1379,13 +1366,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
// -- r3 : code entry to call
// -- r5 : call kind information
// -----------------------------------
Label invoke, dont_adapt_arguments;
Label enough, too_few;
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1453,7 +1439,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: code entry to call
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
__ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
// Adjust for frame.
__ sub(r2, r2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);

1982
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

95
deps/v8/src/arm/code-stubs-arm.h

@@ -37,30 +37,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) { }
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
@@ -68,7 +44,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -82,18 +57,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersLong adds too much
// overhead. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
@@ -109,23 +72,6 @@ class StringHelper : public AllStatic {
int flags);
// Probe the string table for a two character string. If the string is
// not found by probing a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -143,32 +89,6 @@ class StringHelper : public AllStatic {
};
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
void GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* slow);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateRegisterArgsPop(MacroAssembler* masm);
const StringAddFlags flags_;
};
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -231,7 +151,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -279,8 +198,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -508,6 +425,18 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
struct PlatformCallInterfaceDescriptor {
explicit PlatformCallInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_

60
deps/v8/src/arm/codegen-arm.cc

@@ -37,18 +37,6 @@ namespace v8 {
namespace internal {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
case TranscendentalCache::TAN: return &tan;
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
return NULL;
}
#define __ masm.
@@ -62,10 +50,10 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -359,13 +347,33 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
#endif
#undef __
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
return &std::sqrt;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
__ MovFromFloatParameter(d0);
__ vsqrt(d0, d0);
__ MovToFloatResult(d0);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
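The generated stub behaves like a plain function pointer. A usage sketch (assuming UnaryMathFunction is the usual double (*)(double) typedef from codegen.h):
// Falls back to std::sqrt under the simulator or if code space runs out.
UnaryMathFunction fast_sqrt = CreateSqrtFunction();
double r = fast_sqrt(2.0);  // ~1.4142135623730951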
#undef __
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -836,8 +844,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
static byte* GetNoCodeAgeSequence(uint32_t* length) {
// The sequence of instructions that is patched out for aging code is the
@@ -847,11 +857,15 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
byte* byte_sequence = reinterpret_cast<byte*>(sequence);
*length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
if (!initialized) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
patcher.masm()->nop(ip.code());
patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
SmartPointer<CodePatcher>
patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
PredictableCodeSizeScope scope(patcher->masm(), *length);
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->nop(ip.code());
patcher->masm()->add(
fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
initialized = true;
}
return byte_sequence;
@@ -896,7 +910,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
patcher.masm()->emit_code_stub_address(stub);
}
}
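Decoded, the patched age sequence is two instructions plus a literal (standard ARM encodings; an add with a negative immediate assembles as sub):
//   0xe24f0008   sub r0, pc, #8      ; r0 <- start of the sequence
//   0xe51ff004   ldr pc, [pc, #-4]   ; branch through the literal below
//   .word stub->instruction_start()  ; emitted by emit_code_stub_address()
// These are the kCodeAgePatchFirstInstruction and kCodeAgeJumpInstruction
// constants, and the disasm-arm.cc change further down prints the trailing
// literal for exactly this pattern.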

41
deps/v8/src/arm/codegen-arm.h

@@ -34,50 +34,9 @@
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
explicit CodeGenerator(Isolate* isolate) {
InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
CompilationInfo* info);
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
static bool RecordPositions(MacroAssembler* masm,
int pos,
bool right_here = false);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
class StringCharLoadGenerator : public AllStatic {
public:

3
deps/v8/src/arm/constants-arm.h

@@ -50,6 +50,9 @@ inline int DecodeConstantPoolLength(int instr) {
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
const int kCodeAgeJumpInstruction = 0xe51ff004;
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;

9
deps/v8/src/arm/cpu-arm.cc

@@ -27,8 +27,13 @@
// CPU specific code for arm independent of OS goes here.
#ifdef __arm__
#ifdef __QNXNTO__
#include <sys/mman.h> // for cache flushing.
#undef MAP_TYPE
#else
#include <sys/syscall.h> // for cache flushing.
#endif
#endif
#include "v8.h"
@@ -57,13 +62,15 @@ void CPU::FlushICache(void* start, size_t size) {
return;
}
#if defined (USE_SIMULATOR)
#if defined(USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. We should notify the simulator
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
#elif V8_OS_QNX
msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,

10
deps/v8/src/arm/debug-arm.cc

@@ -265,9 +265,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
// -- r2 : cache cell for call target
// -- r2 : feedback array
// -- r3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
}
@@ -286,9 +287,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
// -- r2 : cache cell for call target
// -- r2 : feedback array
// -- r3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}

2
deps/v8/src/arm/deoptimizer-arm.cc

@@ -107,7 +107,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->environment_length();
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}

8
deps/v8/src/arm/disasm-arm.cc

@@ -1679,6 +1679,14 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
} else if (instruction_bits == kCodeAgeJumpInstruction) {
// The code age prologue has a constant immediately following the jump
// instruction.
Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
DecodeType2(instr);
OS::SNPrintF(out_buffer_ + out_buffer_pos_,
" (0x%08x)", target->InstructionBits());
return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:

15
deps/v8/src/arm/frames-arm.cc

@@ -42,10 +42,25 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
ASSERT(FLAG_enable_ool_constant_pool);
return pp;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
ASSERT(FLAG_enable_ool_constant_pool);
return pp;
}
Object*& ExitFrame::constant_pool_slot() const {
ASSERT(FLAG_enable_ool_constant_pool);
const int offset = ExitFrameConstants::kConstantPoolOffset;
return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal

12
deps/v8/src/arm/frames-arm.h

@@ -64,8 +64,8 @@ const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
1 << 7 | // r7 v4 (pp in JavaScript code)
1 << 8 | // r8 v5 (cp in JavaScript code)
1 << 7 | // r7 v4 (cp in JavaScript code)
1 << 8 | // r8 v5 (pp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
@@ -102,12 +102,18 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -3 * kPointerSize;
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = FLAG_enable_ool_constant_pool ?
3 * kPointerSize : 2 * kPointerSize;
static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
-3 * kPointerSize : 0;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
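Spelled out, the exit-frame slots above sit at (kPointerSize == 4):
//   fp -  4 : saved sp       (kSPOffset)
//   fp -  8 : code object    (kCodeOffset)
//   fp - 12 : constant pool  (kConstantPoolOffset, ool constant pool only)
// giving kFrameSize == 3 words with the out-of-line constant pool and 2
// without; ExitFrame::constant_pool_slot() in frames-arm.cc above reads the
// third slot.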

887
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

414
deps/v8/src/arm/ic-arm.cc

@@ -104,7 +104,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
}
// Helper function used from LoadIC/CallIC GenerateNormal.
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
@@ -333,320 +333,6 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r1 : receiver
// -- r2 : name
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
Code::NORMAL,
argc);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
// the corresponding JSValue for the cache and that is what we need
// to probe.
//
// Check for number.
__ JumpIfSmi(r1, &number);
__ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
__ b(ne, &non_number);
__ bind(&number);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::NUMBER_FUNCTION_INDEX, r1);
__ b(&probe);
// Check for string.
__ bind(&non_number);
__ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
__ b(hs, &non_string);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::STRING_FUNCTION_INDEX, r1);
__ b(&probe);
// Check for boolean.
__ bind(&non_string);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r1, ip);
__ b(eq, &boolean);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &miss);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
// Probe the stub cache for the value object.
__ bind(&probe);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
}
static void GenerateFunctionTailCall(MacroAssembler* masm,
int argc,
Label* miss,
Register scratch) {
// r1: function
// Check that the value isn't a smi.
__ JumpIfSmi(r1, miss);
// Check that the value is a JSFunction.
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
__ b(ne, miss);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(r1, actual, JUMP_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
}
void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
GenerateNameDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
// r0: elements
// Search the dictionary - put result in register r1.
GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
GenerateFunctionTailCall(masm, argc, &miss, r4);
__ bind(&miss);
}
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
Isolate* isolate = masm->isolate();
if (id == IC::kCallIC_Miss) {
__ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
} else {
__ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
}
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the receiver and the name of the function.
__ Push(r3, r2);
// Call the entry.
__ mov(r0, Operand(2));
__ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
CEntryStub stub(1);
__ CallStub(&stub);
// Move result to r1 and leave the internal frame.
__ mov(r1, Operand(r0));
}
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
__ JumpIfSmi(r2, &invoke);
__ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
__ b(eq, &global);
__ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(ne, &invoke);
// Patch the receiver on the stack.
__ bind(&global);
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ str(r2, MemOperand(sp, argc * kPointerSize));
__ bind(&invoke);
}
// Invoke the function.
CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
__ InvokeFunction(r1,
actual,
JUMP_FUNCTION,
NullCallWrapper(),
call_kind);
}
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
Label check_number_dictionary, check_name, lookup_monomorphic_cache;
Label index_smi, index_name;
// Check that the key is a smi.
__ JumpIfNotSmi(r2, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(
masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad(
masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
__ bind(&do_call);
// receiver in r1 is not used after this point.
// r2: key
// r1: function
GenerateFunctionTailCall(masm, argc, &slow_call, r0);
__ bind(&check_number_dictionary);
// r2: key
// r3: elements map
// r4: elements
// Check whether the elements is a number dictionary.
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow_load);
__ SmiUntag(r0, r2);
// r0: untagged index
__ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r2); // save the key
__ Push(r1, r2); // pass the receiver and the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(r2); // restore the key
}
__ mov(r1, r0);
__ jmp(&do_call);
__ bind(&check_name);
GenerateKeyNameCheck(masm, r2, r0, r3, &index_name, &slow_call);
// The key is known to be a unique name.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
GenerateKeyedLoadReceiverCheck(
masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &lookup_monomorphic_cache);
GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
__ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
GenerateMonomorphicCacheProbe(masm,
argc,
Code::KEYED_CALL_IC,
Code::kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
// - the key is neither smi nor a unique name,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
__ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
GenerateMiss(masm, argc);
__ bind(&index_name);
__ IndexFromHash(r3, r2);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
// Check if the name is really a name.
Label miss;
__ JumpIfSmi(r2, &miss);
__ IsObjectNameType(r2, r0, &miss);
CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -655,9 +341,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -827,7 +511,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(r0, r2);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, MISS);
GenerateMiss(masm);
}
@@ -856,38 +540,11 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, MISS);
}
void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label slow, notin;
// Load receiver.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
__ ldr(r1, mapped_location);
GenerateFunctionTailCall(masm, argc, &slow, r3);
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
__ ldr(r1, unmapped_location);
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ cmp(r1, r3);
__ b(eq, &slow);
GenerateFunctionTailCall(masm, argc, &slow, r3);
__ bind(&slow);
GenerateMiss(masm, argc);
GenerateMiss(masm);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -900,9 +557,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ Push(r1, r0);
// Perform tail call to the entry.
ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1120,7 +776,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
GenerateMiss(masm, MISS);
GenerateMiss(masm);
}
@@ -1160,11 +816,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
GenerateMiss(masm, MISS);
GenerateMiss(masm);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1175,10 +831,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1268,6 +922,21 @@ static void KeyedStoreGenerateGenericHelper(
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, fast_double);
}
// HOLECHECK: guards "A[i] = V"
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element
Label holecheck_passed1;
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(scratch_value,
MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
__ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
__ b(ne, &holecheck_passed1);
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
slow);
__ bind(&holecheck_passed1);
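A concrete case the hole check guards against (illustrative JavaScript, given here as a comment; an indexed accessor on a prototype forces that prototype's elements into dictionary mode):
//   var a = [1, , 3];                       // a[1] is the hole
//   Object.defineProperty(Array.prototype, 1, { set: function(v) {} });
//   a[1] = 0;                               // must run the setter
// A store over the hole may thus need to run a prototype setter, so the
// stub stays on the fast path only when no prototype has dictionary
// elements.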
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
@@ -1315,6 +984,20 @@ static void KeyedStoreGenerateGenericHelper(
__ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
__ b(ne, slow);
}
// HOLECHECK: guards "A[i] double hole?"
// We have to see if the double version of the hole is present. If so,
// go to the runtime.
__ add(address, elements,
Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
- kHeapObjectTag));
__ ldr(scratch_value,
MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
__ cmp(scratch_value, Operand(kHoleNanUpper32));
__ b(ne, &fast_double_without_map_check);
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
slow);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value, key, elements, r3, d0,
&transition_double_elements);
@@ -1403,10 +1086,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ b(ne, &slow);
// Check if the object is a JS array or not.
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -1476,8 +1159,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1486,9 +1168,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@@ -1615,12 +1295,10 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
return;
}
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
address, cmp_instruction_address, delta);
}
#endif
Address patch_address =
cmp_instruction_address - delta * Instruction::kInstrSize;

489
deps/v8/src/arm/lithium-arm.cc

@@ -256,7 +256,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -272,11 +272,23 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
stream->Add(" + %d", offset());
stream->Add(" + ");
offset()->PrintTo(stream);
}
void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
stream->Add("#%d / ", arity());
}
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
stream->Add(" ");
}
stream->Add("#%d / ", arity());
}
@@ -301,28 +313,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[r2] #%d / ", arity());
}
void LCallNamed::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
void LCallGlobal::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -359,7 +349,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -562,8 +552,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -571,40 +560,35 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
LTemplateInstruction<1, I, T>* instr, int index) {
LTemplateResultInstruction<1>* instr, int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
LTemplateInstruction<1, I, T>* instr) {
LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
LTemplateInstruction<1, I, T>* instr, Register reg) {
LTemplateResultInstruction<1>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -758,13 +742,10 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
if (op == Token::MOD) {
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = UseFixedDouble(instr->right(), d2);
LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = UseFixedDouble(instr->right(), d1);
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
// We call a C function for double modulo. It can't trigger a GC. We need
// to use fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
return MarkAsCall(DefineFixedDouble(result, d1), instr);
return MarkAsCall(DefineFixedDouble(result, d0), instr);
} else {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
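Note: the input registers for the double modulo call move from d1/d2 to d0/d1 and the result from d1 to d0. A plausible reading, not stated in the diff, is alignment with the ARM hard-float calling convention, where a C call such as fmod takes its two double arguments in d0/d1 and returns in d0. The runtime call being lowered behaves like plain fmod:

#include <cmath>
#include <cstdio>

int main() {
  double left = 7.5, right = 2.0;
  // Under AAPCS-VFP, left arrives in d0, right in d1, and the result returns in d0.
  std::printf("fmod(%.1f, %.1f) = %.1f\n", left, right, std::fmod(left, right));
  return 0;
}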
@@ -859,17 +840,18 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
@@ -928,90 +910,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
argument_index_accumulator,
objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result = new(zone()) LEnvironment(
hydrogen_env->closure(),
hydrogen_env->frame_type(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer,
hydrogen_env->entry(),
zone());
int argument_index = *argument_index_accumulator;
int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
}
for (int i = object_index; i < objects_to_materialize->length(); ++i) {
HValue* object_to_materialize = objects_to_materialize->at(i);
int previously_materialized_object = -1;
for (int prev = 0; prev < i; ++prev) {
if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
previously_materialized_object = prev;
break;
}
}
int length = object_to_materialize->OperandCount();
bool is_arguments = object_to_materialize->IsArgumentsObject();
if (previously_materialized_object >= 0) {
result->AddDuplicateObject(previously_materialized_object);
continue;
} else {
result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
}
for (int i = is_arguments ? 1 : 0; i < length; ++i) {
LOperand* op;
HValue* value = object_to_materialize->OperandAt(i);
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else {
ASSERT(!value->IsPushArgument());
op = UseAny(value);
}
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
}
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
return result;
}
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1043,6 +941,9 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
LInstruction* goto_instr = CheckElideControlInstruction(instr);
if (goto_instr != NULL) return goto_instr;
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -1087,7 +988,7 @@ LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
return AssignEnvironment(DefineSameAsFirst(result));
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1119,11 +1020,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
LInnerAllocatedObject* result =
new(zone()) LInnerAllocatedObject(base_object);
return DefineAsRegister(result);
HInnerAllocatedObject* instr) {
LOperand* base_object = UseRegisterAtStart(instr->base_object());
LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
return DefineAsRegister(
new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1145,33 +1046,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LOuterContext(context));
}
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoCallJSFunction(
HCallJSFunction* instr) {
LOperand* function = UseFixed(instr->function(), r1);
LCallJSFunction* result = new(zone()) LCallJSFunction(function);
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LOperand* global_object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
const CallInterfaceDescriptor* descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
LOperand* op = UseFixed(instr->OperandAt(i),
descriptor->GetParameterRegister(i - 1));
ops.Add(op, zone());
}
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
return MarkAsCall(DefineFixed(result, r0), instr);
}
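Note: DoCallWithDescriptor is the one variable-arity lowering here: operand 0 is the call target (register or constant), and each following operand is pinned to the register the interface descriptor names for that parameter. A standalone analogue of the placement loop, with a hypothetical descriptor type in place of the V8 API:

#include <cstdio>
#include <string>
#include <vector>

struct Descriptor {
  std::vector<std::string> param_registers;  // e.g. {"r0", "r1", "r2"}
  const std::string& GetParameterRegister(int i) const { return param_registers[i]; }
};

int main() {
  Descriptor descriptor{{"r0", "r1", "r2"}};
  std::vector<std::string> operands = {"target", "receiver", "arg1"};

  // Mirrors the loop above: operand 0 is free, the rest are fixed.
  std::printf("%s: register or constant\n", operands[0].c_str());
  for (size_t i = 1; i < operands.size(); i++) {
    std::printf("%s: fixed in %s\n", operands[i].c_str(),
                descriptor.GetParameterRegister(static_cast<int>(i) - 1).c_str());
  }
  return 0;
}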
@@ -1189,9 +1095,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
case kMathSin: return DoMathSin(instr);
case kMathCos: return DoMathCos(instr);
case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1229,30 +1132,10 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathLog* result = new(zone()) LMathLog(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
}
LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathSin* result = new(zone()) LMathSin(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
}
LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathCos* result = new(zone()) LMathCos(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
}
LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathTan* result = new(zone()) LMathTan(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), d0);
return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
}
@@ -1269,43 +1152,16 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LOperand* input = UseRegisterAtStart(instr->value());
LMathSqrt* result = new(zone()) LMathSqrt(input);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineFixedDouble(result, d2);
}
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), r2);
return MarkAsCall(
DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
LOperand* input = UseRegisterAtStart(instr->value());
LMathPowHalf* result = new(zone()) LMathPowHalf(input);
return DefineAsRegister(result);
}
@@ -1328,8 +1184,8 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
return MarkAsCall(
DefineFixed(new(zone()) LCallFunction(context, function), r0), instr);
LCallFunction* call = new(zone()) LCallFunction(context, function);
return MarkAsCall(DefineFixed(call, r0), instr);
}
@@ -1378,9 +1234,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
LOperand* value = UseRegister(instr->left());
LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
return AssignEnvironment(DefineAsRegister(div));
}
@@ -1424,43 +1280,25 @@ bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
}
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
if (CpuFeatures::IsSupported(SUDIV)) {
// A value with an integer representation does not need to be transformed.
if (divisor->representation().IsInteger32()) {
return divisor;
// A change from an integer32 can be replaced by the integer32 value.
} else if (divisor->IsChange() &&
HChange::cast(divisor)->from().IsInteger32()) {
return HChange::cast(divisor)->value();
}
}
if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
CpuFeatures::IsSupported(SUDIV)) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
}
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
// LMathFloorOfDiv can only handle a subset of divisors, so fall
// back to a flooring division in all other cases.
HValue* right = instr->right();
if (!right->IsInteger32Constant() ||
(!CpuFeatures::IsSupported(SUDIV) &&
!HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()))) {
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(right);
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
}
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
? UseRegister(right)
: UseOrConstant(right);
LOperand* remainder = TempRegister();
ASSERT(CpuFeatures::IsSupported(SUDIV) ||
(right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value() &&
HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
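Note: HasMagicNumberForDivisor guards the constant-divisor path above; without SUDIV hardware division, dividing by a known constant compiles to a 64-bit multiply plus shift fix-ups. A self-checking sketch of the general technique for divisor 3, using the classic Hacker's Delight constant rather than V8's exact code path:

#include <cassert>
#include <cstdint>

int32_t DivBy3(int32_t n) {
  const int64_t kMagic = 0x55555556;  // ceil(2^32 / 3)
  // High half of the 64-bit product (arithmetic shift assumed, as on ARM).
  int32_t q = static_cast<int32_t>((kMagic * n) >> 32);
  q += static_cast<uint32_t>(n) >> 31;  // add 1 for negative n
  return q;
}

int main() {
  for (int32_t n = -100000; n <= 100000; n++) assert(DivBy3(n) == n / 3);
  return 0;
}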
@@ -1472,19 +1310,15 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
if (instr->RightIsPowerOf2()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseOrConstant(right));
UseConstant(right));
LInstruction* result = DefineAsRegister(mod);
return (left->CanBeNegative() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
} else if (instr->fixed_right_arg().has_value) {
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseRegisterAtStart(right));
return AssignEnvironment(DefineAsRegister(mod));
} else if (CpuFeatures::IsSupported(SUDIV)) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right));
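Note: the RightIsPowerOf2 fast path above reduces the modulus to a bit mask, and only needs an environment (a deopt point) when the result could be a minus-zero double, i.e. a negative left operand with a zero remainder. A hypothetical helper showing the arithmetic:

#include <cassert>
#include <cstdint>

int32_t ModPow2(int32_t n, int32_t d) {  // d must be a power of two, d > 0
  int32_t mask = d - 1;
  // For negative n, mask the magnitude and negate, matching truncating %.
  // A result of 0 with n < 0 is the case the minus-zero bailout covers.
  return (n < 0) ? -((-n) & mask) : (n & mask);
}

int main() {
  for (int32_t n = -64; n <= 64; n++) assert(ModPow2(n, 8) == n % 8);
  return 0;
}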
@@ -1669,6 +1503,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsExternal()) {
ASSERT(instr->left()->representation().IsExternal());
ASSERT(instr->right()->representation().IsInteger32());
ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
return result;
} else if (instr->representation().IsDouble()) {
if (instr->left()->IsMul()) {
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
@@ -1711,30 +1554,17 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
// We need to use fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), d2) :
UseFixedDouble(instr->right(), d1) :
UseFixed(instr->right(), r2);
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d3),
return MarkAsCall(DefineFixedDouble(result, d2),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
LOperand* global_object = UseTempRegister(instr->global_object());
LOperand* scratch = TempRegister();
LOperand* scratch2 = TempRegister();
LOperand* scratch3 = TempRegister();
LRandom* result = new(zone()) LRandom(
global_object, scratch, scratch2, scratch3);
return DefineFixedDouble(result, d7);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@@ -1783,6 +1613,16 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
LInstruction* goto_instr = CheckElideControlInstruction(instr);
if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
LOperand* scratch = TempRegister();
return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1865,19 +1705,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new(zone()) LValueOf(object, TempRegister());
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
@@ -1886,11 +1713,21 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
}
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
LOperand* value = UseRegister(instr->value());
return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = FLAG_debug_code
? UseRegisterAtStart(instr->index())
: UseRegisterOrConstantAtStart(instr->index());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
return new(zone()) LSeqStringSetChar(context, string, index, value);
}
@@ -1915,13 +1752,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r0);
return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -2006,7 +1836,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegisterAtStart(val);
if (val->CheckFlag(HInstruction::kUint32)) {
LNumberTagU* result = new(zone()) LNumberTagU(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
@@ -2017,8 +1847,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
: DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
? DefineAsRegister(new(zone()) LUint32ToSmi(value))
: DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2153,16 +1983,6 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r1);
LOperand* value = UseFixed(instr->value(), r0);
LStoreGlobalGeneric* result =
new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2213,20 +2033,13 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
}
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
if (!instr->is_external()) {
if (!instr->is_typed_elements()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2238,20 +2051,19 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
} else {
ASSERT(
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
IsDoubleOrFloatElementsKind(instr->elements_kind())));
LOperand* backing_store = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(backing_store, key);
}
DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
bool can_deoptimize = instr->RequiresHoleCheck() ||
(elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
elements_kind == EXTERNAL_UINT32_ELEMENTS ||
elements_kind == UINT32_ELEMENTS;
return can_deoptimize ? AssignEnvironment(result) : result;
}
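Note: the deopt condition just renamed to EXTERNAL_UINT32_ELEMENTS / UINT32_ELEMENTS exists because a uint32 element can exceed the int32 value range the optimized code assumes; such a load must bail out so the value can be materialized as a double. A standalone illustration:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t raw = 0x80000000u;                    // 2147483648, a legal uint32 element
  int32_t as_int32 = static_cast<int32_t>(raw);  // wraps to -2147483648: wrong value
  double as_double = static_cast<double>(raw);   // exact: 2147483648.0
  std::printf("int32: %d  double: %.0f\n", as_int32, as_double);
  return 0;
}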
@@ -2268,7 +2080,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_external()) {
if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2297,16 +2109,17 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
(instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
(instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
(instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
IsDoubleOrFloatElementsKind(instr->elements_kind())));
ASSERT((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
LOperand* backing_store = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2406,8 +2219,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
instr);
@@ -2478,7 +2291,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2528,15 +2341,8 @@ LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
LOperand* length;
LOperand* index;
if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
length = UseRegisterOrConstant(instr->length());
index = UseOrConstant(instr->index());
} else {
length = UseTempRegister(instr->length());
index = UseRegisterAtStart(instr->index());
}
LOperand* length = UseRegisterOrConstantAtStart(instr->length());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2556,7 +2362,10 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
LInstruction* goto_instr = CheckElideControlInstruction(instr);
if (goto_instr != NULL) return goto_instr;
return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
}
@@ -2607,8 +2416,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
instr->inlining_kind(),
instr->undefined_receiver());
instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2666,5 +2474,4 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
}
} } // namespace v8::internal

421
deps/v8/src/arm/lithium-arm.h

@@ -52,12 +52,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
V(CallGlobal) \
V(CallKeyed) \
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -72,6 +69,7 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -93,13 +91,10 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(ElementsKind) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -118,7 +113,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -130,7 +124,6 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
V(MathExp) \
V(MathFloor) \
V(MathFloorOfDiv) \
@@ -138,9 +131,7 @@ class LCodeGen;
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSin) \
V(MathSqrt) \
V(MathTan) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -150,13 +141,12 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
V(Random) \
V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -165,7 +155,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -178,7 +167,6 @@ class LCodeGen;
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
@@ -187,7 +175,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
@@ -305,10 +292,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
class LTemplateInstruction : public LInstruction {
template<int R>
class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -320,10 +305,20 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
};
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
class LTemplateInstruction : public LTemplateResultInstruction<R> {
protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
// Iterator support.
virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
@@ -490,10 +485,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
};
@@ -558,6 +549,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
@@ -656,6 +648,8 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -815,42 +809,6 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
};
class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
};
class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
};
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -885,15 +843,13 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
@@ -927,6 +883,22 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
"cmp-minus-zero-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
};
class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -1321,34 +1293,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
@@ -1368,41 +1312,39 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
LOperand* index,
LOperand* value) : encoding_(encoding) {
LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
inputs_[2] = value;
}
String::Encoding encoding() { return encoding_; }
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
LOperand* string() const { return inputs_[0]; }
LOperand* index() const { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
private:
String::Encoding encoding_;
DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
};
class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
LSeqStringSetChar(LOperand* context,
LOperand* string,
LOperand* index,
LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
inputs_[1] = string;
inputs_[2] = index;
inputs_[3] = value;
}
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* string() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
};
@@ -1451,28 +1393,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LRandom(LOperand* global_object,
LOperand* scratch,
LOperand* scratch2,
LOperand* scratch3) {
inputs_[0] = global_object;
temps_[0] = scratch;
temps_[1] = scratch2;
temps_[2] = scratch3;
}
LOperand* global_object() const { return inputs_[0]; }
LOperand* scratch() const { return temps_[0]; }
LOperand* scratch2() const { return temps_[1]; }
LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1599,20 +1519,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1628,6 +1534,12 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
bool is_typed_elements() const {
return is_external() || is_fixed_typed_array();
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1693,28 +1605,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreGlobalGeneric(LOperand* context,
LOperand* global_object,
LOperand* value) {
inputs_[0] = context;
inputs_[1] = global_object;
inputs_[2] = value;
}
LOperand* context() { return inputs_[0]; }
LOperand* global_object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1793,19 +1683,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
inputs_[1] = offset;
}
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1823,18 +1713,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
};
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1848,95 +1726,73 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
LOperand* context() { return inputs_[0]; }
};
class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
}
LOperand* global_object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
LOperand* function() { return inputs_[0]; }
class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
inputs_[1] = function;
LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
ZoneList<LOperand*>& operands,
Zone* zone)
: descriptor_(descriptor),
inputs_(descriptor->environment_length() + 1, zone) {
ASSERT(descriptor->environment_length() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
LOperand* target() const { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
const CallInterfaceDescriptor* descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
const CallInterfaceDescriptor* descriptor_;
ZoneList<LOperand*> inputs_;
class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallKeyed(LOperand* context, LOperand* key) {
inputs_[0] = context;
inputs_[1] = key;
}
LOperand* context() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
// Iterator support.
virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
int arity() const { return hydrogen()->argument_count() - 1; }
virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
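Note: LCallWithDescriptor is the only instruction in this header whose arity is not fixed at compile time, so it derives directly from LTemplateResultInstruction<1>, keeps its inputs in a growable ZoneList, and overrides the iterator hooks itself. A self-contained analogue of that shape, with invented names:

#include <cstdio>
#include <vector>

struct Operand { const char* name; };

struct Instruction {
  virtual ~Instruction() = default;
  virtual int InputCount() = 0;
  virtual Operand* InputAt(int i) = 0;
};

struct VariadicCall final : Instruction {
  explicit VariadicCall(std::vector<Operand> ops) : inputs_(std::move(ops)) {}
  // Iterator hooks read the growable list instead of a fixed-size container.
  int InputCount() override { return static_cast<int>(inputs_.size()); }
  Operand* InputAt(int i) override { return &inputs_[i]; }
 private:
  std::vector<Operand> inputs_;
};

int main() {
  VariadicCall call({{"target"}, {"r0"}, {"r1"}});
  for (int i = 0; i < call.InputCount(); i++)
    std::printf("%s ", call.InputAt(i)->name);
  std::printf("\n");
  return 0;
}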
class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallNamed(LOperand* context) {
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1958,35 +1814,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallGlobal(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2287,6 +2114,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
bool is_typed_elements() const {
return is_external() || is_fixed_typed_array();
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2735,20 +2568,18 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
class LChunkBuilder V8_FINAL BASE_EMBEDDED {
class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
: LChunkBuilderBase(graph->zone()),
chunk_(NULL),
info_(info),
graph_(graph),
zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
@@ -2767,15 +2598,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoRSub(HSub* instr);
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathCos(HUnaryMathOperation* instr);
LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
@@ -2791,7 +2618,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2841,7 +2667,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2850,21 +2676,15 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
template<int I, int T>
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LInstruction* Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result);
template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
int index);
template<int I, int T>
LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
Register reg);
template<int I, int T>
LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2879,10 +2699,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
@@ -2895,14 +2711,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;

1047
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

Some files were not shown because too many files changed in this diff
