
deps: update v8 to 3.15.11

v0.9.6-release
Fedor Indutny 12 years ago
parent
commit
7b4d95a976
Changed files (lines changed in parentheses):

  1. deps/v8/.gitignore (22)
  2. deps/v8/AUTHORS (3)
  3. deps/v8/ChangeLog (307)
  4. deps/v8/DEPS (2)
  5. deps/v8/Makefile (36)
  6. deps/v8/Makefile.android (4)
  7. deps/v8/OWNERS (11)
  8. deps/v8/PRESUBMIT.py (71)
  9. deps/v8/SConstruct (49)
  10. deps/v8/build/android.gypi (9)
  11. deps/v8/build/common.gypi (89)
  12. deps/v8/build/standalone.gypi (13)
  13. deps/v8/include/v8-profiler.h (17)
  14. deps/v8/include/v8.h (721)
  15. deps/v8/samples/shell.cc (27)
  16. deps/v8/src/accessors.cc (174)
  17. deps/v8/src/api.cc (500)
  18. deps/v8/src/api.h (6)
  19. deps/v8/src/arm/assembler-arm-inl.h (153)
  20. deps/v8/src/arm/assembler-arm.cc (275)
  21. deps/v8/src/arm/assembler-arm.h (113)
  22. deps/v8/src/arm/builtins-arm.cc (33)
  23. deps/v8/src/arm/code-stubs-arm.cc (1013)
  24. deps/v8/src/arm/code-stubs-arm.h (127)
  25. deps/v8/src/arm/codegen-arm.cc (256)
  26. deps/v8/src/arm/codegen-arm.h (16)
  27. deps/v8/src/arm/constants-arm.h (22)
  28. deps/v8/src/arm/debug-arm.cc (4)
  29. deps/v8/src/arm/deoptimizer-arm.cc (46)
  30. deps/v8/src/arm/disasm-arm.cc (43)
  31. deps/v8/src/arm/full-codegen-arm.cc (313)
  32. deps/v8/src/arm/ic-arm.cc (305)
  33. deps/v8/src/arm/lithium-arm.cc (320)
  34. deps/v8/src/arm/lithium-arm.h (590)
  35. deps/v8/src/arm/lithium-codegen-arm.cc (1216)
  36. deps/v8/src/arm/lithium-codegen-arm.h (38)
  37. deps/v8/src/arm/macro-assembler-arm.cc (288)
  38. deps/v8/src/arm/macro-assembler-arm.h (105)
  39. deps/v8/src/arm/regexp-macro-assembler-arm.cc (15)
  40. deps/v8/src/arm/regexp-macro-assembler-arm.h (1)
  41. deps/v8/src/arm/simulator-arm.cc (290)
  42. deps/v8/src/arm/simulator-arm.h (4)
  43. deps/v8/src/arm/stub-cache-arm.cc (206)
  44. deps/v8/src/array.js (20)
  45. deps/v8/src/assembler.cc (197)
  46. deps/v8/src/assembler.h (86)
  47. deps/v8/src/ast.cc (22)
  48. deps/v8/src/ast.h (50)
  49. deps/v8/src/atomicops.h (8)
  50. deps/v8/src/atomicops_internals_tsan.h (335)
  51. deps/v8/src/bootstrapper.cc (69)
  52. deps/v8/src/bootstrapper.h (2)
  53. deps/v8/src/builtins.cc (704)
  54. deps/v8/src/builtins.h (31)
  55. deps/v8/src/code-stubs.cc (185)
  56. deps/v8/src/code-stubs.h (278)
  57. deps/v8/src/codegen.cc (1)
  58. deps/v8/src/codegen.h (14)
  59. deps/v8/src/collection.js (46)
  60. deps/v8/src/compilation-cache.cc (2)
  61. deps/v8/src/compiler.cc (117)
  62. deps/v8/src/compiler.h (29)
  63. deps/v8/src/contexts.cc (37)
  64. deps/v8/src/contexts.h (29)
  65. deps/v8/src/counters.cc (7)
  66. deps/v8/src/d8.cc (415)
  67. deps/v8/src/d8.gyp (5)
  68. deps/v8/src/d8.h (28)
  69. deps/v8/src/date.js (2)
  70. deps/v8/src/dateparser-inl.h (3)
  71. deps/v8/src/debug-debugger.js (121)
  72. deps/v8/src/debug.cc (21)
  73. deps/v8/src/debug.h (1)
  74. deps/v8/src/deoptimizer.cc (263)
  75. deps/v8/src/deoptimizer.h (55)
  76. deps/v8/src/elements-kind.cc (9)
  77. deps/v8/src/elements-kind.h (8)
  78. deps/v8/src/elements.cc (663)
  79. deps/v8/src/elements.h (43)
  80. deps/v8/src/execution.cc (36)
  81. deps/v8/src/execution.h (7)
  82. deps/v8/src/extensions/externalize-string-extension.cc (5)
  83. deps/v8/src/extensions/gc-extension.cc (6)
  84. deps/v8/src/factory.cc (54)
  85. deps/v8/src/factory.h (12)
  86. deps/v8/src/flag-definitions.h (48)
  87. deps/v8/src/frames.cc (4)
  88. deps/v8/src/full-codegen.cc (240)
  89. deps/v8/src/full-codegen.h (19)
  90. deps/v8/src/global-handles.cc (103)
  91. deps/v8/src/global-handles.h (23)
  92. deps/v8/src/globals.h (15)
  93. deps/v8/src/handles.cc (81)
  94. deps/v8/src/handles.h (12)
  95. deps/v8/src/heap-inl.h (47)
  96. deps/v8/src/heap-profiler.cc (43)
  97. deps/v8/src/heap-profiler.h (32)
  98. deps/v8/src/heap.cc (706)
  99. deps/v8/src/heap.h (177)
  100. deps/v8/src/hydrogen-instructions.cc (502)

deps/v8/.gitignore (22 lines changed)

@@ -18,6 +18,7 @@
 #*#
 *~
 .cpplint-cache
+.d8_history
 d8
 d8_g
 shell
@@ -25,17 +26,32 @@ shell_g
 /build/Debug
 /build/gyp
 /build/Release
-/obj/
-/out/
+/obj
+/out
+/test/cctest/cctest.status2
 /test/es5conform/data
+/test/message/message.status2
+/test/mjsunit/mjsunit.status2
+/test/mozilla/CHECKED_OUT_VERSION
 /test/mozilla/data
+/test/mozilla/downloaded_*
+/test/mozilla/mozilla.status2
+/test/preparser/preparser.status2
 /test/sputnik/sputniktests
 /test/test262/data
+/test/test262/test262-*
+/test/test262/test262.status2
 /third_party
+/tools/jsfunfuzz
+/tools/jsfunfuzz.zip
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug
 /tools/visual_studio/Release
-/xcodebuild/
+/xcodebuild
 TAGS
 *.Makefile
+GTAGS
+GRTAGS
+GSYMS
+GPATH

deps/v8/AUTHORS (3 lines changed)

@@ -20,6 +20,7 @@ Burcu Dogan <burcujdogan@gmail.com>
 Craig Schlenter <craig.schlenter@gmail.com>
 Daniel Andersson <kodandersson@gmail.com>
 Daniel James <dnljms@gmail.com>
+Derek J Conrod <dconrod@codeaurora.org>
 Dineel D Sule <dsule@codeaurora.org>
 Erich Ocean <erich.ocean@me.com>
 Fedor Indutny <fedor@indutny.com>
@@ -44,6 +45,7 @@ Paolo Giarrusso <p.giarrusso@gmail.com>
 Patrick Gansterer <paroga@paroga.com>
 Peter Varga <pvarga@inf.u-szeged.hu>
 Rafal Krypa <rafal@krypa.net>
+Rajeev R Krithivasan <rkrithiv@codeaurora.org>
 Rene Rebe <rene@exactcode.de>
 Robert Mustacchi <rm@fingolfin.org>
 Rodolph Perfetta <rodolph.perfetta@arm.com>
@@ -53,6 +55,7 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
 Subrato K De <subratokde@codeaurora.org>
 Tobias Burnus <burnus@net-b.de>
 Vlad Burlik <vladbph@gmail.com>
+Xi Qian <xi.qian@intel.com>
 Yuqiang Xian <yuqiang.xian@intel.com>
 Zaheer Ahmad <zahmad@codeaurora.org>
 Zhongping Wang <kewpie.w.zp@gmail.com>

deps/v8/ChangeLog (307 lines changed)

@@ -1,3 +1,310 @@
+2012-12-10: Version 3.15.11
+
+        Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP
+        flags.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-12-07: Version 3.15.10
+
+        Enabled optimisation of functions inside eval. (issue 2315)
+
+        Fixed spec violations in methods of Number.prototype. (issue 2443)
+
+        Added GCTracer metrics for a scavenger GC for DOM wrappers.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-12-06: Version 3.15.9
+
+        Fixed candidate eviction in code flusher.
+        (Chromium issue 159140)
+
+        Iterate through all arguments for side effects in Math.min/max.
+        (issue 2444)
+
+        Fixed spec violations related to regexp.lastIndex
+        (issue 2437, issue 2438)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-12-04: Version 3.15.8
+
+        Enforced stack allocation of TryCatch blocks.
+        (issue 2166,chromium:152389)
+
+        Fixed external exceptions in external try-catch handlers.
+        (issue 2166)
+
+        Activated incremental code flushing by default.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-11-30: Version 3.15.7
+
+        Activated code aging by default.
+
+        Included more information in --prof log.
+
+        Removed eager sweeping for lazy swept spaces. Try to find in
+        SlowAllocateRaw a bounded number of times a big enough memory slot.
+        (issue 2194)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-11-26: Version 3.15.6
+
+        Ensure double arrays are filled with holes when extended from
+        variations of empty arrays. (Chromium issue 162085)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-11-23: Version 3.15.5
+
+        Fixed JSON.stringify for objects with interceptor handlers.
+        (Chromium issue 161028)
+
+        Fixed corner case in x64 compare stubs. (issue 2416)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-11-16: Version 3.15.4
+
+        Fixed Array.prototype.join evaluation order. (issue 2263)
+
+        Perform CPU sampling by CPU sampling thread only iff processing thread
+        is not running. (issue 2364)
+
+        When using an Object as a set in Object.getOwnPropertyNames, null out
+        the proto. (issue 2410)
+
+        Disabled EXTRA_CHECKS in Release build.
+
+        Heap explorer: Show representation of strings.
+
+        Removed 'type' and 'arguments' properties from Error object.
+        (issue 2397)
+
+        Added atomics implementation for ThreadSanitizer v2.
+        (Chromium issue 128314)
+
+        Fixed LiveEdit crashes when object/array literal is added. (issue 2368)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-11-13: Version 3.15.3
+
+        Changed sample shell to send non-JS output (e.g. errors) to stderr
+        instead of stdout.
+
+        Correctly check for stack overflow even when interrupt is pending.
+        (issue 214)
+
+        Collect stack trace on stack overflow. (issue 2394)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-11-12: Version 3.15.2
+
+        Function::GetScriptOrigin supplies sourceURL when script name is
+        not available. (Chromium issue 159413)
+
+        Made formatting error message side-effect-free. (issue 2398)
+
+        Fixed length check in JSON.stringify. (Chromium issue 160010)
+
+        ES6: Added support for Set and Map clear method (issue 2400)
+
+        Fixed slack tracking when instance prototype changes.
+        (Chromium issue 157019)
+
+        Fixed disabling of code flusher while marking. (Chromium issue 159140)
+
+        Added a test case for object grouping in a scavenger GC (issue 2077)
+
+        Support shared library build of Android for v8.
+        (Chromium issue 158821)
+
+        ES6: Added support for size to Set and Map (issue 2395)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-11-06: Version 3.15.1
+
+        Put incremental code flushing behind a flag. (Chromium issue 159140)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-10-31: Version 3.15.0
+
+        Loosened aligned code target requirement on ARM (issue 2380)
+
+        Fixed JSON.parse to treat leading zeros correctly.
+        (Chromium issue 158185)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-10-22: Version 3.14.5
+
+        Killed off the SCons based build.
+
+        Added a faster API for creating v8::Integer objects.
+
+        Speeded up function deoptimization by avoiding quadratic pass over
+        optimized function list. (Chromium issue 155270)
+
+        Always invoke the default Array.sort functions from builtin functions.
+        (issue 2372)
+
+        Reverted recent CPU profiler changes because they broke --prof.
+        (issue 2364)
+
+        Switched code flushing to use different JSFunction field.
+        (issue 1609)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-10-15: Version 3.14.4
+
+        Allow evals for debugger even if they are prohibited in the debugee
+        context. (Chromium issue 154733)
+
+        Enabled --verify-heap in release mode (issue 2120)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-10-11: Version 3.14.3
+
+        Use native context to retrieve ErrorMessageForCodeGenerationFromStrings
+        (Chromium issue 155076).
+
+        Bumped variable limit further to 2^17 (Chromium issue 151625).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-10-10: Version 3.14.2
+
+        ARM: allowed VFP3 instructions when hardfloat is enabled.
+        (Chromium issue 152506)
+
+        Fixed instance_descriptors() and PushStackTraceAndDie regressions.
+        (Chromium issue 151749)
+
+        Made GDBJIT interface compile again. (issue 1804)
+
+        Fixed Accessors::FunctionGetPrototype's proto chain traversal.
+        (Chromium issue 143967)
+
+        Made sure that names of temporaries do not clash with real variables.
+        (issue 2322)
+
+        Rejected local module declarations. (Chromium issue 150628)
+
+        Rejected uses of lexical for-loop variable on the RHS. (issue 2322)
+
+        Fixed slot recording of code target patches.
+        (Chromium issue 152615,chromium:144230)
+
+        Changed the Android makefile to use GCC 4.6 instead of GCC 4.4.3.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-10-01: Version 3.14.1
+
+        Don't set -m32 flag when compiling with Android ARM compiler.
+        (Chromium issue 143889)
+
+        Restore the descriptor array before returning allocation failure.
+        (Chromium issue 151750)
+
+        Lowered kMaxVirtualRegisters (v8 issue 2139, Chromium issues 123822 and
+        128252).
+
+        Pull more recent gyp in 'make dependencies'.
+
+        Made sure that the generic KeyedStoreIC changes length and element_kind
+        atomically (issue 2346).
+
+        Bumped number of allowed variables per scope to 65535, to address GWT.
+        (Chromium issue 151625)
+
+        Support sourceURL for dynamically inserted scripts (issue 2342).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-09-20: Version 3.14.0
+
+        Fixed missing slot recording during clearing of CallICs.
+        (Chromium issue 144230)
+
+        Fixed LBoundsCheck on x64 to handle (stack slot + constant) correctly.
+        (Chromium issue 150729)
+
+        Fixed minus zero test. (Issue 2133)
+
+        Fixed setting array length to zero for slow elements.
+        (Chromium issue 146910)
+
+        Fixed lost arguments dropping in HLeaveInlined.
+        (Chromium issue 150545)
+
+        Fixed casting error for receiver of interceptors.
+        (Chromium issue 149912)
+
+        Throw a more descriptive exception when blocking 'eval' via CSP.
+        (Chromium issue 140191)
+
+        Fixed debugger's eval when close to stack overflow. (issue 2318)
+
+        Added checks to live edit. (issue 2297)
+
+        Switched on code compaction on incremental GCs.
+
+        Fixed caching of optimized code for OSR. (issue 2326)
+
+        Not mask exception thrown by toString in String::UtfValue etc.
+        (issue 2317)
+
+        Fixed API check for length of external arrays. (Chromium issue 148896)
+
+        Ensure correct enumeration indices in the dict (Chromium issue 148376)
+
+        Correctly initialize regexp global cache. (Chromium issue 148378)
+
+        Fixed arguments object materialization during deopt. (issue 2261)
+
+        Introduced new API to expose external string resource regardless of
+        encoding.
+
+        Fixed CHECK failure in LCodeGen::DoWrapReceiver when
+        --deopt-every-n-times flag is present
+        (Chromium issue 148389)
+
+        Fixed edge case of extension with NULL as source string.
+        (Chromium issue 144649)
+
+        Fixed array index dehoisting. (Chromium issue 141395)
+
+        Performance and stability improvements on all platforms.
+
+
 2012-09-11: Version 3.13.7
 
         Enable/disable LiveEdit using the (C++) debug API.
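
The 3.15.8 entry above ("Enforced stack allocation of TryCatch blocks") tightens an embedder-facing contract. Below is a minimal sketch, assuming the 3.15-era public C++ API, of the usage that remains legal; RunAndCatch is an illustrative name, not code from this commit.

#include <cstdio>
#include <v8.h>

// Illustrative helper: v8::TryCatch must live on the stack of the thread
// that enters V8; heap-allocating it is what 3.15.8 starts rejecting.
bool RunAndCatch(v8::Handle<v8::Context> context,
                 v8::Handle<v8::String> source) {
  v8::HandleScope handle_scope;
  v8::Context::Scope context_scope(context);
  v8::TryCatch try_catch;  // stack-allocated, per the 3.15.8 entry
  v8::Handle<v8::Script> script = v8::Script::Compile(source);
  if (script.IsEmpty() || script->Run().IsEmpty()) {
    v8::String::Utf8Value error(try_catch.Exception());
    fprintf(stderr, "Caught: %s\n", *error ? *error : "<unknown>");
    return false;
  }
  return true;
}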

deps/v8/DEPS (2 lines changed)

@@ -5,7 +5,7 @@
 deps = {
   # Remember to keep the revision in sync with the Makefile.
   "v8/build/gyp":
-    "http://gyp.googlecode.com/svn/trunk@1282",
+    "http://gyp.googlecode.com/svn/trunk@1501",
 }
 
 deps_os = {

deps/v8/Makefile (36 lines changed)

@@ -24,14 +24,13 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-PYTHON ?= python
-
 # Variable default definitions. Override them by exporting them in your shell.
 CXX ?= g++
 LINK ?= g++
 OUTDIR ?= out
-TESTJOBS ?= -j16
+TESTJOBS ?=
 GYPFLAGS ?=
 TESTFLAGS ?=
 ANDROID_NDK_ROOT ?=
@@ -59,6 +58,10 @@ endif
 ifeq ($(objectprint), on)
   GYPFLAGS += -Dv8_object_print=1
 endif
+# verifyheap=on
+ifeq ($(verifyheap), on)
+  GYPFLAGS += -Dv8_enable_verify_heap=1
+endif
 # snapshot=off
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
@@ -80,9 +83,9 @@ ifeq ($(liveobjectlist), on)
 endif
 # vfp3=off
 ifeq ($(vfp3), off)
-  GYPFLAGS += -Dv8_can_use_vfp_instructions=false
+  GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
 else
-  GYPFLAGS += -Dv8_can_use_vfp_instructions=true
+  GYPFLAGS += -Dv8_can_use_vfp3_instructions=true
 endif
 # debuggersupport=off
 ifeq ($(debuggersupport), off)
@@ -113,8 +116,6 @@ ifeq ($(hardfp), on)
   GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
 endif
 
-GYPFLAGS += "-Dpython=$(PYTHON)"
-
 # ----------------- available targets: --------------------
 # - "dependencies": pulls in external dependencies (currently: GYP)
 # - any arch listed in ARCHES (see below)
@@ -182,7 +183,7 @@ $(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
 	@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
 	         CXX="$(CXX)" LINK="$(LINK)" \
 	         BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
-	                     $(PYTHON) -c "print raw_input().capitalize()") \
+	                     python -c "print raw_input().capitalize()") \
 	         builddir="$(shell pwd)/$(OUTDIR)/$@"
 
 native: $(OUTDIR)/Makefile.native
@@ -202,20 +203,20 @@ $(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
 
 # Test targets.
 check: all
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
 	    --arch=$(shell echo $(DEFAULT_ARCHES) | sed -e 's/ /,/g') \
 	    $(TESTFLAGS)
 
 $(addsuffix .check,$(MODES)): $$(basename $$@)
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
 	    --mode=$(basename $@) $(TESTFLAGS)
 
 $(addsuffix .check,$(ARCHES)): $$(basename $$@)
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
 	    --arch=$(basename $@) $(TESTFLAGS)
 
 $(CHECKS): $$(basename $$@)
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
 	    --arch-and-mode=$(basename $@) $(TESTFLAGS)
 
 $(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
@@ -223,16 +224,16 @@ $(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
 	    $(shell pwd) $(ANDROID_V8)
 
 $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
 	    --arch-and-mode=$(basename $@) \
 	    --timeout=600 \
-	    --special-command="tools/android-run.py @"
+	    --command-prefix="tools/android-run.py"
 
 $(addsuffix .check, $(ANDROID_ARCHES)): \
 	$(addprefix $$(basename $$@).,$(MODES)).check
 
 native.check: native
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
+	@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
 	    --arch-and-mode=. $(TESTFLAGS)
 
 # Clean targets. You can clean each architecture individually, or everything.
@@ -253,14 +254,14 @@ clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean
 OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
 $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
 	GYP_GENERATORS=make \
-	$(PYTHON) build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. \
 	              -Dv8_target_arch=$(subst .,,$(suffix $@)) \
 	              -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
 
 $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
 	GYP_GENERATORS=make \
-	$(PYTHON) build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
 
 must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN:
@@ -283,6 +284,7 @@ $(ENVFILE).new:
 	echo "CXX=$(CXX)" >> $(ENVFILE).new
 
 # Dependencies.
+# Remember to keep these in sync with the DEPS file.
 dependencies:
 	svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
-	    --revision 1282
+	    --revision 1501

deps/v8/Makefile.android (4 lines changed)

@@ -48,11 +48,11 @@ endif
 ifeq ($(ARCH), android_arm)
   DEFINES  = target_arch=arm v8_target_arch=arm android_target_arch=arm
   DEFINES += arm_neon=0 armv7=1
-  TOOLCHAIN_ARCH = arm-linux-androideabi-4.4.3
+  TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
 else
   ifeq ($(ARCH), android_ia32)
     DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
-    TOOLCHAIN_ARCH = x86-4.4.3
+    TOOLCHAIN_ARCH = x86-4.6
   else
     $(error Target architecture "${ARCH}" is not supported)
   endif

deps/v8/OWNERS (11 lines changed)

@@ -0,0 +1,11 @@
+danno@chromium.org
+jkummerow@chromium.org
+mmassi@chromium.org
+mstarzinger@chromium.org
+mvstanton@chromium.org
+rossberg@chromium.org
+svenpanne@chromium.org
+ulan@chromium.org
+vegorov@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org

deps/v8/PRESUBMIT.py (71 lines changed)

@@ -0,0 +1,71 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Top-level presubmit script for V8.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into gcl.
+"""
+
+def _V8PresubmitChecks(input_api, output_api):
+  """Runs the V8 presubmit checks."""
+  import sys
+  sys.path.append(input_api.os_path.join(
+        input_api.PresubmitLocalPath(), 'tools'))
+  from presubmit import CppLintProcessor
+  from presubmit import SourceProcessor
+
+  results = []
+  if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
+    results.append(output_api.PresubmitError("C++ lint check failed"))
+  if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
+    results.append(output_api.PresubmitError(
+        "Copyright header and trailing whitespaces check failed"))
+  return results
+
+
+def _CommonChecks(input_api, output_api):
+  """Checks common to both upload and commit."""
+  results = []
+  results.extend(input_api.canned_checks.CheckOwners(
+      input_api, output_api, source_file_filter=None))
+  return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  results.extend(input_api.canned_checks.CheckChangeHasDescription(
+      input_api, output_api))
+  results.extend(_V8PresubmitChecks(input_api, output_api))
+  return results

deps/v8/SConstruct (49 lines changed)

@@ -59,7 +59,7 @@ LIBRARY_FLAGS = {
     'CPPDEFINES': ['V8_INTERPRETED_REGEXP']
   },
   'mode:debug': {
-    'CPPDEFINES': ['V8_ENABLE_CHECKS', 'OBJECT_PRINT']
+    'CPPDEFINES': ['V8_ENABLE_CHECKS', 'OBJECT_PRINT', 'VERIFY_HEAP']
  },
   'objectprint:on': {
     'CPPDEFINES': ['OBJECT_PRINT'],
@@ -1157,6 +1157,11 @@ SIMPLE_OPTIONS = {
     'default': 'on',
     'help': 'use fpu instructions when building the snapshot [MIPS only]'
   },
+  'I_know_I_should_build_with_GYP': {
+    'values': ['yes', 'no'],
+    'default': 'no',
+    'help': 'grace period: temporarily override SCons deprecation'
+  }
 }
 
 
@@ -1257,7 +1262,35 @@ def IsLegal(env, option, values):
   return True
 
 
+def WarnAboutDeprecation():
+  print """
+#####################################################################
+#                                                                   #
+#  LAST WARNING: Building V8 with SCons is deprecated.              #
+#                                                                   #
+#  This only works because you have overridden the kill switch.     #
+#                                                                   #
+#              MIGRATE TO THE GYP-BASED BUILD NOW!                  #
+#                                                                   #
+#  Instructions: http://code.google.com/p/v8/wiki/BuildingWithGYP.  #
+#                                                                   #
+#####################################################################
+"""
+
+
 def VerifyOptions(env):
+  if env['I_know_I_should_build_with_GYP'] != 'yes':
+    Abort("Building V8 with SCons is no longer supported. Please use GYP "
+          "instead; you can find instructions are at "
+          "http://code.google.com/p/v8/wiki/BuildingWithGYP.\n\n"
+          "Quitting.\n\n"
+          "For a limited grace period, you can specify "
+          "\"I_know_I_should_build_with_GYP=yes\" to override.")
+  else:
+    WarnAboutDeprecation()
+    import atexit
+    atexit.register(WarnAboutDeprecation)
   if not IsLegal(env, 'mode', ['debug', 'release']):
     return False
   if not IsLegal(env, 'sample', ["shell", "process", "lineprocessor"]):
@@ -1600,18 +1633,4 @@ try:
 except:
   pass
 
-
-def WarnAboutDeprecation():
-  print """
-#######################################################
-#  WARNING: Building V8 with SCons is deprecated and  #
-#  will not work much longer. Please switch to using  #
-#  the GYP-based build now. Instructions are at       #
-#  http://code.google.com/p/v8/wiki/BuildingWithGYP.  #
-#######################################################
-"""
-
-WarnAboutDeprecation()
-
-import atexit
-atexit.register(WarnAboutDeprecation)
 
 Build()

deps/v8/build/android.gypi (9 lines changed)

@@ -122,8 +122,6 @@
         'ldflags': [
           '-nostdlib',
           '-Wl,--no-undefined',
-          # Don't export symbols from statically linked libraries.
-          '-Wl,--exclude-libs=ALL',
         ],
         'libraries!': [
           '-lrt',  # librt is built into Bionic.
@@ -219,6 +217,13 @@
           ['_type=="shared_library"', {
             'ldflags': [
              '-Wl,-shared,-Bsymbolic',
+              '<(android_lib)/crtbegin_so.o',
+            ],
+          }],
+          ['_type=="static_library"', {
+            'ldflags': [
+              # Don't export symbols from statically linked libraries.
+              '-Wl,--exclude-libs=ALL',
             ],
           }],
         ],

deps/v8/build/common.gypi (89 lines changed)

@@ -43,7 +43,7 @@
     # access is allowed for all CPUs.
     'v8_can_use_unaligned_accesses%': 'default',
 
-    # Setting 'v8_can_use_vfp_instructions' to 'true' will enable use of ARM VFP
+    # Setting 'v8_can_use_vfp2_instructions' to 'true' will enable use of ARM VFP
     # instructions in the V8 generated code. VFP instructions will be enabled
     # both for the snapshot and for the ARM target. Leaving the default value
     # of 'false' will avoid VFP instructions in the snapshot and use CPU feature
@@ -70,16 +70,15 @@
     'v8_enable_disassembler%': 0,
 
-    # Enable extra checks in API functions and other strategic places.
-    'v8_enable_extra_checks%': 1,
+    'v8_enable_gdbjit%': 0,
 
     'v8_object_print%': 0,
 
-    'v8_enable_gdbjit%': 0,
-
     # Enable profiling support. Only required on Windows.
     'v8_enable_prof%': 0,
 
+    'v8_enable_verify_heap%': 0,
+
     # Some versions of GCC 4.5 seem to need -fno-strict-aliasing.
     'v8_no_strict_aliasing%': 0,
@@ -103,9 +102,6 @@
     # Interpreted regexp engine exists as platform-independent alternative
     # based where the regular expression is compiled to a bytecode.
     'v8_interpreted_regexp%': 0,
-
-    # Name of the python executable.
-    'python%': 'python',
   },
   'target_defaults': {
     'conditions': [
@@ -115,14 +111,14 @@
       ['v8_enable_disassembler==1', {
        'defines': ['ENABLE_DISASSEMBLER',],
       }],
-      ['v8_enable_extra_checks==1', {
-        'defines': ['ENABLE_EXTRA_CHECKS',],
+      ['v8_enable_gdbjit==1', {
+        'defines': ['ENABLE_GDB_JIT_INTERFACE',],
       }],
      ['v8_object_print==1', {
        'defines': ['OBJECT_PRINT',],
      }],
-      ['v8_enable_gdbjit==1', {
-        'defines': ['ENABLE_GDB_JIT_INTERFACE',],
+      ['v8_enable_verify_heap==1', {
+        'defines': ['VERIFY_HEAP',],
      }],
      ['v8_interpreted_regexp==1', {
        'defines': ['V8_INTERPRETED_REGEXP',],
@@ -132,6 +128,11 @@
          'V8_TARGET_ARCH_ARM',
        ],
        'conditions': [
+          ['armv7==1', {
+            'defines': [
+              'CAN_USE_ARMV7_INSTRUCTIONS=1',
+            ],
+          }],
          [ 'v8_can_use_unaligned_accesses=="true"', {
            'defines': [
              'CAN_USE_UNALIGNED_ACCESSES=1',
@@ -142,12 +143,16 @@
              'CAN_USE_UNALIGNED_ACCESSES=0',
            ],
          }],
-          [ 'v8_can_use_vfp2_instructions=="true"', {
+          # NEON implies VFP3 and VFP3 implies VFP2.
+          [ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
+             arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
            'defines': [
              'CAN_USE_VFP2_INSTRUCTIONS',
            ],
          }],
-          [ 'v8_can_use_vfp3_instructions=="true"', {
+          # NEON implies VFP3.
+          [ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
+             arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
            'defines': [
              'CAN_USE_VFP3_INSTRUCTIONS',
            ],
@@ -198,10 +203,11 @@
          ['mips_arch_variant=="mips32r2"', {
            'cflags': ['-mips32r2', '-Wa,-mips32r2'],
          }],
+          ['mips_arch_variant=="mips32r1"', {
+            'cflags': ['-mips32', '-Wa,-mips32'],
+          }],
          ['mips_arch_variant=="loongson"', {
            'cflags': ['-mips3', '-Wa,-mips3'],
-          }, {
-            'cflags': ['-mips32', '-Wa,-mips32'],
          }],
        ],
      }],
@@ -274,7 +280,8 @@
        },
      },
      }],
-      ['OS in "linux freebsd dragonflybsd openbsd solaris netbsd".split()', {
+      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+         or OS=="netbsd"', {
        'conditions': [
          [ 'v8_no_strict_aliasing==1', {
            'cflags': [ '-fno-strict-aliasing' ],
@@ -284,8 +291,8 @@
      ['OS=="solaris"', {
        'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
      }],
-      ['(OS=="linux" or OS=="freebsd" or OS=="dragonflybsd" or OS=="openbsd" \
-         or OS=="solaris" or OS=="netbsd" or OS=="mac" or OS=="android") and \
+      ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+         or OS=="netbsd" or OS=="mac" or OS=="android") and \
        (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
         v8_target_arch=="mipsel")', {
        # Check whether the host compiler and target compiler support the
@@ -304,16 +311,21 @@
        ['_toolset=="target"', {
          'variables': {
            'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+            'clang%': 0,
          },
-          'cflags': [ '<(m32flag)' ],
-          'ldflags': [ '<(m32flag)' ],
+          'conditions': [
+            ['OS!="android" or clang==1', {
+              'cflags': [ '<(m32flag)' ],
+              'ldflags': [ '<(m32flag)' ],
+            }],
+          ],
          'xcode_settings': {
            'ARCHS': [ 'i386' ],
          },
        }],
      ],
    }],
-    ['OS=="freebsd" or OS=="dragonflybsd" or OS=="openbsd"', {
+    ['OS=="freebsd" or OS=="openbsd"', {
      'cflags': [ '-I/usr/local/include' ],
    }],
    ['OS=="netbsd"', {
@@ -322,11 +334,15 @@
  ],  # conditions
  'configurations': {
    'Debug': {
+      'variables': {
+        'v8_enable_extra_checks%': 1,
+      },
      'defines': [
        'DEBUG',
        'ENABLE_DISASSEMBLER',
        'V8_ENABLE_CHECKS',
        'OBJECT_PRINT',
+        'VERIFY_HEAP',
      ],
      'msvs_settings': {
        'VCCLCompilerTool': {
@@ -345,7 +361,10 @@
        },
      },
      'conditions': [
-        ['OS in "linux freebsd dragonflybsd openbsd netbsd".split()', {
+        ['v8_enable_extra_checks==1', {
+          'defines': ['ENABLE_EXTRA_CHECKS',],
+        }],
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
          'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                      '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
        }],
@@ -363,12 +382,32 @@
        }],
      ],
    }],
+      ['OS=="mac"', {
+        'xcode_settings': {
+           'GCC_OPTIMIZATION_LEVEL': '0',  # -O0
+        },
+      }],
      ],
    },  # Debug
    'Release': {
+      'variables': {
+        'v8_enable_extra_checks%': 0,
+      },
      'conditions': [
-        ['OS=="linux" or OS=="freebsd" or OS=="dragonflybsd" \
-          or OS=="openbsd" or OS=="netbsd" or OS=="android"', {
+        ['v8_enable_extra_checks==1', {
+          'defines': ['ENABLE_EXTRA_CHECKS',],
+        }],
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
+          or OS=="android"', {
+          'cflags!': [
+            '-O2',
+            '-Os',
+          ],
+          'cflags': [
+            '-fdata-sections',
+            '-ffunction-sections',
+            '-O3',
+          ],
          'conditions': [
            [ 'gcc_version==44 and clang==0', {
              'cflags': [
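
The ARM hunks above derive CAN_USE_VFP2_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS from arm_neon and arm_fpu as well as from the explicit v8_can_use_vfp* flags, encoding the implication chain NEON implies VFP3 implies VFP2. A minimal sketch, assuming only the preprocessor contract shown here, of how such defines are consumed at compile time; ArmFpuSupport is an illustrative helper, not code from this commit:

#include <cstdio>

// Reports which floating-point path was compiled in, mirroring the
// implication chain from common.gypi: NEON => VFP3 => VFP2.
static const char* ArmFpuSupport() {
#if defined(CAN_USE_VFP3_INSTRUCTIONS)
  return "vfp3";      // set for arm_fpu=="vfpv3*" or arm_neon==1
#elif defined(CAN_USE_VFP2_INSTRUCTIONS)
  return "vfp2";      // implied by VFP3, or enabled explicitly
#else
  return "software";  // no VFP define: portable code paths only
#endif
}

int main() {
  std::printf("FPU support compiled in: %s\n", ArmFpuSupport());
  return 0;
}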

deps/v8/build/standalone.gypi (13 lines changed)

@@ -38,7 +38,8 @@
   'variables': {
     'variables': {
       'conditions': [
-        ['OS!="win"', {
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
+           OS=="netbsd" or OS=="mac"', {
           # This handles the Unix platforms we generally deal with.
           # Anything else gets passed through, which probably won't work
           # very well; such hosts should pass an explicit target_arch
@@ -46,8 +47,9 @@
           'host_arch%':
             '<!(uname -m | sed -e "s/i.86/ia32/;\
                  s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
-        }],
-        ['OS=="win"', {
+        }, {
+          # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
+          # OS!="netbsd" and OS!="mac"
           'host_arch%': 'ia32',
         }],
       ],
@@ -87,7 +89,8 @@
     },
   },
   'conditions': [
-    ['OS!="win"', {
+    ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+       or OS=="netbsd"', {
       'target_defaults': {
         'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                     '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
@@ -106,6 +109,8 @@
         ],
       },
     }],
+    # 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"
+    #  or OS=="netbsd"'
    ['OS=="win"', {
      'target_defaults': {
        'defines': [

deps/v8/include/v8-profiler.h (17 lines changed)

@@ -406,6 +406,20 @@ class V8EXPORT HeapProfiler {
    */
   static const SnapshotObjectId kUnknownObjectId = 0;
 
+  /**
+   * Callback interface for retrieving user friendly names of global objects.
+   */
+  class ObjectNameResolver {
+   public:
+    /**
+     * Returns name to be used in the heap snapshot for given node. Returned
+     * string must stay alive until snapshot collection is completed.
+     */
+    virtual const char* GetName(Handle<Object> object) = 0;
+   protected:
+    virtual ~ObjectNameResolver() {}
+  };
+
   /**
    * Takes a heap snapshot and returns it. Title may be an empty string.
    * See HeapSnapshot::Type for types description.
@@ -413,7 +427,8 @@ class V8EXPORT HeapProfiler {
   static const HeapSnapshot* TakeSnapshot(
       Handle<String> title,
       HeapSnapshot::Type type = HeapSnapshot::kFull,
-      ActivityControl* control = NULL);
+      ActivityControl* control = NULL,
+      ObjectNameResolver* global_object_name_resolver = NULL);
 
   /**
    * Starts tracking of heap objects population statistics. After calling
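
The ObjectNameResolver hook added above lets an embedder attach friendly names to global objects in heap snapshots. A minimal usage sketch against the declaration shown; EmbedderNameResolver and the returned literal are illustrative, not code from this commit:

#include <v8.h>
#include <v8-profiler.h>

// The returned string must stay alive until snapshot collection completes,
// so a string literal (static storage) is the simplest safe choice.
class EmbedderNameResolver : public v8::HeapProfiler::ObjectNameResolver {
 public:
  virtual const char* GetName(v8::Handle<v8::Object> object) {
    return "MyGlobal";
  }
};

void TakeNamedSnapshot() {
  v8::HandleScope scope;
  EmbedderNameResolver resolver;
  const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
      v8::String::New("named"), v8::HeapSnapshot::kFull, NULL, &resolver);
  (void) snapshot;  // inspect via the HeapSnapshot API
}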

deps/v8/include/v8.h (721 lines changed)

File diff suppressed because it is too large

deps/v8/samples/shell.cc (27 lines changed)

@@ -72,7 +72,7 @@ int main(int argc, char* argv[]) {
   v8::HandleScope handle_scope;
   v8::Persistent<v8::Context> context = CreateShellContext();
   if (context.IsEmpty()) {
-    printf("Error creating context\n");
+    fprintf(stderr, "Error creating context\n");
     return 1;
   }
   context->Enter();
@@ -226,7 +226,8 @@ int RunMain(int argc, char* argv[]) {
       // alone JavaScript engines.
       continue;
     } else if (strncmp(str, "--", 2) == 0) {
-      printf("Warning: unknown flag %s.\nTry --help for options\n", str);
+      fprintf(stderr,
+              "Warning: unknown flag %s.\nTry --help for options\n", str);
     } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
       // Execute argument given to -e option directly.
       v8::Handle<v8::String> file_name = v8::String::New("unnamed");
@@ -237,7 +238,7 @@ int RunMain(int argc, char* argv[]) {
       v8::Handle<v8::String> file_name = v8::String::New(str);
       v8::Handle<v8::String> source = ReadFile(str);
       if (source.IsEmpty()) {
-        printf("Error reading '%s'\n", str);
+        fprintf(stderr, "Error reading '%s'\n", str);
         continue;
       }
       if (!ExecuteString(source, file_name, false, true)) return 1;
@@ -249,20 +250,20 @@ int RunMain(int argc, char* argv[]) {
 
 // The read-eval-execute loop of the shell.
 void RunShell(v8::Handle<v8::Context> context) {
-  printf("V8 version %s [sample shell]\n", v8::V8::GetVersion());
+  fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
   static const int kBufferSize = 256;
   // Enter the execution environment before evaluating any code.
   v8::Context::Scope context_scope(context);
   v8::Local<v8::String> name(v8::String::New("(shell)"));
   while (true) {
     char buffer[kBufferSize];
-    printf("> ");
+    fprintf(stderr, "> ");
     char* str = fgets(buffer, kBufferSize, stdin);
     if (str == NULL) break;
     v8::HandleScope handle_scope;
     ExecuteString(v8::String::New(str), name, true, true);
   }
-  printf("\n");
+  fprintf(stderr, "\n");
 }
@@ -310,31 +311,31 @@ void ReportException(v8::TryCatch* try_catch) {
   if (message.IsEmpty()) {
     // V8 didn't provide any extra information about this error; just
     // print the exception.
-    printf("%s\n", exception_string);
+    fprintf(stderr, "%s\n", exception_string);
   } else {
     // Print (filename):(line number): (message).
     v8::String::Utf8Value filename(message->GetScriptResourceName());
     const char* filename_string = ToCString(filename);
     int linenum = message->GetLineNumber();
-    printf("%s:%i: %s\n", filename_string, linenum, exception_string);
+    fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
     // Print line of source code.
     v8::String::Utf8Value sourceline(message->GetSourceLine());
     const char* sourceline_string = ToCString(sourceline);
-    printf("%s\n", sourceline_string);
+    fprintf(stderr, "%s\n", sourceline_string);
     // Print wavy underline (GetUnderline is deprecated).
     int start = message->GetStartColumn();
     for (int i = 0; i < start; i++) {
-      printf(" ");
+      fprintf(stderr, " ");
     }
     int end = message->GetEndColumn();
     for (int i = start; i < end; i++) {
-      printf("^");
+      fprintf(stderr, "^");
    }
-    printf("\n");
+    fprintf(stderr, "\n");
     v8::String::Utf8Value stack_trace(try_catch->StackTrace());
     if (stack_trace.length() > 0) {
       const char* stack_trace_string = ToCString(stack_trace);
-      printf("%s\n", stack_trace_string);
+      fprintf(stderr, "%s\n", stack_trace_string);
     }
   }
 }

deps/v8/src/accessors.cc (174 lines changed)

@@ -42,15 +42,11 @@ namespace internal {
 
 template <class C>
-static C* FindInPrototypeChain(Object* obj, bool* found_it) {
-  ASSERT(!*found_it);
-  Heap* heap = HEAP;
-  while (!Is<C>(obj)) {
-    if (obj == heap->null_value()) return NULL;
-    obj = obj->GetPrototype();
+static C* FindInstanceOf(Object* obj) {
+  for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype()) {
+    if (Is<C>(cur)) return C::cast(cur);
   }
-  *found_it = true;
-  return C::cast(obj);
+  return NULL;
 }
@@ -81,10 +77,8 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
 
 MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
   // Traverse the prototype chain until we reach an array.
-  bool found_it = false;
-  JSArray* holder = FindInPrototypeChain<JSArray>(object, &found_it);
-  if (!found_it) return Smi::FromInt(0);
-  return holder->length();
+  JSArray* holder = FindInstanceOf<JSArray>(object);
+  return holder == NULL ? Smi::FromInt(0) : holder->length();
 }
@@ -118,7 +112,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
   HandleScope scope(isolate);
 
   // Protect raw pointers.
-  Handle<JSObject> object_handle(object, isolate);
+  Handle<JSArray> array_handle(JSArray::cast(object), isolate);
   Handle<Object> value_handle(value, isolate);
 
   bool has_exception;
@@ -128,7 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
   if (has_exception) return Failure::Exception();
 
   if (uint32_v->Number() == number_v->Number()) {
-    return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
+    return array_handle->SetElementsLength(*uint32_v);
   }
   return isolate->Throw(
       *isolate->factory()->NewRangeError("invalid_array_length",
@@ -448,15 +442,12 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
 
 MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
   Heap* heap = Isolate::Current()->heap();
-  bool found_it = false;
-  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return heap->undefined_value();
+  JSFunction* function = FindInstanceOf<JSFunction>(object);
+  if (function == NULL) return heap->undefined_value();
   while (!function->should_have_prototype()) {
-    found_it = false;
-    function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
-                                                &found_it);
+    function = FindInstanceOf<JSFunction>(function->GetPrototype());
     // There has to be one because we hit the getter.
-    ASSERT(found_it);
+    ASSERT(function != NULL);
   }
 
   if (!function->has_prototype()) {
@@ -474,25 +465,46 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
 
 MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
-                                             Object* value,
+                                             Object* value_raw,
                                              void*) {
-  Heap* heap = object->GetHeap();
-  bool found_it = false;
-  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return heap->undefined_value();
-  if (!function->should_have_prototype()) {
+  Isolate* isolate = object->GetIsolate();
+  Heap* heap = isolate->heap();
+  JSFunction* function_raw = FindInstanceOf<JSFunction>(object);
+  if (function_raw == NULL) return heap->undefined_value();
+  if (!function_raw->should_have_prototype()) {
     // Since we hit this accessor, object will have no prototype property.
     return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
-                                                    value,
+                                                    value_raw,
                                                     NONE);
   }
 
-  Object* prototype;
-  { MaybeObject* maybe_prototype = function->SetPrototype(value);
-    if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
+  HandleScope scope(isolate);
+  Handle<JSFunction> function(function_raw, isolate);
+  Handle<Object> value(value_raw, isolate);
+
+  Handle<Object> old_value;
+  bool is_observed =
+      FLAG_harmony_observation &&
+      *function == object &&
+      function->map()->is_observed();
+  if (is_observed) {
+    if (function->has_prototype())
+      old_value = handle(function->prototype(), isolate);
+    else
+      old_value = isolate->factory()->NewFunctionPrototype(function);
+  }
+
+  Handle<Object> result;
+  MaybeObject* maybe_result = function->SetPrototype(*value);
+  if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
+  ASSERT(function->prototype() == *value);
+
+  if (is_observed && !old_value->SameValue(*value)) {
+    JSObject::EnqueueChangeRecord(
+        function, "updated", isolate->factory()->prototype_symbol(), old_value);
   }
-  ASSERT(function->prototype() == value);
-  return function;
+
+  return *function;
 }
@@ -509,22 +521,20 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
 
 MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
-  bool found_it = false;
-  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return Smi::FromInt(0);
+  JSFunction* function = FindInstanceOf<JSFunction>(object);
+  if (function == NULL) return Smi::FromInt(0);
+
   // Check if already compiled.
-  if (!function->shared()->is_compiled()) {
-    // If the function isn't compiled yet, the length is not computed
-    // correctly yet. Compile it now and return the right length.
-    HandleScope scope;
-    Handle<JSFunction> handle(function);
-    if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
-      return Failure::Exception();
-    }
-    return Smi::FromInt(handle->shared()->length());
-  } else {
+  if (function->shared()->is_compiled()) {
     return Smi::FromInt(function->shared()->length());
   }
+
+  // If the function isn't compiled yet, the length is not computed correctly
+  // yet. Compile it now and return the right length.
+  HandleScope scope;
+  Handle<JSFunction> handle(function);
+  if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
+    return Smi::FromInt(handle->shared()->length());
+  }
+  return Failure::Exception();
 }
@@ -541,10 +551,8 @@ const AccessorDescriptor Accessors::FunctionLength = {
 
 MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
-  bool found_it = false;
-  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return HEAP->undefined_value();
-  return holder->shared()->name();
+  JSFunction* holder = FindInstanceOf<JSFunction>(object);
+  return holder == NULL ? HEAP->undefined_value() : holder->shared()->name();
 }
@@ -589,9 +597,8 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
 MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
   Isolate* isolate = Isolate::Current();
   HandleScope scope(isolate);
-  bool found_it = false;
-  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return isolate->heap()->undefined_value();
+  JSFunction* holder = FindInstanceOf<JSFunction>(object);
+  if (holder == NULL) return isolate->heap()->undefined_value();
   Handle<JSFunction> function(holder, isolate);
 
   if (function->shared()->native()) return isolate->heap()->null_value();
@@ -664,19 +671,6 @@ const AccessorDescriptor Accessors::FunctionArguments = {
 //
 
-static MaybeObject* CheckNonStrictCallerOrThrow(
-    Isolate* isolate,
-    JSFunction* caller) {
-  DisableAssertNoAllocation enable_allocation;
-  if (!caller->shared()->is_classic_mode()) {
-    return isolate->Throw(
-        *isolate->factory()->NewTypeError("strict_caller",
-                                          HandleVector<Object>(NULL, 0)));
-  }
-  return caller;
-}
-
 
 class FrameFunctionIterator {
  public:
  FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
@@ -727,9 +721,8 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
   Isolate* isolate = Isolate::Current();
   HandleScope scope(isolate);
   AssertNoAllocation no_alloc;
-  bool found_it = false;
-  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return isolate->heap()->undefined_value();
+  JSFunction* holder = FindInstanceOf<JSFunction>(object);
+  if (holder == NULL) return isolate->heap()->undefined_value();
   if (holder->shared()->native()) return isolate->heap()->null_value();
   Handle<JSFunction> function(holder, isolate);
@@ -764,7 +757,14 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
   if (caller->shared()->bound()) {
     return isolate->heap()->null_value();
   }
-  return CheckNonStrictCallerOrThrow(isolate, caller);
+  // Censor if the caller is not a classic mode function.
+  // Change from ES5, which used to throw, see:
+  // https://bugs.ecmascript.org/show_bug.cgi?id=310
+  if (!caller->shared()->is_classic_mode()) {
+    return isolate->heap()->null_value();
+  }
+  return caller;
 }
@@ -780,7 +780,7 @@ const AccessorDescriptor Accessors::FunctionCaller = {
 //
 
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) {
   Object* current = receiver->GetPrototype();
   while (current->IsJSObject() &&
          JSObject::cast(current)->map()->is_hidden_prototype()) {
@@ -790,12 +790,36 @@ MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
 }
 
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
-                                           Object* value,
+MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+  return GetPrototypeSkipHiddenPrototypes(receiver);
+}
+
+
+MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
+                                           Object* value_raw,
                                            void*) {
-  const bool skip_hidden_prototypes = true;
+  const bool kSkipHiddenPrototypes = true;
   // To be consistent with other Set functions, return the value.
-  return receiver->SetPrototype(value, skip_hidden_prototypes);
+  if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
+    return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
+
+  Isolate* isolate = receiver_raw->GetIsolate();
+  HandleScope scope(isolate);
+  Handle<JSObject> receiver(receiver_raw);
+  Handle<Object> value(value_raw);
+  Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(*receiver));
+
+  MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
+  Handle<Object> hresult;
+  if (!result->ToHandle(&hresult, isolate)) return result;
+
+  Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(*receiver));
+  if (!new_value->SameValue(*old_value)) {
+    JSObject::EnqueueChangeRecord(receiver, "prototype",
+                                  isolate->factory()->Proto_symbol(),
+                                  old_value);
+  }
+  return *hresult;
 }

500
deps/v8/src/api.cc

@@ -630,7 +630,16 @@ void V8::MakeWeak(i::Object** object, void* parameters,
   i::Isolate* isolate = i::Isolate::Current();
   LOG_API(isolate, "MakeWeak");
   isolate->global_handles()->MakeWeak(object, parameters,
                                       callback);
 }

+
+void V8::MakeWeak(i::Isolate* isolate, i::Object** object,
+                  void* parameters, WeakReferenceCallback callback) {
+  ASSERT(isolate == i::Isolate::Current());
+  LOG_API(isolate, "MakeWeak");
+  isolate->global_handles()->MakeWeak(object, parameters,
+                                      callback);
+}
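The explicit-isolate overloads exist so embedders can skip the TLS lookup in i::Isolate::Current(). A minimal sketch of driving this path through the public API, assuming the 3.15-era Persistent<T>::New/MakeWeak/Dispose signatures in this tree's include/v8.h; Wrapper and OnWeak are hypothetical names:

// Sketch only, not the library's documented usage: a weak persistent
// handle whose callback frees the native object it wraps.
#include <v8.h>

struct Wrapper { int payload; };  // hypothetical native state

static void OnWeak(v8::Persistent<v8::Value> handle, void* parameter) {
  delete static_cast<Wrapper*>(parameter);  // reclaim the C++ side
  handle.Dispose();                         // release the handle slot
}

void AttachWeak(v8::Isolate* isolate, v8::Handle<v8::Object> obj) {
  v8::Persistent<v8::Object> holder = v8::Persistent<v8::Object>::New(obj);
  holder.MakeWeak(isolate, new Wrapper(), &OnWeak);  // explicit-isolate overload
}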
@@ -643,11 +652,48 @@ void V8::ClearWeak(i::Object** obj) {
 void V8::MarkIndependent(i::Object** object) {
   i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "MakeIndependent");
+  LOG_API(isolate, "MarkIndependent");
   isolate->global_handles()->MarkIndependent(object);
 }

+
+void V8::MarkIndependent(i::Isolate* isolate, i::Object** object) {
+  ASSERT(isolate == i::Isolate::Current());
+  LOG_API(isolate, "MarkIndependent");
+  isolate->global_handles()->MarkIndependent(object);
+}
+
+
+void V8::MarkPartiallyDependent(i::Object** object) {
+  i::Isolate* isolate = i::Isolate::Current();
+  LOG_API(isolate, "MarkPartiallyDependent");
+  isolate->global_handles()->MarkPartiallyDependent(object);
+}
+
+
+void V8::MarkPartiallyDependent(i::Isolate* isolate, i::Object** object) {
+  ASSERT(isolate == i::Isolate::Current());
+  LOG_API(isolate, "MarkPartiallyDependent");
+  isolate->global_handles()->MarkPartiallyDependent(object);
+}
+
+
+bool V8::IsGlobalIndependent(i::Object** obj) {
+  i::Isolate* isolate = i::Isolate::Current();
+  LOG_API(isolate, "IsGlobalIndependent");
+  if (!isolate->IsInitialized()) return false;
+  return i::GlobalHandles::IsIndependent(obj);
+}
+
+
+bool V8::IsGlobalIndependent(i::Isolate* isolate, i::Object** obj) {
+  ASSERT(isolate == i::Isolate::Current());
+  LOG_API(isolate, "IsGlobalIndependent");
+  if (!isolate->IsInitialized()) return false;
+  return i::GlobalHandles::IsIndependent(obj);
+}
+
+
 bool V8::IsGlobalNearDeath(i::Object** obj) {
   i::Isolate* isolate = i::Isolate::Current();
   LOG_API(isolate, "IsGlobalNearDeath");
@@ -664,6 +710,14 @@ bool V8::IsGlobalWeak(i::Object** obj) {
 }

+
+bool V8::IsGlobalWeak(i::Isolate* isolate, i::Object** obj) {
+  ASSERT(isolate == i::Isolate::Current());
+  LOG_API(isolate, "IsGlobalWeak");
+  if (!isolate->IsInitialized()) return false;
+  return i::GlobalHandles::IsWeak(obj);
+}
+
 void V8::DisposeGlobal(i::Object** obj) {
   i::Isolate* isolate = i::Isolate::Current();
   LOG_API(isolate, "DisposeGlobal");
@@ -671,6 +725,14 @@ void V8::DisposeGlobal(i::Object** obj) {
   isolate->global_handles()->Destroy(obj);
 }

+
+void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
+  ASSERT(isolate == i::Isolate::Current());
+  LOG_API(isolate, "DisposeGlobal");
+  if (!isolate->IsInitialized()) return;
+  isolate->global_handles()->Destroy(obj);
+}
+
 // --- H a n d l e s ---
@@ -724,6 +786,12 @@ i::Object** HandleScope::CreateHandle(i::Object* value) {
 }

+
+i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
+  ASSERT(isolate == i::Isolate::Current());
+  return i::HandleScope::CreateHandle(value, isolate);
+}
+
 i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
   ASSERT(value->IsHeapObject());
   return reinterpret_cast<i::Object**>(
@@ -765,36 +833,77 @@ void Context::Exit() {
 }

-void Context::SetData(v8::Handle<String> data) {
-  i::Handle<i::Context> env = Utils::OpenHandle(this);
-  i::Isolate* isolate = env->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
-  i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
-  ASSERT(env->IsNativeContext());
-  if (env->IsNativeContext()) {
-    env->set_data(*raw_data);
-  }
-}
-
-
-v8::Local<v8::Value> Context::GetData() {
-  i::Handle<i::Context> env = Utils::OpenHandle(this);
-  i::Isolate* isolate = env->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
-    return v8::Local<Value>();
-  }
-  i::Object* raw_result = NULL;
-  ASSERT(env->IsNativeContext());
-  if (env->IsNativeContext()) {
-    raw_result = env->data();
-  } else {
-    return Local<Value>();
-  }
-  i::Handle<i::Object> result(raw_result, isolate);
-  return Utils::ToLocal(result);
-}
+static void* DecodeSmiToAligned(i::Object* value, const char* location) {
+  ApiCheck(value->IsSmi(), location, "Not a Smi");
+  return reinterpret_cast<void*>(value);
+}
+
+
+static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
+  i::Smi* smi = reinterpret_cast<i::Smi*>(value);
+  ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
+  return smi;
+}
+
+
+static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
+                                                int index,
+                                                bool can_grow,
+                                                const char* location) {
+  i::Handle<i::Context> env = Utils::OpenHandle(context);
+  bool ok = !IsDeadCheck(env->GetIsolate(), location) &&
+      ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
+      ApiCheck(index >= 0, location, "Negative index");
+  if (!ok) return i::Handle<i::FixedArray>();
+  i::Handle<i::FixedArray> data(env->embedder_data());
+  if (index < data->length()) return data;
+  if (!can_grow) {
+    Utils::ReportApiFailure(location, "Index too large");
+    return i::Handle<i::FixedArray>();
+  }
+  int new_size = i::Max(index, data->length() << 1) + 1;
+  data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size);
+  env->set_embedder_data(*data);
+  return data;
+}
+
+
+v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
+  const char* location = "v8::Context::GetEmbedderData()";
+  i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+  if (data.is_null()) return Local<Value>();
+  i::Handle<i::Object> result(data->get(index), data->GetIsolate());
+  return Utils::ToLocal(result);
+}
+
+
+void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
+  const char* location = "v8::Context::SetEmbedderData()";
+  i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
+  if (data.is_null()) return;
+  i::Handle<i::Object> val = Utils::OpenHandle(*value);
+  data->set(index, *val);
+  ASSERT_EQ(*Utils::OpenHandle(*value),
+            *Utils::OpenHandle(*GetEmbedderData(index)));
+}
+
+
+void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
+  const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
+  i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+  if (data.is_null()) return NULL;
+  return DecodeSmiToAligned(data->get(index), location);
+}
+
+
+void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
+  const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
+  i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
+  data->set(index, EncodeAlignedAsSmi(value, location));
+  ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
+}
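The embedder-data slots above replace the single Context::SetData/GetData pair with an indexed FixedArray that grows on demand; aligned pointers are stored Smi-encoded, so their low bit must be clear. A hedged embedder-side sketch (PerContextData and slot index 1 are illustrative, not prescribed):

// Sketch: tagging a context with a native pointer via the new slots.
#include <v8.h>

struct PerContextData { int id; };  // hypothetical per-context state

void TagContext(v8::Handle<v8::Context> context, PerContextData* data) {
  // data must be at least 2-byte aligned to survive the Smi encoding.
  context->SetAlignedPointerInEmbedderData(1, data);
}

PerContextData* DataFor(v8::Handle<v8::Context> context) {
  return static_cast<PerContextData*>(
      context->GetAlignedPointerFromEmbedderData(1));
}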
 i::Object** v8::HandleScope::RawClose(i::Object** value) {
   if (!ApiCheck(!is_closed_,
                 "v8::HandleScope::Close()",
@@ -816,7 +925,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
   }

   // Allocate a new handle on the previous handle block.
-  i::Handle<i::Object> handle(result);
+  i::Handle<i::Object> handle(result, isolate_);
   return handle.location();
 }
@@ -1151,7 +1260,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
 void FunctionTemplate::ReadOnlyPrototype() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
+  if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) {
     return;
   }
   ENTER_V8(isolate);
@@ -1595,6 +1704,8 @@ Local<Value> Script::Run() {
   ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
   LOG_API(isolate, "Script::Run");
   ENTER_V8(isolate);
+  i::Logger::TimerEventScope timer_scope(
+      isolate, i::Logger::TimerEventScope::v8_execute);
   i::Object* raw_result = NULL;
   {
     i::HandleScope scope(isolate);
@@ -2193,7 +2304,7 @@ bool Value::IsExternal() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
     return false;
   }
-  return Utils::OpenHandle(this)->IsForeign();
+  return Utils::OpenHandle(this)->IsExternal();
 }
@@ -2267,7 +2378,11 @@ static i::Object* LookupBuiltin(i::Isolate* isolate,
 static bool CheckConstructor(i::Isolate* isolate,
                              i::Handle<i::JSObject> obj,
                              const char* class_name) {
-  return obj->map()->constructor() == LookupBuiltin(isolate, class_name);
+  i::Object* constr = obj->map()->constructor();
+  if (!constr->IsJSFunction()) return false;
+  i::JSFunction* func = i::JSFunction::cast(constr);
+  return func->shared()->native() &&
+         constr == LookupBuiltin(isolate, class_name);
 }
@@ -2422,8 +2537,7 @@ Local<Integer> Value::ToInteger() const {
 void External::CheckCast(v8::Value* that) {
   if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
-  i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  ApiCheck(obj->IsForeign(),
+  ApiCheck(Utils::OpenHandle(that)->IsExternal(),
            "v8::External::Cast()",
            "Could not convert to external");
 }
@@ -2768,6 +2882,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> obj = i::SetProperty(
+      isolate,
       self,
       key_obj,
       value_obj,
@@ -3322,7 +3437,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
-  i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
+  i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol), isolate);
   if (result->IsUndefined()) return v8::Local<v8::Value>();
   return Utils::ToLocal(result);
 }
@@ -3559,6 +3674,8 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
              return Local<v8::Value>());
   LOG_API(isolate, "Object::CallAsFunction");
   ENTER_V8(isolate);
+  i::Logger::TimerEventScope timer_scope(
+      isolate, i::Logger::TimerEventScope::v8_execute);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@@ -3590,6 +3707,8 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
              return Local<v8::Object>());
   LOG_API(isolate, "Object::CallAsConstructor");
   ENTER_V8(isolate);
+  i::Logger::TimerEventScope timer_scope(
+      isolate, i::Logger::TimerEventScope::v8_execute);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3632,6 +3751,8 @@ Local<v8::Object> Function::NewInstance(int argc,
              return Local<v8::Object>());
   LOG_API(isolate, "Function::NewInstance");
   ENTER_V8(isolate);
+  i::Logger::TimerEventScope timer_scope(
+      isolate, i::Logger::TimerEventScope::v8_execute);
   HandleScope scope;
   i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3650,6 +3771,8 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
   ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
   LOG_API(isolate, "Function::Call");
   ENTER_V8(isolate);
+  i::Logger::TimerEventScope timer_scope(
+      isolate, i::Logger::TimerEventScope::v8_execute);
   i::Object* raw_result = NULL;
   {
     i::HandleScope scope(isolate);
@@ -3693,8 +3816,9 @@ ScriptOrigin Function::GetScriptOrigin() const {
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   if (func->shared()->script()->IsScript()) {
     i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+    i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
     v8::ScriptOrigin origin(
-        Utils::ToLocal(i::Handle<i::Object>(script->name())),
+        Utils::ToLocal(scriptName),
         v8::Integer::New(script->line_offset()->value()),
         v8::Integer::New(script->column_offset()->value()));
     return origin;
@@ -3757,7 +3881,7 @@ static int RecursivelySerializeToUtf8(i::String* string,
                                       int32_t* last_character) {
   int utf8_bytes = 0;
   while (true) {
-    if (string->IsAsciiRepresentation()) {
+    if (string->IsOneByteRepresentation()) {
       i::String::WriteToFlat(string, buffer, start, end);
       *last_character = unibrow::Utf16::kNoPreviousCharacter;
       return utf8_bytes + end - start;
@@ -3857,7 +3981,7 @@ int String::WriteUtf8(char* buffer,
     FlattenString(str);  // Flatten the string for efficiency.
   }
   int string_length = str->length();
-  if (str->IsAsciiRepresentation()) {
+  if (str->IsOneByteRepresentation()) {
     int len;
     if (capacity == -1) {
       capacity = str->length() + 1;
@@ -3991,7 +4115,7 @@ int String::WriteAscii(char* buffer,
     FlattenString(str);  // Flatten the string for efficiency.
   }
-  if (str->IsAsciiRepresentation()) {
+  if (str->IsOneByteRepresentation()) {
     // WriteToFlat is faster than using the StringInputBuffer.
     if (length == -1) length = str->length() + 1;
     int len = i::Min(length, str->length() - start);
@@ -4089,6 +4213,29 @@ void v8::String::VerifyExternalStringResource(
   CHECK_EQ(expected, value);
 }

+void v8::String::VerifyExternalStringResourceBase(
+    v8::String::ExternalStringResourceBase* value, Encoding encoding) const {
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  const v8::String::ExternalStringResourceBase* expected;
+  Encoding expectedEncoding;
+  if (i::StringShape(*str).IsExternalAscii()) {
+    const void* resource =
+        i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+    expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
+    expectedEncoding = ASCII_ENCODING;
+  } else if (i::StringShape(*str).IsExternalTwoByte()) {
+    const void* resource =
+        i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+    expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
+    expectedEncoding = TWO_BYTE_ENCODING;
+  } else {
+    expected = NULL;
+    expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
+                                                      : TWO_BYTE_ENCODING;
+  }
+  CHECK_EQ(expected, value);
+  CHECK_EQ(expectedEncoding, encoding);
+}

 const v8::String::ExternalAsciiStringResource*
       v8::String::GetExternalAsciiStringResource() const {
@@ -4163,75 +4310,65 @@ int v8::Object::InternalFieldCount() {
 }

-Local<Value> v8::Object::CheckedGetInternalField(int index) {
+static bool InternalFieldOK(i::Handle<i::JSObject> obj,
+                            int index,
+                            const char* location) {
+  return !IsDeadCheck(obj->GetIsolate(), location) &&
+      ApiCheck(index < obj->GetInternalFieldCount(),
+               location,
+               "Internal field out of bounds");
+}
+
+
+Local<Value> v8::Object::SlowGetInternalField(int index) {
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) {
-    return Local<Value>();
-  }
-  if (!ApiCheck(index < obj->GetInternalFieldCount(),
-                "v8::Object::GetInternalField()",
-                "Reading internal field out of bounds")) {
-    return Local<Value>();
-  }
-  i::Handle<i::Object> value(obj->GetInternalField(index));
-  Local<Value> result = Utils::ToLocal(value);
-#ifdef DEBUG
-  Local<Value> unchecked = UncheckedGetInternalField(index);
-  ASSERT(unchecked.IsEmpty() || (unchecked == result));
-#endif
-  return result;
+  const char* location = "v8::Object::GetInternalField()";
+  if (!InternalFieldOK(obj, index, location)) return Local<Value>();
+  i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
+  return Utils::ToLocal(value);
 }


 void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  i::Isolate* isolate = obj->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) {
-    return;
-  }
-  if (!ApiCheck(index < obj->GetInternalFieldCount(),
-                "v8::Object::SetInternalField()",
-                "Writing internal field out of bounds")) {
-    return;
-  }
-  ENTER_V8(isolate);
+  const char* location = "v8::Object::SetInternalField()";
+  if (!InternalFieldOK(obj, index, location)) return;
   i::Handle<i::Object> val = Utils::OpenHandle(*value);
   obj->SetInternalField(index, *val);
+  ASSERT_EQ(value, GetInternalField(index));
 }


-static bool CanBeEncodedAsSmi(void* ptr) {
-  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-  return ((address & i::kEncodablePointerMask) == 0);
-}
-
-
-static i::Smi* EncodeAsSmi(void* ptr) {
-  ASSERT(CanBeEncodedAsSmi(ptr));
-  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-  i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
-  ASSERT(i::Internals::HasSmiTag(result));
-  ASSERT_EQ(result, i::Smi::FromInt(result->value()));
-  ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
-  return result;
-}
-
-
-void v8::Object::SetPointerInInternalField(int index, void* value) {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  if (CanBeEncodedAsSmi(value)) {
-    Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
-  } else {
-    HandleScope scope;
-    i::Handle<i::Foreign> foreign =
-        isolate->factory()->NewForeign(
-            reinterpret_cast<i::Address>(value), i::TENURED);
-    if (!foreign.is_null()) {
-      Utils::OpenHandle(this)->SetInternalField(index, *foreign);
-    }
-  }
-  ASSERT_EQ(value, GetPointerFromInternalField(index));
+void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
+  if (!InternalFieldOK(obj, index, location)) return NULL;
+  return DecodeSmiToAligned(obj->GetInternalField(index), location);
+}
+
+
+void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  const char* location = "v8::Object::SetAlignedPointerInInternalField()";
+  if (!InternalFieldOK(obj, index, location)) return;
+  obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
+  ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
+}
+
+
+static void* ExternalValue(i::Object* obj) {
+  // Obscure semantics for undefined, but somehow checked in our unit tests...
+  if (obj->IsUndefined()) return NULL;
+  i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
+  return i::Foreign::cast(foreign)->foreign_address();
+}
+
+
+void* Object::GetPointerFromInternalField(int index) {
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  const char* location = "v8::Object::GetPointerFromInternalField()";
+  if (!InternalFieldOK(obj, index, location)) return NULL;
+  return ExternalValue(obj->GetInternalField(index));
 }
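The same Smi encoding now backs internal fields, replacing the Foreign fallback of the removed SetPointerInInternalField. A sketch of the wrap/unwrap pattern under the new API (Wrapped is a hypothetical type; the object's template is assumed to reserve one internal field):

// Sketch: aligned-pointer wrapping through internal field 0.
#include <v8.h>

class Wrapped { public: int value; };  // hypothetical native object

void Wrap(v8::Handle<v8::Object> holder, Wrapped* native) {
  holder->SetAlignedPointerInInternalField(0, native);
}

Wrapped* Unwrap(v8::Handle<v8::Object> holder) {
  return static_cast<Wrapped*>(
      holder->GetAlignedPointerFromInternalField(0));
}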
@@ -4286,6 +4423,7 @@ bool v8::V8::Dispose() {
 HeapStatistics::HeapStatistics(): total_heap_size_(0),
                                   total_heap_size_executable_(0),
+                                  total_physical_size_(0),
                                   used_heap_size_(0),
                                   heap_size_limit_(0) { }
@@ -4295,6 +4433,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
     // Isolate is unitialized thus heap is not configured yet.
     heap_statistics->set_total_heap_size(0);
     heap_statistics->set_total_heap_size_executable(0);
+    heap_statistics->set_total_physical_size(0);
     heap_statistics->set_used_heap_size(0);
     heap_statistics->set_heap_size_limit(0);
     return;
@@ -4304,6 +4443,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
   heap_statistics->set_total_heap_size(heap->CommittedMemory());
   heap_statistics->set_total_heap_size_executable(
       heap->CommittedMemoryExecutable());
+  heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory());
   heap_statistics->set_used_heap_size(heap->SizeOfObjects());
   heap_statistics->set_heap_size_limit(heap->MaxReserved());
 }
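total_physical_size reports committed memory actually backed by physical pages, which can be smaller than total_heap_size. A sketch of reading the new counter alongside the existing ones (getter names as declared in this tree's include/v8.h):

// Sketch: dumping heap statistics including the new physical size.
#include <v8.h>
#include <cstdio>

void ReportHeap() {
  v8::HeapStatistics stats;
  v8::V8::GetHeapStatistics(&stats);
  std::printf("committed: %zu physical: %zu used: %zu limit: %zu\n",
              stats.total_heap_size(), stats.total_physical_size(),
              stats.used_heap_size(), stats.heap_size_limit());
}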
@@ -4316,6 +4456,30 @@ void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
 }

+
+void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
+
+  i::AssertNoAllocation no_allocation;
+
+  class VisitorAdapter : public i::ObjectVisitor {
+   public:
+    explicit VisitorAdapter(PersistentHandleVisitor* visitor)
+        : visitor_(visitor) {}
+    virtual void VisitPointers(i::Object** start, i::Object** end) {
+      UNREACHABLE();
+    }
+    virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+      visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
+                                      class_id);
+    }
+   private:
+    PersistentHandleVisitor* visitor_;
+  } visitor_adapter(visitor);
+  isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
+}
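VisitHandlesWithClassIds walks only persistent handles that were given a class id via V8::SetWrapperClassId. A sketch of a visitor, assuming PersistentHandleVisitor is declared next to this entry point in include/v8.h with exactly this virtual:

// Sketch: listing the embedder's class-id-tagged persistent handles.
#include <v8.h>
#include <cstdio>

class CountingVisitor : public v8::PersistentHandleVisitor {
 public:
  virtual void VisitPersistentHandle(v8::Persistent<v8::Value> value,
                                     uint16_t class_id) {
    std::printf("persistent handle with class id %u\n", class_id);
  }
};

void DumpHandles() {
  CountingVisitor visitor;
  v8::V8::VisitHandlesWithClassIds(&visitor);
}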
 bool v8::V8::IdleNotification(int hint) {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
@@ -4516,13 +4680,14 @@ v8::Local<v8::Context> Context::GetCalling() {
 v8::Local<v8::Object> Context::Global() {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Context::Global()")) {
     return Local<v8::Object>();
   }
   i::Object** ctx = reinterpret_cast<i::Object**>(this);
   i::Handle<i::Context> context =
       i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
-  i::Handle<i::Object> global(context->global_proxy());
+  i::Handle<i::Object> global(context->global_proxy(), isolate);
   return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
 }
@@ -4579,11 +4744,32 @@ bool Context::IsCodeGenerationFromStringsAllowed() {
 }

+
+void Context::SetErrorMessageForCodeGenerationFromStrings(
+    Handle<String> error) {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate,
+      "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
+    return;
+  }
+  ENTER_V8(isolate);
+  i::Object** ctx = reinterpret_cast<i::Object**>(this);
+  i::Handle<i::Context> context =
+      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  i::Handle<i::Object> error_handle = Utils::OpenHandle(*error);
+  context->set_error_message_for_code_gen_from_strings(*error_handle);
+}
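The new setter pairs with the existing AllowCodeGenerationFromStrings toggle: when eval and the Function constructor are disabled for a context, the supplied string becomes the thrown error's message. A hedged sketch:

// Sketch: disabling eval in a context with a custom error message.
#include <v8.h>

void LockDownEval(v8::Handle<v8::Context> context) {
  context->AllowCodeGenerationFromStrings(false);
  context->SetErrorMessageForCodeGenerationFromStrings(
      v8::String::New("code generation from strings is disabled"));
}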
 void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
   i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
 }

+
+uint16_t V8::GetWrapperClassId(internal::Object** global_handle) {
+  return i::GlobalHandles::GetWrapperClassId(global_handle);
+}
 Local<v8::Object> ObjectTemplate::NewInstance() {
   i::Isolate* isolate = i::Isolate::Current();
   ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
@@ -4622,74 +4808,20 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
 }

-static Local<External> ExternalNewImpl(void* data) {
-  return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
-}
-
-static void* ExternalValueImpl(i::Handle<i::Object> obj) {
-  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
-}
-
-
-Local<Value> v8::External::Wrap(void* data) {
-  i::Isolate* isolate = i::Isolate::Current();
-  STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
-  EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
-  LOG_API(isolate, "External::Wrap");
-  ENTER_V8(isolate);
-
-  v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
-      ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
-      : v8::Local<v8::Value>(ExternalNewImpl(data));
-
-  ASSERT_EQ(data, Unwrap(result));
-  return result;
-}
-
-
-void* v8::Object::SlowGetPointerFromInternalField(int index) {
-  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  i::Object* value = obj->GetInternalField(index);
-  if (value->IsSmi()) {
-    return i::Internals::GetExternalPointerFromSmi(value);
-  } else if (value->IsForeign()) {
-    return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
-  } else {
-    return NULL;
-  }
-}
-
-
-void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
-  i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
-  void* result;
-  if (obj->IsSmi()) {
-    result = i::Internals::GetExternalPointerFromSmi(*obj);
-  } else if (obj->IsForeign()) {
-    result = ExternalValueImpl(obj);
-  } else {
-    result = NULL;
-  }
-  ASSERT_EQ(result, QuickUnwrap(wrapper));
-  return result;
-}
-
-
-Local<External> v8::External::New(void* data) {
-  STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
+Local<External> v8::External::New(void* value) {
+  STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::External::New()");
   LOG_API(isolate, "External::New");
   ENTER_V8(isolate);
-  return ExternalNewImpl(data);
+  i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
+  return Utils::ExternalToLocal(external);
 }


 void* External::Value() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  return ExternalValueImpl(obj);
+  if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
+  return ExternalValue(*Utils::OpenHandle(this));
 }
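External values are now real JSObjects carrying a Foreign in internal field 0 (see ExternalValue above) rather than bare Foreigns, which is what lets IsExternal and CheckCast key off IsExternal. Embedder-side usage is unchanged; a sketch:

// Sketch: round-tripping a native pointer through v8::External.
#include <v8.h>

static int some_state = 42;  // illustrative only

v8::Handle<v8::Value> MakeExternal() {
  return v8::External::New(&some_state);
}

int* ReadExternal(v8::Handle<v8::Value> value) {
  return static_cast<int*>(v8::External::Cast(*value)->Value());
}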
@@ -5155,24 +5287,39 @@ Local<Number> v8::Number::New(double value) {
 Local<Integer> v8::Integer::New(int32_t value) {
   i::Isolate* isolate = i::Isolate::UncheckedCurrent();
   EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
+  return v8::Integer::New(value, reinterpret_cast<Isolate*>(isolate));
+}
+
+
+Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()");
+  return Integer::NewFromUnsigned(value, reinterpret_cast<Isolate*>(isolate));
+}
+
+
+Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ASSERT(internal_isolate->IsInitialized());
   if (i::Smi::IsValid(value)) {
     return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
-                                                      isolate));
+                                                      internal_isolate));
   }
-  ENTER_V8(isolate);
-  i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
+  ENTER_V8(internal_isolate);
+  i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
   return Utils::IntegerToLocal(result);
 }


-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ASSERT(internal_isolate->IsInitialized());
   bool fits_into_int32_t = (value & (1 << 31)) == 0;
   if (fits_into_int32_t) {
-    return Integer::New(static_cast<int32_t>(value));
+    return Integer::New(static_cast<int32_t>(value), isolate);
   }
-  i::Isolate* isolate = i::Isolate::Current();
-  ENTER_V8(isolate);
-  i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
+  ENTER_V8(internal_isolate);
+  i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
   return Utils::IntegerToLocal(result);
 }
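The isolate-taking factories let hot paths skip Isolate::Current(); the legacy single-argument forms now just delegate. A sketch using the overloads added in this hunk:

// Sketch: creating integers against an explicit isolate.
#include <v8.h>

v8::Local<v8::Integer> MakeAnswer(v8::Isolate* isolate) {
  return v8::Integer::New(42, isolate);  // Smi fast path
}

v8::Local<v8::Integer> MakeBig(v8::Isolate* isolate) {
  // Does not fit in int32_t, so it allocates a number object.
  return v8::Integer::NewFromUnsigned(0x80000000u, isolate);
}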
@@ -5182,19 +5329,14 @@ void V8::IgnoreOutOfMemoryException() {
 }

-bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
+bool V8::AddMessageListener(MessageCallback that) {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
   ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   NeanderArray listeners(isolate->factory()->message_listeners());
-  NeanderObject obj(2);
-  obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
-  obj.set(1, data.IsEmpty() ?
-             isolate->heap()->undefined_value() :
-             *Utils::OpenHandle(*data));
-  listeners.add(obj.value());
+  listeners.add(isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
   return true;
 }
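Listeners are now stored as a bare Foreign (the callback address) rather than a two-slot object holding callback plus data, so AddMessageListener loses its data parameter. A sketch of registering a listener against the new signature; whether the callback's data argument still carries anything per listener is not guaranteed here:

// Sketch: a message listener under the single-argument registration.
#include <v8.h>
#include <cstdio>

static void OnMessage(v8::Handle<v8::Message> message,
                      v8::Handle<v8::Value> data) {
  v8::String::Utf8Value text(message->Get());
  std::fprintf(stderr, "script message: %s\n", *text);
}

void InstallListener() {
  v8::V8::AddMessageListener(&OnMessage);
}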
@@ -5209,8 +5351,7 @@ void V8::RemoveMessageListeners(MessageCallback that) {
   for (int i = 0; i < listeners.length(); i++) {
     if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones

-    NeanderObject listener(i::JSObject::cast(listeners.get(i)));
-    i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+    i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listeners.get(i)));
     if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
       listeners.set(i, isolate->heap()->undefined_value());
     }
@@ -5250,13 +5391,6 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
       SetAddHistogramSampleFunction(callback);
 }

-void V8::EnableSlidingStateWindow() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
-  isolate->logger()->EnableSlidingStateWindow();
-}
-
 void V8::SetFailedAccessCheckCallbackFunction(
     FailedAccessCheckCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
@@ -5266,6 +5400,7 @@ void V8::SetFailedAccessCheckCallbackFunction(
   isolate->SetFailedAccessCheckCallback(callback);
 }

+
 void V8::AddObjectGroup(Persistent<Value>* objects,
                         size_t length,
                         RetainedObjectInfo* info) {
@@ -5277,6 +5412,19 @@ void V8::AddObjectGroup(Persistent<Value>* objects,
 }

+
+void V8::AddObjectGroup(Isolate* exportedIsolate,
+                        Persistent<Value>* objects,
+                        size_t length,
+                        RetainedObjectInfo* info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exportedIsolate);
+  ASSERT(isolate == i::Isolate::Current());
+  if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
+  STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
+  isolate->global_handles()->AddObjectGroup(
+      reinterpret_cast<i::Object***>(objects), length, info);
+}
+
+
 void V8::AddImplicitReferences(Persistent<Object> parent,
                                Persistent<Value>* children,
                                size_t length) {
@@ -6287,7 +6435,8 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
 const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
                                                HeapSnapshot::Type type,
-                                               ActivityControl* control) {
+                                               ActivityControl* control,
+                                               ObjectNameResolver* resolver) {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
   i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
@@ -6300,7 +6449,7 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
   }
   return reinterpret_cast<const HeapSnapshot*>(
       i::HeapProfiler::TakeSnapshot(
-          *Utils::OpenHandle(*title), internal_type, control));
+          *Utils::OpenHandle(*title), internal_type, control, resolver));
 }
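The extra resolver parameter lets the embedder supply stable names for objects in snapshots. A sketch, assuming ObjectNameResolver is the nested pure-virtual interface declared in this tree's include/v8-profiler.h:

// Sketch: naming objects while taking a heap snapshot.
#include <v8.h>
#include <v8-profiler.h>

class GlobalNamer : public v8::HeapProfiler::ObjectNameResolver {
 public:
  virtual const char* GetName(v8::Handle<v8::Object> object) {
    return "embedder-global";  // illustrative constant name
  }
};

const v8::HeapSnapshot* Snapshot() {
  GlobalNamer namer;
  return v8::HeapProfiler::TakeSnapshot(v8::String::New("dump"),
                                        v8::HeapSnapshot::kFull,
                                        NULL, &namer);
}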
@@ -6411,6 +6560,7 @@ void Testing::PrepareStressRun(int run) {
 void Testing::DeoptimizeAll() {
+  i::HandleScope scope;
   internal::Deoptimizer::DeoptimizeAll();
 }

6
deps/v8/src/api.h

@@ -201,8 +201,6 @@ class Utils {
       v8::internal::Handle<v8::internal::JSObject> obj);
   static inline Local<Array> ToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
-  static inline Local<External> ToLocal(
-      v8::internal::Handle<v8::internal::Foreign> obj);
   static inline Local<Message> MessageToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<StackTrace> StackTraceToLocal(
@@ -225,6 +223,8 @@ class Utils {
       v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
   static inline Local<TypeSwitch> ToLocal(
       v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
+  static inline Local<External> ExternalToLocal(
+      v8::internal::Handle<v8::internal::JSObject> obj);

 #define DECLARE_OPEN_HANDLE(From, To) \
   static inline v8::internal::Handle<v8::internal::To> \
@@ -268,7 +268,6 @@ MAKE_TO_LOCAL(ToLocal, String, String)
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Foreign, External)
 MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
 MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
 MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -280,6 +279,7 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
 MAKE_TO_LOCAL(NumberToLocal, Object, Number)
 MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
 MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)

 #undef MAKE_TO_LOCAL

153
deps/v8/src/arm/assembler-arm-inl.h

@@ -75,7 +75,7 @@ Address RelocInfo::target_address_address() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
                               || rmode_ == EMBEDDED_OBJECT
                               || rmode_ == EXTERNAL_REFERENCE);
-  return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+  return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
 }
@@ -97,25 +97,30 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Memory::Object_at(Assembler::target_address_address_at(pc_));
+  return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
 }

 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
+  return Handle<Object>(reinterpret_cast<Object**>(
+      Assembler::target_pointer_at(pc_)));
 }

 Object** RelocInfo::target_object_address() {
+  // Provide a "natural pointer" to the embedded object,
+  // which can be de-referenced during heap iteration.
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+  reconstructed_obj_ptr_ =
+      reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
+  return &reconstructed_obj_ptr_;
 }

 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+  Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
   if (mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
@@ -127,7 +132,8 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
 Address* RelocInfo::target_reference_address() {
   ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+  reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+  return &reconstructed_adr_ptr_;
 }
@@ -159,6 +165,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
 }

+
+static const int kNoCodeAgeSequenceLength = 3;
+
+Code* RelocInfo::code_age_stub() {
+  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  return Code::GetCodeFromTargetAddress(
+      Memory::Address_at(pc_ + Assembler::kInstrSize *
+                         (kNoCodeAgeSequenceLength - 1)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  Memory::Address_at(pc_ + Assembler::kInstrSize *
+                     (kNoCodeAgeSequenceLength - 1)) =
+      stub->instruction_start();
+}
+
+
 Address RelocInfo::call_address() {
   // The 2 instructions offset assumes patched debug break slot or return
   // sequence.
@@ -232,6 +256,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    visitor->VisitCodeAgeSequence(this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
@@ -258,6 +284,8 @@ void RelocInfo::Visit(Heap* heap) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    StaticVisitor::VisitCodeAgeSequence(heap, this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
@@ -326,7 +354,7 @@ void Assembler::emit(Instr x) {
 }

-Address Assembler::target_address_address_at(Address pc) {
+Address Assembler::target_pointer_address_at(Address pc) {
   Address target_pc = pc;
   Instr instr = Memory::int32_at(target_pc);
   // If we have a bx instruction, the instruction before the bx is
@@ -356,8 +384,63 @@ Address Assembler::target_pointer_address_at(Address pc) {
 }

-Address Assembler::target_address_at(Address pc) {
-  return Memory::Address_at(target_address_address_at(pc));
+Address Assembler::target_pointer_at(Address pc) {
+  if (IsMovW(Memory::int32_at(pc))) {
+    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    Instruction* instr = Instruction::At(pc);
+    Instruction* next_instr = Instruction::At(pc + kInstrSize);
+    return reinterpret_cast<Address>(
+        (next_instr->ImmedMovwMovtValue() << 16) |
+        instr->ImmedMovwMovtValue());
+  }
+  return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+  // Returns the address of the call target from the return address that will
+  // be returned to after a call.
+#ifdef USE_BLX
+  // Call sequence on V7 or later is :
+  //  movw  ip, #... @ call address low 16
+  //  movt  ip, #... @ call address high 16
+  //  blx   ip
+  //                 @ return address
+  // Or pre-V7 or cases that need frequent patching:
+  //  ldr   ip, [pc, #...] @ call address
+  //  blx   ip
+  //                       @ return address
+  Address candidate = pc - 2 * Assembler::kInstrSize;
+  Instr candidate_instr(Memory::int32_at(candidate));
+  if (IsLdrPcImmediateOffset(candidate_instr)) {
+    return candidate;
+  }
+  candidate = pc - 3 * Assembler::kInstrSize;
+  ASSERT(IsMovW(Memory::int32_at(candidate)) &&
+         IsMovT(Memory::int32_at(candidate + kInstrSize)));
+  return candidate;
+#else
+  // Call sequence is:
+  //  mov   lr, pc
+  //  ldr   pc, [pc, #...] @ call address
+  //                       @ return address
+  return pc - kInstrSize;
+#endif
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+#ifdef USE_BLX
+  if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+    return pc + kInstrSize * 2;
+  } else {
+    ASSERT(IsMovW(Memory::int32_at(pc)));
+    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    return pc + kInstrSize * 3;
+  }
+#else
+  return pc + kInstrSize;
+#endif
 }
@@ -373,17 +456,53 @@ void Assembler::set_external_target_at(Address constant_pool_entry,
 }

+static Instr EncodeMovwImmediate(uint32_t immediate) {
+  ASSERT(immediate < 0x10000);
+  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
+void Assembler::set_target_pointer_at(Address pc, Address target) {
+  if (IsMovW(Memory::int32_at(pc))) {
+    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+    uint32_t immediate = reinterpret_cast<uint32_t>(target);
+    uint32_t intermediate = instr_ptr[0];
+    intermediate &= ~EncodeMovwImmediate(0xFFFF);
+    intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
+    instr_ptr[0] = intermediate;
+    intermediate = instr_ptr[1];
+    intermediate &= ~EncodeMovwImmediate(0xFFFF);
+    intermediate |= EncodeMovwImmediate(immediate >> 16);
+    instr_ptr[1] = intermediate;
+    ASSERT(IsMovW(Memory::int32_at(pc)));
+    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    CPU::FlushICache(pc, 2 * kInstrSize);
+  } else {
+    ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+    Memory::Address_at(target_pointer_address_at(pc)) = target;
+    // Intuitively, we would think it is necessary to always flush the
+    // instruction cache after patching a target address in the code as
+    // follows:
+    //   CPU::FlushICache(pc, sizeof(target));
+    // However, on ARM, no instruction is actually patched in the case of
+    // embedded constants of the form:
+    //   ldr   ip, [pc, #...]
+    // since the instruction accessing this address in the constant pool
+    // remains unchanged.
+  }
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  return target_pointer_at(pc);
+}
+
+
 void Assembler::set_target_address_at(Address pc, Address target) {
-  Memory::Address_at(target_address_address_at(pc)) = target;
-  // Intuitively, we would think it is necessary to flush the instruction cache
-  // after patching a target address in the code as follows:
-  //   CPU::FlushICache(pc, sizeof(target));
-  // However, on ARM, no instruction was actually patched by the assignment
-  // above; the target address is not part of an instruction, it is patched in
-  // the constant pool and is read via a data access; the instruction accessing
-  // this address in the constant pool remains unchanged.
+  set_target_pointer_at(pc, target);
 }

 } }  // namespace v8::internal

 #endif  // V8_ARM_ASSEMBLER_ARM_INL_H_
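The movw/movt patching above relies on EncodeMovwImmediate splitting a 16-bit value into the ARM imm4:imm12 operand fields. A standalone worked example of that bit layout (plain C++, independent of the assembler):

// Sketch: EncodeMovwImmediate's split of 0xBEEF.
//   (0xBEEF & 0xf000) << 4 -> 0xB0000  (imm4, instruction bits 16..19)
//    0xBEEF & 0x0fff       -> 0x00EEF  (imm12, instruction bits 0..11)
#include <cassert>
#include <cstdint>

static uint32_t EncodeMovwImmediate(uint32_t immediate) {
  assert(immediate < 0x10000);
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}

int main() {
  assert(EncodeMovwImmediate(0xBEEF) == 0xB0EEFu);
  return 0;
}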

275
deps/v8/src/arm/assembler-arm.cc

@@ -77,6 +77,9 @@ static unsigned CpuFeaturesImpliedByCompiler() {
 #endif  // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
         // && !defined(__SOFTFP__)
 #endif  // _arm__
+  if (answer & (1u << ARMv7)) {
+    answer |= 1u << UNALIGNED_ACCESSES;
+  }

   return answer;
 }
@@ -110,6 +113,14 @@ void CpuFeatures::Probe() {
   if (FLAG_enable_armv7) {
     supported_ |= 1u << ARMv7;
   }
+  if (FLAG_enable_sudiv) {
+    supported_ |= 1u << SUDIV;
+  }
+
+  if (FLAG_enable_movw_movt) {
+    supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+  }
+
 #else  // __arm__
   // Probe for additional features not already known to be available.
   if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
@@ -125,6 +136,19 @@ void CpuFeatures::Probe() {
     found_by_runtime_probing_ |= 1u << ARMv7;
   }

+  if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
+    found_by_runtime_probing_ |= 1u << SUDIV;
+  }
+
+  if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
+    found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
+  }
+
+  if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
+      OS::ArmCpuHasFeature(ARMv7)) {
+    found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+  }
+
   supported_ |= found_by_runtime_probing_;
 #endif
@@ -294,46 +318,11 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
 const Instr kLdrStrOffsetMask = 0x00000fff;

-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
-    : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+    : AssemblerBase(isolate, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
-      positions_recorder_(this),
-      emit_debug_code_(FLAG_debug_code),
-      predictable_code_size_(false) {
-  if (buffer == NULL) {
-    // Do our own buffer management.
-    if (buffer_size <= kMinimalBufferSize) {
-      buffer_size = kMinimalBufferSize;
-      if (isolate()->assembler_spare_buffer() != NULL) {
-        buffer = isolate()->assembler_spare_buffer();
-        isolate()->set_assembler_spare_buffer(NULL);
-      }
-    }
-    if (buffer == NULL) {
-      buffer_ = NewArray<byte>(buffer_size);
-    } else {
-      buffer_ = static_cast<byte*>(buffer);
-    }
-    buffer_size_ = buffer_size;
-    own_buffer_ = true;
-  } else {
-    // Use externally provided buffer instead.
-    ASSERT(buffer_size > 0);
-    buffer_ = static_cast<byte*>(buffer);
-    buffer_size_ = buffer_size;
-    own_buffer_ = false;
-  }
-
-  // Set up buffer pointers.
-  ASSERT(buffer_ != NULL);
-  pc_ = buffer_;
-  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+      positions_recorder_(this) {
+  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
   num_pending_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
@@ -346,14 +335,6 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
 Assembler::~Assembler() {
   ASSERT(const_pool_blocked_nesting_ == 0);
-  if (own_buffer_) {
-    if (isolate()->assembler_spare_buffer() == NULL &&
-        buffer_size_ == kMinimalBufferSize) {
-      isolate()->set_assembler_spare_buffer(buffer_);
-    } else {
-      DeleteArray(buffer_);
-    }
-  }
 }
@@ -715,12 +696,6 @@ void Assembler::next(Label* L) {
 }

-static Instr EncodeMovwImmediate(uint32_t immediate) {
-  ASSERT(immediate < 0x10000);
-  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
 // Low-level code emission routines depending on the addressing mode.
 // If this returns true then you have to use the rotate_imm and immed_8
 // that it returns, because it may have already changed the instruction
@@ -785,7 +760,7 @@ static bool fits_shifter(uint32_t imm32,
 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
 // space.  There is no guarantee that the relocated location can be similarly
 // encoded.
-bool Operand::must_use_constant_pool(const Assembler* assembler) const {
+bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
     if (!Serializer::enabled()) {
@@ -801,25 +776,28 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
 }

+static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
+  if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+    return true;
+  }
+  if (x.must_output_reloc_info(assembler)) {
+    return false;
+  }
+  return CpuFeatures::IsSupported(ARMv7);
+}
+
+
 bool Operand::is_single_instruction(const Assembler* assembler,
                                     Instr instr) const {
   if (rm_.is_valid()) return true;
   uint32_t dummy1, dummy2;
-  if (must_use_constant_pool(assembler) ||
+  if (must_output_reloc_info(assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
     // constant pool is required. For a mov instruction not setting the
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      if (must_use_constant_pool(assembler) ||
-          !CpuFeatures::IsSupported(ARMv7)) {
-        // mov instruction will be an ldr from constant pool (one instruction).
-        return true;
-      } else {
-        // mov instruction will be a mov or movw followed by movt (two
-        // instructions).
-        return false;
-      }
+      return !use_movw_movt(*this, assembler);
     } else {
       // If this is not a mov or mvn instruction there will always an additional
       // instructions - either mov or ldr. The mov might actually be two
@@ -835,6 +813,29 @@ bool Operand::is_single_instruction(const Assembler* assembler,
 }


+void Assembler::move_32_bit_immediate(Condition cond,
+                                      Register rd,
+                                      SBit s,
+                                      const Operand& x) {
+  if (rd.code() != pc.code() && s == LeaveCC) {
+    if (use_movw_movt(x, this)) {
+      if (x.must_output_reloc_info(this)) {
+        RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
+        // Make sure the movw/movt doesn't get separated.
+        BlockConstPoolFor(2);
+      }
+      emit(cond | 0x30*B20 | rd.code()*B12 |
+           EncodeMovwImmediate(x.imm32_ & 0xffff));
+      movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+      return;
+    }
+  }
+
+  RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
+  ldr(rd, MemOperand(pc, 0), cond);
+}
+
+
 void Assembler::addrmod1(Instr instr,
                          Register rn,
                          Register rd,
@@ -845,7 +846,7 @@ void Assembler::addrmod1(Instr instr,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (x.must_use_constant_pool(this) ||
+    if (x.must_output_reloc_info(this) ||
         !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
@@ -854,24 +855,19 @@ void Assembler::addrmod1(Instr instr,
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = Instruction::ConditionField(instr);
       if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-        if (x.must_use_constant_pool(this) ||
-            !CpuFeatures::IsSupported(ARMv7)) {
-          RecordRelocInfo(x.rmode_, x.imm32_);
-          ldr(rd, MemOperand(pc, 0), cond);
-        } else {
-          // Will probably use movw, will certainly not use constant pool.
-          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
-          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
-        }
+        move_32_bit_immediate(cond, rd, LeaveCC, x);
       } else {
-        // If this is not a mov or mvn instruction we may still be able to avoid
-        // a constant pool entry by using mvn or movw.
-        if (!x.must_use_constant_pool(this) &&
-            (instr & kMovMvnMask) != kMovMvnPattern) {
-          mov(ip, x, LeaveCC, cond);
-        } else {
-          RecordRelocInfo(x.rmode_, x.imm32_);
+        if ((instr & kMovMvnMask) == kMovMvnPattern) {
+          // Moves need to use a constant pool entry.
+          RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
           ldr(ip, MemOperand(pc, 0), cond);
+        } else if (x.must_output_reloc_info(this)) {
+          // Otherwise, use most efficient form of fetching from constant pool.
+          move_32_bit_immediate(cond, ip, LeaveCC, x);
+        } else {
+          // If this is not a mov or mvn instruction we may still be able to
+          // avoid a constant pool entry by using mvn or movw.
+          mov(ip, x, LeaveCC, cond);
         }
         addrmod1(instr, rn, rd, Operand(ip));
       }
@@ -1178,6 +1174,9 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {

 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
   ASSERT(immediate < 0x10000);
+  // May use movw if supported, but on unsupported platforms will try to use
+  // equivalent rotated immed_8 value and other tricks before falling back to a
+  // constant pool load.
   mov(reg, Operand(immediate), LeaveCC, cond);
 }
@@ -1207,6 +1206,22 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
 }


+void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
+                    Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::sdiv(Register dst, Register src1, Register src2,
+                     Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  emit(cond | B26 | B25 | B24 | B20 | dst.code()*B16 | 0xf*B12 |
+       src2.code()*B8 | B4 | src1.code());
+}
+
+
 void Assembler::mul(Register dst, Register src1, Register src2,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
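[Editor's aside — not part of the commit] A quick standalone check that the sdiv() bit arithmetic above yields the documented ARMv7 SDIV encoding (cond | 0111 0001 | Rd | 1111 | Rm | 0001 | Rn):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t al = 0xe0000000;
      const uint32_t B4 = 1u << 4, B8 = 1u << 8, B12 = 1u << 12, B16 = 1u << 16,
                     B20 = 1u << 20, B24 = 1u << 24, B25 = 1u << 25, B26 = 1u << 26;
      uint32_t dst = 0, src1 = 1, src2 = 2;  // sdiv r0, r1, r2
      uint32_t instr = al | B26 | B25 | B24 | B20 | dst * B16 | 0xf * B12 |
                       src2 * B8 | B4 | src1;
      assert(instr == 0xe710f211);  // SDIV r0, r1, r2 per the ARM ARM
      return 0;
    }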
@@ -1391,7 +1406,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (src.must_use_constant_pool(this) ||
+    if (src.must_output_reloc_info(this) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
       // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
@@ -1826,7 +1841,7 @@ void Assembler::vstr(const SwVfpRegister src,
                      const Condition cond) {
   ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vldr(src, operand.rn(), operand.offset(), cond);
+  vstr(src, operand.rn(), operand.offset(), cond);
 }
@@ -1975,6 +1990,7 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {

 void Assembler::vmov(const DwVfpRegister dst,
                      double imm,
+                     const Register scratch,
                      const Condition cond) {
   // Dd = immediate
   // Instruction details available in ARM DDI 0406B, A8-640.
@@ -1989,22 +2005,22 @@ void Assembler::vmov(const DwVfpRegister dst,
     // using vldr from a constant pool.
     uint32_t lo, hi;
     DoubleAsTwoUInt32(imm, &lo, &hi);
+    mov(ip, Operand(lo));

-    if (lo == hi) {
-      // If the lo and hi parts of the double are equal, the literal is easier
-      // to create. This is the case with 0.0.
-      mov(ip, Operand(lo));
-      vmov(dst, ip, ip);
-    } else {
+    if (scratch.is(no_reg)) {
       // Move the low part of the double into the lower of the corresponding S
       // registers of D register dst.
-      mov(ip, Operand(lo));
       vmov(dst.low(), ip, cond);

       // Move the high part of the double into the higher of the corresponding
       // S registers of D register dst.
       mov(ip, Operand(hi));
       vmov(dst.high(), ip, cond);
+    } else {
+      // Move the low and high parts of the double to a D register in one
+      // instruction.
+      mov(scratch, Operand(hi));
+      vmov(dst, ip, scratch, cond);
     }
   }
 }
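[Editor's aside — not part of the commit] The path above relies on DoubleAsTwoUInt32() handing back the raw IEEE-754 words of the double. A plausible standalone equivalent (the real helper lives elsewhere in V8, so treat this as an assumption about its behavior):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));  // reinterpret bits, no conversion
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }

    int main() {
      uint32_t lo, hi;
      DoubleAsTwoUInt32(1.0, &lo, &hi);
      assert(lo == 0 && hi == 0x3ff00000);  // 1.0 == 0x3ff0000000000000
      return 0;
    }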
@@ -2333,6 +2349,20 @@ void Assembler::vmul(const DwVfpRegister dst,
 }


+void Assembler::vmla(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8-892.
+  // cond(31-28) | 11100(27-23) | D=?(22) | 00(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | N=?(7) | op(6)=0 | M=?(5) | 0(4) |
+  // Vm(3-0)
+  unsigned x = (cond | 0x1C*B23 | src1.code()*B16 |
+                dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+  emit(x);
+}
+
+
 void Assembler::vdiv(const DwVfpRegister dst,
                      const DwVfpRegister src1,
                      const DwVfpRegister src2,
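[Editor's aside — not part of the commit] Checking the vmla() arithmetic above against the encoding it quotes from ARM DDI 0406C.b, for vmla.f64 d0, d1, d2:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t al = 0xe0000000;
      const uint32_t B8 = 1u << 8, B9 = 1u << 9, B12 = 1u << 12,
                     B16 = 1u << 16, B23 = 1u << 23;
      uint32_t dst = 0, src1 = 1, src2 = 2;  // d0, d1, d2
      uint32_t x = al | 0x1C * B23 | src1 * B16 | dst * B12 | 0x5 * B9 | B8 | src2;
      assert(x == 0xee010b02);  // VMLA.F64 d0, d1, d2 (A8-892)
      return 0;
    }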
@@ -2408,15 +2438,35 @@ void Assembler::vsqrt(const DwVfpRegister dst,
 // Pseudo instructions.
 void Assembler::nop(int type) {
-  // This is mov rx, rx.
-  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
+  // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
+  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
+  // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
+  // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
+  // a type.
+  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   emit(al | 13*B21 | type*B12 | type);
 }


+bool Assembler::IsMovT(Instr instr) {
+  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
+             ((kNumRegisters-1)*B12) |            // mask out register
+             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
+  return instr == 0x34*B20;
+}
+
+
+bool Assembler::IsMovW(Instr instr) {
+  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
+             ((kNumRegisters-1)*B12) |            // mask out destination
+             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
+  return instr == 0x30*B20;
+}
+
+
 bool Assembler::IsNop(Instr instr, int type) {
+  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   // Check for mov rx, rx where x = type.
-  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
   return instr == (al | 13*B21 | type*B12 | type);
 }
@@ -2532,18 +2582,21 @@ void Assembler::dd(uint32_t data) {
 }


-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
+                                UseConstantPoolMode mode) {
   // We do not try to reuse pool constants.
   RelocInfo rinfo(pc_, rmode, data, NULL);
   if (((rmode >= RelocInfo::JS_RETURN) &&
        (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
-      (rmode == RelocInfo::CONST_POOL)) {
+      (rmode == RelocInfo::CONST_POOL) ||
+      mode == DONT_USE_CONSTANT_POOL) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode)
-           || RelocInfo::IsConstPool(rmode));
+           || RelocInfo::IsConstPool(rmode)
+           || mode == DONT_USE_CONSTANT_POOL);
     // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
@@ -2648,9 +2701,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       b(&after_pool);
     }

-    // Put down constant pool marker "Undefined instruction" as specified by
-    // A5.6 (ARMv7) Instruction set encoding.
-    emit(kConstantPoolMarker | num_pending_reloc_info_);
+    // Put down constant pool marker "Undefined instruction".
+    emit(kConstantPoolMarker |
+         EncodeConstantPoolLength(num_pending_reloc_info_));

     // Emit constant pool entries.
     for (int i = 0; i < num_pending_reloc_info_; i++) {
@@ -2662,17 +2715,19 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       Instr instr = instr_at(rinfo.pc());

       // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
-      ASSERT(IsLdrPcImmediateOffset(instr) &&
-             GetLdrRegisterImmediateOffset(instr) == 0);
-
-      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-      // 0 is the smallest delta:
-      //   ldr rd, [pc, #0]
-      //   constant pool marker
-      //   data
-      ASSERT(is_uint12(delta));
-
-      instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+      if (IsLdrPcImmediateOffset(instr) &&
+          GetLdrRegisterImmediateOffset(instr) == 0) {
+        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+        // 0 is the smallest delta:
+        //   ldr rd, [pc, #0]
+        //   constant pool marker
+        //   data
+        ASSERT(is_uint12(delta));
+        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+      } else {
+        ASSERT(IsMovW(instr));
+      }
       emit(rinfo.data());
     }

113 deps/v8/src/arm/assembler-arm.h

@@ -425,7 +425,7 @@ class Operand BASE_EMBEDDED {
   // actual instruction to use is required for this calculation. For other
   // instructions instr is ignored.
   bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
-  bool must_use_constant_pool(const Assembler* assembler) const;
+  bool must_output_reloc_info(const Assembler* assembler) const;

   inline int32_t immediate() const {
     ASSERT(!rm_.is_valid());
@@ -511,6 +511,10 @@ class CpuFeatures : public AllStatic {
     ASSERT(initialized_);
     if (f == VFP3 && !FLAG_enable_vfp3) return false;
     if (f == VFP2 && !FLAG_enable_vfp2) return false;
+    if (f == SUDIV && !FLAG_enable_sudiv) return false;
+    if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
+      return false;
+    }
     return (supported_ & (1u << f)) != 0;
   }
@@ -643,15 +647,7 @@ class Assembler : public AssemblerBase {
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
   Assembler(Isolate* isolate, void* buffer, int buffer_size);
-  ~Assembler();
-
-  // Overrides the default provided by FLAG_debug_code.
-  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
-  // Avoids using instructions that vary in size in unpredictable ways between
-  // the snapshot and the running VM. This is needed by the full compiler so
-  // that it can recompile code with debug support and fix the PC.
-  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+  virtual ~Assembler();

   // GetCode emits any pending (non-emitted) code and fills the descriptor
   // desc. GetCode() is idempotent; it returns the same result if no other
@@ -685,13 +681,25 @@ class Assembler : public AssemblerBase {
   void label_at_put(Label* L, int at_offset);

   // Return the address in the constant pool of the code target address used by
-  // the branch/call instruction at pc.
-  INLINE(static Address target_address_address_at(Address pc));
+  // the branch/call instruction at pc, or the object in a mov.
+  INLINE(static Address target_pointer_address_at(Address pc));
+
+  // Read/Modify the pointer in the branch/call/move instruction at pc.
+  INLINE(static Address target_pointer_at(Address pc));
+  INLINE(static void set_target_pointer_at(Address pc, Address target));

   // Read/Modify the code target address in the branch/call instruction at pc.
   INLINE(static Address target_address_at(Address pc));
   INLINE(static void set_target_address_at(Address pc, Address target));

+  // Return the code target address at a call site from the return address
+  // of that call in the instruction stream.
+  INLINE(static Address target_address_from_return_address(Address pc));
+
+  // Given the address of the beginning of a call, return the address
+  // in the instruction stream that the call will return from.
+  INLINE(static Address return_address_from_call_start(Address pc));
+
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
@@ -710,22 +718,6 @@ class Assembler : public AssemblerBase {
   // Size of an instruction.
   static const int kInstrSize = sizeof(Instr);

-  // Distance between the instruction referring to the address of the call
-  // target and the return address.
-#ifdef USE_BLX
-  // Call sequence is:
-  //  ldr  ip, [pc, #...] @ call address
-  //  blx  ip
-  //                      @ return address
-  static const int kCallTargetAddressOffset = 2 * kInstrSize;
-#else
-  // Call sequence is:
-  //  mov  lr, pc
-  //  ldr  pc, [pc, #...] @ call address
-  //                      @ return address
-  static const int kCallTargetAddressOffset = kInstrSize;
-#endif
-
   // Distance between start of patched return sequence and the emitted address
   // to jump to.
 #ifdef USE_BLX
@@ -754,6 +746,12 @@ class Assembler : public AssemblerBase {
   static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
 #endif

+#ifdef USE_BLX
+  static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
+#else
+  static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
+#endif
+
   // Difference between address of current opcode and value read from pc
   // register.
   static const int kPcLoadDelta = 8;
@@ -869,6 +867,12 @@ class Assembler : public AssemblerBase {
   void mla(Register dst, Register src1, Register src2, Register srcA,
            SBit s = LeaveCC, Condition cond = al);

+  void mls(Register dst, Register src1, Register src2, Register srcA,
+           Condition cond = al);
+
+  void sdiv(Register dst, Register src1, Register src2,
+            Condition cond = al);
+
   void mul(Register dst, Register src1, Register src2,
            SBit s = LeaveCC, Condition cond = al);
@@ -1053,6 +1057,7 @@ class Assembler : public AssemblerBase {

   void vmov(const DwVfpRegister dst,
             double imm,
+            const Register scratch = no_reg,
             const Condition cond = al);
   void vmov(const SwVfpRegister dst,
             const SwVfpRegister src,
@@ -1121,6 +1126,10 @@ class Assembler : public AssemblerBase {
             const DwVfpRegister src1,
             const DwVfpRegister src2,
             const Condition cond = al);
+  void vmla(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
   void vdiv(const DwVfpRegister dst,
             const DwVfpRegister src1,
             const DwVfpRegister src2,
@@ -1172,7 +1181,19 @@ class Assembler : public AssemblerBase {
   // Jump unconditionally to given label.
   void jmp(Label* L) { b(L, al); }

-  bool predictable_code_size() const { return predictable_code_size_; }
+  static bool use_immediate_embedded_pointer_loads(
+      const Assembler* assembler) {
+#ifdef USE_BLX
+    return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+        (assembler == NULL || !assembler->predictable_code_size());
+#else
+    // If not using BLX, all loads from the constant pool cannot be immediate,
+    // because the ldr pc, [pc + #xxxx] used for calls must be a single
+    // instruction and cannot be easily distinguished out of context from
+    // other loads that could use movw/movt.
+    return false;
+#endif
+  }

   // Check the code size generated from label to here.
   int SizeOfCodeGeneratedSince(Label* label) {
@@ -1255,8 +1276,6 @@ class Assembler : public AssemblerBase {
   void db(uint8_t data);
   void dd(uint32_t data);

-  int pc_offset() const { return pc_ - buffer_; }
-
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }

   // Read/patch instructions
@@ -1294,12 +1313,16 @@ class Assembler : public AssemblerBase {
   static Register GetCmpImmediateRegister(Instr instr);
   static int GetCmpImmediateRawImmediate(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+  static bool IsMovT(Instr instr);
+  static bool IsMovW(Instr instr);

   // Constants in pools are accessed via pc relative addressing, which can
   // reach +/-4KB thereby defining a maximum distance between the instruction
   // and the accessed constant.
   static const int kMaxDistToPool = 4*KB;
   static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
+  STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) ==
+                kMaxNumPendingRelocInfo);

   // Postpone the generation of the constant pool for the specified number of
   // instructions.
@@ -1314,8 +1337,6 @@ class Assembler : public AssemblerBase {
   // the relocation info.
   TypeFeedbackId recorded_ast_id_;

-  bool emit_debug_code() const { return emit_debug_code_; }
-
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }

   // Decode branch instruction at pos and return branch target pos
@@ -1357,13 +1378,6 @@ class Assembler : public AssemblerBase {
   }

  private:
-  // Code buffer:
-  // The buffer into which code and relocation info are generated.
-  byte* buffer_;
-  int buffer_size_;
-  // True if the assembler owns the buffer, false if buffer is external.
-  bool own_buffer_;
-
   int next_buffer_check_;  // pc offset of next buffer check

   // Code generation
@@ -1372,7 +1386,6 @@ class Assembler : public AssemblerBase {
   // not have to check for overflow. The same is true for writes of large
   // relocation info entries.
   static const int kGap = 32;
-  byte* pc_;  // the program counter; moves forward

   // Constant pool generation
   // Pools are emitted in the instruction stream, preferably after unconditional
@@ -1432,6 +1445,12 @@ class Assembler : public AssemblerBase {
   void GrowBuffer();
   inline void emit(Instr x);

+  // 32-bit immediate values
+  void move_32_bit_immediate(Condition cond,
+                             Register rd,
+                             SBit s,
+                             const Operand& x);
+
   // Instruction generation
   void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
   void addrmod2(Instr instr, Register rd, const MemOperand& x);
@@ -1445,8 +1464,14 @@ class Assembler : public AssemblerBase {
   void link_to(Label* L, Label* appendix);
   void next(Label* L);

+  enum UseConstantPoolMode {
+    USE_CONSTANT_POOL,
+    DONT_USE_CONSTANT_POOL
+  };
+
   // Record reloc info for current pc_
-  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
+                       UseConstantPoolMode mode = USE_CONSTANT_POOL);

   friend class RegExpMacroAssemblerARM;
   friend class RelocInfo;
@@ -1454,10 +1479,6 @@ class Assembler : public AssemblerBase {
   friend class BlockConstPoolScope;

   PositionsRecorder positions_recorder_;
-
-  bool emit_debug_code_;
-  bool predictable_code_size_;
-
   friend class PositionsRecorder;
   friend class EnsureSpace;
 };

33 deps/v8/src/arm/builtins-arm.cc

@@ -1226,6 +1226,39 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
 }


+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r0 - contains return address (beginning of patch sequence)
+  //   r1 - function object
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+  __ PrepareCallCFunction(1, 0, r1);
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+  __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+  __ mov(pc, r0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                 \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking(  \
+    MacroAssembler* masm) {                                  \
+  GenerateMakeCodeYoungAgainCommon(masm);                    \
+}                                                            \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(   \
+    MacroAssembler* masm) {                                  \
+  GenerateMakeCodeYoungAgainCommon(masm);                    \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

1013 deps/v8/src/arm/code-stubs-arm.cc
File diff suppressed because it is too large

127 deps/v8/src/arm/code-stubs-arm.h

@@ -142,108 +142,6 @@ class UnaryOpStub: public CodeStub {
 };


-class BinaryOpStub: public CodeStub {
- public:
-  BinaryOpStub(Token::Value op, OverwriteMode mode)
-      : op_(op),
-        mode_(mode),
-        operands_type_(BinaryOpIC::UNINITIALIZED),
-        result_type_(BinaryOpIC::UNINITIALIZED) {
-    use_vfp2_ = CpuFeatures::IsSupported(VFP2);
-    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
-  }
-
-  BinaryOpStub(
-      int key,
-      BinaryOpIC::TypeInfo operands_type,
-      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        use_vfp2_(VFP2Bits::decode(key)),
-        operands_type_(operands_type),
-        result_type_(result_type) { }
-
- private:
-  enum SmiCodeGenerateHeapNumberResults {
-    ALLOW_HEAPNUMBER_RESULTS,
-    NO_HEAPNUMBER_RESULTS
-  };
-
-  Token::Value op_;
-  OverwriteMode mode_;
-  bool use_vfp2_;
-
-  // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo operands_type_;
-  BinaryOpIC::TypeInfo result_type_;
-
-  virtual void PrintName(StringStream* stream);
-
-  // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 7> {};
-  class VFP2Bits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
-  Major MajorKey() { return BinaryOp; }
-  int MinorKey() {
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | VFP2Bits::encode(use_vfp2_)
-           | OperandTypeInfoBits::encode(operands_type_)
-           | ResultTypeInfoBits::encode(result_type_);
-  }
-
-  void Generate(MacroAssembler* masm);
-  void GenerateGeneric(MacroAssembler* masm);
-  void GenerateSmiSmiOperation(MacroAssembler* masm);
-  void GenerateFPOperation(MacroAssembler* masm,
-                           bool smi_operands,
-                           Label* not_numbers,
-                           Label* gc_required);
-  void GenerateSmiCode(MacroAssembler* masm,
-                       Label* use_runtime,
-                       Label* gc_required,
-                       SmiCodeGenerateHeapNumberResults heapnumber_results);
-  void GenerateLoadArguments(MacroAssembler* masm);
-  void GenerateReturn(MacroAssembler* masm);
-  void GenerateUninitializedStub(MacroAssembler* masm);
-  void GenerateSmiStub(MacroAssembler* masm);
-  void GenerateInt32Stub(MacroAssembler* masm);
-  void GenerateHeapNumberStub(MacroAssembler* masm);
-  void GenerateOddballStub(MacroAssembler* masm);
-  void GenerateStringStub(MacroAssembler* masm);
-  void GenerateBothStringStub(MacroAssembler* masm);
-  void GenerateGenericStub(MacroAssembler* masm);
-  void GenerateAddStrings(MacroAssembler* masm);
-  void GenerateCallRuntime(MacroAssembler* masm);
-
-  void GenerateHeapResultAllocation(MacroAssembler* masm,
-                                    Register result,
-                                    Register heap_number_map,
-                                    Register scratch1,
-                                    Register scratch2,
-                                    Label* gc_required);
-  void GenerateRegisterArgsPush(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(operands_type_);
-  }
-
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_binary_op_type(operands_type_);
-    code->set_binary_op_result_type(result_type_);
-  }
-
-  friend class CodeGenerator;
-};
-
-
 class StringHelper : public AllStatic {
  public:
   // Generate code for copying characters using a simple loop. This should only
@@ -724,20 +622,6 @@ class FloatingPointHelper : public AllStatic {
                                    Register scratch1,
                                    Register scratch2);

-  // Loads objects from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
-  // floating point registers VFP3 must be supported. If core registers are
-  // requested when VFP3 is supported d6 and d7 will still be scratched. If
-  // either r0 or r1 is not a number (not smi and not heap number object) the
-  // not_number label is jumped to with r0 and r1 intact.
-  static void LoadOperands(MacroAssembler* masm,
-                           FloatingPointHelper::Destination destination,
-                           Register heap_number_map,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* not_number);
-
   // Convert the smi or heap number in object to an int32 using the rules
   // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
   // and brought into the range -2^31 .. +2^31 - 1.
@@ -773,6 +657,7 @@ class FloatingPointHelper : public AllStatic {
                                   Register object,
                                   Destination destination,
                                   DwVfpRegister double_dst,
+                                  DwVfpRegister double_scratch,
                                   Register dst1,
                                   Register dst2,
                                   Register heap_number_map,
@@ -794,7 +679,8 @@ class FloatingPointHelper : public AllStatic {
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
-                               DwVfpRegister double_scratch,
+                               DwVfpRegister double_scratch0,
+                               DwVfpRegister double_scratch1,
                                Label* not_int32);

   // Generate non VFP3 code to check if a double can be exactly represented by a
@@ -834,7 +720,12 @@ class FloatingPointHelper : public AllStatic {
                                      Register heap_number_result,
                                      Register scratch);

- private:
+  // Loads the objects from |object| into floating point registers.
+  // Depending on |destination| the value ends up either in |dst| or
+  // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
+  // must be supported. If kCoreRegisters are requested and VFP3 is
+  // supported, |dst| will be scratched. If |object| is neither smi nor
+  // heap number, |not_number| is jumped to with |object| still intact.
   static void LoadNumber(MacroAssembler* masm,
                          FloatingPointHelper::Destination destination,
                          Register object,

256 deps/v8/src/arm/codegen-arm.cc

@@ -31,11 +31,11 @@

 #include "codegen.h"
 #include "macro-assembler.h"
+#include "simulator-arm.h"

 namespace v8 {
 namespace internal {

-#define __ ACCESS_MASM(masm)

 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   switch (type) {
@@ -49,6 +49,74 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
 }

+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_arm_machine_code = NULL;
+double fast_exp_simulator(double x) {
+  return Simulator::current(Isolate::Current())->CallFP(
+      fast_exp_arm_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+  if (!CpuFeatures::IsSupported(VFP2)) return &exp;
+  if (!FLAG_fast_math) return &exp;
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == NULL) return &exp;
+  ExternalReference::InitializeMathExpData();
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  {
+    CpuFeatures::Scope use_vfp(VFP2);
+    DoubleRegister input = d0;
+    DoubleRegister result = d1;
+    DoubleRegister double_scratch1 = d2;
+    DoubleRegister double_scratch2 = d3;
+    Register temp1 = r4;
+    Register temp2 = r5;
+    Register temp3 = r6;
+
+    if (masm.use_eabi_hardfloat()) {
+      // Input value is in d0 anyway, nothing to do.
+    } else {
+      __ vmov(input, r0, r1);
+    }
+    __ Push(temp3, temp2, temp1);
+    MathExpGenerator::EmitMathExp(
+        &masm, input, result, double_scratch1, double_scratch2,
+        temp1, temp2, temp3);
+    __ Pop(temp3, temp2, temp1);
+    if (masm.use_eabi_hardfloat()) {
+      __ vmov(d0, result);
+    } else {
+      __ vmov(r0, r1, result);
+    }
+    __ Ret();
+  }
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+  fast_exp_arm_machine_code = buffer;
+  return &fast_exp_simulator;
+#endif
+}
+
+#undef __
+

 UnaryMathFunction CreateSqrtFunction() {
   return &sqrt;
 }
@@ -73,6 +141,8 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {

 // -------------------------------------------------------------------------
 // Code generators

+#define __ ACCESS_MASM(masm)
+
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -192,7 +262,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
                       HeapObject::kMapOffset,
                       r3,
                       r9,
-                      kLRHasBeenSaved,
+                      kLRHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
@@ -416,7 +486,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
   __ b(ne, &external_string);

   // Prepare sequential strings
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ add(string,
          string,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -450,8 +520,188 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
   __ bind(&done);
 }

+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+                                         String::Encoding encoding,
+                                         Register string,
+                                         Register index,
+                                         Register value) {
+  if (FLAG_debug_code) {
+    __ tst(index, Operand(kSmiTagMask));
+    __ Check(eq, "Non-smi index");
+    __ tst(value, Operand(kSmiTagMask));
+    __ Check(eq, "Non-smi value");
+
+    __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+    __ cmp(index, ip);
+    __ Check(lt, "Index is too large");
+
+    __ cmp(index, Operand(Smi::FromInt(0)));
+    __ Check(ge, "Index is negative");
+
+    __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+    __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
+                           ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(eq, "Unexpected string type");
+  }
+
+  __ add(ip,
+         string,
+         Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  __ SmiUntag(value, value);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
+    __ strb(value, MemOperand(ip, index, LSR, 1));
+  } else {
+    // No need to untag a smi for two-byte addressing.
+    __ strh(value, MemOperand(ip, index));
+  }
+}
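[Editor's aside — not part of the commit] The one-byte store above leans on V8's smi representation: a small integer is tagged by a left shift of one, so an LSR #1 on the still-tagged index untags it for free inside the address computation:

    #include <cassert>

    int main() {
      const int kSmiTagSize = 1, kSmiTag = 0;
      int index = 42;
      int smi_index = (index << kSmiTagSize) | kSmiTag;  // tagged smi
      // MemOperand(ip, index, LSR, 1) addresses ip + (smi_index >> 1):
      assert((smi_index >> 1) == index);
      return 0;
    }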
+
+static MemOperand ExpConstant(int index, Register base) {
+  return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+                                   DoubleRegister input,
+                                   DoubleRegister result,
+                                   DoubleRegister double_scratch1,
+                                   DoubleRegister double_scratch2,
+                                   Register temp1,
+                                   Register temp2,
+                                   Register temp3) {
+  ASSERT(!input.is(result));
+  ASSERT(!input.is(double_scratch1));
+  ASSERT(!input.is(double_scratch2));
+  ASSERT(!result.is(double_scratch1));
+  ASSERT(!result.is(double_scratch2));
+  ASSERT(!double_scratch1.is(double_scratch2));
+  ASSERT(!temp1.is(temp2));
+  ASSERT(!temp1.is(temp3));
+  ASSERT(!temp2.is(temp3));
+  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+  Label done;
+
+  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+  __ vldr(double_scratch1, ExpConstant(0, temp3));
+  __ vmov(result, kDoubleRegZero);
+  __ VFPCompareAndSetFlags(double_scratch1, input);
+  __ b(ge, &done);
+  __ vldr(double_scratch2, ExpConstant(1, temp3));
+  __ VFPCompareAndSetFlags(input, double_scratch2);
+  __ vldr(result, ExpConstant(2, temp3));
+  __ b(ge, &done);
+  __ vldr(double_scratch1, ExpConstant(3, temp3));
+  __ vldr(result, ExpConstant(4, temp3));
+  __ vmul(double_scratch1, double_scratch1, input);
+  __ vadd(double_scratch1, double_scratch1, result);
+  __ vmov(temp2, temp1, double_scratch1);
+  __ vsub(double_scratch1, double_scratch1, result);
+  __ vldr(result, ExpConstant(6, temp3));
+  __ vldr(double_scratch2, ExpConstant(5, temp3));
+  __ vmul(double_scratch1, double_scratch1, double_scratch2);
+  __ vsub(double_scratch1, double_scratch1, input);
+  __ vsub(result, result, double_scratch1);
+  __ vmul(input, double_scratch1, double_scratch1);
+  __ vmul(result, result, input);
+  __ mov(temp1, Operand(temp2, LSR, 11));
+  __ vldr(double_scratch2, ExpConstant(7, temp3));
+  __ vmul(result, result, double_scratch2);
+  __ vsub(result, result, double_scratch1);
+  __ vldr(double_scratch2, ExpConstant(8, temp3));
+  __ vadd(result, result, double_scratch2);
+  __ movw(ip, 0x7ff);
+  __ and_(temp2, temp2, Operand(ip));
+  __ add(temp1, temp1, Operand(0x3ff));
+  __ mov(temp1, Operand(temp1, LSL, 20));
+
+  // Must not call ExpConstant() after overwriting temp3!
+  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+  __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
+  __ add(temp3, temp3, Operand(kPointerSize));
+  __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
+  __ orr(temp1, temp1, temp2);
+  __ vmov(input, ip, temp1);
+  __ vmul(result, result, input);
+  __ bind(&done);
+}
+
 #undef __
+
+// add(r0, pc, Operand(-8))
+static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+  // The sequence of instructions that is patched out for aging code is the
+  // following boilerplate stack-building prologue that is found in FUNCTIONS
+  static bool initialized = false;
+  static uint32_t sequence[kNoCodeAgeSequenceLength];
+  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
+  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+  if (!initialized) {
+    CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+    PredictableCodeSizeScope scope(patcher.masm(), *length);
+    patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+    patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
+    initialized = true;
+  }
+  return byte_sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+  uint32_t young_length;
+  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+  bool result = !memcmp(sequence, young_sequence, young_length);
+  ASSERT(result ||
+         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+  return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+                               MarkingParity* parity) {
+  if (IsYoungSequence(sequence)) {
+    *age = kNoAge;
+    *parity = NO_MARKING_PARITY;
+  } else {
+    Address target_address = Memory::Address_at(
+        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+    Code* stub = GetCodeFromTargetAddress(target_address);
+    GetCodeAgeAndParity(stub, age, parity);
+  }
+}
+
+
+void Code::PatchPlatformCodeAge(byte* sequence,
+                                Code::Age age,
+                                MarkingParity parity) {
+  uint32_t young_length;
+  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+  if (age == kNoAge) {
+    memcpy(sequence, young_sequence, young_length);
+    CPU::FlushICache(sequence, young_length);
+  } else {
+    Code* stub = GetCodeAgeStub(age, parity);
+    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+    patcher.masm()->add(r0, pc, Operand(-8));
+    patcher.masm()->ldr(pc, MemOperand(pc, -4));
+    patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+  }
+}
+
+
 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
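[Editor's aside — not part of the commit] kCodeAgePatchFirstInstruction (0xe24f0008) is the word that add(r0, pc, Operand(-8)) assembles to (a sub r0, pc, #8). Because reading pc on ARM yields the instruction's own address plus 8, it leaves the start address of the patched sequence in r0, which GenerateMakeCodeYoungAgainCommon() then passes to the runtime as the return address:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t instr_address = 0x1000;          // assumed load address
      uint32_t pc_as_read = instr_address + 8;  // ARM pipeline offset
      uint32_t r0 = pc_as_read - 8;             // what "sub r0, pc, #8" computes
      assert(r0 == instr_address);              // r0 == start of the sequence
      return 0;
    }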

16 deps/v8/src/arm/codegen-arm.h

@@ -88,6 +88,22 @@ class StringCharLoadGenerator : public AllStatic {
   DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
 };

+
+class MathExpGenerator : public AllStatic {
+ public:
+  static void EmitMathExp(MacroAssembler* masm,
+                          DoubleRegister input,
+                          DoubleRegister result,
+                          DoubleRegister double_scratch1,
+                          DoubleRegister double_scratch2,
+                          Register temp1,
+                          Register temp2,
+                          Register temp3);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
 } }  // namespace v8::internal

 #endif  // V8_ARM_CODEGEN_ARM_H_

22 deps/v8/src/arm/constants-arm.h

@@ -75,10 +75,6 @@
 #endif

-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-
 // Using blx may yield better code, so use it when required or when available
 #if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
 #define USE_BLX 1
@@ -88,9 +84,18 @@ namespace v8 {
 namespace internal {

 // Constant pool marker.
-const int kConstantPoolMarkerMask = 0xffe00000;
-const int kConstantPoolMarker = 0x0c000000;
-const int kConstantPoolLengthMask = 0x001ffff;
+// Use UDF, the permanently undefined instruction.
+const int kConstantPoolMarkerMask = 0xfff000f0;
+const int kConstantPoolMarker = 0xe7f000f0;
+const int kConstantPoolLengthMaxMask = 0xffff;
+inline int EncodeConstantPoolLength(int length) {
+  ASSERT((length & kConstantPoolLengthMaxMask) == length);
+  return ((length & 0xfff0) << 4) | (length & 0xf);
+}
+inline int DecodeConstantPoolLength(int instr) {
+  ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+  return ((instr >> 4) & 0xfff0) | (instr & 0xf);
+}

 // Number of registers in normal ARM mode.
 const int kNumRegisters = 16;
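[Editor's aside — not part of the commit] The new marker is a UDF encoding whose free bit fields (19-8 and 3-0) carry the pool length, so encoding a length can never disturb the marker bits and decoding round-trips. A standalone check using the constants and helpers exactly as added above:

    #include <cassert>

    const int kConstantPoolMarkerMask = 0xfff000f0;
    const int kConstantPoolMarker = 0xe7f000f0;
    const int kConstantPoolLengthMaxMask = 0xffff;

    inline int EncodeConstantPoolLength(int length) {
      assert((length & kConstantPoolLengthMaxMask) == length);
      return ((length & 0xfff0) << 4) | (length & 0xf);
    }

    inline int DecodeConstantPoolLength(int instr) {
      assert((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
      return ((instr >> 4) & 0xfff0) | (instr & 0xf);
    }

    int main() {
      for (int len = 0; len <= 0xffff; len++) {
        int encoded = EncodeConstantPoolLength(len);
        // Length bits (19-8, 3-0) are disjoint from the marker bits.
        assert((encoded & kConstantPoolMarkerMask) == 0);
        assert(DecodeConstantPoolLength(kConstantPoolMarker | encoded) == len);
      }
      return 0;
    }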
@@ -691,6 +696,9 @@ class Instruction {
                                     && (Bit(20) == 0)
                                     && ((Bit(7) == 0)); }

+  // Test for a nop instruction, which falls under type 1.
+  inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+
   // Test for a stop instruction.
   inline bool IsStop() const {
     return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);

4 deps/v8/src/arm/debug-arm.cc

@@ -48,7 +48,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
   //   add sp, sp, #4
   //   bx lr
   // to a call to the debug break return code.
-  // #if USE_BLX
+  // #ifdef USE_BLX
   //   ldr ip, [pc, #0]
   //   blx ip
   // #else
@@ -99,7 +99,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
   //   mov r2, r2
   //   mov r2, r2
   // to a call to the debug break slot code.
-  // #if USE_BLX
+  // #ifdef USE_BLX
   //   ldr ip, [pc, #0]
   //   blx ip
   // #else

46 deps/v8/src/arm/deoptimizer-arm.cc

@@ -104,19 +104,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   // ignore all slots that might have been recorded on it.
   isolate->heap()->mark_compact_collector()->InvalidateCode(code);

-  // Iterate over all the functions which share the same code object
-  // and make them use unoptimized version.
-  Context* context = function->context()->native_context();
-  Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
-  SharedFunctionInfo* shared = function->shared();
-  while (!element->IsUndefined()) {
-    JSFunction* func = JSFunction::cast(element);
-    // Grab element before code replacement as ReplaceCode alters the list.
-    element = func->next_function_link();
-    if (func->code() == code) {
-      func->ReplaceCode(shared->code());
-    }
-  }
+  ReplaceCodeForRelatedFunctions(function, code);

   if (FLAG_trace_deopt) {
     PrintF("[forced deoptimization: ");
@@ -126,7 +114,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
 }

-static const int32_t kBranchBeforeStackCheck = 0x2a000001;
 static const int32_t kBranchBeforeInterrupt = 0x5a000004;
@@ -135,24 +122,21 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
                                         Code* check_code,
                                         Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
-  // The call of the stack guard check has the following form:
-  // e1 5d 00 0c       cmp sp, <limit>
-  // 2a 00 00 01       bcs ok
+  // The back edge bookkeeping code matches the pattern:
+  //
+  //  <decrement profiling counter>
+  //  2a 00 00 01       bpl ok
   // e5 9f c? ??       ldr ip, [pc, <stack guard address>]
   // e1 2f ff 3c       blx ip
   ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
   ASSERT(Assembler::IsLdrPcImmediateOffset(
       Assembler::instr_at(pc_after - 2 * kInstrSize)));
-  if (FLAG_count_based_interrupts) {
-    ASSERT_EQ(kBranchBeforeInterrupt,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  } else {
-    ASSERT_EQ(kBranchBeforeStackCheck,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  }
+  ASSERT_EQ(kBranchBeforeInterrupt,
+            Memory::int32_at(pc_after - 3 * kInstrSize));

   // We patch the code to the following form:
-  // e1 5d 00 0c       cmp sp, <limit>
+  //
+  //  <decrement profiling counter>
   // e1 a0 00 00       mov r0, r0 (NOP)
   // e5 9f c? ??       ldr ip, [pc, <on-stack replacement address>]
   // e1 2f ff 3c       blx ip
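[Editor's aside — not part of the commit] kBranchBeforeInterrupt (0x5a000004) decodes as a pl-conditioned branch with a 24-bit offset of 4; with ARM's pc-plus-8 pipeline bias its target lies 24 bytes past the branch, matching the b(+16, pl) that RevertStackCheckCodeAt() re-installs below:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kBranchBeforeInterrupt = 0x5a000004;
      uint32_t cond = kBranchBeforeInterrupt >> 28;            // 0x5 == pl
      uint32_t opcode = (kBranchBeforeInterrupt >> 25) & 0x7;  // 0b101 == branch
      int32_t imm24 = kBranchBeforeInterrupt & 0x00ffffff;
      assert(cond == 0x5 && opcode == 0x5 && imm24 == 4);
      assert(8 + imm24 * 4 == 24);  // bytes from the branch to its target
      return 0;
    }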
@@ -189,15 +173,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,

   // Replace NOP with conditional jump.
   CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
-  if (FLAG_count_based_interrupts) {
-    patcher.masm()->b(+16, pl);
-    ASSERT_EQ(kBranchBeforeInterrupt,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  } else {
-    patcher.masm()->b(+4, cs);
-    ASSERT_EQ(kBranchBeforeStackCheck,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  }
+  patcher.masm()->b(+16, pl);
+  ASSERT_EQ(kBranchBeforeInterrupt,
+            Memory::int32_at(pc_after - 3 * kInstrSize));

   // Replace the stack check address in the constant pool
   // with the entry address of the replacement code.

43 deps/v8/src/arm/disasm-arm.cc

@@ -692,11 +692,19 @@ void Decoder::DecodeType01(Instruction* instr) {
         // Rn field to encode it.
         Format(instr, "mul'cond's 'rn, 'rm, 'rs");
       } else {
-        // The MLA instruction description (A 4.1.28) refers to the order
-        // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
-        // Rn field to encode the Rd register and the Rd field to encode
-        // the Rn register.
-        Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+        if (instr->Bit(22) == 0) {
+          // The MLA instruction description (A 4.1.28) refers to the order
+          // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+          // Rn field to encode the Rd register and the Rd field to encode
+          // the Rn register.
+          Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+        } else {
+          // The MLS instruction description (A 4.1.29) refers to the order
+          // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+          // Rn field to encode the Rd register and the Rd field to encode
+          // the Rn register.
+          Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+        }
       }
     } else {
       // The signed/long multiply instructions use the terms RdHi and RdLo
@@ -822,6 +830,8 @@ void Decoder::DecodeType01(Instruction* instr) {
     } else {
       Unknown(instr);  // not used by V8
     }
+  } else if ((type == 1) && instr->IsNopType1()) {
+    Format(instr, "nop'cond");
   } else {
     switch (instr->OpcodeField()) {
       case AND: {
@@ -974,6 +984,17 @@ void Decoder::DecodeType3(Instruction* instr) {
       break;
     }
     case db_x: {
+      if (FLAG_enable_sudiv) {
+        if (!instr->HasW()) {
+          if (instr->Bits(5, 4) == 0x1) {
+            if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+              // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+              Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
+              break;
+            }
+          }
+        }
+      }
       Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       break;
     }
@@ -1077,6 +1098,7 @@ int Decoder::DecodeType7(Instruction* instr) {
 // Dd = vadd(Dn, Dm)
 // Dd = vsub(Dn, Dm)
 // Dd = vmul(Dn, Dm)
+// Dd = vmla(Dn, Dm)
 // Dd = vdiv(Dn, Dm)
 // vcmp(Dd, Dm)
 // vmrs
@@ -1139,6 +1161,12 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
       } else {
         Unknown(instr);  // Not used by V8.
       }
+    } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+      if (instr->SzValue() == 0x1) {
+        Format(instr, "vmla.f64'cond 'Dd, 'Dn, 'Dm");
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
     } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
       if (instr->SzValue() == 0x1) {
         Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
@@ -1367,7 +1395,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
 int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
   if (IsConstantPoolAt(instr_ptr)) {
     int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
-    return instruction_bits & kConstantPoolLengthMask;
+    return DecodeConstantPoolLength(instruction_bits);
   } else {
     return -1;
   }
@@ -1389,8 +1417,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
   if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
     out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                     "constant pool begin (length %d)",
-                                    instruction_bits &
-                                    kConstantPoolLengthMask);
+                                    DecodeConstantPoolLength(instruction_bits));
     return Instruction::kInstrSize;
   }
   switch (instr->TypeValue()) {

313 deps/v8/src/arm/full-codegen-arm.cc

@@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() {
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
-      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -164,14 +164,19 @@ void FullCodeGenerator::Generate() {
   int locals_count = info->scope()->num_stack_slots();

-  __ Push(lr, fp, cp, r1);
-  if (locals_count > 0) {
+  info->set_prologue_offset(masm_->pc_offset());
+  {
+    PredictableCodeSizeScope predictible_code_size_scope(
+        masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+    // The following three instructions must remain together and unmodified
+    // for code aging to work properly.
+    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
     // Load undefined value here, so the value is ready for the loop
     // below.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    // Adjust FP to point to saved FP.
-    __ add(fp, sp, Operand(2 * kPointerSize));
   }
+  // Adjust fp to point to caller's fp.
+  __ add(fp, sp, Operand(2 * kPointerSize));

   { Comment cmnt(masm_, "[ Allocate locals");
     for (int i = 0; i < locals_count; i++) {
@@ -287,6 +292,7 @@ void FullCodeGenerator::Generate() {
         __ LoadRoot(ip, Heap::kStackLimitRootIndex);
         __ cmp(sp, Operand(ip));
         __ b(hs, &ok);
+        PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
         StackCheckStub stub;
         __ CallStub(&stub);
         __ bind(&ok);
@@ -341,41 +347,31 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
 }
 
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
-                                       Label* back_edge_target) {
-  Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+                                                Label* back_edge_target) {
+  Comment cmnt(masm_, "[ Back edge bookkeeping");
   // Block literal pools whilst emitting stack check code.
   Assembler::BlockConstPoolScope block_const_pool(masm_);
   Label ok;
 
-  if (FLAG_count_based_interrupts) {
-    int weight = 1;
-    if (FLAG_weighted_back_edges) {
-      ASSERT(back_edge_target->is_bound());
-      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kBackEdgeDistanceUnit));
-    }
-    EmitProfilingCounterDecrement(weight);
-    __ b(pl, &ok);
-    InterruptStub stub;
-    __ CallStub(&stub);
-  } else {
-    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-    __ cmp(sp, Operand(ip));
-    __ b(hs, &ok);
-    StackCheckStub stub;
-    __ CallStub(&stub);
+  int weight = 1;
+  if (FLAG_weighted_back_edges) {
+    ASSERT(back_edge_target->is_bound());
+    int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+    weight = Min(kMaxBackEdgeWeight,
+                 Max(1, distance / kBackEdgeDistanceUnit));
   }
+  EmitProfilingCounterDecrement(weight);
+  __ b(pl, &ok);
+  InterruptStub stub;
+  __ CallStub(&stub);
 
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
-  RecordStackCheck(stmt->OsrEntryId());
-  if (FLAG_count_based_interrupts) {
-    EmitProfilingCounterReset();
-  }
+  RecordBackEdge(stmt->OsrEntryId());
+  EmitProfilingCounterReset();
 
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
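
For reference, the weight computed in EmitBackEdgeBookkeeping scales the
profiling-counter decrement with the size of the loop body just emitted, so
loops with larger bodies exhaust the interrupt budget in fewer iterations. A
minimal standalone sketch of that computation (the constant values here are
illustrative assumptions, not taken from this diff):

    #include <algorithm>

    const int kMaxBackEdgeWeight = 127;     // assumed cap, for illustration
    const int kBackEdgeDistanceUnit = 142;  // assumed divisor, for illustration

    // distance: bytes of code generated since the back edge target was bound.
    int BackEdgeWeight(int distance) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kBackEdgeDistanceUnit));
    }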
@@ -437,6 +433,8 @@ void FullCodeGenerator::EmitReturnSequence() {
       // tool from instrumenting as we rely on the code size here.
       int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
       CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+      // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
+      PredictableCodeSizeScope predictable(masm_, -1);
       __ RecordJSReturn();
       masm_->mov(sp, fp);
       masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -911,34 +909,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
-  VariableProxy* proxy = declaration->proxy();
-  Variable* variable = proxy->var();
-  Handle<JSModule> instance = declaration->module()->interface()->Instance();
-  ASSERT(!instance.is_null());
+  Variable* variable = declaration->proxy()->var();
+  ASSERT(variable->location() == Variable::CONTEXT);
+  ASSERT(variable->interface()->IsFrozen());
 
-  switch (variable->location()) {
-    case Variable::UNALLOCATED: {
-      Comment cmnt(masm_, "[ ModuleDeclaration");
-      globals_->Add(variable->name(), zone());
-      globals_->Add(instance, zone());
-      Visit(declaration->module());
-      break;
-    }
+  Comment cmnt(masm_, "[ ModuleDeclaration");
+  EmitDebugCheckDeclarationContext(variable);
 
-    case Variable::CONTEXT: {
-      Comment cmnt(masm_, "[ ModuleDeclaration");
-      EmitDebugCheckDeclarationContext(variable);
-      __ mov(r1, Operand(instance));
-      __ str(r1, ContextOperand(cp, variable->index()));
-      Visit(declaration->module());
-      break;
-    }
+  // Load instance object.
+  __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
+  __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
 
-    case Variable::PARAMETER:
-    case Variable::LOCAL:
-    case Variable::LOOKUP:
-      UNREACHABLE();
-  }
+  // Assign it.
+  __ str(r1, ContextOperand(cp, variable->index()));
+  // We know that we have written a module, which is not a smi.
+  __ RecordWriteContextSlot(cp,
+                            Context::SlotOffset(variable->index()),
+                            r1,
+                            r3,
+                            kLRHasBeenSaved,
+                            kDontSaveFPRegs,
+                            EMIT_REMEMBERED_SET,
+                            OMIT_SMI_CHECK);
+  PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+  // Traverse into body.
+  Visit(declaration->module());
 }
@@ -981,6 +978,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
 }
 
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+  // Call the runtime to declare the modules.
+  __ Push(descriptions);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
+  // Return value is ignored.
+}
+
+
 void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   Comment cmnt(masm_, "[ SwitchStatement");
   Breakable nested_statement(this, stmt);
@@ -1137,7 +1142,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ cmp(r1, Operand(Smi::FromInt(0)));
   __ b(eq, &no_descriptors);
 
-  __ LoadInstanceDescriptors(r0, r2, r4);
+  __ LoadInstanceDescriptors(r0, r2);
   __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
   __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -1235,7 +1240,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
 
-  EmitStackCheck(stmt, &loop);
+  EmitBackEdgeBookkeeping(stmt, &loop);
   __ b(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1388,9 +1393,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == CONST ||
-        local->mode() == CONST_HARMONY ||
-        local->mode() == LET) {
+    if (local->mode() == LET ||
+        local->mode() == CONST ||
+        local->mode() == CONST_HARMONY) {
       __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
       if (local->mode() == CONST) {
         __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
@@ -2183,43 +2188,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   ASSERT(prop != NULL);
   ASSERT(prop->key()->AsLiteral() != NULL);
 
-  // If the assignment starts a block of assignments to the same object,
-  // change to slow case to avoid the quadratic behavior of repeatedly
-  // adding fast properties.
-  if (expr->starts_initialization_block()) {
-    __ push(result_register());
-    __ ldr(ip, MemOperand(sp, kPointerSize));  // Receiver is now under value.
-    __ push(ip);
-    __ CallRuntime(Runtime::kToSlowProperties, 1);
-    __ pop(result_register());
-  }
-
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
-  // Load receiver to r1. Leave a copy in the stack if needed for turning the
-  // receiver into fast case.
-  if (expr->ends_initialization_block()) {
-    __ ldr(r1, MemOperand(sp));
-  } else {
-    __ pop(r1);
-  }
+  __ pop(r1);
 
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->StoreIC_Initialize()
       : isolate()->builtins()->StoreIC_Initialize_Strict();
   CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
 
-  // If the assignment ends an initialization block, revert to fast case.
-  if (expr->ends_initialization_block()) {
-    __ push(r0);  // Result of assignment, saved even if not needed.
-    // Receiver is under the result value.
-    __ ldr(ip, MemOperand(sp, kPointerSize));
-    __ push(ip);
-    __ CallRuntime(Runtime::kToFastProperties, 1);
-    __ pop(r0);
-    __ Drop(1);
-  }
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(r0);
 }
@@ -2228,44 +2206,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
 
-  // If the assignment starts a block of assignments to the same object,
-  // change to slow case to avoid the quadratic behavior of repeatedly
-  // adding fast properties.
-  if (expr->starts_initialization_block()) {
-    __ push(result_register());
-    // Receiver is now under the key and value.
-    __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
-    __ push(ip);
-    __ CallRuntime(Runtime::kToSlowProperties, 1);
-    __ pop(result_register());
-  }
-
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   __ pop(r1);  // Key.
-  // Load receiver to r2. Leave a copy in the stack if needed for turning the
-  // receiver into fast case.
-  if (expr->ends_initialization_block()) {
-    __ ldr(r2, MemOperand(sp));
-  } else {
-    __ pop(r2);
-  }
+  __ pop(r2);
 
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize()
       : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
   CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
 
-  // If the assignment ends an initialization block, revert to fast case.
-  if (expr->ends_initialization_block()) {
-    __ push(r0);  // Result of assignment, saved even if not needed.
-    // Receiver is under the result value.
-    __ ldr(ip, MemOperand(sp, kPointerSize));
-    __ push(ip);
-    __ CallRuntime(Runtime::kToFastProperties, 1);
-    __ pop(r0);
-    __ Drop(1);
-  }
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(r0);
 }
@@ -2294,7 +2244,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
                                RelocInfo::Mode rmode,
                                TypeFeedbackId ast_id) {
   ic_total_count_++;
-  __ Call(code, rmode, ast_id);
+  // All calls must have a predictable size in full-codegen code to ensure that
+  // the debugger can patch them correctly.
+  __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
 }
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
@@ -2424,7 +2376,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
   VariableProxy* proxy = callee->AsVariableProxy();
   Property* property = callee->AsProperty();
 
-  if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+  if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
     // In a call to eval, we first call %ResolvePossiblyDirectEval to
     // resolve the function we need to call and the receiver of the
     // call.  Then we call the resolved function using the given
@@ -2714,7 +2666,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  if (generate_debug_code_) __ AbortIfSmi(r0);
+  __ AssertNotSmi(r0);
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
@@ -2729,26 +2681,31 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
   __ b(eq, if_false);
 
   // Look for valueOf symbol in the descriptor array, and indicate false if
-  // found. The type is not checked, so if it is a transition it is a false
-  // negative.
-  __ LoadInstanceDescriptors(r1, r4, r3);
-  __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // r4: descriptor array
-  // r3: length of descriptor array
-  // Calculate the end of the descriptor array.
+  // found. Since we omit an enumeration index check, if it is added via a
+  // transition that shares its descriptor array, this is a false positive.
+  Label entry, loop, done;
+
+  // Skip loop if no descriptors are valid.
+  __ NumberOfOwnDescriptors(r3, r1);
+  __ cmp(r3, Operand(0));
+  __ b(eq, &done);
+
+  __ LoadInstanceDescriptors(r1, r4);
+  // r4: descriptor array.
+  // r3: valid entries in the descriptor array.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kPointerSize == 4);
-  __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
+  __ mul(r3, r3, ip);
+  // Calculate location of the first key name.
+  __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+  // Calculate the end of the descriptor array.
+  __ mov(r2, r4);
   __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
-  // Calculate location of the first key name.
-  __ add(r4,
-         r4,
-         Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
 
   // Loop through all the keys in the descriptor array. If one of these is the
   // symbol valueOf the result is false.
-  Label entry, loop;
   // The use of ip to store the valueOf symbol asumes that it is not otherwise
   // used in the loop below.
   __ mov(ip, Operand(FACTORY->value_of_symbol()));
@@ -2762,7 +2719,8 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
   __ cmp(r4, Operand(r2));
   __ b(ne, &loop);
 
-  // If a valueOf property is not found on the object check that it's
+  __ bind(&done);
+  // If a valueOf property is not found on the object check that its
   // prototype is the un-modified String prototype. If not result is false.
   __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
   __ JumpIfSmi(r2, if_false);
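
The scan emitted above behaves roughly like the following standalone model
(not V8 source): walk only the map's own property descriptors and report
whether any key is the valueOf symbol. As the updated comment notes, a
descriptor array shared through a map transition can make this a false
positive, which is the safe direction for this optimization.

    #include <string>
    #include <vector>

    bool MayHaveValueOf(const std::vector<std::string>& own_descriptor_keys) {
      for (const std::string& key : own_descriptor_keys) {
        if (key == "valueOf") return true;  // conservative: may be a false positive
      }
      return false;
    }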
@@ -3173,6 +3131,39 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
 }
 
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(3, args->length());
+
+  VisitForStackValue(args->at(1));  // index
+  VisitForStackValue(args->at(2));  // value
+  __ pop(r2);
+  __ pop(r1);
+  VisitForAccumulatorValue(args->at(0));  // string
+
+  static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+  SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
+  context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(3, args->length());
+
+  VisitForStackValue(args->at(1));  // index
+  VisitForStackValue(args->at(2));  // value
+  __ pop(r2);
+  __ pop(r1);
+  VisitForAccumulatorValue(args->at(0));  // string
+
+  static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+  SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
+  context()->Plug(r0);
+}
+
+
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
   ZoneList<Expression*>* args = expr->arguments();
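
Conceptually, the SeqStringSetCharGenerator call shared by the two emitters
above stores one character into the payload of a sequential string, and only
the store width differs between the encodings. A standalone sketch of that
semantics (not V8 source; names are illustrative):

    #include <stdint.h>

    enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

    // chars points at the string payload; index and value are already untagged.
    void SeqStringSetChar(Encoding enc, void* chars, int index, uint16_t value) {
      if (enc == ONE_BYTE_ENCODING) {
        static_cast<uint8_t*>(chars)[index] = static_cast<uint8_t>(value);
      } else {
        static_cast<uint16_t*>(chars)[index] = value;
      }
    }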
@@ -3583,8 +3574,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
-  __ AbortIfNotString(r0);
-
+  __ AssertString(r0);
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ IndexFromHash(r0, r0);
@@ -3666,7 +3656,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
-  __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+  __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
   __ add(string_length, string_length, Operand(scratch1), SetCC);
   __ b(vs, &bailout);
   __ cmp(element, elements_end);
@@ -3695,7 +3685,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   // Add (separator length times array_length) - separator length to the
   // string_length to get the length of the result string. array_length is not
   // smi but the other values are, so the result is a smi
-  __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
   __ sub(string_length, string_length, Operand(scratch1));
   __ smull(scratch2, ip, array_length, scratch1);
   // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3733,10 +3723,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   array_length = no_reg;
   __ add(result_pos,
          result,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
 
   // Check the length of the separator.
-  __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
   __ cmp(scratch1, Operand(Smi::FromInt(1)));
   __ b(eq, &one_char_separator);
   __ b(gt, &long_separator);
@@ -3752,7 +3742,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
   __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
   __ SmiUntag(string_length);
-  __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(string,
+         string,
+         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ CopyBytes(string, result_pos, string_length, scratch1);
   __ cmp(element, elements_end);
   __ b(lt, &empty_separator_loop);  // End while (element < elements_end).
@@ -3762,7 +3754,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   // One-character separator case
   __ bind(&one_char_separator);
   // Replace separator with its ASCII character value.
-  __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+  __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator
   __ jmp(&one_char_separator_loop_entry);
@@ -3782,7 +3774,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
   __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
   __ SmiUntag(string_length);
-  __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(string,
+         string,
+         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ CopyBytes(string, result_pos, string_length, scratch1);
   __ cmp(element, elements_end);
   __ b(lt, &one_char_separator_loop);  // End while (element < elements_end).
@@ -3803,14 +3797,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ SmiUntag(string_length);
   __ add(string,
          separator,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ CopyBytes(string, result_pos, string_length, scratch1);
 
   __ bind(&long_separator);
   __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
   __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
   __ SmiUntag(string_length);
-  __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(string,
+         string,
+         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ CopyBytes(string, result_pos, string_length, scratch1);
   __ cmp(element, elements_end);
   __ b(lt, &long_separator_loop);  // End while (element < elements_end).
@@ -4115,7 +4111,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     // Call stub. Undo operation first.
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
   }
-  __ mov(r1, Operand(Smi::FromInt(count_value)));
+  __ mov(r1, r0);
+  __ mov(r0, Operand(Smi::FromInt(count_value)));
 
   // Record position before stub call.
   SetSourcePosition(expr->position());
@@ -4340,29 +4337,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
     default: {
       VisitForAccumulatorValue(expr->right());
-      Condition cond = eq;
-      switch (op) {
-        case Token::EQ_STRICT:
-        case Token::EQ:
-          cond = eq;
-          break;
-        case Token::LT:
-          cond = lt;
-          break;
-        case Token::GT:
-          cond = gt;
-          break;
-        case Token::LTE:
-          cond = le;
-          break;
-        case Token::GTE:
-          cond = ge;
-          break;
-        case Token::IN:
-        case Token::INSTANCEOF:
-        default:
-          UNREACHABLE();
-      }
+      Condition cond = CompareIC::ComputeCondition(op);
       __ pop(r1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
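
The deleted switch now lives behind CompareIC::ComputeCondition, which performs
the same token-to-condition mapping in one place. A standalone sketch that
mirrors the deleted code (the enum values are illustrative stand-ins, not
V8's types):

    enum Condition { kEq, kLt, kGt, kLe, kGe };
    enum TokenValue { EQ_STRICT, EQ, LT, GT, LTE, GTE };

    Condition ComputeCondition(TokenValue op) {
      switch (op) {
        case EQ_STRICT:
        case EQ:  return kEq;
        case LT:  return kLt;
        case GT:  return kGt;
        case LTE: return kLe;
        case GTE: return kGe;
      }
      return kEq;  // unreachable for the comparison ops handled here
    }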

305
deps/v8/src/arm/ic-arm.cc

@@ -1301,6 +1301,143 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
 }
 
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm,
+    Label* fast_object,
+    Label* fast_double,
+    Label* slow,
+    KeyedStoreCheckMap check_map,
+    KeyedStoreIncrementLength increment_length,
+    Register value,
+    Register key,
+    Register receiver,
+    Register receiver_map,
+    Register elements_map,
+    Register elements) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  Register scratch_value = r4;
+  Register address = r5;
+  if (check_map == kCheckMap) {
+    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ cmp(elements_map,
+           Operand(masm->isolate()->factory()->fixed_array_map()));
+    __ b(ne, fast_double);
+  }
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  // It's irrelevant whether array is smi-only or not when writing a smi.
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
+  __ Ret();
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch_value,
+                             &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
+  // Update write barrier for the elements array address.
+  __ mov(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements,
+                 address,
+                 scratch_value,
+                 kLRHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+    __ b(ne, slow);
+  }
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value,
+                                 key,
+                                 elements,  // Overwritten.
+                                 r3,        // Scratch regs...
+                                 r4,
+                                 r5,
+                                 r6,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Ret();
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+  __ b(ne, &non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS,
+                                         receiver_map,
+                                         r4,
+                                         slow);
+  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         r4,
+                                         slow);
+  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
+  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         r4,
+                                         slow);
+  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+}
+
+
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                    StrictModeFlag strict_mode) {
   // ---------- S t a t e --------------
@@ -1309,11 +1446,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   //  -- r2     : receiver
   //  -- lr     : return address
   // -----------------------------------
-  Label slow, array, extra, check_if_double_array;
-  Label fast_object_with_map_check, fast_object_without_map_check;
-  Label fast_double_with_map_check, fast_double_without_map_check;
-  Label transition_smi_elements, finish_object_store, non_double_value;
-  Label transition_double_elements;
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array;
 
   // Register usage.
   Register value = r0;
// Check array bounds. Both the key and the length of FixedArray are smis. // Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip)); __ cmp(key, Operand(ip));
__ b(lo, &fast_object_with_map_check); __ b(lo, &fast_object);
// Slow case, handle jump to runtime. // Slow case, handle jump to runtime.
__ bind(&slow); __ bind(&slow);
@@ -1373,21 +1508,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ cmp(elements_map,
          Operand(masm->isolate()->factory()->fixed_array_map()));
   __ b(ne, &check_if_double_array);
-  // Calculate key + 1 as smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ add(r4, key, Operand(Smi::FromInt(1)));
-  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ b(&fast_object_without_map_check);
+  __ jmp(&fast_object_grow);
+
   __ bind(&check_if_double_array);
   __ cmp(elements_map,
          Operand(masm->isolate()->factory()->fixed_double_array_map()));
   __ b(ne, &slow);
-  // Add 1 to key, and go to common element store code for doubles.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ add(r4, key, Operand(Smi::FromInt(1)));
-  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ jmp(&fast_double_without_map_check);
+  __ jmp(&fast_double_grow);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -1399,106 +1526,15 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
   __ cmp(key, Operand(ip));
   __ b(hs, &extra);
-  // Fall through to fast case.
 
-  __ bind(&fast_object_with_map_check);
-  Register scratch_value = r4;
-  Register address = r5;
-  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ b(ne, &fast_double_with_map_check);
-  __ bind(&fast_object_without_map_check);
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(address));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch_value,
-                             &transition_smi_elements);
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch_value, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements,
-                 address,
-                 scratch_value,
-                 kLRHasNotBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(&fast_double_with_map_check);
-  // Check for fast double array case. If this fails, call through to the
-  // runtime.
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ b(ne, &slow);
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 receiver,
-                                 elements,
-                                 r3,
-                                 r4,
-                                 r5,
-                                 r6,
-                                 &transition_double_elements);
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
-  __ b(ne, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         &slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         &slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         &slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+                                  &slow, kCheckMap, kDontIncrementLength,
+                                  value, key, receiver, receiver_map,
+                                  elements_map, elements);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength,
+                                  value, key, receiver, receiver_map,
+                                  elements_map, elements);
 }
@@ -1662,42 +1698,21 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
 }
 
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
-  HandleScope scope;
-  Handle<Code> rewritten;
-  State previous_state = GetState();
-  State state = TargetState(previous_state, false, x, y);
-  if (state == GENERIC) {
-    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
-    rewritten = stub.GetCode();
-  } else {
-    ICCompareStub stub(op_, state);
-    if (state == KNOWN_OBJECTS) {
-      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
-    }
-    rewritten = stub.GetCode();
-  }
-  set_target(*rewritten);
-
-#ifdef DEBUG
-  if (FLAG_trace_ic) {
-    PrintF("[CompareIC (%s->%s)#%s]\n",
-           GetStateName(previous_state),
-           GetStateName(state),
-           Token::Name(op_));
-  }
-#endif
-
-  // Activate inlined smi code.
-  if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
-  }
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address cmp_instruction_address =
+      Assembler::return_address_from_call_start(address);
+
+  // If the instruction following the call is not a cmp rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(cmp_instruction_address);
+  return Assembler::IsCmpImmediate(instr);
 }
 
 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address cmp_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
+      Assembler::return_address_from_call_start(address);
 
   // If the instruction following the call is not a cmp rx, #yyy, nothing
   // was inlined.
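
Both functions above use the same trick: starting from the IC call site, an
inlined smi check was emitted exactly when the instruction at the call's
return address is a cmp rx, #imm. A standalone sketch of such a test (the
mask and value are this sketch's reading of the ARM CMP-immediate encoding,
not code from this diff):

    #include <stdint.h>

    // ARM CMP (immediate): cond 0011 0101 Rn 0000 imm12.
    bool LooksLikeCmpImmediate(uint32_t instr_after_call) {
      return (instr_after_call & 0x0ff0f000u) == 0x03500000u;
    }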

320
deps/v8/src/arm/lithium-arm.cc

@@ -177,6 +177,7 @@ const char* LArithmeticT::Mnemonic() const {
     case Token::BIT_AND: return "bit-and-t";
     case Token::BIT_OR: return "bit-or-t";
     case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
     case Token::SHL: return "shl-t";
     case Token::SAR: return "sar-t";
    case Token::SHR: return "shr-t";
@@ -194,22 +195,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
 void LBranch::PrintDataTo(StringStream* stream) {
   stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
 }
 
 void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
-  InputAt(0)->PrintTo(stream);
+  left()->PrintTo(stream);
   stream->Add(" %s ", Token::String(op()));
-  InputAt(1)->PrintTo(stream);
+  right()->PrintTo(stream);
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(kind() == kStrictEquality ? " === " : " == ");
   stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
@@ -218,57 +219,57 @@ void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
 void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_object(");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_string(");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_undetectable(");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if string_compare(");
-  InputAt(0)->PrintTo(stream);
-  InputAt(1)->PrintTo(stream);
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_cached_array_index(");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(", \"%o\") then B%d else B%d",
               *hydrogen()->class_name(),
               true_block_id(),
@@ -278,7 +279,7 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
 void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if typeof ");
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
   stream->Add(" == \"%s\" then B%d else B%d",
               *hydrogen()->type_literal()->ToCString(),
               true_block_id(), false_block_id());
@@ -292,26 +293,31 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
 void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
   stream->Add("/%s ", hydrogen()->OpName());
-  InputAt(0)->PrintTo(stream);
+  value()->PrintTo(stream);
+}
+
+
+void LMathExp::PrintDataTo(StringStream* stream) {
+  value()->PrintTo(stream);
 }
 
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  InputAt(0)->PrintTo(stream);
+  context()->PrintTo(stream);
   stream->Add("[%d]", slot_index());
 }
 
 void LStoreContextSlot::PrintDataTo(StringStream* stream) {
-  InputAt(0)->PrintTo(stream);
+  context()->PrintTo(stream);
   stream->Add("[%d] <- ", slot_index());
-  InputAt(1)->PrintTo(stream);
+  value()->PrintTo(stream);
 }
 
 void LInvokeFunction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  InputAt(0)->PrintTo(stream);
+  function()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
 }
@@ -340,17 +346,15 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
 void LCallNew::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  InputAt(0)->PrintTo(stream);
+  constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
 }
 
 void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
   arguments()->PrintTo(stream);
-
   stream->Add(" length ");
   length()->PrintTo(stream);
-
   stream->Add(" index ");
   index()->PrintTo(stream);
 }
@@ -374,20 +378,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
 }
 
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
   stream->Add("[");
   key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", additional_index());
+  } else {
+    stream->Add("]");
+  }
 }
 
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
   key()->PrintTo(stream);
-  stream->Add("] <- ");
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", additional_index());
+  } else {
+    stream->Add("] <- ");
+  }
   value()->PrintTo(stream);
 }
@@ -860,6 +871,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
       argument_count_,
       value_count,
       outer,
+      hydrogen_env->entry(),
       zone());
   int argument_index = *argument_index_accumulator;
   for (int i = 0; i < value_count; ++i) {
@@ -1034,6 +1046,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
     LOperand* input = UseFixedDouble(instr->value(), d2);
     LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
     return MarkAsCall(DefineFixedDouble(result, d2), instr);
+  } else if (op == kMathExp) {
+    ASSERT(instr->representation().IsDouble());
+    ASSERT(instr->value()->representation().IsDouble());
+    LOperand* input = UseTempRegister(instr->value());
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LOperand* double_temp = FixedTemp(d3);  // Chosen by fair dice roll.
+    LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+    return DefineAsRegister(result);
   } else if (op == kMathPowHalf) {
     LOperand* input = UseFixedDouble(instr->value(), d2);
     LOperand* temp = FixedTemp(d3);
@@ -1041,7 +1062,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
     return DefineFixedDouble(result, d2);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
-    LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+    LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
     LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
     switch (op) {
       case kMathAbs:
@@ -1108,6 +1130,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
 }
 
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoShr(HShr* instr) {
   return DoShift(Token::SHR, instr);
 }
@@ -1306,8 +1333,21 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
     return DefineAsRegister(mul);
 
   } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::MUL, instr);
+    if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
+      HAdd* add = HAdd::cast(instr->uses().value());
+      if (instr == add->left()) {
+        // This mul is the lhs of an add. The add and mul will be folded
+        // into a multiply-add.
+        return NULL;
+      }
+      if (instr == add->right() && !add->left()->IsMul()) {
+        // This mul is the rhs of an add, where the lhs is not another mul.
+        // The add and mul will be folded into a multiply-add.
+        return NULL;
+      }
+    }
 
+    return DoArithmeticD(Token::MUL, instr);
   } else {
     return DoArithmeticT(Token::MUL, instr);
   }
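
The early NULL returns above mean no separate multiply is emitted when a
double HMul has a single use and that use is an HAdd; DoMultiplyAdd (added
later in this diff) then folds the pair into one LMultiplyAddD, which maps
onto ARM's vmla multiply-accumulate. What the folded node evaluates, as a
standalone sketch (not V8 source):

    // LMultiplyAddD(addend, multiplier, multiplicand) computes:
    double MultiplyAdd(double addend, double multiplier, double multiplicand) {
      return addend + multiplier * multiplicand;
    }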
@@ -1318,6 +1358,12 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
+
+    if (instr->left()->IsConstant()) {
+      // If lhs is constant, do reverse subtraction instead.
+      return DoRSub(instr);
+    }
+
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new(zone()) LSubI(left, right);
@@ -1334,6 +1380,32 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
 }
 
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+
+  // Note: The lhs of the subtraction becomes the rhs of the
+  // reverse-subtraction.
+  LOperand* left = UseRegisterAtStart(instr->right());
+  LOperand* right = UseOrConstantAtStart(instr->left());
+  LRSubI* rsb = new(zone()) LRSubI(left, right);
+  LInstruction* result = DefineAsRegister(rsb);
+  if (instr->CheckFlag(HValue::kCanOverflow)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+  LOperand* addend_op = UseRegisterAtStart(addend);
+  return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+                                                     multiplicand_op));
+}
+
+
 LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
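
DoRSub exists because ARM's rsb (reverse subtract) computes "operand2 - Rn":
for "constant - x" it keeps x in the register slot and the constant in the
flexible second operand, saving a register move. Illustrative semantics, as a
standalone sketch (not V8 source):

    // rsb r0, r1, #8 computes r0 = 8 - r1, which is why DoRSub swaps the
    // HSub operands: the constant lhs becomes the rhs of LRSubI.
    int ReverseSubtract(int rn, int operand2) {
      return operand2 - rn;
    }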
@@ -1347,6 +1419,14 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
     }
     return result;
   } else if (instr->representation().IsDouble()) {
+    if (instr->left()->IsMul())
+      return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+    if (instr->right()->IsMul()) {
+      ASSERT(!instr->left()->IsMul());
+      return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+    }
+
     return DoArithmeticD(Token::ADD, instr);
   } else {
     ASSERT(instr->representation().IsTagged());
@@ -1412,7 +1492,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
 LInstruction* LChunkBuilder::DoCompareIDAndBranch(
     HCompareIDAndBranch* instr) {
-  Representation r = instr->GetInputRepresentation();
+  Representation r = instr->representation();
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
@@ -1566,6 +1646,16 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
 }
 
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegister(instr->string());
+  LOperand* index = UseRegister(instr->index());
+  LOperand* value = UseRegister(instr->value());
+  LSeqStringSetChar* result =
+      new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   LOperand* value = UseRegisterOrConstantAtStart(instr->index());
   LOperand* length = UseRegister(instr->length());
@@ -1617,8 +1707,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
                                                     : NULL;
-      LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
-                                                    : NULL;
+      LOperand* temp3 = FixedTemp(d11);
       res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
                                                      temp1,
                                                      temp2,
@@ -1690,10 +1779,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  LOperand* temp1 = TempRegister();
+  LUnallocated* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
-  return AssignEnvironment(result);
+  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+  return AssignEnvironment(Define(result, temp1));
 }
@ -1861,53 +1950,40 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
} }
LInstruction* LChunkBuilder::DoLoadKeyedFastElement( LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() || ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged()); instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object()); ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key()); LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result = LLoadKeyed* result = NULL;
new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseTempRegister(instr->elements());
} else {
ASSERT(instr->representation().IsTagged());
obj = UseRegisterAtStart(instr->elements());
}
result = new(zone()) LLoadKeyed(obj, key);
} else {
ASSERT(
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( DefineAsRegister(result);
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it // An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment. // has an environment.
return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ? bool can_deoptimize = instr->RequiresHoleCheck() ||
AssignEnvironment(load_instr) : load_instr; (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
return can_deoptimize ? AssignEnvironment(result) : result;
} }
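Editor's note: the rewritten DoLoadKeyed above folds three specialized handlers into one that branches first on the backing store and only then on the value representation. A minimal plain-C++ sketch of that decision tree, with invented names standing in for the lithium operand plumbing:

#include <cassert>

enum Storage { kFastBackingStore, kExternalArray };
enum Repr { kTagged, kDouble, kInteger32 };

// Mirrors the dispatch in DoLoadKeyed: internal stores split on tagged vs.
// double, external (typed) arrays take the pointer as-is. The strings are
// placeholders for the operand-allocation choices made in the diff.
const char* KeyedLoadPlan(Storage storage, Repr repr) {
  if (storage == kFastBackingStore) {
    return repr == kDouble ? "UseTempRegister(elements)"
                           : "UseRegisterAtStart(elements)";
  }
  assert(repr == kInteger32 || repr == kDouble);
  return "UseRegister(external_pointer)";
}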
@@ -1921,66 +1997,48 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
-    HStoreKeyedFastElement* instr) {
-  bool needs_write_barrier = instr->NeedsWriteBarrier();
-  ASSERT(instr->value()->representation().IsTagged());
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsInteger32() ||
-         instr->key()->representation().IsTagged());
-  LOperand* obj = UseTempRegister(instr->object());
-  LOperand* val = needs_write_barrier
-      ? UseTempRegister(instr->value())
-      : UseRegisterAtStart(instr->value());
-  LOperand* key = needs_write_barrier
-      ? UseTempRegister(instr->key())
-      : UseRegisterOrConstantAtStart(instr->key());
-  return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
-    HStoreKeyedFastDoubleElement* instr) {
-  ASSERT(instr->value()->representation().IsDouble());
-  ASSERT(instr->elements()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsInteger32() ||
-         instr->key()->representation().IsTagged());
-  LOperand* elements = UseRegisterAtStart(instr->elements());
-  LOperand* val = UseTempRegister(instr->value());
-  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
 
+  if (!instr->is_external()) {
+    ASSERT(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* key = NULL;
+    LOperand* val = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      val = UseTempRegister(instr->value());
+      key = UseRegisterOrConstantAtStart(instr->key());
+    } else {
+      ASSERT(instr->value()->representation().IsTagged());
+      object = UseTempRegister(instr->elements());
+      val = needs_write_barrier ? UseTempRegister(instr->value())
+                                : UseRegisterAtStart(instr->value());
+      key = needs_write_barrier ? UseTempRegister(instr->key())
+                                : UseRegisterOrConstantAtStart(instr->key());
+    }
+    return new(zone()) LStoreKeyed(object, key, val);
+  }
 
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
-    HStoreKeyedSpecializedArrayElement* instr) {
-  ElementsKind elements_kind = instr->elements_kind();
   ASSERT(
       (instr->value()->representation().IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
       (instr->value()->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
         (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-  ASSERT(instr->external_pointer()->representation().IsExternal());
-  ASSERT(instr->key()->representation().IsInteger32() ||
-         instr->key()->representation().IsTagged());
-  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  ASSERT(instr->elements()->representation().IsExternal());
   bool val_is_temp_register =
       elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
       elements_kind == EXTERNAL_FLOAT_ELEMENTS;
-  LOperand* val = val_is_temp_register
-      ? UseTempRegister(instr->value())
+  LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
                                        : UseRegister(instr->value());
-  LOperand* key = UseRegisterOrConstant(instr->key());
-  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                        key,
-                                                        val);
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* external_pointer = UseRegister(instr->elements());
+
+  return new(zone()) LStoreKeyed(external_pointer, key, val);
 }
@@ -2126,6 +2184,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  ASSERT(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
   return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2164,12 +2223,10 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
 LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
-  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* args = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = UseRegister(instr->index());
-  LAccessArgumentsAt* result =
-      new(zone()) LAccessArgumentsAt(arguments, length, index);
-  return AssignEnvironment(DefineAsRegister(result));
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
 }
@@ -2204,7 +2261,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
   env->set_ast_id(instr->ast_id());
   env->Drop(instr->pop_count());
-  for (int i = 0; i < instr->values()->length(); ++i) {
+  for (int i = instr->values()->length() - 1; i >= 0; --i) {
     HValue* value = instr->values()->at(i);
     if (instr->HasAssignedIndexAt(i)) {
       env->Bind(instr->GetAssignedIndexAt(i), value);
@@ -2253,6 +2310,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
   if (instr->arguments_var() != NULL) {
     inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
   }
+  inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2264,7 +2322,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
   HEnvironment* env = current_block_->last_environment();
-  if (instr->arguments_pushed()) {
+  if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
     argument_count_ -= argument_count;
@@ -2295,9 +2353,7 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
 LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
   LOperand* map = UseRegister(instr->map());
-  LOperand* scratch = TempRegister();
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LForInCacheArray(map, scratch)));
+  return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
 }

590
deps/v8/src/arm/lithium-arm.h

File diff suppressed because it is too large

1216
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

38
deps/v8/src/arm/lithium-codegen-arm.h

@@ -110,8 +110,9 @@ class LCodeGen BASE_EMBEDDED {
   void FinishCode(Handle<Code> code);
 
   // Deferred code support.
-  template<int T>
-  void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+  void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
+                              LOperand* left_argument,
+                              LOperand* right_argument,
                               Token::Value op);
   void DoDeferredNumberTagD(LNumberTagD* instr);
@@ -147,7 +148,10 @@ class LCodeGen BASE_EMBEDDED {
                int additional_offset);
 
   // Emit frame translation commands for an environment.
-  void WriteTranslation(LEnvironment* environment, Translation* translation);
+  void WriteTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        int* arguments_index,
+                        int* arguments_count);
 
   // Declare methods that deal with the individual node types.
 #define DECLARE_DO(type) void Do##type(L##type* node);
@@ -209,14 +213,18 @@ class LCodeGen BASE_EMBEDDED {
     RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
   };
 
-  void CallCode(Handle<Code> code,
-                RelocInfo::Mode mode,
-                LInstruction* instr);
+  void CallCode(
+      Handle<Code> code,
+      RelocInfo::Mode mode,
+      LInstruction* instr,
+      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
 
-  void CallCodeGeneric(Handle<Code> code,
-                       RelocInfo::Mode mode,
-                       LInstruction* instr,
-                       SafepointMode safepoint_mode);
+  void CallCodeGeneric(
+      Handle<Code> code,
+      RelocInfo::Mode mode,
+      LInstruction* instr,
+      SafepointMode safepoint_mode,
+      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
 
   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
@@ -258,7 +266,9 @@ class LCodeGen BASE_EMBEDDED {
   void AddToTranslation(Translation* translation,
                         LOperand* op,
                         bool is_tagged,
-                        bool is_uint32);
+                        bool is_uint32,
+                        int arguments_index,
+                        int arguments_count);
   void PopulateDeoptimizationData(Handle<Code> code);
   int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -367,6 +377,12 @@ class LCodeGen BASE_EMBEDDED {
   };
 
   void EnsureSpaceForLazyDeopt();
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
 
   Zone* zone_;
   LPlatformChunk* const chunk_;

288
deps/v8/src/arm/macro-assembler-arm.cc

@@ -108,7 +108,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
 
 int MacroAssembler::CallSize(Register target, Condition cond) {
-#if USE_BLX
+#ifdef USE_BLX
   return kInstrSize;
 #else
   return 2 * kInstrSize;
@@ -121,7 +121,7 @@ void MacroAssembler::Call(Register target, Condition cond) {
   BlockConstPoolScope block_const_pool(this);
   Label start;
   bind(&start);
-#if USE_BLX
+#ifdef USE_BLX
   blx(target, cond);
 #else
   // set lr for return at current pc + 8
@@ -158,15 +158,29 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(
 
 void MacroAssembler::Call(Address target,
                           RelocInfo::Mode rmode,
-                          Condition cond) {
+                          Condition cond,
+                          TargetAddressStorageMode mode) {
   // Block constant pool for the call instruction sequence.
   BlockConstPoolScope block_const_pool(this);
   Label start;
   bind(&start);
-#if USE_BLX
-  // On ARMv5 and after the recommended call sequence is:
-  //  ldr ip, [pc, #...]
-  //  blx ip
+
+  bool old_predictable_code_size = predictable_code_size();
+  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+    set_predictable_code_size(true);
+  }
+
+#ifdef USE_BLX
+  // Call sequence on V7 or later may be :
+  //  movw  ip, #... @ call address low 16
+  //  movt  ip, #... @ call address high 16
+  //  blx   ip
+  //                      @ return address
+  // Or for pre-V7 or values that may be back-patched
+  // to avoid ICache flushes:
+  //  ldr   ip, [pc, #...] @ call address
+  //  blx   ip
+  //                      @ return address
 
   // Statement positions are expected to be recorded when the target
   // address is loaded. The mov method will automatically record
@@ -177,15 +191,16 @@ void MacroAssembler::Call(Address target,
   mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
   blx(ip, cond);
 
-  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
 #else
   // Set lr for return at current pc + 8.
   mov(lr, Operand(pc), LeaveCC, cond);
   // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
   mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
-  ASSERT(kCallTargetAddressOffset == kInstrSize);
 #endif
   ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+    set_predictable_code_size(old_predictable_code_size);
+  }
 }
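Editor's note: the comment block above distinguishes the inline movw/movt form from the back-patchable constant-pool form. A rough model of the size difference (my own sketch; the instruction counts follow the commented sequences, and the helper name is invented):

#include <cstdint>

constexpr int kInstrSize = 4;  // every ARM-mode instruction is 4 bytes

// movw/movt + blx is three instructions with the target baked into the
// stream; ldr + blx is two instructions plus a 4-byte constant-pool slot
// that can be rewritten in place without flushing the instruction cache.
constexpr int CallSequenceSize(bool inline_target_address) {
  return (inline_target_address ? 3 : 2) * kInstrSize;
}

static_assert(CallSequenceSize(true) == 12, "ARMv7 inline-immediate form");
static_assert(CallSequenceSize(false) == 8, "back-patchable form");

This is why NEVER_INLINE_TARGET_ADDRESS forces predictable_code_size() for the duration of the call emission: a call site that may later be back-patched must stay on the constant-pool form.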
@@ -200,7 +215,8 @@ int MacroAssembler::CallSize(Handle<Code> code,
 void MacroAssembler::Call(Handle<Code> code,
                           RelocInfo::Mode rmode,
                           TypeFeedbackId ast_id,
-                          Condition cond) {
+                          Condition cond,
+                          TargetAddressStorageMode mode) {
   Label start;
   bind(&start);
   ASSERT(RelocInfo::IsCodeTarget(rmode));
@@ -209,9 +225,7 @@ void MacroAssembler::Call(Handle<Code> code,
     rmode = RelocInfo::CODE_TARGET_WITH_ID;
   }
   // 'code' is always generated ARM code, never THUMB code
-  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
-  ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
-            SizeOfCodeGeneratedSince(&start));
+  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
 }
@@ -288,17 +302,15 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                          Condition cond) {
   if (!src2.is_reg() &&
-      !src2.must_use_constant_pool(this) &&
+      !src2.must_output_reloc_info(this) &&
       src2.immediate() == 0) {
     mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
   } else if (!src2.is_single_instruction(this) &&
-             !src2.must_use_constant_pool(this) &&
+             !src2.must_output_reloc_info(this) &&
              CpuFeatures::IsSupported(ARMv7) &&
              IsPowerOf2(src2.immediate() + 1)) {
     ubfx(dst, src1, 0,
          WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
   } else {
     and_(dst, src1, src2, LeaveCC, cond);
   }
@@ -363,12 +375,14 @@ void MacroAssembler::Bfi(Register dst,
 }
 
-void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
+                         Condition cond) {
   ASSERT(lsb < 32);
   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
-    bic(dst, dst, Operand(mask));
+    bic(dst, src, Operand(mask));
   } else {
+    Move(dst, src, cond);
     bfc(dst, lsb, width, cond);
   }
 }
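Editor's note: Bfc now takes an explicit source register, and on pre-ARMv7 cores it falls back to a bic with a computed mask. The same computation in plain C++ (a sketch, assuming lsb + width < 32):

#include <cstdint>

// Clear bits [lsb, lsb + width) of src, as the bic fallback above does.
uint32_t BitFieldClear(uint32_t src, int lsb, int width) {
  uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  return src & ~mask;
}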
@@ -408,6 +422,17 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
+  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+      !Heap::RootCanBeWrittenAfterInitialization(index) &&
+      !predictable_code_size()) {
+    Handle<Object> root(isolate()->heap()->roots_array_start()[index]);
+    if (!isolate()->heap()->InNewSpace(*root)) {
+      // The CPU supports fast immediate values, and this root will never
+      // change. We will load it as a relocatable immediate value.
+      mov(destination, Operand(root), LeaveCC, cond);
+      return;
+    }
+  }
   ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
 }
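Editor's note: LoadRoot now inlines immortal roots as movw/movt immediates when it safely can. The guard it applies, restated as a standalone predicate (names simplified; an illustration, not V8's API):

// A root may be baked into the code stream only if the CPU has fast
// immediate loads, the root can never be overwritten after initialization,
// the code is not required to have a predictable layout, and the root
// object will never move (i.e. it is not in new space).
bool CanInlineRootAsImmediate(bool has_movw_movt, bool root_is_immortal,
                              bool predictable_code_size, bool in_new_space) {
  return has_movw_movt && root_is_immortal &&
         !predictable_code_size && !in_new_space;
}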
@@ -789,6 +814,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
 void MacroAssembler::Vmov(const DwVfpRegister dst,
                           const double imm,
+                          const Register scratch,
                           const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP2));
   static const DoubleRepresentation minus_zero(-0.0);
@@ -800,7 +826,7 @@ void MacroAssembler::Vmov(const DwVfpRegister dst,
   } else if (value.bits == minus_zero.bits) {
     vneg(dst, kDoubleRegZero, cond);
   } else {
-    vmov(dst, imm, cond);
+    vmov(dst, imm, scratch, cond);
   }
 }
@@ -1567,7 +1593,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
   Register topaddr = scratch1;
   Register obj_size_reg = scratch2;
   mov(topaddr, Operand(new_space_allocation_top));
-  mov(obj_size_reg, Operand(object_size));
+  Operand obj_size_operand = Operand(object_size);
+  if (!obj_size_operand.is_single_instruction(this)) {
+    // We are about to steal IP, so we need to load this value first
+    mov(obj_size_reg, obj_size_operand);
+  }
 
   // This code stores a temporary value in ip. This is OK, as the code below
   // does not need ip for implicit literal generation.
@@ -1589,7 +1619,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
 
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top.
-  add(scratch2, result, Operand(obj_size_reg), SetCC);
+  if (obj_size_operand.is_single_instruction(this)) {
+    // We can add the size as an immediate
+    add(scratch2, result, obj_size_operand, SetCC);
+  } else {
+    // Doesn't fit in an immediate, we have to use the register
+    add(scratch2, result, obj_size_reg, SetCC);
+  }
   b(cs, gc_required);
   cmp(scratch2, Operand(ip));
   b(hi, gc_required);
@@ -1751,10 +1787,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
                                          Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string
   // while observing object alignment.
-  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   ASSERT(kCharSize == 1);
   add(scratch1, length,
-      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+      Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate ASCII string in new space.
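Editor's note: the add/and pair above is the standard align-up idiom. In isolation (a sketch; on 32-bit ARM kObjectAlignment is 8, twice the pointer size):

#include <cstdint>

constexpr uint32_t kObjectAlignment = 8;
constexpr uint32_t kObjectAlignmentMask = kObjectAlignment - 1;

// Size in bytes of a sequential ASCII string, rounded up to the next
// object-alignment boundary, mirroring the scratch1 computation above.
uint32_t AsciiStringObjectSize(uint32_t length, uint32_t header_size) {
  return (length + header_size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}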
@@ -1920,13 +1956,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                  Register key_reg,
-                                                 Register receiver_reg,
                                                  Register elements_reg,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Register scratch3,
                                                  Register scratch4,
-                                                 Label* fail) {
+                                                 Label* fail,
+                                                 int elements_offset) {
   Label smi_value, maybe_nan, have_double_value, is_nan, done;
   Register mantissa_reg = scratch2;
   Register exponent_reg = scratch3;
@@ -1953,8 +1989,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   bind(&have_double_value);
   add(scratch1, elements_reg,
       Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  str(mantissa_reg, FieldMemOperand(
+      scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+      sizeof(kHoleNanLower32);
   str(exponent_reg, FieldMemOperand(scratch1, offset));
   jmp(&done);
@@ -1975,7 +2013,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   bind(&smi_value);
   add(scratch1, elements_reg,
-      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+              elements_offset));
   add(scratch1, scratch1,
       Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
   // scratch1 is now effective address of the double element
@@ -1987,7 +2026,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
     destination = FloatingPointHelper::kCoreRegisters;
   }
 
-  Register untagged_value = receiver_reg;
+  Register untagged_value = elements_reg;
   SmiUntag(untagged_value, value_reg);
   FloatingPointHelper::ConvertIntToDouble(this,
                                           untagged_value,
@@ -2184,12 +2223,28 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
   add(r6, r6, Operand(1));
   str(r6, MemOperand(r7, kLevelOffset));
 
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    PrepareCallCFunction(0, r0);
+    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+    PopSafepointRegisters();
+  }
+
   // Native call returns to the DirectCEntry stub which redirects to the
   // return address pushed on stack (could have moved after GC).
   // DirectCEntry stub itself is generated early and never moves.
   DirectCEntryStub stub;
   stub.GenerateCall(this, function);
 
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    PrepareCallCFunction(0, r0);
+    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+    PopSafepointRegisters();
+  }
+
   Label promote_scheduled_exception;
   Label delete_allocated_handles;
   Label leave_exit_frame;
@@ -2435,17 +2490,38 @@ void MacroAssembler::ConvertToInt32(Register source,
 }
 
 
+void MacroAssembler::TryFastDoubleToInt32(Register result,
+                                          DwVfpRegister double_input,
+                                          DwVfpRegister double_scratch,
+                                          Label* done) {
+  ASSERT(!double_input.is(double_scratch));
+
+  vcvt_s32_f64(double_scratch.low(), double_input);
+  vmov(result, double_scratch.low());
+  vcvt_f64_s32(double_scratch, double_scratch.low());
+  VFPCompareAndSetFlags(double_input, double_scratch);
+  b(eq, done);
+}
+
+
 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
-                                     SwVfpRegister result,
+                                     Register result,
                                      DwVfpRegister double_input,
-                                     Register scratch1,
-                                     Register scratch2,
+                                     Register scratch,
+                                     DwVfpRegister double_scratch,
                                      CheckForInexactConversion check_inexact) {
+  ASSERT(!result.is(scratch));
+  ASSERT(!double_input.is(double_scratch));
+
   ASSERT(CpuFeatures::IsSupported(VFP2));
   CpuFeatures::Scope scope(VFP2);
-  Register prev_fpscr = scratch1;
-  Register scratch = scratch2;
+  Register prev_fpscr = result;
+  Label done;
+
+  // Test for values that can be exactly represented as a signed 32-bit
+  // integer.
+  TryFastDoubleToInt32(result, double_input, double_scratch, &done);
+
+  // Convert to integer, respecting rounding mode.
   int32_t check_inexact_conversion =
       (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
@@ -2467,7 +2543,7 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
   vmsr(scratch);
 
   // Convert the argument to an integer.
-  vcvt_s32_f64(result,
+  vcvt_s32_f64(double_scratch.low(),
                double_input,
                (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
                                                : kFPSCRRounding);
@@ -2476,8 +2552,12 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
   vmrs(scratch);
   // Restore FPSCR.
   vmsr(prev_fpscr);
+  // Move the converted value into the result register.
+  vmov(result, double_scratch.low());
+
   // Check for vfp exceptions.
   tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
+
+  bind(&done);
 }
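Editor's note: TryFastDoubleToInt32 tests exactness with a convert/convert-back round trip. The same check in portable C++ (a sketch; the VFP instructions saturate out-of-range inputs, while C++ must range-check first to avoid undefined behavior):

#include <cstdint>
#include <optional>

std::optional<int32_t> TryFastDoubleToInt32(double input) {
  // Reject NaN and anything outside int32 range before casting.
  if (!(input >= -2147483648.0 && input <= 2147483647.0)) return std::nullopt;
  int32_t candidate = static_cast<int32_t>(input);  // vcvt_s32_f64 (truncate)
  if (static_cast<double>(candidate) == input) {    // vcvt_f64_s32 + compare
    return candidate;                               // exact: take fast path
  }
  return std::nullopt;                              // fall through to slow path
}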
@@ -2556,7 +2636,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
 void MacroAssembler::EmitECMATruncate(Register result,
                                       DwVfpRegister double_input,
-                                      SwVfpRegister single_scratch,
+                                      DwVfpRegister double_scratch,
                                       Register scratch,
                                       Register input_high,
                                       Register input_low) {
@@ -2567,16 +2647,18 @@ void MacroAssembler::EmitECMATruncate(Register result,
   ASSERT(!scratch.is(result) &&
          !scratch.is(input_high) &&
          !scratch.is(input_low));
-  ASSERT(!single_scratch.is(double_input.low()) &&
-         !single_scratch.is(double_input.high()));
+  ASSERT(!double_input.is(double_scratch));
 
   Label done;
 
+  // Test for values that can be exactly represented as a signed 32-bit
+  // integer.
+  TryFastDoubleToInt32(result, double_input, double_scratch, &done);
+
   // Clear cumulative exception flags.
   ClearFPSCRBits(kVFPExceptionMask, scratch);
   // Try a conversion to a signed integer.
-  vcvt_s32_f64(single_scratch, double_input);
-  vmov(result, single_scratch);
+  vcvt_s32_f64(double_scratch.low(), double_input);
+  vmov(result, double_scratch.low());
   // Retrieve he FPSCR.
   vmrs(scratch);
   // Check for overflow and NaNs.
@@ -3017,38 +3099,46 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
 }
 
 
-void MacroAssembler::AbortIfSmi(Register object) {
-  STATIC_ASSERT(kSmiTag == 0);
-  tst(object, Operand(kSmiTagMask));
-  Assert(ne, "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    tst(object, Operand(kSmiTagMask));
+    Check(ne, "Operand is a smi");
+  }
 }
 
 
-void MacroAssembler::AbortIfNotSmi(Register object) {
-  STATIC_ASSERT(kSmiTag == 0);
-  tst(object, Operand(kSmiTagMask));
-  Assert(eq, "Operand is not smi");
+void MacroAssembler::AssertSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    tst(object, Operand(kSmiTagMask));
+    Check(eq, "Operand is not smi");
+  }
 }
 
 
-void MacroAssembler::AbortIfNotString(Register object) {
-  STATIC_ASSERT(kSmiTag == 0);
-  tst(object, Operand(kSmiTagMask));
-  Assert(ne, "Operand is not a string");
-  push(object);
-  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
-  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
-  pop(object);
-  Assert(lo, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    tst(object, Operand(kSmiTagMask));
+    Check(ne, "Operand is a smi and not a string");
+    push(object);
+    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+    pop(object);
+    Check(lo, "Operand is not a string");
+  }
 }
 
 
-void MacroAssembler::AbortIfNotRootValue(Register src,
-                                         Heap::RootListIndex root_value_index,
-                                         const char* message) {
-  CompareRoot(src, root_value_index);
-  Assert(eq, message);
+void MacroAssembler::AssertRootValue(Register src,
                                     Heap::RootListIndex root_value_index,
                                     const char* message) {
+  if (emit_debug_code()) {
+    CompareRoot(src, root_value_index);
+    Check(eq, message);
+  }
 }
@@ -3106,7 +3196,8 @@ void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch1,
                                         Register scratch2,
                                         Register heap_number_map,
-                                        Label* gc_required) {
+                                        Label* gc_required,
+                                        TaggingMode tagging_mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   AllocateInNewSpace(HeapNumber::kSize,
@@ -3114,11 +3205,16 @@ void MacroAssembler::AllocateHeapNumber(Register result,
                      scratch1,
                      scratch2,
                      gc_required,
-                     TAG_OBJECT);
+                     tagging_mode == TAG_RESULT ? TAG_OBJECT :
+                                                  NO_ALLOCATION_FLAGS);
 
   // Store heap number map in the allocated object.
   AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+  if (tagging_mode == TAG_RESULT) {
+    str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+  } else {
+    str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+  }
 }
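Editor's note: the TAG_RESULT branch uses FieldMemOperand and the untagged branch a plain MemOperand; the only difference is compensating for the heap-object tag bit. Schematically (kHeapObjectTag is 1 in V8; the helper itself is illustrative):

#include <cstdint>

constexpr int kHeapObjectTag = 1;

// Address of a field at 'offset' inside an object, given either a tagged
// pointer (low bit set, as FieldMemOperand expects) or a raw pointer.
uintptr_t FieldAddress(uintptr_t object, int offset, bool tagged) {
  return tagged ? object + offset - kHeapObjectTag : object + offset;
}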
@@ -3189,17 +3285,17 @@ void MacroAssembler::CopyBytes(Register src,
   cmp(length, Operand(kPointerSize));
   b(lt, &byte_loop);
   ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
-#if CAN_USE_UNALIGNED_ACCESSES
-  str(scratch, MemOperand(dst, kPointerSize, PostIndex));
-#else
-  strb(scratch, MemOperand(dst, 1, PostIndex));
-  mov(scratch, Operand(scratch, LSR, 8));
-  strb(scratch, MemOperand(dst, 1, PostIndex));
-  mov(scratch, Operand(scratch, LSR, 8));
-  strb(scratch, MemOperand(dst, 1, PostIndex));
-  mov(scratch, Operand(scratch, LSR, 8));
-  strb(scratch, MemOperand(dst, 1, PostIndex));
-#endif
+  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+    str(scratch, MemOperand(dst, kPointerSize, PostIndex));
+  } else {
+    strb(scratch, MemOperand(dst, 1, PostIndex));
+    mov(scratch, Operand(scratch, LSR, 8));
+    strb(scratch, MemOperand(dst, 1, PostIndex));
+    mov(scratch, Operand(scratch, LSR, 8));
+    strb(scratch, MemOperand(dst, 1, PostIndex));
+    mov(scratch, Operand(scratch, LSR, 8));
+    strb(scratch, MemOperand(dst, 1, PostIndex));
+  }
   sub(length, length, Operand(kPointerSize));
   b(&word_loop);
@@ -3274,8 +3370,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
     Register scratch2,
     Label* failure) {
   int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+      kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
+      kStringRepresentationMask;
   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
   and_(scratch1, first, Operand(kFlatAsciiStringMask));
   and_(scratch2, second, Operand(kFlatAsciiStringMask));
   cmp(scratch1, Operand(kFlatAsciiStringTag));
@@ -3289,8 +3387,10 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                             Register scratch,
                                                             Label* failure) {
   int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+      kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
+      kStringRepresentationMask;
   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
   and_(scratch, type, Operand(kFlatAsciiStringMask));
   cmp(scratch, Operand(kFlatAsciiStringTag));
   b(ne, failure);
@@ -3481,7 +3581,7 @@ void MacroAssembler::CheckPageFlag(
     int mask,
     Condition cc,
     Label* condition_met) {
-  and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+  Bfc(scratch, object, 0, kPageSizeBits);
   ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   tst(scratch, Operand(mask));
   b(cc, condition_met);
@@ -3630,7 +3730,7 @@ void MacroAssembler::EnsureNotWhite(
   // For ASCII (char-size of 1) we shift the smi tag away to get the length.
   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
   // getting the length multiplied by 2.
-  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   ldr(ip, FieldMemOperand(value, String::kLengthOffset));
   tst(instance_type, Operand(kStringEncodingMask));
@@ -3676,7 +3776,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
 
   // Double value is >= 255, return 255.
   bind(&above_zero);
-  Vmov(temp_double_reg, 255.0);
+  Vmov(temp_double_reg, 255.0, result_reg);
   VFPCompareAndSetFlags(input_reg, temp_double_reg);
   b(le, &in_bounds);
   mov(result_reg, Operand(255));
@@ -3698,22 +3798,14 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
 
 void MacroAssembler::LoadInstanceDescriptors(Register map,
-                                             Register descriptors,
-                                             Register scratch) {
-  Register temp = descriptors;
-  ldr(temp, FieldMemOperand(map, Map::kTransitionsOrBackPointerOffset));
-
-  Label ok, fail;
-  CheckMap(temp,
-           scratch,
-           isolate()->factory()->fixed_array_map(),
-           &fail,
-           DONT_DO_SMI_CHECK);
-  ldr(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
-  jmp(&ok);
-  bind(&fail);
-  mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
-  bind(&ok);
+                                             Register descriptors) {
+  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
 }

105
deps/v8/src/arm/macro-assembler-arm.h

@@ -68,6 +68,13 @@ enum AllocationFlags {
   SIZE_IN_WORDS = 1 << 2
 };
 
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+  // Tag the result.
+  TAG_RESULT,
+  // Don't tag
+  DONT_TAG_RESULT
+};
 
 // Flags used for the ObjectToDoubleVFPRegister function.
 enum ObjectToDoubleFlags {
@@ -95,6 +102,11 @@ bool AreAliased(Register reg1,
 #endif
 
+enum TargetAddressStorageMode {
+  CAN_INLINE_TARGET_ADDRESS,
+  NEVER_INLINE_TARGET_ADDRESS
+};
 
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -114,7 +126,9 @@ class MacroAssembler: public Assembler {
   static int CallSizeNotPredictableCodeSize(Address target,
                                             RelocInfo::Mode rmode,
                                             Condition cond = al);
-  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Address target, RelocInfo::Mode rmode,
+            Condition cond = al,
+            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
   int CallSize(Handle<Code> code,
                RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
@@ -122,7 +136,8 @@ class MacroAssembler: public Assembler {
   void Call(Handle<Code> code,
             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             TypeFeedbackId ast_id = TypeFeedbackId::None(),
-            Condition cond = al);
+            Condition cond = al,
+            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
   void Ret(Condition cond = al);
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -154,7 +169,7 @@ class MacroAssembler: public Assembler {
            int lsb,
            int width,
            Condition cond = al);
-  void Bfc(Register dst, int lsb, int width, Condition cond = al);
+  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
   void Usat(Register dst, int satpos, const Operand& src,
             Condition cond = al);
@@ -307,6 +322,7 @@ class MacroAssembler: public Assembler {
   // Push a handle.
   void Push(Handle<Object> handle);
+  void Push(Smi* smi) { Push(Handle<Smi>(smi)); }
 
   // Push two registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {
@@ -483,6 +499,7 @@ class MacroAssembler: public Assembler {
   void Vmov(const DwVfpRegister dst,
             const double imm,
+            const Register scratch = no_reg,
             const Condition cond = al);
 
   // Enter exit frame.
@@ -730,7 +747,8 @@ class MacroAssembler: public Assembler {
                           Register scratch1,
                           Register scratch2,
                           Register heap_number_map,
-                          Label* gc_required);
+                          Label* gc_required,
+                          TaggingMode tagging_mode = TAG_RESULT);
   void AllocateHeapNumberWithValue(Register result,
                                    DwVfpRegister value,
                                    Register scratch1,
@@ -814,13 +832,14 @@ class MacroAssembler: public Assembler {
   // case scratch2, scratch3 and scratch4 are unmodified.
   void StoreNumberToDoubleElements(Register value_reg,
                                    Register key_reg,
-                                   Register receiver_reg,
+                                   // All regs below here overwritten.
                                    Register elements_reg,
                                    Register scratch1,
                                    Register scratch2,
                                    Register scratch3,
                                    Register scratch4,
-                                   Label* fail);
+                                   Label* fail,
+                                   int elements_offset = 0);
 
   // Compare an object's map with the specified map and its transitioned
   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
@@ -875,12 +894,15 @@ class MacroAssembler: public Assembler {
   // Load and check the instance type of an object for being a string.
   // Loads the type into the second argument register.
-  // Returns a condition that will be enabled if the object was a string.
+  // Returns a condition that will be enabled if the object was a string
+  // and the passed-in condition passed. If the passed-in condition failed
+  // then flags remain unchanged.
   Condition IsObjectStringType(Register obj,
-                               Register type) {
-    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
-    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
-    tst(type, Operand(kIsNotStringMask));
+                               Register type,
+                               Condition cond = al) {
+    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
+    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
+    tst(type, Operand(kIsNotStringMask), cond);
     ASSERT_EQ(0, kStringTag);
     return eq;
   }
@@ -937,21 +959,30 @@ class MacroAssembler: public Assembler {
                       DwVfpRegister double_scratch,
                       Label *not_int32);
 
-  // Truncates a double using a specific rounding mode.
+  // Try to convert a double to a signed 32-bit integer. If the double value
+  // can be exactly represented as an integer, the code jumps to 'done' and
+  // 'result' contains the integer value. Otherwise, the code falls through.
+  void TryFastDoubleToInt32(Register result,
+                            DwVfpRegister double_input,
+                            DwVfpRegister double_scratch,
+                            Label* done);
+
+  // Truncates a double using a specific rounding mode, and writes the value
+  // to the result register.
   // Clears the z flag (ne condition) if an overflow occurs.
-  // If exact_conversion is true, the z flag is also cleared if the conversion
-  // was inexact, i.e. if the double value could not be converted exactly
-  // to a 32bit integer.
+  // If kCheckForInexactConversion is passed, the z flag is also cleared if the
+  // conversion was inexact, i.e. if the double value could not be converted
+  // exactly to a 32-bit integer.
   void EmitVFPTruncate(VFPRoundingMode rounding_mode,
-                       SwVfpRegister result,
+                       Register result,
                        DwVfpRegister double_input,
-                       Register scratch1,
-                       Register scratch2,
+                       Register scratch,
+                       DwVfpRegister double_scratch,
                        CheckForInexactConversion check
                            = kDontCheckForInexactConversion);
 
   // Helper for EmitECMATruncate.
-  // This will truncate a floating-point value outside of the singed 32bit
+  // This will truncate a floating-point value outside of the signed 32bit
   // integer range to a 32bit signed integer.
   // Expects the double value loaded in input_high and input_low.
   // Exits with the answer in 'result'.
@@ -966,7 +997,7 @@ class MacroAssembler: public Assembler {
   // Exits with 'result' holding the answer and all other registers clobbered.
   void EmitECMATruncate(Register result,
                         DwVfpRegister double_input,
-                        SwVfpRegister single_scratch,
+                        DwVfpRegister double_scratch,
                         Register scratch,
                         Register scratch2,
                         Register scratch3);
@@ -1183,7 +1214,7 @@ class MacroAssembler: public Assembler {
   // Souce and destination can be the same register.
   void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
 
-  // Jump the register contains a smi.
+  // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label) {
     tst(value, Operand(kSmiTagMask));
     b(eq, smi_label);
@@ -1198,17 +1229,18 @@ class MacroAssembler: public Assembler {
   // Jump if either of the registers contain a smi.
   void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
 
-  // Abort execution if argument is a smi. Used in debug code.
-  void AbortIfSmi(Register object);
-  void AbortIfNotSmi(Register object);
+  // Abort execution if argument is a smi, enabled via --debug-code.
+  void AssertNotSmi(Register object);
+  void AssertSmi(Register object);
 
-  // Abort execution if argument is a string. Used in debug code.
-  void AbortIfNotString(Register object);
+  // Abort execution if argument is a string, enabled via --debug-code.
+  void AssertString(Register object);
 
-  // Abort execution if argument is not the root value with the given index.
-  void AbortIfNotRootValue(Register src,
-                           Heap::RootListIndex root_value_index,
-                           const char* message);
+  // Abort execution if argument is not the root value with the given index,
+  // enabled via --debug-code.
+  void AssertRootValue(Register src,
                       Heap::RootListIndex root_value_index,
                       const char* message);
 
   // ---------------------------------------------------------------------------
   // HeapNumber utilities
@@ -1269,10 +1301,17 @@ class MacroAssembler: public Assembler {
                            DoubleRegister temp_double_reg);
 
-  void LoadInstanceDescriptors(Register map,
-                               Register descriptors,
-                               Register scratch);
+  void LoadInstanceDescriptors(Register map, Register descriptors);
   void EnumLength(Register dst, Register map);
+  void NumberOfOwnDescriptors(Register dst, Register map);
+
+  template<typename Field>
+  void DecodeField(Register reg) {
+    static const int shift = Field::kShift;
+    static const int mask = (Field::kMask >> shift) << kSmiTagSize;
+    mov(reg, Operand(reg, LSR, shift));
+    and_(reg, reg, Operand(mask));
+  }
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
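Editor's note: DecodeField extracts a bit field in place with a shift and a mask; note the extra << kSmiTagSize in the mask, which keeps the decoded value a valid smi. The equivalent arithmetic in plain C++ (a sketch; kSmiTagSize is 1 on 32-bit ARM):

#include <cstdint>

// What DecodeField<Field>(reg) leaves in reg: the field shifted down, masked,
// and still carrying a zero smi tag bit in the low position.
uint32_t DecodeField(uint32_t word, int field_shift, uint32_t field_mask) {
  uint32_t mask = (field_mask >> field_shift) << 1;  // kSmiTagSize == 1
  return (word >> field_shift) & mask;
}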

15
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -1150,7 +1150,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
 
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1181,7 +1181,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
   }
 
   // String might have changed.
-  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1358,6 +1358,11 @@ void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
 }
 
 
+bool RegExpMacroAssemblerARM::CanReadUnaligned() {
+  return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
+}
+
+
 void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
                                                             int characters) {
   Register offset = current_input_offset();
@@ -1370,9 +1375,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
   // and the operating system running on the target allow it.
   // If unaligned load/stores are not supported then this function must only
   // be used to load a single character at a time.
-#if !V8_TARGET_CAN_READ_UNALIGNED
-  ASSERT(characters == 1);
-#endif
+  if (!CanReadUnaligned()) {
+    ASSERT(characters == 1);
+  }
 
   if (mode_ == ASCII) {
     if (characters == 4) {

1
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -109,6 +109,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);
+  virtual bool CanReadUnaligned();
 
   // Called from RegExp if the stack-guard is triggered.
   // If the code object is relocated, the return address is fixed before

290
deps/v8/src/arm/simulator-arm.cc

@@ -1066,111 +1066,83 @@ void Simulator::TrashCallerSaveRegisters() {

 int Simulator::ReadW(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-  return *ptr;
-#else
-  if ((addr & 3) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     return *ptr;
+  } else {
+    PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+           addr,
+           reinterpret_cast<intptr_t>(instr));
+    UNIMPLEMENTED();
+    return 0;
   }
-  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
-  UNIMPLEMENTED();
-  return 0;
-#endif
 }


 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-  *ptr = value;
-  return;
-#else
-  if ((addr & 3) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     *ptr = value;
-    return;
+  } else {
+    PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+           addr,
+           reinterpret_cast<intptr_t>(instr));
+    UNIMPLEMENTED();
   }
-  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
-  UNIMPLEMENTED();
-#endif
 }


 uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-  return *ptr;
-#else
-  if ((addr & 1) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
+  } else {
+    PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
+           V8PRIxPTR "\n",
+           addr,
+           reinterpret_cast<intptr_t>(instr));
+    UNIMPLEMENTED();
+    return 0;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
-  UNIMPLEMENTED();
-  return 0;
-#endif
 }


 int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-  return *ptr;
-#else
-  if ((addr & 1) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     return *ptr;
+  } else {
+    PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
+    UNIMPLEMENTED();
+    return 0;
   }
-  PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
-  UNIMPLEMENTED();
-  return 0;
-#endif
 }


 void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-  *ptr = value;
-  return;
-#else
-  if ((addr & 1) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     *ptr = value;
-    return;
+  } else {
+    PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
+           V8PRIxPTR "\n",
+           addr,
+           reinterpret_cast<intptr_t>(instr));
+    UNIMPLEMENTED();
   }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
-  UNIMPLEMENTED();
-#endif
 }


 void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-  *ptr = value;
-  return;
-#else
-  if ((addr & 1) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     *ptr = value;
-    return;
+  } else {
+    PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+           addr,
+           reinterpret_cast<intptr_t>(instr));
+    UNIMPLEMENTED();
   }
-  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
-  UNIMPLEMENTED();
-#endif
 }
@@ -1199,37 +1171,26 @@ void Simulator::WriteB(int32_t addr, int8_t value) {

 int32_t* Simulator::ReadDW(int32_t addr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  int32_t* ptr = reinterpret_cast<int32_t*>(addr);
-  return ptr;
-#else
-  if ((addr & 3) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
     int32_t* ptr = reinterpret_cast<int32_t*>(addr);
     return ptr;
+  } else {
+    PrintF("Unaligned read at 0x%08x\n", addr);
+    UNIMPLEMENTED();
+    return 0;
   }
-  PrintF("Unaligned read at 0x%08x\n", addr);
-  UNIMPLEMENTED();
-  return 0;
-#endif
 }


 void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
-#if V8_TARGET_CAN_READ_UNALIGNED
-  int32_t* ptr = reinterpret_cast<int32_t*>(addr);
-  *ptr++ = value1;
-  *ptr = value2;
-  return;
-#else
-  if ((addr & 3) == 0) {
+  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
     int32_t* ptr = reinterpret_cast<int32_t*>(addr);
     *ptr++ = value1;
     *ptr = value2;
-    return;
+  } else {
+    PrintF("Unaligned write at 0x%08x\n", addr);
+    UNIMPLEMENTED();
   }
-  PrintF("Unaligned write at 0x%08x\n", addr);
-  UNIMPLEMENTED();
-#endif
 }
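The change repeated across all of the accessors above swaps a compile-time #if V8_TARGET_CAN_READ_UNALIGNED for a runtime check of FLAG_enable_unaligned_accesses, so one binary can serve both ARM cores that tolerate unaligned loads and those that do not. A minimal standalone sketch of the new shape (the flag name comes from the diff; the memory buffer and abort handling are illustrative stand-ins, and memcpy replaces the simulator's raw pointer dereference to keep the sketch portable):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in for V8's --enable-unaligned-accesses runtime flag.
static bool FLAG_enable_unaligned_accesses = true;

// Shape of the new Simulator::ReadW: one runtime branch instead of an #ifdef.
int32_t ReadW(const uint8_t* mem, int32_t addr) {
  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
    int32_t value;
    std::memcpy(&value, mem + addr, sizeof(value));  // Tolerates any alignment.
    return value;
  } else {
    std::printf("Unaligned read at 0x%08x\n", addr);
    std::abort();  // The simulator calls UNIMPLEMENTED() here.
  }
}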
@@ -1426,7 +1387,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
       }

       case ROR: {
-        UNIMPLEMENTED();
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else {
+          uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+          uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+          result = right | left;
+          *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+        }
         break;
       }
@@ -1498,7 +1466,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
       }

       case ROR: {
-        UNIMPLEMENTED();
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else {
+          uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+          uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+          result = right | left;
+          *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+        }
         break;
       }
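Both GetShiftRm hunks replace the UNIMPLEMENTED() stub with a real rotate-right shifter operand: for a nonzero shift the 32-bit value is rotated and the carry flag becomes the new bit 31, while a shift of zero leaves value and carry untouched. The same logic as a standalone function (names are mine; the RRX special case is handled elsewhere in the simulator):

#include <cstdint>

// Rotate-right shifter operand, as the simulator's ROR case now computes it.
// Valid for shift amounts 1..31; shift == 0 keeps value and carry unchanged.
uint32_t RotateRight32(uint32_t value, int shift, bool c_flag, bool* carry_out) {
  if (shift == 0) {
    *carry_out = c_flag;
    return value;
  }
  uint32_t result = (value >> shift) | (value << (32 - shift));
  *carry_out = (result >> 31) != 0;  // Carry is the bit rotated into the top.
  return result;
}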
@@ -1986,11 +1961,23 @@ void Simulator::DecodeType01(Instruction* instr) {
           SetNZFlags(alu_out);
         }
       } else {
-        // The MLA instruction description (A 4.1.28) refers to the order
-        // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
-        // Rn field to encode the Rd register and the Rd field to encode
-        // the Rn register.
-        Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+        int rd = instr->RdValue();
+        int32_t acc_value = get_register(rd);
+        if (instr->Bit(22) == 0) {
+          // The MLA instruction description (A 4.1.28) refers to the order
+          // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+          // Rn field to encode the Rd register and the Rd field to encode
+          // the Rn register.
+          // Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+          int32_t mul_out = rm_val * rs_val;
+          int32_t result = acc_value + mul_out;
+          set_register(rn, result);
+        } else {
+          // Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+          int32_t mul_out = rm_val * rs_val;
+          int32_t result = acc_value - mul_out;
+          set_register(rn, result);
+        }
       }
     } else {
       // The signed/long multiply instructions use the terms RdHi and RdLo
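The rewritten branch decodes bit 22 to tell MLA (multiply-accumulate) from MLS (multiply-subtract) and actually executes them instead of only formatting a trace line. Stripped of the register-file plumbing, the arithmetic is just the following (a hedged scalar sketch; operand roles follow the simulator's variable names rather than the encoding fields the original comment warns about):

#include <cstdint>

// MLA (bit 22 == 0): rd = acc + rm * rs
// MLS (bit 22 == 1): rd = acc - rm * rs
// Only the low 32 bits of the product are kept, as on the hardware.
// The arithmetic is done in uint32_t so wraparound is well defined in C++.
int32_t MultiplyAccumulate(int32_t acc, int32_t rm_val, int32_t rs_val,
                           bool is_mls) {
  uint32_t mul_out =
      static_cast<uint32_t>(rm_val) * static_cast<uint32_t>(rs_val);
  uint32_t result = is_mls ? static_cast<uint32_t>(acc) - mul_out
                           : static_cast<uint32_t>(acc) + mul_out;
  return static_cast<int32_t>(result);
}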
@@ -2210,6 +2197,8 @@ void Simulator::DecodeType01(Instruction* instr) {
       PrintF("%08x\n", instr->InstructionBits());
       UNIMPLEMENTED();
     }
+  } else if ((type == 1) && instr->IsNopType1()) {
+    // NOP.
   } else {
     int rd = instr->RdValue();
     int rn = instr->RnValue();
@@ -2546,6 +2535,25 @@ void Simulator::DecodeType3(Instruction* instr) {
       break;
     }
     case db_x: {
+      if (FLAG_enable_sudiv) {
+        if (!instr->HasW()) {
+          if (instr->Bits(5, 4) == 0x1) {
+            if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+              // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
+              // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs);
+              int rm = instr->RmValue();
+              int32_t rm_val = get_register(rm);
+              int rs = instr->RsValue();
+              int32_t rs_val = get_register(rs);
+              int32_t ret_val = 0;
+              ASSERT(rs_val != 0);
+              ret_val = rm_val/rs_val;
+              set_register(rn, ret_val);
+              return;
+            }
+          }
+        }
+      }
       // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       addr = rn_val - shifter_operand;
       if (instr->HasW()) {
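With --enable-sudiv the simulator now recognizes the SDIV encoding inside the db_x case and performs a signed 32-bit division, rn = rm / rs. Note the ASSERT simply forbids a zero divisor; my recollection of the ARM ARM is that hardware SDIV writes 0 in that case (or traps when so configured), a behaviour the simulator sidesteps entirely. As a plain function:

#include <cassert>
#include <cstdint>

// What the new SDIV path computes: rn = rm / rs, signed.
// The simulator asserts on rs == 0 instead of modelling the hardware's
// divide-by-zero result; the INT_MIN / -1 overflow case is likewise
// ignored here, as in the diff.
int32_t SignedDivide(int32_t rm_val, int32_t rs_val) {
  assert(rs_val != 0);
  return rm_val / rs_val;
}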
@@ -2770,6 +2778,20 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
     double dm_value = get_double_from_d_register(vm);
     double dd_value = dn_value * dm_value;
     set_d_register_from_double(vd, dd_value);
+  } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+    // vmla
+    if (instr->SzValue() != 0x1) {
+      UNREACHABLE();  // Not used by V8.
+    }
+
+    double dd_value = get_double_from_d_register(vd);
+    double dn_value = get_double_from_d_register(vn);
+    double dm_value = get_double_from_d_register(vm);
+
+    // Note: we do the mul and add in separate steps to avoid getting a result
+    // with too high precision.
+    set_d_register_from_double(vd, dn_value * dm_value);
+    set_d_register_from_double(vd, get_double_from_d_register(vd) + dd_value);
   } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
     // vdiv
     if (instr->SzValue() != 0x1) {
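The two separate set_d_register_from_double calls in the vmla path are deliberate: VFP's vmla is a chained multiply-add, so the product must be rounded to double precision before the accumulate. Written as one C++ expression, a compiler could contract dn * dm + dd into a fused multiply-add with a single rounding, and the simulator would diverge from hardware. A sketch of the distinction (volatile used here as a portable way to block contraction; names are mine):

// Chained multiply-add: round the product first, then add. This is what
// the simulator's two-store sequence guarantees for vmla.
double ChainedMultiplyAdd(double dd, double dn, double dm) {
  volatile double product = dn * dm;  // Force rounding to double.
  return product + dd;
}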
@@ -3279,33 +3301,7 @@ void Simulator::Execute() {
 }


-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
-  va_list parameters;
-  va_start(parameters, argument_count);
-  // Set up arguments
-
-  // First four arguments passed in registers.
-  ASSERT(argument_count >= 4);
-  set_register(r0, va_arg(parameters, int32_t));
-  set_register(r1, va_arg(parameters, int32_t));
-  set_register(r2, va_arg(parameters, int32_t));
-  set_register(r3, va_arg(parameters, int32_t));
-
-  // Remaining arguments passed on stack.
-  int original_stack = get_register(sp);
-  // Compute position of stack on entry to generated code.
-  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
-  if (OS::ActivationFrameAlignment() != 0) {
-    entry_stack &= -OS::ActivationFrameAlignment();
-  }
-  // Store remaining arguments on stack, from low to high memory.
-  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
-  for (int i = 4; i < argument_count; i++) {
-    stack_argument[i - 4] = va_arg(parameters, int32_t);
-  }
-  va_end(parameters);
-  set_register(sp, entry_stack);
-
+void Simulator::CallInternal(byte* entry) {
   // Prepare to execute the code at entry
   set_register(pc, reinterpret_cast<int32_t>(entry));
   // Put down marker for end of simulation. The simulator will stop simulation
@@ -3359,6 +3355,37 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   set_register(r9, r9_val);
   set_register(r10, r10_val);
   set_register(r11, r11_val);
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Set up arguments
+
+  // First four arguments passed in registers.
+  ASSERT(argument_count >= 4);
+  set_register(r0, va_arg(parameters, int32_t));
+  set_register(r1, va_arg(parameters, int32_t));
+  set_register(r2, va_arg(parameters, int32_t));
+  set_register(r3, va_arg(parameters, int32_t));
+
+  // Remaining arguments passed on stack.
+  int original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+  if (OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -OS::ActivationFrameAlignment();
+  }
+  // Store remaining arguments on stack, from low to high memory.
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+  for (int i = 4; i < argument_count; i++) {
+    stack_argument[i - 4] = va_arg(parameters, int32_t);
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
+
+  CallInternal(entry);

   // Pop stack passed arguments.
   CHECK_EQ(entry_stack, get_register(sp));
@@ -3369,6 +3396,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
 }


+double Simulator::CallFP(byte* entry, double d0, double d1) {
+  if (use_eabi_hardfloat()) {
+    set_d_register_from_double(0, d0);
+    set_d_register_from_double(1, d1);
+  } else {
+    int buffer[2];
+    ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
+    memcpy(buffer, &d0, sizeof(d0));
+    set_dw_register(0, buffer);
+    memcpy(buffer, &d1, sizeof(d1));
+    set_dw_register(2, buffer);
+  }
+  CallInternal(entry);
+  if (use_eabi_hardfloat()) {
+    return get_double_from_d_register(0);
+  } else {
+    return get_double_from_register_pair(0);
+  }
+}
+
+
 uintptr_t Simulator::PushAddress(uintptr_t address) {
   int new_sp = get_register(sp) - sizeof(uintptr_t);
   uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
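CallFP follows the two ARM EABI variants: with hardfloat the doubles go directly into d0/d1, while softfloat passes each double's raw bits in a pair of core registers (r0:r1 and r2:r3). The bit-splitting step, isolated (little-endian host assumed; names are mine):

#include <cstdint>
#include <cstring>

// How CallFP marshals one double into two 32-bit core registers for the
// soft-float calling convention. No numeric conversion takes place; the
// 64-bit pattern is simply split into low and high words.
void SplitDouble(double d, int32_t* lo, int32_t* hi) {
  int32_t buffer[2];
  std::memcpy(buffer, &d, sizeof(d));
  *lo = buffer[0];
  *hi = buffer[1];
}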

4
deps/v8/src/arm/simulator-arm.h

@@ -205,6 +205,8 @@ class Simulator {
   // generated RegExp code with 7 parameters. This is a convenience function,
   // which sets up the simulator state and grabs the result on return.
   int32_t Call(byte* entry, int argument_count, ...);
+  // Alternative: call a 2-argument double function.
+  double CallFP(byte* entry, double d0, double d1);

   // Push an address onto the JS stack.
   uintptr_t PushAddress(uintptr_t address);
@@ -356,6 +358,8 @@ class Simulator {
   template<class InputType, int register_size>
   void SetVFPRegister(int reg_index, const InputType& value);

+  void CallInternal(byte* entry);
+
   // Architecture state.
   // Saturating instructions require a Q flag to indicate saturation.
   // There is currently no way to read the CPSR directly, and thus read the Q

206
deps/v8/src/arm/stub-cache-arm.cc

@@ -327,18 +327,23 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                             Register dst,
                                             Register src,
                                             Handle<JSObject> holder,
-                                            int index) {
-  // Adjust for the number of properties stored in the holder.
-  index -= holder->map()->inobject_properties();
-  if (index < 0) {
-    // Get the property straight out of the holder.
-    int offset = holder->map()->instance_size() + (index * kPointerSize);
+                                            PropertyIndex index) {
+  if (index.is_header_index()) {
+    int offset = index.header_index() * kPointerSize;
     __ ldr(dst, FieldMemOperand(src, offset));
   } else {
-    // Calculate the offset into the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
-    __ ldr(dst, FieldMemOperand(dst, offset));
+    // Adjust for the number of properties stored in the holder.
+    int slot = index.field_index() - holder->map()->inobject_properties();
+    if (slot < 0) {
+      // Get the property straight out of the holder.
+      int offset = holder->map()->instance_size() + (slot * kPointerSize);
+      __ ldr(dst, FieldMemOperand(src, offset));
+    } else {
+      // Calculate the offset into the properties array.
+      int offset = slot * kPointerSize + FixedArray::kHeaderSize;
+      __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+      __ ldr(dst, FieldMemOperand(dst, offset));
+    }
   }
 }
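GenerateFastPropertyLoad now receives a PropertyIndex, which distinguishes three storage locations instead of two: a fixed header slot, an in-object field at the end of the instance, or a slot in the out-of-line properties array. The decision tree the stub emits, in ordinary C++ pointer arithmetic (the struct-free layout below is illustrative, not V8's real object model):

#include <cstdint>

// The three load shapes, mirroring the new code paths: header slot,
// in-object field (negative slot), properties-array slot.
intptr_t LoadField(const char* object, const char* properties,
                   bool is_header_index, int header_index, int field_index,
                   int inobject_properties, int instance_size) {
  const int kPointerSize = sizeof(intptr_t);
  if (is_header_index) {
    // A slot in the object's fixed header.
    return *reinterpret_cast<const intptr_t*>(
        object + header_index * kPointerSize);
  }
  int slot = field_index - inobject_properties;
  if (slot < 0) {
    // In-object property, stored at the tail of the instance.
    return *reinterpret_cast<const intptr_t*>(
        object + instance_size + slot * kPointerSize);
  }
  // Out-of-line property, read through the properties array.
  return *reinterpret_cast<const intptr_t*>(properties + slot * kPointerSize);
}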
@@ -1196,7 +1201,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
-                                     int index,
+                                     PropertyIndex index,
                                      Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
@@ -1545,7 +1550,7 @@ void CallStubCompiler::GenerateMissBranch() {
 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
                                                 Handle<JSObject> holder,
-                                                int index,
+                                                PropertyIndex index,
                                                 Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -1618,7 +1623,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
     Label call_builtin;

     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label attempt_to_grow_elements;
+      Label attempt_to_grow_elements, with_write_barrier, check_double;

       Register elements = r6;
       Register end_elements = r5;
@@ -1629,10 +1634,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ CheckMap(elements,
                   r0,
                   Heap::kFixedArrayMapRootIndex,
-                  &call_builtin,
+                  &check_double,
                   DONT_DO_SMI_CHECK);

       // Get the array's length into r0 and calculate new length.
       __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
       STATIC_ASSERT(kSmiTagSize == 1);
@@ -1647,7 +1651,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ b(gt, &attempt_to_grow_elements);

       // Check if value is a smi.
-      Label with_write_barrier;
       __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
       __ JumpIfNotSmi(r4, &with_write_barrier);
@@ -1667,6 +1670,40 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ Drop(argc + 1);
       __ Ret();

+      __ bind(&check_double);
+
+      // Check that the elements are in fast mode and writable.
+      __ CheckMap(elements,
+                  r0,
+                  Heap::kFixedDoubleArrayMapRootIndex,
+                  &call_builtin,
+                  DONT_DO_SMI_CHECK);
+
+      // Get the array's length into r0 and calculate new length.
+      __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      STATIC_ASSERT(kSmiTagSize == 1);
+      STATIC_ASSERT(kSmiTag == 0);
+      __ add(r0, r0, Operand(Smi::FromInt(argc)));
+
+      // Get the elements' length.
+      __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+      // Check if we could survive without allocation.
+      __ cmp(r0, r4);
+      __ b(gt, &call_builtin);
+
+      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ StoreNumberToDoubleElements(
+          r4, r0, elements, r3, r5, r2, r9,
+          &call_builtin, argc * kDoubleSize);
+
+      // Save new length.
+      __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Check for a smi.
+      __ Drop(argc + 1);
+      __ Ret();
+
       __ bind(&with_write_barrier);

       __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -1678,6 +1715,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
         // In case of fast smi-only, convert to fast object, otherwise bail out.
         __ bind(&not_fast_object);
         __ CheckFastSmiElements(r3, r7, &call_builtin);
+
+        __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+        __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+        __ cmp(r7, ip);
+        __ b(eq, &call_builtin);
         // edx: receiver
         // r3: map
         Label try_holey_map;
@@ -2912,7 +2954,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
 Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
                                                 Handle<JSObject> holder,
-                                                int index,
+                                                PropertyIndex index,
                                                 Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
@@ -3101,7 +3143,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
 Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
                                                      Handle<JSObject> receiver,
                                                      Handle<JSObject> holder,
-                                                     int index) {
+                                                     PropertyIndex index) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3467,7 +3509,13 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
   // r1: constructor function
   // r2: initial map
   // r7: undefined
+  ASSERT(function->has_initial_map());
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+#ifdef DEBUG
+  int instance_size = function->initial_map()->instance_size();
+  __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
+  __ Check(eq, "Instance size of initial map changed.");
+#endif
   __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);

   // Allocated the JSObject, now initialize the fields. Map is set to initial
@@ -3525,7 +3573,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
   }

   // Fill the unused in-object property fields with undefined.
-  ASSERT(function->has_initial_map());
   for (int i = shared->this_property_assignments_count();
        i < function->initial_map()->inobject_properties();
        i++) {
@@ -3646,6 +3693,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
                                 Register scratch0,
                                 Register scratch1,
                                 DwVfpRegister double_scratch0,
+                                DwVfpRegister double_scratch1,
                                 Label* fail) {
   if (CpuFeatures::IsSupported(VFP2)) {
     CpuFeatures::Scope scope(VFP2);
@@ -3662,13 +3710,12 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
     __ sub(ip, key, Operand(kHeapObjectTag));
     __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
     __ EmitVFPTruncate(kRoundToZero,
-                       double_scratch0.low(),
-                       double_scratch0,
                        scratch0,
+                       double_scratch0,
                        scratch1,
+                       double_scratch1,
                        kCheckForInexactConversion);
     __ b(ne, fail);
-    __ vmov(scratch0, double_scratch0.low());
     __ TrySmiTag(scratch0, fail, scratch1);
     __ mov(key, scratch0);
     __ bind(&key_ok);
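The extra double_scratch1 register feeds the updated EmitVFPTruncate signature, but the semantics of the check are unchanged: a heap-number key is accepted only if truncating it to an integer loses nothing and the integer fits in a smi. The same predicate in scalar form (smi range shown for 32-bit V8, where smis carry 31 bits):

#include <cmath>
#include <cstdint>

// Accept a heap-number key only if it is exactly a smi-sized integer;
// otherwise the stub jumps to the miss handler.
bool KeyToSmi(double key, int32_t* smi_value) {
  double truncated = std::trunc(key);
  if (truncated != key) return false;  // Inexact conversion: miss.
  if (truncated < -1073741824.0 || truncated > 1073741823.0) {
    return false;  // Outside the 31-bit smi range: miss.
  }
  *smi_value = static_cast<int32_t>(truncated);
  return true;
}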
@@ -3696,7 +3743,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   // have been verified by the caller to not be a smi.

   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);

   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // r3: elements array
@@ -3787,36 +3834,42 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
     __ Ret();

     __ bind(&box_int);
-    // Allocate a HeapNumber for the result and perform int-to-double
-    // conversion. Don't touch r0 or r1 as they are needed if allocation
-    // fails.
-    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
-    // Now we can use r0 for the result as key is not needed any more.
-    __ mov(r0, r5);

     if (CpuFeatures::IsSupported(VFP2)) {
       CpuFeatures::Scope scope(VFP2);
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion. Don't touch r0 or r1 as they are needed if allocation
+      // fails.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+      // Now we can use r0 for the result as key is not needed any more.
+      __ add(r0, r5, Operand(kHeapObjectTag));
       __ vmov(s0, value);
       __ vcvt_f64_s32(d0, s0);
-      __ sub(r3, r0, Operand(kHeapObjectTag));
-      __ vstr(d0, r3, HeapNumber::kValueOffset);
+      __ vstr(d0, r5, HeapNumber::kValueOffset);
       __ Ret();
     } else {
-      Register dst1 = r1;
-      Register dst2 = r3;
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion. Don't touch r0 or r1 as they are needed if allocation
+      // fails.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
+      // Now we can use r0 for the result as key is not needed any more.
+      __ mov(r0, r5);
+      Register dst_mantissa = r1;
+      Register dst_exponent = r3;
       FloatingPointHelper::Destination dest =
           FloatingPointHelper::kCoreRegisters;
       FloatingPointHelper::ConvertIntToDouble(masm,
                                               value,
                                               dest,
                                               d0,
-                                              dst1,
-                                              dst2,
+                                              dst_mantissa,
+                                              dst_exponent,
                                               r9,
                                               s0);
-      __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-      __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+      __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+      __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
       __ Ret();
     }
   } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
@@ -3838,13 +3891,12 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
       // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
       // registers - also when jumping due to exhausted young space.
       __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
       __ vcvt_f64_u32(d0, s0);
-      __ sub(r1, r2, Operand(kHeapObjectTag));
-      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ vstr(d0, r2, HeapNumber::kValueOffset);

-      __ mov(r0, r2);
+      __ add(r0, r2, Operand(kHeapObjectTag));
       __ Ret();
     } else {
       // Check whether unsigned integer fits into smi.
@@ -3876,7 +3928,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
       // clobbers all registers - also when jumping due to exhausted young
       // space.
       __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
+      __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);

       __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
       __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -3893,19 +3945,18 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
       // AllocateHeapNumber clobbers all registers - also when jumping due to
       // exhausted young space.
       __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
       __ vcvt_f64_f32(d0, s0);
-      __ sub(r1, r2, Operand(kHeapObjectTag));
-      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ vstr(d0, r2, HeapNumber::kValueOffset);

-      __ mov(r0, r2);
+      __ add(r0, r2, Operand(kHeapObjectTag));
       __ Ret();
     } else {
       // Allocate a HeapNumber for the result. Don't use r0 and r1 as
       // AllocateHeapNumber clobbers all registers - also when jumping due to
       // exhausted young space.
       __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
+      __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);

       // VFP is not available, do manual single to double conversion.
       // r2: floating point value (binary32)
@@ -3961,18 +4012,17 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
       // AllocateHeapNumber clobbers all registers - also when jumping due to
       // exhausted young space.
       __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);

-      __ sub(r1, r2, Operand(kHeapObjectTag));
-      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ vstr(d0, r2, HeapNumber::kValueOffset);

-      __ mov(r0, r2);
+      __ add(r0, r2, Operand(kHeapObjectTag));
       __ Ret();
     } else {
       // Allocate a HeapNumber for the result. Don't use r0 and r1 as
       // AllocateHeapNumber clobbers all registers - also when jumping due to
       // exhausted young space.
       __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+      __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);

       __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
       __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
@@ -4030,7 +4080,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // have been verified by the caller to not be a smi.

   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);

   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -4088,7 +4138,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       }
       FloatingPointHelper::ConvertIntToDouble(
           masm, r5, destination,
-          d0, r6, r7,  // These are: double_dst, dst1, dst2.
+          d0, r6, r7,  // These are: double_dst, dst_mantissa, dst_exponent.
           r4, s2);  // These are: scratch2, single_scratch.
       if (destination == FloatingPointHelper::kVFPRegisters) {
         CpuFeatures::Scope scope(VFP2);
@@ -4147,7 +4197,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       // not include -kHeapObjectTag into it.
       __ sub(r5, value, Operand(kHeapObjectTag));
       __ vldr(d0, r5, HeapNumber::kValueOffset);
-      __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
+      __ EmitECMATruncate(r5, d0, d1, r6, r7, r9);

       switch (elements_kind) {
         case EXTERNAL_BYTE_ELEMENTS:
@@ -4365,7 +4415,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
   // have been verified by the caller to not be a smi.

   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);

   // Get the elements array.
   __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -4417,7 +4467,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
   // have been verified by the caller to not be a smi.

   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);

   // Get the elements array.
   __ ldr(elements_reg,
@@ -4439,7 +4489,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
   // Non-NaN. Allocate a new heap number and copy the double value into it.
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
-                        heap_number_map, &slow_allocate_heapnumber);
+                        heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);

   // Don't need to reload the upper 32 bits of the double, it's already in
   // scratch.
@@ -4493,7 +4543,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // have been verified by the caller to not be a smi.

   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);

   if (IsFastSmiElementsKind(elements_kind)) {
     __ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -4640,9 +4690,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   //  -- r1    : key
   //  -- r2    : receiver
   //  -- lr    : return address
-  //  -- r3    : scratch
+  //  -- r3    : scratch (elements backing store)
   //  -- r4    : scratch
   //  -- r5    : scratch
+  //  -- r6    : scratch
+  //  -- r7    : scratch
+  //  -- r9    : scratch
   // -----------------------------------
   Label miss_force_generic, transition_elements_kind, grow, slow;
   Label finish_store, check_capacity;
@@ -4655,13 +4708,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   Register scratch2 = r5;
   Register scratch3 = r6;
   Register scratch4 = r7;
+  Register scratch5 = r9;
   Register length_reg = r7;

   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.

   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);

   __ ldr(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4685,7 +4739,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   __ bind(&finish_store);
   __ StoreNumberToDoubleElements(value_reg,
                                  key_reg,
-                                 receiver_reg,
+                                 // All registers after this are overwritten.
                                  elements_reg,
                                  scratch1,
                                  scratch2,
@@ -4733,8 +4787,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
                           TAG_OBJECT);

-    // Initialize the new FixedDoubleArray. Leave elements unitialized for
-    // efficiency, they are guaranteed to be initialized before use.
+    // Initialize the new FixedDoubleArray.
     __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
     __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
     __ mov(scratch1,
@@ -4742,6 +4795,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     __ str(scratch1,
            FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));

+    __ mov(scratch1, elements_reg);
+    __ StoreNumberToDoubleElements(value_reg,
+                                   key_reg,
+                                   // All registers after this are overwritten.
+                                   scratch1,
+                                   scratch2,
+                                   scratch3,
+                                   scratch4,
+                                   scratch5,
+                                   &transition_elements_kind);
+
+    __ mov(scratch1, Operand(kHoleNanLower32));
+    __ mov(scratch2, Operand(kHoleNanUpper32));
+    for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
+      int offset = FixedDoubleArray::OffsetOfElementAt(i);
+      __ str(scratch1, FieldMemOperand(elements_reg, offset));
+      __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
+    }
+
     // Install the new backing store in the JSArray.
     __ str(elements_reg,
            FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
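The grow path no longer leaves the fresh FixedDoubleArray uninitialized: the new value is stored through StoreNumberToDoubleElements and every remaining preallocated slot is filled with the hole NaN, the reserved bit pattern that marks an absent double element. How that pattern is assembled from the two 32-bit halves (constant values as I recall them from this era's globals.h; treat the exact bits as illustrative):

#include <cstdint>
#include <cstring>

// Build the "hole" NaN a FixedDoubleArray uses for missing elements.
double MakeHoleNan() {
  const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;  // High word, as in the stub.
  const uint32_t kHoleNanLower32 = 0xFFFFFFFF;  // Low word.
  uint64_t bits = (static_cast<uint64_t>(kHoleNanUpper32) << 32) |
                  kHoleNanLower32;
  double hole;
  std::memcpy(&hole, &bits, sizeof(hole));  // A NaN distinct from the canonical one.
  return hole;
}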
@@ -4754,7 +4826,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
     __ ldr(elements_reg,
            FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ jmp(&finish_store);
+    __ Ret();

     __ bind(&check_capacity);
     // Make sure that the backing store can hold additional elements.

20
deps/v8/src/array.js

@@ -62,7 +62,7 @@ function GetSortedArrayKeys(array, intervals) {
       }
     }
   }
-  keys.sort(function(a, b) { return a - b; });
+  %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
   return keys;
 }
@@ -413,6 +413,7 @@ function ArrayJoin(separator) {
           ["Array.prototype.join"]);
   }
+  var length = TO_UINT32(this.length);
   if (IS_UNDEFINED(separator)) {
     separator = ',';
   } else if (!IS_STRING(separator)) {
@@ -422,7 +423,7 @@ function ArrayJoin(separator) {
   var result = %_FastAsciiArrayJoin(this, separator);
   if (!IS_UNDEFINED(result)) return result;

-  return Join(this, TO_UINT32(this.length), separator, ConvertToString);
+  return Join(this, length, separator, ConvertToString);
 }
@@ -441,8 +442,8 @@ function ArrayPop() {
   }
   n--;
   var value = this[n];
-  this.length = n;
   delete this[n];
+  this.length = n;
   return value;
 }
@@ -581,7 +582,7 @@ function ArrayShift() {
   var first = this[0];

-  if (IS_ARRAY(this)) {
+  if (IS_ARRAY(this) && !%IsObserved(this)) {
     SmartMove(this, 0, 1, len, 0);
   } else {
     SimpleMove(this, 0, 1, len, 0);
@@ -602,7 +603,7 @@ function ArrayUnshift(arg1) {  // length == 1
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();

-  if (IS_ARRAY(this)) {
+  if (IS_ARRAY(this) && !%IsObserved(this)) {
     SmartMove(this, 0, 0, len, num_arguments);
   } else {
     SimpleMove(this, 0, 0, len, num_arguments);
@@ -649,6 +650,7 @@ function ArraySlice(start, end) {
   if (end_i < start_i) return result;

   if (IS_ARRAY(this) &&
+      !%IsObserved(this) &&
       (end_i > 1000) &&
       (%EstimateNumberOfElements(this) < end_i)) {
     SmartSlice(this, start_i, end_i - start_i, len, result);
@@ -705,7 +707,9 @@ function ArraySplice(start, delete_count) {
   var use_simple_splice = true;

-  if (IS_ARRAY(this) && num_additional_args !== del_count) {
+  if (IS_ARRAY(this) &&
+      !%IsObserved(this) &&
+      num_additional_args !== del_count) {
     // If we are only deleting/moving a few things near the end of the
     // array then the simple version is going to be faster, because it
     // doesn't touch most of the array.
@@ -1549,9 +1553,11 @@ function SetUpArray() {
   // exposed to user code.
   // Adding only the functions that are actually used.
   SetUpLockedPrototype(InternalArray, $Array(), $Array(
+    "indexOf", getFunction("indexOf", ArrayIndexOf),
     "join", getFunction("join", ArrayJoin),
     "pop", getFunction("pop", ArrayPop),
-    "push", getFunction("push", ArrayPush)
+    "push", getFunction("push", ArrayPush),
+    "splice", getFunction("splice", ArraySplice)
   ));
 }

197
deps/v8/src/assembler.cc

@@ -103,15 +103,78 @@ static DoubleConstant double_constants;

 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";

+static bool math_exp_data_initialized = false;
+static Mutex* math_exp_data_mutex = NULL;
+static double* math_exp_constants_array = NULL;
+static double* math_exp_log_table_array = NULL;
+
 // -----------------------------------------------------------------------------
 // Implementation of AssemblerBase

-AssemblerBase::AssemblerBase(Isolate* isolate)
+AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
     : isolate_(isolate),
-      jit_cookie_(0) {
+      jit_cookie_(0),
+      emit_debug_code_(FLAG_debug_code),
+      predictable_code_size_(false) {
   if (FLAG_mask_constants_with_cookie && isolate != NULL) {
     jit_cookie_ = V8::RandomPrivate(isolate);
   }
+
+  if (buffer == NULL) {
+    // Do our own buffer management.
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (isolate->assembler_spare_buffer() != NULL) {
+        buffer = isolate->assembler_spare_buffer();
+        isolate->set_assembler_spare_buffer(NULL);
+      }
+    }
+    if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
+    own_buffer_ = true;
+  } else {
+    // Use externally provided buffer instead.
+    ASSERT(buffer_size > 0);
+    own_buffer_ = false;
+  }
+  buffer_ = static_cast<byte*>(buffer);
+  buffer_size_ = buffer_size;
+
+  pc_ = buffer_;
+}
+
+
+AssemblerBase::~AssemblerBase() {
+  if (own_buffer_) {
+    if (isolate() != NULL &&
+        isolate()->assembler_spare_buffer() == NULL &&
+        buffer_size_ == kMinimalBufferSize) {
+      isolate()->set_assembler_spare_buffer(buffer_);
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of PredictableCodeSizeScope
+
+PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
+                                                   int expected_size)
+    : assembler_(assembler),
+      expected_size_(expected_size),
+      start_offset_(assembler->pc_offset()),
+      old_value_(assembler->predictable_code_size()) {
+  assembler_->set_predictable_code_size(true);
+}
+
+
+PredictableCodeSizeScope::~PredictableCodeSizeScope() {
+  // TODO(svenpanne) Remove the 'if' when everything works.
+  if (expected_size_ >= 0) {
+    CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
+  }
+  assembler_->set_predictable_code_size(old_value_);
 }
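PredictableCodeSizeScope is an RAII guard: it records pc_offset() on entry, flips the predictable-code-size bit, and on destruction verifies that exactly expected_size bytes were emitted, which keeps snapshot and runtime code layouts in lockstep. A self-contained miniature of the same pattern (toy assembler, not the V8 classes):

#include <cassert>

class MiniAssembler {
 public:
  int pc_offset() const { return pc_offset_; }
  void EmitInstruction() { pc_offset_ += 4; }  // Fixed-width, ARM-style.
 private:
  int pc_offset_ = 0;
};

// RAII check that a code region has exactly the promised size.
class MiniPredictableCodeSizeScope {
 public:
  MiniPredictableCodeSizeScope(MiniAssembler* assembler, int expected_size)
      : assembler_(assembler),
        expected_size_(expected_size),
        start_offset_(assembler->pc_offset()) {}
  ~MiniPredictableCodeSizeScope() {
    // Mirrors the CHECK_EQ in the real destructor.
    assert(assembler_->pc_offset() - start_offset_ == expected_size_);
  }
 private:
  MiniAssembler* assembler_;
  int expected_size_;
  int start_offset_;
};

int main() {
  MiniAssembler masm;
  {
    MiniPredictableCodeSizeScope scope(&masm, 8);  // Promise: 8 bytes.
    masm.EmitInstruction();
    masm.EmitInstruction();
  }  // Destructor verifies 8 == 8.
  return 0;
}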
@@ -313,6 +376,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
 #ifdef DEBUG
   byte* begin_pos = pos_;
 #endif
+  ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
   ASSERT(rinfo->pc() - last_pc_ >= 0);
   ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
          <= kMaxStandardNonCompactModes);
@@ -570,6 +634,15 @@ void RelocIterator::next() {
       }
     }
   }
+  if (code_age_sequence_ != NULL) {
+    byte* old_code_age_sequence = code_age_sequence_;
+    code_age_sequence_ = NULL;
+    if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
+      rinfo_.data_ = 0;
+      rinfo_.pc_ = old_code_age_sequence;
+      return;
+    }
+  }
   done_ = true;
 }
@@ -585,6 +658,12 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
   mode_mask_ = mode_mask;
   last_id_ = 0;
   last_position_ = 0;
+  byte* sequence = code->FindCodeAgeSequence();
+  if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
+    code_age_sequence_ = sequence;
+  } else {
+    code_age_sequence_ = NULL;
+  }
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -600,6 +679,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
   mode_mask_ = mode_mask;
   last_id_ = 0;
   last_position_ = 0;
+  code_age_sequence_ = NULL;
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -652,6 +732,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
       UNREACHABLE();
 #endif
       return "debug break slot";
+    case RelocInfo::CODE_AGE_SEQUENCE:
+      return "code_age_sequence";
     case RelocInfo::NUMBER_OF_MODES:
       UNREACHABLE();
       return "number_of_modes";
@@ -697,7 +779,7 @@ void RelocInfo::Print(FILE* out) {
 #endif  // ENABLE_DISASSEMBLER

-#ifdef DEBUG
+#ifdef VERIFY_HEAP
 void RelocInfo::Verify() {
   switch (rmode_) {
     case EMBEDDED_OBJECT:
@@ -717,12 +799,12 @@ void RelocInfo::Verify() {
     case CODE_TARGET: {
       // convert inline target address to code object
       Address addr = target_address();
-      ASSERT(addr != NULL);
+      CHECK(addr != NULL);
       // Check that we can find the right code object.
       Code* code = Code::GetCodeFromTargetAddress(addr);
       Object* found = HEAP->FindCodeObject(addr);
-      ASSERT(found->IsCode());
-      ASSERT(code->address() == HeapObject::cast(found)->address());
+      CHECK(found->IsCode());
+      CHECK(code->address() == HeapObject::cast(found)->address());
       break;
     }
     case RUNTIME_ENTRY:
@@ -739,9 +821,12 @@ void RelocInfo::Verify() {
     case NUMBER_OF_MODES:
       UNREACHABLE();
       break;
+    case CODE_AGE_SEQUENCE:
+      ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
+      break;
   }
 }
-#endif  // DEBUG
+#endif  // VERIFY_HEAP


 // -----------------------------------------------------------------------------
@@ -756,6 +841,70 @@ void ExternalReference::SetUp() {
   double_constants.canonical_non_hole_nan = OS::nan_value();
   double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
   double_constants.negative_infinity = -V8_INFINITY;
+
+  math_exp_data_mutex = OS::CreateMutex();
+}
+
+
+void ExternalReference::InitializeMathExpData() {
+  // Early return?
+  if (math_exp_data_initialized) return;
+
+  math_exp_data_mutex->Lock();
+  if (!math_exp_data_initialized) {
+    // If this is changed, generated code must be adapted too.
+    const int kTableSizeBits = 11;
+    const int kTableSize = 1 << kTableSizeBits;
+    const double kTableSizeDouble = static_cast<double>(kTableSize);
+
+    math_exp_constants_array = new double[9];
+    // Input values smaller than this always return 0.
+    math_exp_constants_array[0] = -708.39641853226408;
+    // Input values larger than this always return +Infinity.
+    math_exp_constants_array[1] = 709.78271289338397;
+    math_exp_constants_array[2] = V8_INFINITY;
+    // The rest is black magic. Do not attempt to understand it. It is
+    // loosely based on the "expd" function published at:
+    // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
+    const double constant3 = (1 << kTableSizeBits) / log(2.0);
+    math_exp_constants_array[3] = constant3;
+    math_exp_constants_array[4] =
+        static_cast<double>(static_cast<int64_t>(3) << 51);
+    math_exp_constants_array[5] = 1 / constant3;
+    math_exp_constants_array[6] = 3.0000000027955394;
+    math_exp_constants_array[7] = 0.16666666685227835;
+    math_exp_constants_array[8] = 1;
+
+    math_exp_log_table_array = new double[kTableSize];
+    for (int i = 0; i < kTableSize; i++) {
+      double value = pow(2, i / kTableSizeDouble);
+
+      uint64_t bits = BitCast<uint64_t, double>(value);
+      bits &= (static_cast<uint64_t>(1) << 52) - 1;
+      double mantissa = BitCast<double, uint64_t>(bits);
+
+      // <just testing>
+      uint64_t doublebits;
+      memcpy(&doublebits, &value, sizeof doublebits);
+      doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
+      double mantissa2;
+      memcpy(&mantissa2, &doublebits, sizeof mantissa2);
+      CHECK_EQ(mantissa, mantissa2);
+      // </just testing>
+
+      math_exp_log_table_array[i] = mantissa;
+    }
+
+    math_exp_data_initialized = true;
+  }
+  math_exp_data_mutex->Unlock();
+}
+
+
+void ExternalReference::TearDownMathExpData() {
+  delete[] math_exp_constants_array;
+  delete[] math_exp_log_table_array;
+  delete math_exp_data_mutex;
 }
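The table stores only the 52 mantissa bits of 2^(i/2048), so generated code can rebuild 2^(n + j/2048) by OR-ing an exponent field into the looked-up pattern; the remaining constants bound the input range and feed a short polynomial for the residual factor. The table construction below copies the loop from the diff, and the lookup shows the bit-assembly step in scalar form (the polynomial part of the real algorithm is omitted; names are mine):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static const int kTableSize = 2048;  // 1 << 11, as in the diff.
static double table[kTableSize];

void BuildTable() {
  for (int i = 0; i < kTableSize; i++) {
    double value = std::pow(2.0, i / static_cast<double>(kTableSize));
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits &= (static_cast<uint64_t>(1) << 52) - 1;  // Keep mantissa bits only.
    std::memcpy(&table[i], &bits, sizeof(bits));
  }
}

// Rebuild 2^(n + j/2048) from the table: install the biased exponent
// above the stored mantissa bits.
double PowerOfTwo(int n, int j) {
  uint64_t bits;
  std::memcpy(&bits, &table[j], sizeof(bits));
  bits |= static_cast<uint64_t>(n + 1023) << 52;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  BuildTable();
  std::printf("%.17g vs %.17g\n", PowerOfTwo(3, 1024), std::pow(2.0, 3.5));
  return 0;
}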
@@ -874,6 +1023,13 @@ ExternalReference ExternalReference::get_date_field_function(
 }


+ExternalReference ExternalReference::get_make_code_young_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
+}
+
+
 ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
   return ExternalReference(isolate->date_cache()->stamp_address());
 }
@@ -900,6 +1056,20 @@ ExternalReference ExternalReference::compute_output_frames_function(
 }


+ExternalReference ExternalReference::log_enter_external_function(
+    Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
+}
+
+
+ExternalReference ExternalReference::log_leave_external_function(
+    Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
+}
+
+
 ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
   return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
 }
@@ -1186,6 +1356,19 @@ ExternalReference ExternalReference::math_log_double_function(
 }


+ExternalReference ExternalReference::math_exp_constants(int constant_index) {
+  ASSERT(math_exp_data_initialized);
+  return ExternalReference(
+      reinterpret_cast<void*>(math_exp_constants_array + constant_index));
+}
+
+
+ExternalReference ExternalReference::math_exp_log_table() {
+  ASSERT(math_exp_data_initialized);
+  return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
+}
+
+
 ExternalReference ExternalReference::page_flags(Page* page) {
   return ExternalReference(reinterpret_cast<Address>(page) +
                            MemoryChunk::kFlagsOffset);

86
deps/v8/src/assembler.h

@@ -56,18 +56,56 @@ struct StatsCounter;

 class AssemblerBase: public Malloced {
  public:
-  explicit AssemblerBase(Isolate* isolate);
+  AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
+  virtual ~AssemblerBase();

   Isolate* isolate() const { return isolate_; }
-  int jit_cookie() { return jit_cookie_; }
+  int jit_cookie() const { return jit_cookie_; }
+
+  bool emit_debug_code() const { return emit_debug_code_; }
+  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+  bool predictable_code_size() const { return predictable_code_size_; }
+  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

   // Overwrite a host NaN with a quiet target NaN.  Used by mksnapshot for
   // cross-snapshotting.
   static void QuietNaN(HeapObject* nan) { }

+  int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+
+  static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+  // The buffer into which code and relocation info are generated. It could
+  // either be owned by the assembler or be provided externally.
+  byte* buffer_;
+  int buffer_size_;
+  bool own_buffer_;
+
+  // The program counter, which points into the buffer above and moves forward.
+  byte* pc_;
+
  private:
   Isolate* isolate_;
   int jit_cookie_;
+  bool emit_debug_code_;
+  bool predictable_code_size_;
+};
+
+
+// Avoids using instructions that vary in size in unpredictable ways between the
+// snapshot and the running VM.
+class PredictableCodeSizeScope {
+ public:
+  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
+  ~PredictableCodeSizeScope();
+
+ private:
+  AssemblerBase* assembler_;
+  int expected_size_;
+  int start_offset_;
+  bool old_value_;
 };
@@ -211,6 +249,12 @@ class RelocInfo BASE_EMBEDDED {
     // Pseudo-types
     NUMBER_OF_MODES,  // There are at most 15 modes with noncompact encoding.
     NONE,  // never recorded
+    CODE_AGE_SEQUENCE,  // Not stored in RelocInfo array, used explictly by
+                        // code aging.
+
+    FIRST_REAL_RELOC_MODE = CODE_TARGET,
+    LAST_REAL_RELOC_MODE = CONST_POOL,
+    FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
+    LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
     LAST_CODE_ENUM = DEBUG_BREAK,
     LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
     // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
@@ -225,6 +269,15 @@ class RelocInfo BASE_EMBEDDED {
       : pc_(pc), rmode_(rmode), data_(data), host_(host) {
   }

+  static inline bool IsRealRelocMode(Mode mode) {
+    return mode >= FIRST_REAL_RELOC_MODE &&
+        mode <= LAST_REAL_RELOC_MODE;
+  }
+  static inline bool IsPseudoRelocMode(Mode mode) {
+    ASSERT(!IsRealRelocMode(mode));
+    return mode >= FIRST_PSEUDO_RELOC_MODE &&
+        mode <= LAST_PSEUDO_RELOC_MODE;
+  }
   static inline bool IsConstructCall(Mode mode) {
     return mode == CONSTRUCT_CALL;
   }
@@ -262,6 +315,9 @@ class RelocInfo BASE_EMBEDDED {
   static inline bool IsDebugBreakSlot(Mode mode) {
     return mode == DEBUG_BREAK_SLOT;
   }
+  static inline bool IsCodeAgeSequence(Mode mode) {
+    return mode == CODE_AGE_SEQUENCE;
+  }
   static inline int ModeMask(Mode mode) { return 1 << mode; }

   // Accessors
@@ -294,7 +350,8 @@ class RelocInfo BASE_EMBEDDED {
   INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
   INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
                               WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+  INLINE(Code* code_age_stub());
+  INLINE(void set_code_age_stub(Code* stub));

   // Read the address of the word containing the target_address in an
   // instruction stream. What this means exactly is architecture-independent.
@@ -349,8 +406,7 @@ class RelocInfo BASE_EMBEDDED {
   static const char* RelocModeName(Mode rmode);
   void Print(FILE* out);
 #endif  // ENABLE_DISASSEMBLER
-#ifdef DEBUG
-  // Debugging
+#ifdef VERIFY_HEAP
   void Verify();
 #endif
@@ -369,19 +425,17 @@ class RelocInfo BASE_EMBEDDED {
   Mode rmode_;
   intptr_t data_;
   Code* host_;
-#ifdef V8_TARGET_ARCH_MIPS
-  // Code and Embedded Object pointers in mips are stored split
+  // Code and Embedded Object pointers on some platforms are stored split
   // across two consecutive 32-bit instructions. Heap management
   // routines expect to access these pointers indirectly. The following
-  // location provides a place for these pointers to exist natually
+  // location provides a place for these pointers to exist naturally
   // when accessed via the Iterator.
   Object* reconstructed_obj_ptr_;
   // External-reference pointers are also split across instruction-pairs
-  // in mips, but are accessed via indirect pointers. This location
+  // on some platforms, but are accessed via indirect pointers. This location
   // provides a place for that pointer to exist naturally. Its address
   // is returned by RelocInfo::target_reference_address().
   Address reconstructed_adr_ptr_;
-#endif  // V8_TARGET_ARCH_MIPS
   friend class RelocIterator;
 };
@@ -490,6 +544,7 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@@ -549,6 +604,8 @@ class ExternalReference BASE_EMBEDDED {
};
static void SetUp();
static void InitializeMathExpData();
static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(void* original, Type type);
@@ -598,10 +655,16 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
@@ -668,6 +731,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
static ExternalReference math_exp_constants(int constant_index);
static ExternalReference math_exp_log_table();
static ExternalReference page_flags(Page* page);
Address address() const {return reinterpret_cast<Address>(address_);}
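For context on how the mode-mask plumbing above is consumed: V8 walks relocation entries with a RelocIterator constructed from a bit mask of modes. A minimal standalone sketch of that filtering pattern (simplified Mode enum; names chosen for illustration, not V8's full set):

#include <cstdio>

enum Mode { CODE_TARGET, EMBEDDED_OBJECT, CODE_AGE_SEQUENCE, NUMBER_OF_MODES };

static int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  // Visit only code targets and embedded objects; everything else is skipped,
  // mirroring how a RelocIterator built from a mode mask filters entries.
  int mask = ModeMask(CODE_TARGET) | ModeMask(EMBEDDED_OBJECT);
  for (int m = 0; m < NUMBER_OF_MODES; ++m) {
    if (mask & ModeMask(static_cast<Mode>(m))) std::printf("visit mode %d\n", m);
  }
  return 0;
}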

22
deps/v8/src/ast.cc

@@ -103,6 +103,7 @@ VariableProxy::VariableProxy(Isolate* isolate,
void VariableProxy::BindTo(Variable* var) {
ASSERT(var_ == NULL); // must be bound only once
ASSERT(var != NULL); // must bind
ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
@@ -126,8 +127,6 @@ Assignment::Assignment(Isolate* isolate,
pos_(pos),
binary_operation_(NULL),
assignment_id_(GetNextId(isolate)),
-block_start_(false),
-block_end_(false),
is_monomorphic_(false) { }
@@ -478,6 +477,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
if (info.IsUninitialized()) info = TypeInfo::Unknown();
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsSymbol()) {
@@ -606,18 +606,6 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-TypeInfo info = oracle->CompareType(this);
-if (info.IsSmi()) {
-compare_type_ = SMI_ONLY;
-} else if (info.IsNonPrimitive()) {
-compare_type_ = OBJECT_ONLY;
-} else {
-ASSERT(compare_type_ == NONE);
-}
-}
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
? oracle->GetObjectLiteralStoreMap(this)
@@ -1072,16 +1060,14 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
-// We currently do not optimize any modules. Note in particular, that module
-// instance objects associated with ModuleLiterals are allocated during
-// scope resolution, and references to them are embedded into the code.
-// That code may hence neither be cached nor re-compiled.
// We currently do not optimize any modules.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)

50
deps/v8/src/ast.h

@@ -75,6 +75,7 @@ namespace internal {
#define STATEMENT_NODE_LIST(V) \
V(Block) \
V(ModuleStatement) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(IfStatement) \
@@ -522,7 +523,7 @@ class ModuleDeclaration: public Declaration {
ModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
-: Declaration(proxy, LET, scope),
: Declaration(proxy, MODULE, scope),
module_(module) {
}
@@ -645,6 +646,25 @@ class ModuleUrl: public Module {
};
class ModuleStatement: public Statement {
public:
DECLARE_NODE_TYPE(ModuleStatement)
VariableProxy* proxy() const { return proxy_; }
Block* body() const { return body_; }
protected:
ModuleStatement(VariableProxy* proxy, Block* body)
: proxy_(proxy),
body_(body) {
}
private:
VariableProxy* proxy_;
Block* body_;
};
class IterationStatement: public BreakableStatement {
public:
// Type testing & conversion.
@@ -1417,7 +1437,7 @@ class VariableProxy: public Expression {
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
-// Bind this proxy to the variable var.
// Bind this proxy to the variable var. Interfaces must match.
void BindTo(Variable* var);
protected:
@@ -1777,9 +1797,6 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
-void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
-bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -1796,8 +1813,7 @@ class CompareOperation: public Expression {
op_(op),
left_(left),
right_(right),
-pos_(pos),
-compare_type_(NONE) {
pos_(pos) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1806,9 +1822,6 @@ class CompareOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
-enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
-CompareTypeFeedback compare_type_;
};
@@ -1870,15 +1883,6 @@ class Assignment: public Expression {
// This check relies on the definition order of token in token.h.
bool is_compound() const { return op() > Token::ASSIGN; }
-// An initialization block is a series of statments of the form
-// x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
-// ending of these blocks to allow for optimizations of initialization
-// blocks.
-bool starts_initialization_block() { return block_start_; }
-bool ends_initialization_block() { return block_end_; }
-void mark_block_start() { block_start_ = true; }
-void mark_block_end() { block_end_ = true; }
BailoutId AssignmentId() const { return assignment_id_; }
// Type feedback information.
@@ -1911,9 +1915,6 @@ class Assignment: public Expression {
BinaryOperation* binary_operation_;
const BailoutId assignment_id_;
-bool block_start_;
-bool block_end_;
bool is_monomorphic_;
SmallMapList receiver_types_;
};
@@ -2659,6 +2660,11 @@ class AstNodeFactory BASE_EMBEDDED {
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
VISIT_AND_RETURN(ModuleStatement, stmt)
}
ExpressionStatement* NewExpressionStatement(Expression* expression) {
ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
VISIT_AND_RETURN(ExpressionStatement, stmt)
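The NewModuleStatement factory added above follows the same shape as the surrounding methods: allocate the node in the AST zone, run the visitor over it, return it (that is what VISIT_AND_RETURN expands to). A standalone sketch of that pattern with stand-in types, not V8's real classes:

#include <memory>
#include <vector>

struct Node { virtual ~Node() = default; };
struct ModuleStatement : Node { /* proxy and body elided */ };

class Factory {
 public:
  ModuleStatement* NewModuleStatement() {
    auto stmt = std::make_unique<ModuleStatement>();
    ModuleStatement* raw = stmt.get();
    zone_.push_back(std::move(stmt));  // stand-in for zone allocation
    Visit(raw);                        // stand-in for VISIT_AND_RETURN
    return raw;
  }
 private:
  void Visit(Node*) { /* e.g. assign IDs, record feedback slots */ }
  std::vector<std::unique_ptr<Node>> zone_;  // nodes die with the factory
};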

8
deps/v8/src/atomicops.h

@@ -69,7 +69,11 @@ typedef intptr_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#if defined(__OpenBSD__) && defined(__i386__)
typedef Atomic32 AtomicWord;
#else
typedef intptr_t AtomicWord;
#endif
// Atomically execute:
// result = *ptr;
@@ -147,7 +151,9 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} } // namespace v8::internal
// Include our platform specific implementation.
-#if defined(_MSC_VER) && \
#if defined(THREAD_SANITIZER)
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
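The OpenBSD/i386 special case above exists because intptr_t there is 'long': 32 bits wide, but a distinct C++ type from the 'int' underlying Atomic32, so overloads taking Atomic32* would not accept a pointer to such an AtomicWord. A minimal illustration of the pitfall (assuming an ILP32 target where long and int are both 32 bits):

#include <cstdint>

using Atomic32 = int32_t;  // 'int' on ILP32 targets

void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; }

int main() {
  long word = 0;  // same width as Atomic32 on ILP32, but a different type
  // NoBarrier_Store(&word, 1);  // would not compile: long* != Atomic32*
  NoBarrier_Store(reinterpret_cast<volatile Atomic32*>(&word), 1);
  return static_cast<int>(word);
}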

335
deps/v8/src/atomicops_internals_tsan.h

@@ -0,0 +1,335 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_
// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
bool has_sse2; // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
typedef enum {
__tsan_memory_order_relaxed = (1 << 0) + 100500,
__tsan_memory_order_consume = (1 << 1) + 100500,
__tsan_memory_order_acquire = (1 << 2) + 100500,
__tsan_memory_order_release = (1 << 3) + 100500,
__tsan_memory_order_acq_rel = (1 << 4) + 100500,
__tsan_memory_order_seq_cst = (1 << 5) + 100500,
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed);
return cmp;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire);
return cmp;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release);
return cmp;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed);
return cmp;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire);
return cmp;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release);
return cmp;
}
inline void MemoryBarrier() {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
} // namespace internal
} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
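All of these shims exist so that, under compiler-based ThreadSanitizer, V8's atomics map onto __tsan_* intrinsics the tool understands. The ordering contract they implement is the usual acquire/release publication pairing; a self-contained sketch using std::atomic as a stand-in for the primitives above:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> payload{0};
std::atomic<int> ready{0};

void Producer() {
  payload.store(42, std::memory_order_relaxed);
  ready.store(1, std::memory_order_release);  // cf. Release_Store above
}

void Consumer() {
  while (ready.load(std::memory_order_acquire) == 0) {}  // cf. Acquire_Load
  // The release/acquire pair guarantees the payload write is visible here.
  assert(payload.load(std::memory_order_relaxed) == 42);
}

int main() {
  std::thread t1(Producer), t2(Consumer);
  t1.join();
  t2.join();
  return 0;
}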

69
deps/v8/src/bootstrapper.cc

@@ -384,7 +384,7 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
void Genesis::SetFunctionInstanceDescriptor(
Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
-Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
DescriptorArray::WhitenessWitness witness(*descriptors);
Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
@@ -397,7 +397,7 @@ void Genesis::SetFunctionInstanceDescriptor(
}
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
-Map::SetDescriptors(map, descriptors);
map->set_instance_descriptors(*descriptors);
{ // Add length.
CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
@@ -525,7 +525,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
void Genesis::SetStrictFunctionInstanceDescriptor(
Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
-Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
DescriptorArray::WhitenessWitness witness(*descriptors);
Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
@@ -538,7 +538,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
}
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
-Map::SetDescriptors(map, descriptors);
map->set_instance_descriptors(*descriptors);
{ // Add length.
CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
@@ -637,7 +637,7 @@ static void SetAccessors(Handle<Map> map,
Handle<String> name,
Handle<JSFunction> func) {
DescriptorArray* descs = map->instance_descriptors();
-int number = descs->Search(*name);
int number = descs->SearchWithCache(*name, *map);
AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number));
accessors->set_getter(*func);
accessors->set_setter(*func);
@@ -868,13 +868,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
array_function->shared()->set_length(1);
Handle<Map> initial_map(array_function->initial_map());
-Handle<DescriptorArray> array_descriptors(factory->NewDescriptorArray(1));
Handle<DescriptorArray> array_descriptors(
factory->NewDescriptorArray(0, 1));
DescriptorArray::WhitenessWitness witness(*array_descriptors);
Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
-Map::SetDescriptors(initial_map, array_descriptors);
initial_map->set_instance_descriptors(*array_descriptors);
{ // Add length.
CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs);
@@ -915,14 +916,15 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Map> string_map =
Handle<Map>(native_context()->string_function()->initial_map());
-Handle<DescriptorArray> string_descriptors(factory->NewDescriptorArray(1));
Handle<DescriptorArray> string_descriptors(
factory->NewDescriptorArray(0, 1));
DescriptorArray::WhitenessWitness witness(*string_descriptors);
Handle<Foreign> string_length(
factory->NewForeign(&Accessors::StringLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
-Map::SetDescriptors(string_map, string_descriptors);
string_map->set_instance_descriptors(*string_descriptors);
{ // Add length.
CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs);
@@ -956,9 +958,9 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 5);
DescriptorArray::WhitenessWitness witness(*descriptors);
-Map::SetDescriptors(initial_map, descriptors);
initial_map->set_instance_descriptors(*descriptors);
{
// ECMA-262, section 15.10.7.1.
@@ -1082,11 +1084,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
LookupResult lookup(isolate);
result->LocalLookup(heap->callee_symbol(), &lookup);
ASSERT(lookup.IsField());
-ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex);
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsField());
-ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1140,9 +1142,9 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
Heap::kArgumentsObjectSizeStrict);
// Create the descriptor array for the arguments object.
-Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*descriptors);
-Map::SetDescriptors(map, descriptors);
map->set_instance_descriptors(*descriptors);
{ // length
FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
@@ -1184,7 +1186,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
LookupResult lookup(isolate);
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsField());
-ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1238,8 +1240,9 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the out of memory slot.
native_context()->set_out_of_memory(heap->false_value());
-// Initialize the data slot.
-native_context()->set_data(heap->undefined_value());
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
native_context()->set_embedder_data(*embedder_data);
{
// Initialize the random seed slot.
@@ -1338,7 +1341,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// If we can't find the function in the cache, we compile a new
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
-ASSERT(source->IsAsciiRepresentation());
ASSERT(source->IsOneByteRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
@@ -1413,6 +1416,11 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
if (FLAG_harmony_observation) {
INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
observers_deliver_changes);
}
}
#undef INSTALL_NATIVE
@@ -1487,7 +1495,7 @@ bool Genesis::InstallNatives() {
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
Handle<DescriptorArray> script_descriptors(
-factory()->NewDescriptorArray(13));
factory()->NewDescriptorArray(0, 13));
DescriptorArray::WhitenessWitness witness(*script_descriptors);
Handle<Foreign> script_source(
factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName)); factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
PropertyAttributes attribs = PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY); static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
Map::SetDescriptors(script_map, script_descriptors); script_map->set_instance_descriptors(*script_descriptors);
{ {
CallbacksDescriptor d( CallbacksDescriptor d(
@@ -1665,14 +1673,15 @@ bool Genesis::InstallNatives() {
// Make "length" magic on instances.
Handle<Map> initial_map(array_function->initial_map());
-Handle<DescriptorArray> array_descriptors(factory()->NewDescriptorArray(1));
Handle<DescriptorArray> array_descriptors(
factory()->NewDescriptorArray(0, 1));
DescriptorArray::WhitenessWitness witness(*array_descriptors);
Handle<Foreign> array_length(factory()->NewForeign(
&Accessors::ArrayLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
-Map::SetDescriptors(initial_map, array_descriptors);
initial_map->set_instance_descriptors(*array_descriptors);
{ // Add length.
CallbacksDescriptor d(
@@ -1765,16 +1774,17 @@ bool Genesis::InstallNatives() {
// Update map with length accessor from Array and add "index" and "input".
Handle<DescriptorArray> reresult_descriptors =
-factory()->NewDescriptorArray(3);
factory()->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
-Map::SetDescriptors(initial_map, reresult_descriptors);
initial_map->set_instance_descriptors(*reresult_descriptors);
{
JSFunction* array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
array_function->initial_map()->instance_descriptors());
String* length = heap()->length_symbol();
-int old = array_descriptors->SearchWithCache(length);
int old = array_descriptors->SearchWithCache(
length, array_function->initial_map());
ASSERT(old != DescriptorArray::kNotFound);
CallbacksDescriptor desc(length,
array_descriptors->GetValue(old),
@@ -1802,7 +1812,7 @@ bool Genesis::InstallNatives() {
native_context()->set_regexp_result_map(*initial_map);
}
-#ifdef DEBUG
#ifdef VERIFY_HEAP
builtins->Verify();
#endif
@@ -1824,6 +1834,11 @@ bool Genesis::InstallExperimentalNatives() {
"native collection.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_observation &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
InstallExperimentalNativeFunctions();

2
deps/v8/src/bootstrapper.h

@@ -54,7 +54,7 @@ class SourceCodeCache BASE_EMBEDDED {
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
-SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
if (str->IsEqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));

704
deps/v8/src/builtins.cc

File diff suppressed because it is too large

31
deps/v8/src/builtins.h

@@ -38,6 +38,25 @@ enum BuiltinExtraArguments {
};
#define CODE_AGE_LIST_WITH_ARG(V, A) \
V(Quadragenarian, A) \
V(Quinquagenarian, A) \
V(Sexagenarian, A) \
V(Septuagenarian, A) \
V(Octogenarian, A)
#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState) \
V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState)
// Define list of builtins implemented in C++.
#define BUILTIN_LIST_C(V) \
V(Illegal, NO_EXTRA_ARGUMENTS) \
@@ -195,8 +214,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
-Code::kNoExtraICState)
Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -379,6 +398,14 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
static void Generate_Make##C##CodeYoungAgainOddMarking( \
MacroAssembler* masm);
CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
static void InitBuiltinFunctionTable();
bool initialized_;
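CODE_AGE_LIST above is an X-macro: the list of ages is written once and expanded several times with different per-entry macros (enum values, builtin declarations, generator declarations). A compilable sketch of the same trick, using two of the age names from the list:

#include <cstdio>

#define AGE_LIST(V) \
  V(Quadragenarian) \
  V(Quinquagenarian)

// Expand the list once into an enum...
#define DECLARE_ENUM(name) k##name,
enum CodeAge { AGE_LIST(DECLARE_ENUM) kAgeCount };
#undef DECLARE_ENUM

// ...and once into a name table, guaranteed to stay in sync with the enum.
#define DECLARE_NAME(name) #name,
static const char* kAgeNames[] = { AGE_LIST(DECLARE_NAME) };
#undef DECLARE_NAME

int main() {
  for (int i = 0; i < kAgeCount; ++i) std::printf("%d: %s\n", i, kAgeNames[i]);
  return 0;
}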

185
deps/v8/src/code-stubs.cc

@@ -37,11 +37,11 @@
namespace v8 {
namespace internal {
-bool CodeStub::FindCodeInCache(Code** code_out) {
-Heap* heap = Isolate::Current()->heap();
-int index = heap->code_stubs()->FindEntry(GetKey());
bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
int index = stubs->FindEntry(GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
-*code_out = Code::cast(heap->code_stubs()->ValueAt(index));
*code_out = Code::cast(stubs->ValueAt(index));
return true;
}
return false;
@@ -93,8 +93,8 @@ Handle<Code> CodeStub::GetCode() {
Heap* heap = isolate->heap();
Code* code;
if (UseSpecialCache()
-? FindCodeInSpecialCache(&code)
-: FindCodeInCache(&code)) {
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
return Handle<Code>(code);
}
@@ -142,7 +142,9 @@ Handle<Code> CodeStub::GetCode() {
}
Activate(code);
-ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
ASSERT(!NeedsImmovableCode() ||
heap->lo_space()->Contains(code) ||
heap->code_space()->FirstPage()->Contains(code->address()));
return Handle<Code>(code, isolate);
}
@@ -167,6 +169,122 @@ void CodeStub::PrintName(StringStream* stream) {
}
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
AllowStubCallsScope allow_stub_calls(masm, true);
BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
// The OddballStub handles a number and an oddball, not two oddballs.
operands_type = BinaryOpIC::GENERIC;
}
switch (operands_type) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
case BinaryOpIC::INT32:
GenerateInt32Stub(masm);
break;
case BinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
UNREACHABLE();
}
}
#define __ ACCESS_MASM(masm)
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
#undef __
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
stream->Add("BinaryOpStub_%s_%s_%s+%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(left_type_),
BinaryOpIC::GetName(right_type_));
}
void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
GenerateBothStringStub(masm);
return;
}
// Try to add arguments as strings, otherwise, transition to the generic
// BinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
@@ -179,8 +297,7 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
}
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
-Isolate* isolate = known_map_->GetIsolate();
bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
Factory* factory = isolate->factory();
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
@@ -194,7 +311,12 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
-ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
#ifdef DEBUG
Token::Value cached_op;
ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
&cached_op);
ASSERT(op_ == cached_op);
#endif
return true;
}
return false;
@@ -202,7 +324,33 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
int ICCompareStub::MinorKey() {
-return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
return OpField::encode(op_ - Token::EQ) |
LeftStateField::encode(left_) |
RightStateField::encode(right_) |
HandlerStateField::encode(state_);
}
void ICCompareStub::DecodeMinorKey(int minor_key,
CompareIC::State* left_state,
CompareIC::State* right_state,
CompareIC::State* handler_state,
Token::Value* op) {
if (left_state) {
*left_state =
static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
}
if (right_state) {
*right_state =
static_cast<CompareIC::State>(RightStateField::decode(minor_key));
}
if (handler_state) {
*handler_state =
static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
}
if (op) {
*op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
}
}
}
@@ -211,27 +359,28 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
-case CompareIC::SMIS:
case CompareIC::SMI:
GenerateSmis(masm);
break;
-case CompareIC::HEAP_NUMBERS:
case CompareIC::HEAP_NUMBER:
GenerateHeapNumbers(masm);
break;
-case CompareIC::STRINGS:
case CompareIC::STRING:
GenerateStrings(masm);
break;
-case CompareIC::SYMBOLS:
case CompareIC::SYMBOL:
GenerateSymbols(masm);
break;
-case CompareIC::OBJECTS:
case CompareIC::OBJECT:
GenerateObjects(masm);
break;
case CompareIC::KNOWN_OBJECTS:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
-default:
-UNREACHABLE();
case CompareIC::GENERIC:
GenerateGeneric(masm);
break;
}
}
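MinorKey and DecodeMinorKey above round-trip four small fields through one integer. A standalone sketch of that packing, with the bit positions from this patch (op in bits 0-2, left state in 3-5, right state in 6-8, handler state in 9-11) and plain shifts in place of the BitField helpers:

#include <cassert>

static int Encode(int op, int left, int right, int handler) {
  return (op & 7) | ((left & 7) << 3) | ((right & 7) << 6) | ((handler & 7) << 9);
}

static void Decode(int key, int* op, int* left, int* right, int* handler) {
  *op = key & 7;
  *left = (key >> 3) & 7;
  *right = (key >> 6) & 7;
  *handler = (key >> 9) & 7;
}

int main() {
  int op, left, right, handler;
  Decode(Encode(3, 1, 2, 4), &op, &left, &right, &handler);
  assert(op == 3 && left == 1 && right == 2 && handler == 4);
  return 0;
}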

278
deps/v8/src/code-stubs.h

@@ -141,7 +141,7 @@ class CodeStub BASE_EMBEDDED {
bool CompilingCallsToThisStubIsGCSafe() {
bool is_pregenerated = IsPregenerated();
Code* code = NULL;
-CHECK(!is_pregenerated || FindCodeInCache(&code));
CHECK(!is_pregenerated || FindCodeInCache(&code, Isolate::Current()));
return is_pregenerated;
}
@@ -160,7 +160,10 @@ class CodeStub BASE_EMBEDDED {
virtual bool SometimesSetsUpAFrame() { return true; }
// Lookup the code in the (possibly custom) cache.
-bool FindCodeInCache(Code** code_out);
bool FindCodeInCache(Code** code_out, Isolate* isolate);
protected:
static bool CanUseFPRegisters();
private:
// Nonvirtual wrapper around the stub-specific Generate function. Call
@@ -199,7 +202,9 @@ class CodeStub BASE_EMBEDDED {
virtual void AddToSpecialCache(Handle<Code> new_object) { }
// Find code in a specialized cache, work is delegated to the specific stub.
-virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
return false;
}
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
@@ -479,10 +484,132 @@ class MathPowStub: public CodeStub {
};
class BinaryOpStub: public CodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
platform_specific_bit_(false),
left_type_(BinaryOpIC::UNINITIALIZED),
right_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
Initialize();
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
BinaryOpStub(
int key,
BinaryOpIC::TypeInfo left_type,
BinaryOpIC::TypeInfo right_type,
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
platform_specific_bit_(PlatformSpecificBits::decode(key)),
left_type_(left_type),
right_type_(right_type),
result_type_(result_type) { }
static void decode_types_from_minor_key(int minor_key,
BinaryOpIC::TypeInfo* left_type,
BinaryOpIC::TypeInfo* right_type,
BinaryOpIC::TypeInfo* result_type) {
*left_type =
static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
*right_type =
static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
*result_type =
static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
}
static Token::Value decode_op_from_minor_key(int minor_key) {
return static_cast<Token::Value>(OpBits::decode(minor_key));
}
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
private:
Token::Value op_;
OverwriteMode mode_;
bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo left_type_;
BinaryOpIC::TypeInfo right_type_;
BinaryOpIC::TypeInfo result_type_;
virtual void PrintName(StringStream* stream);
// Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class PlatformSpecificBits: public BitField<bool, 9, 1> {};
class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| PlatformSpecificBits::encode(platform_specific_bit_)
| LeftTypeBits::encode(left_type_)
| RightTypeBits::encode(right_type_)
| ResultTypeBits::encode(result_type_);
}
// Platform-independent implementation.
void Generate(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
// Platform-independent signature, platform-specific implementation.
void Initialize();
void GenerateAddStrings(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
// Entirely platform-specific methods are defined as static helper
// functions in the <arch>/code-stubs-<arch>.cc files.
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(Max(left_type_, right_type_));
}
virtual void FinishCode(Handle<Code> code) {
code->set_stub_info(MinorKey());
}
friend class CodeGenerator;
};
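The ModeBits/OpBits/...TypeBits classes above, like the ICCompareStub key fields below, derive from V8's BitField<Type, shift, size> helper (defined in src/utils.h). A minimal re-implementation sketching the encode/decode contract they assume:

#include <cassert>

template <class T, int shift, int size>
struct BitField {
  static const int kMask = ((1 << size) - 1) << shift;
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int field) { return static_cast<T>((field & kMask) >> shift); }
};

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
typedef BitField<OverwriteMode, 0, 2> ModeBits;  // bits 0-1, as in BinaryOpStub
typedef BitField<int, 2, 7> OpBits;              // bits 2-8

int main() {
  // Fields OR together into one minor key and decode back independently.
  int key = ModeBits::encode(OVERWRITE_LEFT) | OpBits::encode(42);
  assert(ModeBits::decode(key) == OVERWRITE_LEFT);
  assert(OpBits::decode(key) == 42);
  return 0;
}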
class ICCompareStub: public CodeStub {
public:
-ICCompareStub(Token::Value op, CompareIC::State state)
-: op_(op), state_(state) {
ICCompareStub(Token::Value op,
CompareIC::State left,
CompareIC::State right,
CompareIC::State handler)
: op_(op),
left_(left),
right_(right),
state_(handler) {
ASSERT(Token::IsCompareOp(op));
}
@@ -490,13 +617,24 @@ class ICCompareStub: public CodeStub {

   void set_known_map(Handle<Map> map) { known_map_ = map; }

+  static void DecodeMinorKey(int minor_key,
+                             CompareIC::State* left_state,
+                             CompareIC::State* right_state,
+                             CompareIC::State* handler_state,
+                             Token::Value* op);
+
+  static CompareIC::State CompareState(int minor_key) {
+    return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+  }
+
  private:
   class OpField: public BitField<int, 0, 3> { };
-  class StateField: public BitField<int, 3, 5> { };
+  class LeftStateField: public BitField<int, 3, 3> { };
+  class RightStateField: public BitField<int, 6, 3> { };
+  class HandlerStateField: public BitField<int, 9, 3> { };

   virtual void FinishCode(Handle<Code> code) {
-    code->set_compare_state(state_);
-    code->set_compare_operation(op_ - Token::EQ);
+    code->set_stub_info(MinorKey());
   }

   virtual CodeStub::Major MajorKey() { return CompareIC; }
@@ -511,117 +649,23 @@ class ICCompareStub: public CodeStub {

   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
   void GenerateKnownObjects(MacroAssembler* masm);
+  void GenerateGeneric(MacroAssembler* masm);

   bool strict() const { return op_ == Token::EQ_STRICT; }
   Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }

   virtual void AddToSpecialCache(Handle<Code> new_object);
-  virtual bool FindCodeInSpecialCache(Code** code_out);
+  virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
   virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }

   Token::Value op_;
+  CompareIC::State left_;
+  CompareIC::State right_;
   CompareIC::State state_;
   Handle<Map> known_map_;
 };
-// Flags that control the compare stub code generation.
-enum CompareFlags {
-  NO_COMPARE_FLAGS = 0,
-  NO_SMI_COMPARE_IN_STUB = 1 << 0,
-  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
-  CANT_BOTH_BE_NAN = 1 << 2
-};
-
-enum NaNInformation {
-  kBothCouldBeNaN,
-  kCantBothBeNaN
-};
-
-class CompareStub: public CodeStub {
- public:
-  CompareStub(Condition cc,
-              bool strict,
-              CompareFlags flags,
-              Register lhs,
-              Register rhs) :
-     cc_(cc),
-     strict_(strict),
-     never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
-     include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
-     include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
-     lhs_(lhs),
-     rhs_(rhs) { }
-
-  CompareStub(Condition cc,
-              bool strict,
-              CompareFlags flags) :
-     cc_(cc),
-     strict_(strict),
-     never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
-     include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
-     include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
-     lhs_(no_reg),
-     rhs_(no_reg) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Condition cc_;
-  bool strict_;
-  // Only used for 'equal' comparisons.  Tells the stub that we already know
-  // that at least one side of the comparison is not NaN.  This allows the
-  // stub to use object identity in the positive case.  We ignore it when
-  // generating the minor key for other comparisons to avoid creating more
-  // stubs.
-  bool never_nan_nan_;
-  // Do generate the number comparison code in the stub. Stubs without number
-  // comparison code is used when the number comparison has been inlined, and
-  // the stub will be called if one of the operands is not a number.
-  bool include_number_compare_;
-  // Generate the comparison code for two smi operands in the stub.
-  bool include_smi_compare_;
-  // Register holding the left hand side of the comparison if the stub gives
-  // a choice, no_reg otherwise.
-  Register lhs_;
-  // Register holding the right hand side of the comparison if the stub gives
-  // a choice, no_reg otherwise.
-  Register rhs_;
-
-  // Encoding of the minor key in 16 bits.
-  class StrictField: public BitField<bool, 0, 1> {};
-  class NeverNanNanField: public BitField<bool, 1, 1> {};
-  class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
-  class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
-  class RegisterField: public BitField<bool, 4, 1> {};
-  class ConditionField: public BitField<int, 5, 11> {};
-
-  Major MajorKey() { return Compare; }
-  int MinorKey();
-
-  virtual int GetCodeKind() { return Code::COMPARE_IC; }
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_compare_state(CompareIC::GENERIC);
-  }
-
-  // Branch to the label if the given object isn't a symbol.
-  void BranchIfNonSymbol(MacroAssembler* masm,
-                         Label* label,
-                         Register object,
-                         Register scratch);
-
-  // Unfortunately you have to run without snapshots to see most of these
-  // names in the profile since most compare stubs end up in the snapshot.
-  virtual void PrintName(StringStream* stream);
-};
 class CEntryStub : public CodeStub {
  public:
   explicit CEntryStub(int result_size,
@@ -998,13 +1042,15 @@ class KeyedStoreElementStub : public CodeStub {
                         KeyedAccessGrowMode grow_mode)
       : is_js_array_(is_js_array),
         elements_kind_(elements_kind),
-        grow_mode_(grow_mode) { }
+        grow_mode_(grow_mode),
+        fp_registers_(CanUseFPRegisters()) { }

   Major MajorKey() { return KeyedStoreElement; }
   int MinorKey() {
     return ElementsKindBits::encode(elements_kind_) |
         IsJSArrayBits::encode(is_js_array_) |
-        GrowModeBits::encode(grow_mode_);
+        GrowModeBits::encode(grow_mode_) |
+        FPRegisters::encode(fp_registers_);
   }

   void Generate(MacroAssembler* masm);
@@ -1013,10 +1059,12 @@ class KeyedStoreElementStub : public CodeStub {
   class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
   class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
   class IsJSArrayBits: public BitField<bool, 9, 1> {};
+  class FPRegisters: public BitField<bool, 10, 1> {};

   bool is_js_array_;
   ElementsKind elements_kind_;
   KeyedAccessGrowMode grow_mode_;
+  bool fp_registers_;

   DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
 };
@@ -1046,6 +1094,9 @@ class ToBooleanStub: public CodeStub {
     bool IsEmpty() const { return set_.IsEmpty(); }
     bool Contains(Type type) const { return set_.Contains(type); }
+    bool ContainsAnyOf(Types types) const {
+      return set_.ContainsAnyOf(types.set_);
+    }
     void Add(Type type) { set_.Add(type); }
     byte ToByte() const { return set_.ToIntegral(); }
     void Print(StringStream* stream) const;
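The new ContainsAnyOf is a set-intersection test on the underlying bit set. A small sketch of the idea, with EnumSetSketch as an illustrative stand-in for V8's EnumSet (names and widths here are assumptions for illustration):

#include <cassert>
#include <cstdint>

// Illustrative stand-in for v8::internal::EnumSet.
struct EnumSetSketch {
  uint8_t bits;
  explicit EnumSetSketch(uint8_t b = 0) : bits(b) {}
  void Add(int type) { bits |= static_cast<uint8_t>(1u << type); }
  bool Contains(int type) const { return (bits & (1u << type)) != 0; }
  // True when the two sets share at least one element.
  bool ContainsAnyOf(EnumSetSketch other) const {
    return (bits & other.bits) != 0;
  }
};

int main() {
  EnumSetSketch seen, query;
  seen.Add(1); seen.Add(3);   // e.g. two ToBoolean types observed so far
  query.Add(3); query.Add(5);
  assert(seen.ContainsAnyOf(query));  // the sets overlap on type 3
  return 0;
}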
@@ -1132,14 +1183,19 @@ class ElementsTransitionAndStoreStub : public CodeStub {
 class StoreArrayLiteralElementStub : public CodeStub {
  public:
-  explicit StoreArrayLiteralElementStub() {}
+  StoreArrayLiteralElementStub()
+      : fp_registers_(CanUseFPRegisters()) { }

  private:
+  class FPRegisters: public BitField<bool, 0, 1> {};

   Major MajorKey() { return StoreArrayLiteralElement; }
-  int MinorKey() { return 0; }
+  int MinorKey() { return FPRegisters::encode(fp_registers_); }

   void Generate(MacroAssembler* masm);

+  bool fp_registers_;

   DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
 };
@@ -1159,6 +1215,8 @@ class ProfileEntryHookStub : public CodeStub {
   // non-NULL hook.
   static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);

+  static bool HasEntryHook() { return entry_hook_ != NULL; }
+
  private:
   static void EntryHookTrampoline(intptr_t function,
                                   intptr_t stack_pointer);

1
deps/v8/src/codegen.cc

@@ -107,6 +107,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
   if (!code.is_null()) {
     isolate->counters()->total_compiled_code_size()->Increment(
         code->instruction_size());
+    code->set_prologue_offset(info->prologue_offset());
   }
   return code;
 }

14
deps/v8/src/codegen.h

@@ -90,6 +90,7 @@
 typedef double (*UnaryMathFunction)(double x);

 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
+UnaryMathFunction CreateExpFunction();
 UnaryMathFunction CreateSqrtFunction();
@@ -103,6 +104,19 @@
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
 };

+class SeqStringSetCharGenerator : public AllStatic {
+ public:
+  static void Generate(MacroAssembler* masm,
+                       String::Encoding encoding,
+                       Register string,
+                       Register index,
+                       Register value);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
+};
+
 } }  // namespace v8::internal

 #endif  // V8_CODEGEN_H_

46
deps/v8/src/collection.js

@@ -88,6 +88,25 @@ function SetDelete(key) {
 }

+function SetGetSize() {
+  if (!IS_SET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Set.prototype.size', this]);
+  }
+  return %SetGetSize(this);
+}
+
+function SetClear() {
+  if (!IS_SET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Set.prototype.clear', this]);
+  }
+  // Replace the internal table with a new empty table.
+  %SetInitialize(this);
+}
+
 function MapConstructor() {
   if (%_IsConstructCall()) {
     %MapInitialize(this);
@@ -145,6 +164,25 @@ function MapDelete(key) {
 }

+function MapGetSize() {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.size', this]);
+  }
+  return %MapGetSize(this);
+}
+
+function MapClear() {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.clear', this]);
+  }
+  // Replace the internal table with a new empty table.
+  %MapInitialize(this);
+}
+
 function WeakMapConstructor() {
   if (%_IsConstructCall()) {
     %WeakMapInitialize(this);
@@ -215,18 +253,22 @@ function WeakMapDelete(key) {
 %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);

 // Set up the non-enumerable functions on the Set prototype object.
+InstallGetter($Set.prototype, "size", SetGetSize);
 InstallFunctions($Set.prototype, DONT_ENUM, $Array(
   "add", SetAdd,
   "has", SetHas,
-  "delete", SetDelete
+  "delete", SetDelete,
+  "clear", SetClear
 ));

 // Set up the non-enumerable functions on the Map prototype object.
+InstallGetter($Map.prototype, "size", MapGetSize);
 InstallFunctions($Map.prototype, DONT_ENUM, $Array(
   "get", MapGet,
   "set", MapSet,
   "has", MapHas,
-  "delete", MapDelete
+  "delete", MapDelete,
+  "clear", MapClear
 ));

 // Set up the WeakMap constructor function.

2
deps/v8/src/compilation-cache.cc

@@ -98,7 +98,7 @@ void CompilationSubCache::Age() {

 void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
-  Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
+  Object* undefined = isolate()->heap()->undefined_value();
   for (int i = 0; i < generations_; i++) {
     if (tables_[i] != undefined) {
       reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);

117
deps/v8/src/compiler.cc

@@ -52,57 +52,53 @@ namespace internal {

 CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
-    : isolate_(script->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE)),
-      function_(NULL),
-      scope_(NULL),
-      global_scope_(NULL),
+    : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
-      extension_(NULL),
-      pre_parse_data_(NULL),
-      osr_ast_id_(BailoutId::None()),
-      zone_(zone),
-      deferred_handles_(NULL) {
-  Initialize(BASE);
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(zone);
 }

 CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
                                  Zone* zone)
-    : isolate_(shared_info->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE) |
-             IsLazy::encode(true)),
-      function_(NULL),
-      scope_(NULL),
-      global_scope_(NULL),
+    : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
-      extension_(NULL),
-      pre_parse_data_(NULL),
-      osr_ast_id_(BailoutId::None()),
-      zone_(zone),
-      deferred_handles_(NULL) {
-  Initialize(BASE);
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(zone);
 }

 CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
-    : isolate_(closure->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE) |
-             IsLazy::encode(true)),
-      function_(NULL),
-      scope_(NULL),
-      global_scope_(NULL),
+    : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
       closure_(closure),
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
-      extension_(NULL),
-      pre_parse_data_(NULL),
       context_(closure->context()),
-      osr_ast_id_(BailoutId::None()),
-      zone_(zone),
-      deferred_handles_(NULL) {
-  Initialize(BASE);
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(zone);
+}
+
+void CompilationInfo::Initialize(Zone* zone) {
+  isolate_ = script_->GetIsolate();
+  function_ = NULL;
+  scope_ = NULL;
+  global_scope_ = NULL;
+  extension_ = NULL;
+  pre_parse_data_ = NULL;
+  zone_ = zone;
+  deferred_handles_ = NULL;
+  prologue_offset_ = kPrologueOffsetNotSet;
+  mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
+  if (script_->type()->value() == Script::TYPE_NATIVE) {
+    MarkAsNative();
+  }
+  if (!shared_info_.is_null()) {
+    ASSERT(language_mode() == CLASSIC_MODE);
+    SetLanguageMode(shared_info_->language_mode());
+  }
+  set_bailout_reason("unknown");
 }
@@ -194,6 +190,11 @@ void OptimizingCompiler::RecordOptimizationStats() {
                code_size,
                compilation_time);
   }
+  if (FLAG_hydrogen_stats) {
+    HStatistics::Instance()->IncrementSubtotals(time_taken_to_create_graph_,
+                                                time_taken_to_optimize_,
+                                                time_taken_to_codegen_);
+  }
 }
@@ -284,7 +285,6 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
   // doesn't have deoptimization support. Alternatively, we may decide to
   // run the full code generator to get a baseline for the compile-time
   // performance of the hydrogen-based compiler.
-  Timer t(this, &time_taken_to_create_graph_);
   bool should_recompile = !info()->shared_info()->has_deoptimization_support();
   if (should_recompile || FLAG_hydrogen_stats) {
     HPhase phase(HPhase::kFullCodeGen);
@@ -324,7 +324,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
   oracle_ = new(info()->zone()) TypeFeedbackOracle(
       code, native_context, info()->isolate(), info()->zone());
   graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
-  HPhase phase(HPhase::kTotal);
+
+  Timer t(this, &time_taken_to_create_graph_);
   graph_ = graph_builder_->CreateGraph();

   if (info()->isolate()->has_pending_exception()) {
@@ -371,15 +372,17 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {

 OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
   ASSERT(last_status() == SUCCEEDED);
-  Timer timer(this, &time_taken_to_codegen_);
-  ASSERT(chunk_ != NULL);
-  ASSERT(graph_ != NULL);
-  Handle<Code> optimized_code = chunk_->Codegen();
-  if (optimized_code.is_null()) {
-    info()->set_bailout_reason("code generation failed");
-    return AbortOptimization();
+  {  // Scope for timer.
+    Timer timer(this, &time_taken_to_codegen_);
+    ASSERT(chunk_ != NULL);
+    ASSERT(graph_ != NULL);
+    Handle<Code> optimized_code = chunk_->Codegen();
+    if (optimized_code.is_null()) {
+      info()->set_bailout_reason("code generation failed");
+      return AbortOptimization();
+    }
+    info()->SetCode(optimized_code);
   }
-  info()->SetCode(optimized_code);
   RecordOptimizationStats();
   return SetLastStatus(SUCCEEDED);
 }
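The "Scope for timer" block works because Timer is an RAII helper: it samples a start time on construction and adds the elapsed time to the named counter on destruction, so narrowing its scope ends the measurement before RecordOptimizationStats() reads the totals. A hedged sketch of the same pattern, with std::chrono standing in for V8's tick counter:

#include <chrono>
#include <cstdio>

// RAII timer: accumulates elapsed microseconds into *total on destruction.
class ScopedTimer {
 public:
  explicit ScopedTimer(long long* total)
      : total_(total), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
    *total_ += std::chrono::duration_cast<std::chrono::microseconds>(
        end - start_).count();
  }
 private:
  long long* total_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  long long time_taken_to_codegen = 0;
  {  // Scope for timer: the measurement ends at the closing brace.
    ScopedTimer timer(&time_taken_to_codegen);
    volatile long x = 0;
    for (int i = 0; i < 1000000; ++i) x += i;  // stand-in for Codegen()
  }
  std::printf("codegen took %lld us\n", time_taken_to_codegen);
  return 0;
}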
@@ -390,6 +393,8 @@ static bool GenerateCode(CompilationInfo* info) {
       !info->IsCompilingForDebugging() &&
       info->IsOptimizing();
   if (is_optimizing) {
+    Logger::TimerEventScope timer(
+        info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
     return MakeCrankshaftCode(info);
   } else {
     if (info->IsOptimizing()) {
@@ -397,6 +402,8 @@ static bool GenerateCode(CompilationInfo* info) {
       // BASE or NONOPT.
       info->DisableOptimization();
     }
+    Logger::TimerEventScope timer(
+        info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
     return FullCodeGenerator::MakeCode(info);
   }
 }
@@ -432,7 +439,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
   ASSERT(!isolate->native_context().is_null());
   Handle<Script> script = info->script();
-  script->set_context_data((*isolate->native_context())->data());
+  // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
+  FixedArray* array = isolate->native_context()->embedder_data();
+  script->set_context_data(array->get(0));

 #ifdef ENABLE_DEBUGGER_SUPPORT
   if (info->is_eval()) {
@@ -841,6 +850,11 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
   ASSERT(closure->IsMarkedForParallelRecompilation());

   Isolate* isolate = closure->GetIsolate();
+  // Here we prepare compile data for the parallel recompilation thread, but
+  // this still happens synchronously and interrupts execution.
+  Logger::TimerEventScope timer(
+      isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+
   if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
     if (FLAG_trace_parallel_recompilation) {
       PrintF("  ** Compilation queue, will retry opting on next run.\n");
@@ -849,7 +863,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
   }

   SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
-  VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
+  VMState state(isolate, PARALLEL_COMPILER);
   PostponeInterruptsScope postpone(isolate);

   Handle<SharedFunctionInfo> shared = info->shared_info();
@@ -860,7 +874,10 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
   {
     CompilationHandleScope handle_scope(*info);

-    if (InstallCodeFromOptimizedCodeMap(*info)) return;
+    if (!FLAG_manual_parallel_recompilation &&
+        InstallCodeFromOptimizedCodeMap(*info)) {
+      return;
+    }

     if (ParserApi::Parse(*info, kNoParsingFlags)) {
       LanguageMode language_mode = info->function()->language_mode();
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) { void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info()); SmartPointer<CompilationInfo> info(optimizing_compiler->info());
Isolate* isolate = info->isolate();
VMState state(isolate, PARALLEL_COMPILER);
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install // If crankshaft succeeded, install the optimized code else install
// the unoptimized code. // the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status(); OptimizingCompiler::Status status = optimizing_compiler->last_status();

29
deps/v8/src/compiler.h

@@ -35,6 +35,8 @@
 namespace v8 {
 namespace internal {

+static const int kPrologueOffsetNotSet = -1;
+
 class ScriptDataImpl;

 // CompilationInfo encapsulates some information known at compile time.  It
@@ -186,6 +188,16 @@ class CompilationInfo {
   const char* bailout_reason() const { return bailout_reason_; }
   void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }

+  int prologue_offset() const {
+    ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
+    return prologue_offset_;
+  }
+
+  void set_prologue_offset(int prologue_offset) {
+    ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
+    prologue_offset_ = prologue_offset;
+  }
+
  private:
   Isolate* isolate_;
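prologue_offset is a write-once field guarded by a sentinel: the getter asserts the value has been set, the setter asserts it has not. A minimal sketch of that contract, using plain assert in place of V8's ASSERT_EQ/ASSERT_NE:

#include <cassert>

static const int kNotSet = -1;  // sentinel, like kPrologueOffsetNotSet

class PrologueOffsetSketch {
 public:
  PrologueOffsetSketch() : offset_(kNotSet) {}
  int get() const {
    assert(offset_ != kNotSet);  // must be set before the first read
    return offset_;
  }
  void set(int offset) {
    assert(offset_ == kNotSet);  // may only be set once
    offset_ = offset;
  }
 private:
  int offset_;
};

int main() {
  PrologueOffsetSketch p;
  p.set(42);
  assert(p.get() == 42);
  return 0;
}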
@@ -200,18 +212,7 @@ class CompilationInfo {
     NONOPT
   };

-  void Initialize(Mode mode) {
-    mode_ = V8::UseCrankshaft() ? mode : NONOPT;
-    ASSERT(!script_.is_null());
-    if (script_->type()->value() == Script::TYPE_NATIVE) {
-      MarkAsNative();
-    }
-    if (!shared_info_.is_null()) {
-      ASSERT(language_mode() == CLASSIC_MODE);
-      SetLanguageMode(shared_info_->language_mode());
-    }
-    set_bailout_reason("unknown");
-  }
+  void Initialize(Zone* zone);

   void SetMode(Mode mode) {
     ASSERT(V8::UseCrankshaft());
@@ -285,6 +286,8 @@ class CompilationInfo {
   const char* bailout_reason_;

+  int prologue_offset_;
+
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
@@ -293,6 +296,8 @@ class CompilationInfo {
 // Zone on construction and deallocates it on exit.
 class CompilationInfoWithZone: public CompilationInfo {
  public:
+  INLINE(void* operator new(size_t size)) { return Malloced::New(size); }
+
   explicit CompilationInfoWithZone(Handle<Script> script)
       : CompilationInfo(script, &zone_),
         zone_(script->GetIsolate()),

37
deps/v8/src/contexts.cc

@@ -55,6 +55,15 @@ JSBuiltinsObject* Context::builtins() {
 }

+Context* Context::global_context() {
+  Context* current = this;
+  while (!current->IsGlobalContext()) {
+    current = current->previous();
+  }
+  return current;
+}
+
 Context* Context::native_context() {
   // Fast case: the global object for this context has been set.  In
   // that case, the global object has a direct pointer to the global
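global_context() is a straightforward walk up the previous() chain until the predicate holds; like V8's version, it assumes a global context is always reachable. A self-contained sketch of the traversal, with a plain struct standing in for Context:

#include <cassert>

// Illustrative stand-in for the context chain; not V8's Context layout.
struct Ctx {
  Ctx* previous;
  bool is_global;
};

// Walk outward until the innermost enclosing global context is found.
Ctx* GlobalContext(Ctx* current) {
  while (!current->is_global) {
    current = current->previous;
  }
  return current;
}

int main() {
  Ctx global = { 0, true };
  Ctx function_ctx = { &global, false };
  Ctx block_ctx = { &function_ctx, false };
  assert(GlobalContext(&block_ctx) == &global);
  return 0;
}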
@@ -183,6 +192,10 @@ Handle<Object> Context::Lookup(Handle<String> name,
             ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
             IMMUTABLE_IS_INITIALIZED_HARMONY;
         break;
+      case MODULE:
+        *attributes = READ_ONLY;
+        *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
+        break;
       case DYNAMIC:
       case DYNAMIC_GLOBAL:
       case DYNAMIC_LOCAL:
@@ -251,8 +264,6 @@ void Context::AddOptimizedFunction(JSFunction* function) {
     }
   }

-  CHECK(function->next_function_link()->IsUndefined());
-
   // Check that the context belongs to the weak native contexts list.
   bool found = false;
   Object* context = GetHeap()->native_contexts_list();
@@ -265,6 +276,16 @@ void Context::AddOptimizedFunction(JSFunction* function) {
   }
   CHECK(found);
 #endif
+
+  // If the function link field is already used then the function was
+  // enqueued as a code flushing candidate and we remove it now.
+  if (!function->next_function_link()->IsUndefined()) {
+    CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+    flusher->EvictCandidate(function);
+  }
+
+  ASSERT(function->next_function_link()->IsUndefined());
+
   function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
   set(OPTIMIZED_FUNCTIONS_LIST, function);
 }
@@ -305,6 +326,18 @@ void Context::ClearOptimizedFunctions() {
 }

+Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
+  Handle<Object> result(error_message_for_code_gen_from_strings());
+  if (result->IsUndefined()) {
+    const char* error =
+        "Code generation from strings disallowed for this context";
+    Isolate* isolate = Isolate::Current();
+    result = isolate->factory()->NewStringFromAscii(i::CStrVector(error));
+  }
+  return result;
+}
+
 #ifdef DEBUG
 bool Context::IsBootstrappingOrValidParentContext(
     Object* object, Context* child) {

29
deps/v8/src/contexts.h

@@ -152,14 +152,18 @@ enum BindingFlags {
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
   V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
   V(MAP_CACHE_INDEX, Object, map_cache) \
-  V(CONTEXT_DATA_INDEX, Object, data) \
+  V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
   V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+  V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
+    error_message_for_code_gen_from_strings) \
   V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
     to_complete_property_descriptor) \
   V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
   V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
   V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
-  V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
+  V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
+  V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
+  V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
   V(RANDOM_SEED_INDEX, ByteArray, random_seed)

 // JSFunctions are pairs (context, function code), sometimes also called
@@ -279,13 +283,16 @@ class Context: public FixedArray {
     OPAQUE_REFERENCE_FUNCTION_INDEX,
     CONTEXT_EXTENSION_FUNCTION_INDEX,
     OUT_OF_MEMORY_INDEX,
-    CONTEXT_DATA_INDEX,
+    EMBEDDER_DATA_INDEX,
     ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+    ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
     TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
     DERIVED_HAS_TRAP_INDEX,
     DERIVED_GET_TRAP_INDEX,
     DERIVED_SET_TRAP_INDEX,
-    PROXY_ENUMERATE,
+    PROXY_ENUMERATE_INDEX,
+    OBSERVERS_NOTIFY_CHANGE_INDEX,
+    OBSERVERS_DELIVER_CHANGES_INDEX,
     RANDOM_SEED_INDEX,

     // Properties from here are treated as weak references by the full GC.
@@ -338,12 +345,19 @@ class Context: public FixedArray {
   // The builtins object.
   JSBuiltinsObject* builtins();

+  // Get the innermost global context by traversing the context chain.
+  Context* global_context();
+
   // Compute the native context by traversing the context chain.
   Context* native_context();

-  // Predicates for context types.  IsNativeContext is defined on Object
+  // Predicates for context types.  IsNativeContext is also defined on Object
   // because we frequently have to know if arbitrary objects are natives
   // contexts.
+  bool IsNativeContext() {
+    Map* map = this->map();
+    return map == map->GetHeap()->native_context_map();
+  }
   bool IsFunctionContext() {
     Map* map = this->map();
     return map == map->GetHeap()->function_context_map();
@@ -381,6 +395,8 @@ class Context: public FixedArray {
   Object* OptimizedFunctionsListHead();
   void ClearOptimizedFunctions();

+  Handle<Object> ErrorMessageForCodeGenerationFromStrings();
+
 #define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   void set_##name(type* value) { \
     ASSERT(IsNativeContext()); \
@@ -441,6 +457,9 @@ class Context: public FixedArray {
   static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
   static bool IsBootstrappingOrGlobalObject(Object* object);
 #endif
+
+  STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
+  STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
 };

 } }  // namespace v8::internal

7
deps/v8/src/counters.cc

@@ -77,7 +77,7 @@ void* Histogram::CreateHistogram() const {

 // Start the timer.
 void HistogramTimer::Start() {
-  if (histogram_.Enabled()) {
+  if (histogram_.Enabled() || FLAG_log_internal_timer_events) {
     stop_time_ = 0;
     start_time_ = OS::Ticks();
   }
@@ -87,11 +87,14 @@ void HistogramTimer::Start() {
 void HistogramTimer::Stop() {
   if (histogram_.Enabled()) {
     stop_time_ = OS::Ticks();
-
     // Compute the delta between start and stop, in milliseconds.
     int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
     histogram_.AddSample(milliseconds);
   }
+  if (FLAG_log_internal_timer_events) {
+    LOG(Isolate::Current(),
+        TimerEvent(histogram_.name_, start_time_, OS::Ticks()));
+  }
 }

 } }  // namespace v8::internal

415
deps/v8/src/d8.cc

@@ -67,6 +67,62 @@
 namespace v8 {

+static Handle<Value> Throw(const char* message) {
+  return ThrowException(String::New(message));
+}
+
+// TODO(rossberg): should replace these by proper uses of HasInstance,
+// once we figure out a good way to make the templates global.
+const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
+const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
+
+#define FOR_EACH_SYMBOL(V)                                    \
+  V(ArrayBuffer, "ArrayBuffer")                               \
+  V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName)    \
+  V(ArrayMarkerPropName, kArrayMarkerPropName)                \
+  V(buffer, "buffer")                                         \
+  V(byteLength, "byteLength")                                 \
+  V(byteOffset, "byteOffset")                                 \
+  V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT")                   \
+  V(length, "length")
+
+class Symbols {
+ public:
+  explicit Symbols(Isolate* isolate) : isolate_(isolate) {
+    HandleScope scope;
+#define INIT_SYMBOL(name, value) \
+    name##_ = Persistent<String>::New(String::NewSymbol(value));
+    FOR_EACH_SYMBOL(INIT_SYMBOL)
+#undef INIT_SYMBOL
+    isolate->SetData(this);
+  }
+
+  ~Symbols() {
+#define DISPOSE_SYMBOL(name, value) name##_.Dispose();
+    FOR_EACH_SYMBOL(DISPOSE_SYMBOL)
+#undef DISPOSE_SYMBOL
+    isolate_->SetData(NULL);  // Not really needed, just to be sure...
+  }
+
+#define DEFINE_SYMBOL_GETTER(name, value)                            \
+  static Persistent<String> name(Isolate* isolate) {                 \
+    return reinterpret_cast<Symbols*>(isolate->GetData())->name##_;  \
+  }
+  FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER)
+#undef DEFINE_SYMBOL_GETTER
+
+ private:
+  Isolate* isolate_;
+#define DEFINE_MEMBER(name, value) Persistent<String> name##_;
+  FOR_EACH_SYMBOL(DEFINE_MEMBER)
+#undef DEFINE_MEMBER
+};
+
 LineEditor *LineEditor::first_ = NULL;
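The Symbols class relies on the X-macro pattern: a single FOR_EACH_SYMBOL list drives member declaration, initialization, disposal, and getters, so adding a symbol means touching one line. A self-contained sketch of the same pattern (NameTable and its entries are illustrative, not from d8):

#include <cstdio>
#include <string>

// One list of (identifier, literal) pairs drives everything below.
#define FOR_EACH_NAME(V) \
  V(alpha, "alpha")      \
  V(beta, "beta")

class NameTable {
 public:
  NameTable() {
#define INIT_NAME(id, text) id##_ = text;  // expands once per list entry
    FOR_EACH_NAME(INIT_NAME)
#undef INIT_NAME
  }
#define DEFINE_GETTER(id, text) const std::string& id() const { return id##_; }
  FOR_EACH_NAME(DEFINE_GETTER)
#undef DEFINE_GETTER
 private:
#define DEFINE_MEMBER(id, text) std::string id##_;
  FOR_EACH_NAME(DEFINE_MEMBER)
#undef DEFINE_MEMBER
};

int main() {
  NameTable t;
  std::printf("%s %s\n", t.alpha().c_str(), t.beta().c_str());  // alpha beta
  return 0;
}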
@@ -92,17 +148,17 @@ LineEditor* LineEditor::Get() {

 class DumbLineEditor: public LineEditor {
  public:
-  DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
+  explicit DumbLineEditor(Isolate* isolate)
+      : LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { }
   virtual Handle<String> Prompt(const char* prompt);
+ private:
+  Isolate* isolate_;
 };

-static DumbLineEditor dumb_line_editor;
-
 Handle<String> DumbLineEditor::Prompt(const char* prompt) {
   printf("%s", prompt);
-  return Shell::ReadFromStdin();
+  return Shell::ReadFromStdin(isolate_);
 }
@@ -115,7 +171,6 @@ i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
 Persistent<Context> Shell::utility_context_;
 #endif  // V8_SHARED

-LineEditor* Shell::console = NULL;
 Persistent<Context> Shell::evaluation_context_;
 ShellOptions Shell::options;
 const char* Shell::kPrompt = "d8> ";
@@ -200,7 +255,13 @@ Handle<Value> Shell::Write(const Arguments& args) {
     if (i != 0) {
       printf(" ");
     }
-    v8::String::Utf8Value str(args[i]);
+
+    // Explicitly catch potential exceptions in toString().
+    v8::TryCatch try_catch;
+    Handle<String> str_obj = args[i]->ToString();
+    if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+    v8::String::Utf8Value str(str_obj);
     int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
     if (n != str.length()) {
       printf("Error in fwrite\n");
@@ -226,17 +287,17 @@ Handle<Value> Shell::DisableProfiler(const Arguments& args) {
 Handle<Value> Shell::Read(const Arguments& args) {
   String::Utf8Value file(args[0]);
   if (*file == NULL) {
-    return ThrowException(String::New("Error loading file"));
+    return Throw("Error loading file");
   }
-  Handle<String> source = ReadFile(*file);
+  Handle<String> source = ReadFile(args.GetIsolate(), *file);
   if (source.IsEmpty()) {
-    return ThrowException(String::New("Error loading file"));
+    return Throw("Error loading file");
   }
   return source;
 }

-Handle<String> Shell::ReadFromStdin() {
+Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
   static const int kBufferSize = 256;
   char buffer[kBufferSize];
   Handle<String> accumulator = String::New("");
@@ -247,7 +308,7 @@
     // If fgets gets an error, just give up.
     char* input = NULL;
     {  // Release lock for blocking input.
-      Unlocker unlock(Isolate::GetCurrent());
+      Unlocker unlock(isolate);
       input = fgets(buffer, kBufferSize, stdin);
     }
     if (input == NULL) return Handle<String>();
@@ -271,14 +332,14 @@ Handle<Value> Shell::Load(const Arguments& args) {
     HandleScope handle_scope;
     String::Utf8Value file(args[i]);
     if (*file == NULL) {
-      return ThrowException(String::New("Error loading file"));
+      return Throw("Error loading file");
     }
-    Handle<String> source = ReadFile(*file);
+    Handle<String> source = ReadFile(args.GetIsolate(), *file);
     if (source.IsEmpty()) {
-      return ThrowException(String::New("Error loading file"));
+      return Throw("Error loading file");
     }
     if (!ExecuteString(source, String::New(*file), false, true)) {
-      return ThrowException(String::New("Error executing file"));
+      return Throw("Error executing file");
     }
   }
   return Undefined();
@@ -308,7 +369,7 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
   if (try_catch->HasCaught()) return 0;

   if (raw_value < 0) {
-    ThrowException(String::New("Array length must not be negative."));
+    Throw("Array length must not be negative.");
     return 0;
   }
@@ -317,33 +378,27 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
   ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
 #endif  // V8_SHARED
   if (raw_value > static_cast<int32_t>(kMaxLength)) {
-    ThrowException(
-        String::New("Array length exceeds maximum length."));
+    Throw("Array length exceeds maximum length.");
   }
   return raw_value;
 }

-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
+Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
+                                               Handle<Object> buffer,
                                                int32_t length) {
   static const int32_t kMaxSize = 0x7fffffff;
   // Make sure the total size fits into a (signed) int.
   if (length < 0 || length > kMaxSize) {
-    return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
+    return Throw("ArrayBuffer exceeds maximum size (2G)");
   }
   uint8_t* data = new uint8_t[length];
   if (data == NULL) {
-    return ThrowException(String::New("Memory allocation failed"));
+    return Throw("Memory allocation failed");
   }
   memset(data, 0, length);

-  buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
+  buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
   Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
   persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
   persistent_array.MarkIndependent();
@@ -351,7 +406,7 @@ Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
   buffer->SetIndexedPropertiesToExternalArrayData(
       data, v8::kExternalByteArray, length);
-  buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
+  buffer->Set(Symbols::byteLength(isolate), Int32::New(length), ReadOnly);

   return buffer;
 }
@@ -367,18 +422,18 @@ Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
   }

   if (args.Length() == 0) {
-    return ThrowException(
-        String::New("ArrayBuffer constructor must have one argument"));
+    return Throw("ArrayBuffer constructor must have one argument");
   }

   TryCatch try_catch;
   int32_t length = convertToUint(args[0], &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();

-  return CreateExternalArrayBuffer(args.This(), length);
+  return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length);
 }

-Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
+Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
+                                          Handle<Object> array,
                                           Handle<Object> buffer,
                                           ExternalArrayType type,
                                           int32_t length,
@@ -394,12 +449,13 @@ Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
   array->SetIndexedPropertiesToExternalArrayData(
       static_cast<uint8_t*>(data) + byteOffset, type, length);
-  array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
-  array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
-  array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
-  array->Set(String::New("length"), Int32::New(length), ReadOnly);
-  array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
-  array->Set(String::New("buffer"), buffer, ReadOnly);
+  array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate),
+                        Int32::New(type));
+  array->Set(Symbols::byteLength(isolate), Int32::New(byteLength), ReadOnly);
+  array->Set(Symbols::byteOffset(isolate), Int32::New(byteOffset), ReadOnly);
+  array->Set(Symbols::length(isolate), Int32::New(length), ReadOnly);
+  array->Set(Symbols::BYTES_PER_ELEMENT(isolate), Int32::New(element_size));
+  array->Set(Symbols::buffer(isolate), buffer, ReadOnly);

   return array;
 }
@@ -408,6 +464,7 @@ Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
 Handle<Value> Shell::CreateExternalArray(const Arguments& args,
                                          ExternalArrayType type,
                                          int32_t element_size) {
+  Isolate* isolate = args.GetIsolate();
   if (!args.IsConstructCall()) {
     Handle<Value>* rec_args = new Handle<Value>[args.Length()];
     for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
@@ -433,16 +490,15 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
   int32_t byteOffset;
   bool init_from_array = false;
   if (args.Length() == 0) {
-    return ThrowException(
-        String::New("Array constructor must have at least one argument"));
+    return Throw("Array constructor must have at least one argument");
   }
   if (args[0]->IsObject() &&
       !args[0]->ToObject()->GetHiddenValue(
-          String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
+          Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) {
     // Construct from ArrayBuffer.
     buffer = args[0]->ToObject();
     int32_t bufferLength =
-        convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
+        convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch);
     if (try_catch.HasCaught()) return try_catch.ReThrow();

     if (args.Length() < 2 || args[1]->IsUndefined()) {
@@ -451,11 +507,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
       byteOffset = convertToUint(args[1], &try_catch);
       if (try_catch.HasCaught()) return try_catch.ReThrow();
       if (byteOffset > bufferLength) {
-        return ThrowException(String::New("byteOffset out of bounds"));
+        return Throw("byteOffset out of bounds");
       }
       if (byteOffset % element_size != 0) {
-        return ThrowException(
-            String::New("byteOffset must be multiple of element size"));
+        return Throw("byteOffset must be multiple of element size");
       }
     }
@@ -463,23 +518,22 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
       byteLength = bufferLength - byteOffset;
       length = byteLength / element_size;
       if (byteLength % element_size != 0) {
-        return ThrowException(
-            String::New("buffer size must be multiple of element size"));
+        return Throw("buffer size must be multiple of element size");
       }
     } else {
       length = convertToUint(args[2], &try_catch);
       if (try_catch.HasCaught()) return try_catch.ReThrow();
       byteLength = length * element_size;
       if (byteOffset + byteLength > bufferLength) {
-        return ThrowException(String::New("length out of bounds"));
+        return Throw("length out of bounds");
       }
     }
   } else {
     if (args[0]->IsObject() &&
-        args[0]->ToObject()->Has(String::New("length"))) {
+        args[0]->ToObject()->Has(Symbols::length(isolate))) {
       // Construct from array.
       length = convertToUint(
-          args[0]->ToObject()->Get(String::New("length")), &try_catch);
+          args[0]->ToObject()->Get(Symbols::length(isolate)), &try_catch);
       if (try_catch.HasCaught()) return try_catch.ReThrow();
       init_from_array = true;
     } else {
@@ -491,7 +545,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
     byteOffset = 0;

     Handle<Object> global = Context::GetCurrent()->Global();
-    Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
+    Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate));
     ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
     Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
     Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
@@ -500,8 +554,9 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
     buffer = result->ToObject();
   }

-  Handle<Object> array = CreateExternalArray(
-      args.This(), buffer, type, length, byteLength, byteOffset, element_size);
+  Handle<Object> array =
+      CreateExternalArray(isolate, args.This(), buffer, type, length,
+                          byteLength, byteOffset, element_size);

   if (init_from_array) {
     Handle<Object> init = args[0]->ToObject();
@@ -516,25 +571,23 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
   TryCatch try_catch;

   if (!args.This()->IsObject()) {
-    return ThrowException(
-        String::New("'slice' invoked on non-object receiver"));
+    return Throw("'slice' invoked on non-object receiver");
   }

+  Isolate* isolate = args.GetIsolate();
   Local<Object> self = args.This();
   Local<Value> marker =
-      self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
+      self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate));
   if (marker.IsEmpty()) {
-    return ThrowException(
-        String::New("'slice' invoked on wrong receiver type"));
+    return Throw("'slice' invoked on wrong receiver type");
   }

   int32_t length =
-      convertToUint(self->Get(String::New("byteLength")), &try_catch);
+      convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();

   if (args.Length() == 0) {
-    return ThrowException(
-        String::New("'slice' must have at least one argument"));
+    return Throw("'slice' must have at least one argument");
   }
   int32_t begin = convertToInt(args[0], &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -573,32 +626,31 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) {
   TryCatch try_catch;

   if (!args.This()->IsObject()) {
-    return ThrowException(
-        String::New("'subarray' invoked on non-object receiver"));
+    return Throw("'subarray' invoked on non-object receiver");
   }

+  Isolate* isolate = args.GetIsolate();
   Local<Object> self = args.This();
-  Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+  Local<Value> marker =
+      self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
   if (marker.IsEmpty()) {
-    return ThrowException(
-        String::New("'subarray' invoked on wrong receiver type"));
+    return Throw("'subarray' invoked on wrong receiver type");
   }

-  Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+  Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
   if (try_catch.HasCaught()) return try_catch.ReThrow();
   int32_t length =
-      convertToUint(self->Get(String::New("length")), &try_catch);
+      convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();
   int32_t byteOffset =
-      convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+      convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();
   int32_t element_size =
-      convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+      convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();

   if (args.Length() == 0) {
-    return ThrowException(
-        String::New("'subarray' must have at least one argument"));
+    return Throw("'subarray' must have at least one argument");
   }
   int32_t begin = convertToInt(args[0], &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -633,35 +685,33 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
   TryCatch try_catch;

   if (!args.This()->IsObject()) {
-    return ThrowException(
-        String::New("'set' invoked on non-object receiver"));
+    return Throw("'set' invoked on non-object receiver");
   }

+  Isolate* isolate = args.GetIsolate();
   Local<Object> self = args.This();
-  Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+  Local<Value> marker =
+      self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
   if (marker.IsEmpty()) {
-    return ThrowException(
-        String::New("'set' invoked on wrong receiver type"));
+    return Throw("'set' invoked on wrong receiver type");
   }
   int32_t length =
-      convertToUint(self->Get(String::New("length")), &try_catch);
+      convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();
   int32_t element_size =
-      convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+      convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();

   if (args.Length() == 0) {
-    return ThrowException(
-        String::New("'set' must have at least one argument"));
+    return Throw("'set' must have at least one argument");
   }
   if (!args[0]->IsObject() ||
-      !args[0]->ToObject()->Has(String::New("length"))) {
-    return ThrowException(
-        String::New("'set' invoked with non-array argument"));
+      !args[0]->ToObject()->Has(Symbols::length(isolate))) {
+    return Throw("'set' invoked with non-array argument");
   }
   Handle<Object> source = args[0]->ToObject();
   int32_t source_length =
-      convertToUint(source->Get(String::New("length")), &try_catch);
+      convertToUint(source->Get(Symbols::length(isolate)), &try_catch);
   if (try_catch.HasCaught()) return try_catch.ReThrow();

   int32_t offset;
@@ -672,31 +722,32 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
     if (try_catch.HasCaught()) return try_catch.ReThrow();
   }
   if (offset + source_length > length) {
-    return ThrowException(String::New("offset or source length out of bounds"));
+    return Throw("offset or source length out of bounds");
   }

   int32_t source_element_size;
-  if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
+  if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) {
     source_element_size = 0;
   } else {
     source_element_size =
-        convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+        convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)),
+                      &try_catch);
     if (try_catch.HasCaught()) return try_catch.ReThrow();
   }

   if (element_size == source_element_size &&
       self->GetConstructor()->StrictEquals(source->GetConstructor())) {
     // Use memmove on the array buffers.
-    Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+    Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
     if (try_catch.HasCaught()) return try_catch.ReThrow();
     Handle<Object> source_buffer =
-        source->Get(String::New("buffer"))->ToObject();
+        source->Get(Symbols::buffer(isolate))->ToObject();
     if (try_catch.HasCaught()) return try_catch.ReThrow();

     int32_t byteOffset =
-        convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+        convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
     if (try_catch.HasCaught()) return try_catch.ReThrow();
     int32_t source_byteOffset =
-        convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+        convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
     if (try_catch.HasCaught()) return try_catch.ReThrow();

     uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
@ -712,10 +763,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
} }
} else { } else {
// Need to copy element-wise to make the right conversions. // Need to copy element-wise to make the right conversions.
Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject(); Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow(); if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer = Handle<Object> source_buffer =
source->Get(String::New("buffer"))->ToObject(); source->Get(Symbols::buffer(isolate))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow(); if (try_catch.HasCaught()) return try_catch.ReThrow();
if (buffer->StrictEquals(source_buffer)) { if (buffer->StrictEquals(source_buffer)) {
@ -723,10 +774,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
// This gets a bit tricky in the case of different element sizes // This gets a bit tricky in the case of different element sizes
// (which, of course, is extremely unlikely to ever occur in practice). // (which, of course, is extremely unlikely to ever occur in practice).
int32_t byteOffset = int32_t byteOffset =
convertToUint(self->Get(String::New("byteOffset")), &try_catch); convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow(); if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset = int32_t source_byteOffset =
convertToUint(source->Get(String::New("byteOffset")), &try_catch); convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow(); if (try_catch.HasCaught()) return try_catch.ReThrow();
// Copy as much as we can from left to right. // Copy as much as we can from left to right.
@ -772,8 +823,9 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) { void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope; HandleScope scope;
Isolate* isolate = Isolate::GetCurrent();
int32_t length = int32_t length =
object->ToObject()->Get(String::New("byteLength"))->Uint32Value(); object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value();
V8::AdjustAmountOfExternalAllocatedMemory(-length); V8::AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data); delete[] static_cast<uint8_t*>(data);
object.Dispose(); object.Dispose();
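The Symbols::length(isolate)-style accessors introduced above replace a fresh String::New allocation on every property lookup with per-isolate cached handles. A hedged sketch of the pattern (the real d8 Symbols class differs in detail; Persistent<T>::New/Dispose is the handle API of this V8 version):

    // Sketch, not the actual d8 class: cache hot property names once
    // per isolate so lookups in ArraySet/ArraySubArray avoid allocation.
    class Symbols {
     public:
      explicit Symbols(Isolate* isolate) {
        HandleScope scope;
        length_ = Persistent<String>::New(String::New("length"));
      }
      ~Symbols() { length_.Dispose(); }
      // Returns the cached handle instead of building a new string.
      Handle<String> length() { return length_; }
     private:
      Persistent<String> length_;
    };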
@@ -1139,7 +1191,7 @@ Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
 }
-Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
+Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
   Handle<ObjectTemplate> global_template = ObjectTemplate::New();
   global_template->Set(String::New("print"), FunctionTemplate::New(Print));
   global_template->Set(String::New("write"), FunctionTemplate::New(Write));
@@ -1159,7 +1211,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
   // Bind the handlers for external arrays.
   PropertyAttribute attr =
       static_cast<PropertyAttribute>(ReadOnly | DontDelete);
-  global_template->Set(String::New("ArrayBuffer"),
+  global_template->Set(Symbols::ArrayBuffer(isolate),
                        CreateArrayBufferTemplate(ArrayBuffer), attr);
   global_template->Set(String::New("Int8Array"),
                        CreateArrayTemplate(Int8Array), attr);
@@ -1196,7 +1248,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
 }
-void Shell::Initialize() {
+void Shell::Initialize(Isolate* isolate) {
 #ifdef COMPRESS_STARTUP_DATA_BZ2
   BZip2Decompressor startup_data_decompressor;
   int bz2_result = startup_data_decompressor.Decompress();
@ -1217,12 +1269,15 @@ void Shell::Initialize() {
V8::SetAddHistogramSampleFunction(AddHistogramSample); V8::SetAddHistogramSampleFunction(AddHistogramSample);
} }
#endif // V8_SHARED #endif // V8_SHARED
if (options.test_shell) return; }
void Shell::InitializeDebugger(Isolate* isolate) {
if (options.test_shell) return;
#ifndef V8_SHARED #ifndef V8_SHARED
Locker lock; Locker lock;
HandleScope scope; HandleScope scope;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(); Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
utility_context_ = Context::New(NULL, global_template); utility_context_ = Context::New(NULL, global_template);
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
@ -1236,13 +1291,13 @@ void Shell::Initialize() {
} }
Persistent<Context> Shell::CreateEvaluationContext() { Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED #ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe // This needs to be a critical section since this is not thread-safe
i::ScopedLock lock(context_mutex_); i::ScopedLock lock(context_mutex_);
#endif // V8_SHARED #endif // V8_SHARED
// Initialize the global objects // Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(); Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Persistent<Context> context = Context::New(NULL, global_template); Persistent<Context> context = Context::New(NULL, global_template);
ASSERT(!context.IsEmpty()); ASSERT(!context.IsEmpty());
Context::Scope scope(context); Context::Scope scope(context);
@@ -1288,7 +1343,6 @@ int CompareKeys(const void* a, const void* b) {
 void Shell::OnExit() {
-  if (console != NULL) console->Close();
   if (i::FLAG_dump_counters) {
     int number_of_counters = 0;
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1348,9 +1402,9 @@ static FILE* FOpen(const char* path, const char* mode) {
 }
-static char* ReadChars(const char* name, int* size_out) {
+static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
   // Release the V8 lock while reading files.
-  v8::Unlocker unlocker(Isolate::GetCurrent());
+  v8::Unlocker unlocker(isolate);
   FILE* file = FOpen(name, "rb");
   if (file == NULL) return NULL;
@@ -1375,15 +1429,17 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
   String::Utf8Value filename(args[0]);
   int length;
   if (*filename == NULL) {
-    return ThrowException(String::New("Error loading file"));
+    return Throw("Error loading file");
   }
-  uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
+  uint8_t* data = reinterpret_cast<uint8_t*>(
+      ReadChars(args.GetIsolate(), *filename, &length));
   if (data == NULL) {
-    return ThrowException(String::New("Error reading file"));
+    return Throw("Error reading file");
   }
+  Isolate* isolate = args.GetIsolate();
   Handle<Object> buffer = Object::New();
-  buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
+  buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
   Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
   persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
   persistent_buffer.MarkIndependent();
@@ -1391,7 +1447,7 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
   buffer->SetIndexedPropertiesToExternalArrayData(
       data, kExternalUnsignedByteArray, length);
-  buffer->Set(String::New("byteLength"),
+  buffer->Set(Symbols::byteLength(isolate),
               Int32::New(static_cast<int32_t>(length)), ReadOnly);
   return buffer;
 }
@@ -1421,9 +1477,9 @@ static char* ReadWord(char* data) {
 // Reads a file into a v8 string.
-Handle<String> Shell::ReadFile(const char* name) {
+Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
   int size = 0;
-  char* chars = ReadChars(name, &size);
+  char* chars = ReadChars(isolate, name, &size);
   if (chars == NULL) return Handle<String>();
   Handle<String> result = String::New(chars);
   delete[] chars;
@@ -1431,12 +1487,13 @@ Handle<String> Shell::ReadFile(const char* name) {
 }
-void Shell::RunShell() {
+void Shell::RunShell(Isolate* isolate) {
   Locker locker;
   Context::Scope context_scope(evaluation_context_);
   HandleScope outer_scope;
   Handle<String> name = String::New("(d8)");
-  console = LineEditor::Get();
+  DumbLineEditor dumb_line_editor(isolate);
+  LineEditor* console = LineEditor::Get();
   printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
   console->Open();
   while (true) {
@@ -1446,6 +1503,7 @@ void Shell::RunShell() {
     ExecuteString(input, name, true, true);
   }
   printf("\n");
+  console->Close();
 }
@@ -1453,9 +1511,9 @@ void Shell::RunShell() {
 class ShellThread : public i::Thread {
  public:
   // Takes ownership of the underlying char array of |files|.
-  ShellThread(int no, char* files)
+  ShellThread(Isolate* isolate, char* files)
       : Thread("d8:ShellThread"),
-        no_(no), files_(files) { }
+        isolate_(isolate), files_(files) { }
   ~ShellThread() {
     delete[] files_;
@@ -1463,7 +1521,7 @@ class ShellThread : public i::Thread {
   virtual void Run();
  private:
-  int no_;
+  Isolate* isolate_;
   char* files_;
 };
@@ -1483,7 +1541,8 @@ void ShellThread::Run() {
   // Prepare the context for this thread.
   Locker locker;
   HandleScope outer_scope;
-  Persistent<Context> thread_context = Shell::CreateEvaluationContext();
+  Persistent<Context> thread_context =
+      Shell::CreateEvaluationContext(isolate_);
   Context::Scope context_scope(thread_context);
   while ((ptr != NULL) && (*ptr != '\0')) {
@@ -1496,7 +1555,7 @@ void ShellThread::Run() {
       continue;
     }
-    Handle<String> str = Shell::ReadFile(filename);
+    Handle<String> str = Shell::ReadFile(isolate_, filename);
     if (str.IsEmpty()) {
       printf("File '%s' not found\n", filename);
       Shell::Exit(1);
@@ -1524,7 +1583,7 @@ SourceGroup::~SourceGroup() {
 }
-void SourceGroup::Execute() {
+void SourceGroup::Execute(Isolate* isolate) {
   for (int i = begin_offset_; i < end_offset_; ++i) {
     const char* arg = argv_[i];
     if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@@ -1542,7 +1601,7 @@ void SourceGroup::Execute() {
       // Use all other arguments as names of files to load and run.
       HandleScope handle_scope;
       Handle<String> file_name = String::New(arg);
-      Handle<String> source = ReadFile(arg);
+      Handle<String> source = ReadFile(isolate, arg);
       if (source.IsEmpty()) {
         printf("Error reading '%s'\n", arg);
         Shell::Exit(1);
@@ -1555,9 +1614,9 @@ void SourceGroup::Execute() {
 }
-Handle<String> SourceGroup::ReadFile(const char* name) {
+Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
   int size;
-  char* chars = ReadChars(name, &size);
+  char* chars = ReadChars(isolate, name, &size);
   if (chars == NULL) return Handle<String>();
   Handle<String> result = String::New(chars, size);
   delete[] chars;
@@ -1583,10 +1642,11 @@ void SourceGroup::ExecuteInThread() {
     Isolate::Scope iscope(isolate);
     Locker lock(isolate);
     HandleScope scope;
-    Persistent<Context> context = Shell::CreateEvaluationContext();
+    Symbols symbols(isolate);
+    Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
     {
       Context::Scope cscope(context);
-      Execute();
+      Execute(isolate);
     }
     context.Dispose();
     if (Shell::options.send_idle_notification) {
@@ -1754,21 +1814,21 @@ bool Shell::SetOptions(int argc, char* argv[]) {
 }
-int Shell::RunMain(int argc, char* argv[]) {
+int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
 #ifndef V8_SHARED
   i::List<i::Thread*> threads(1);
   if (options.parallel_files != NULL) {
     for (int i = 0; i < options.num_parallel_files; i++) {
       char* files = NULL;
-      { Locker lock(Isolate::GetCurrent());
+      { Locker lock(isolate);
         int size = 0;
-        files = ReadChars(options.parallel_files[i], &size);
+        files = ReadChars(isolate, options.parallel_files[i], &size);
       }
       if (files == NULL) {
         printf("File list '%s' not found\n", options.parallel_files[i]);
         Exit(1);
       }
-      ShellThread* thread = new ShellThread(threads.length(), files);
+      ShellThread* thread = new ShellThread(isolate, files);
       thread->Start();
       threads.Add(thread);
     }
@@ -1780,7 +1840,7 @@ int Shell::RunMain(int argc, char* argv[]) {
   {  // NOLINT
     Locker lock;
     HandleScope scope;
-    Persistent<Context> context = CreateEvaluationContext();
+    Persistent<Context> context = CreateEvaluationContext(isolate);
     if (options.last_run) {
       // Keep using the same context in the interactive shell.
       evaluation_context_ = context;
@@ -1794,7 +1854,7 @@ int Shell::RunMain(int argc, char* argv[]) {
     }
     {
       Context::Scope cscope(context);
-      options.isolate_sources[0].Execute();
+      options.isolate_sources[0].Execute(isolate);
     }
     if (!options.last_run) {
       context.Dispose();
@@ -1836,59 +1896,62 @@ int Shell::RunMain(int argc, char* argv[]) {
 int Shell::Main(int argc, char* argv[]) {
   if (!SetOptions(argc, argv)) return 1;
-  Initialize();
   int result = 0;
-  if (options.stress_opt || options.stress_deopt) {
-    Testing::SetStressRunType(
-        options.stress_opt ? Testing::kStressTypeOpt
-                           : Testing::kStressTypeDeopt);
-    int stress_runs = Testing::GetStressRuns();
-    for (int i = 0; i < stress_runs && result == 0; i++) {
-      printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
-      Testing::PrepareStressRun(i);
-      options.last_run = (i == stress_runs - 1);
-      result = RunMain(argc, argv);
-    }
-    printf("======== Full Deoptimization =======\n");
-    Testing::DeoptimizeAll();
+  Isolate* isolate = Isolate::GetCurrent();
+  {
+    Initialize(isolate);
+    Symbols symbols(isolate);
+    InitializeDebugger(isolate);
+
+    if (options.stress_opt || options.stress_deopt) {
+      Testing::SetStressRunType(options.stress_opt
+                                ? Testing::kStressTypeOpt
+                                : Testing::kStressTypeDeopt);
+      int stress_runs = Testing::GetStressRuns();
+      for (int i = 0; i < stress_runs && result == 0; i++) {
+        printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+        Testing::PrepareStressRun(i);
+        options.last_run = (i == stress_runs - 1);
+        result = RunMain(isolate, argc, argv);
+      }
+      printf("======== Full Deoptimization =======\n");
+      Testing::DeoptimizeAll();
 #if !defined(V8_SHARED)
     } else if (i::FLAG_stress_runs > 0) {
       int stress_runs = i::FLAG_stress_runs;
       for (int i = 0; i < stress_runs && result == 0; i++) {
         printf("============ Run %d/%d ============\n", i + 1, stress_runs);
         options.last_run = (i == stress_runs - 1);
-        result = RunMain(argc, argv);
+        result = RunMain(isolate, argc, argv);
       }
 #endif
     } else {
-      result = RunMain(argc, argv);
+      result = RunMain(isolate, argc, argv);
     }
 #if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
     // Run remote debugger if requested, but never on --test
     if (i::FLAG_remote_debugger && !options.test_shell) {
       InstallUtilityScript();
       RunRemoteDebugger(i::FLAG_debugger_port);
       return 0;
     }
 #endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
     // Run interactive shell if explicitly requested or if no script has been
     // executed, but never on --test
-    if (( options.interactive_shell
-        || !options.script_executed )
-        && !options.test_shell ) {
+    if (( options.interactive_shell || !options.script_executed )
+        && !options.test_shell ) {
 #if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
       if (!i::FLAG_debugger) {
         InstallUtilityScript();
       }
 #endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
-      RunShell();
+      RunShell(isolate);
+    }
   }
   V8::Dispose();
 #ifndef V8_SHARED
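The common thread across the d8.cc hunks above: helpers that used to call Isolate::GetCurrent() on every invocation now receive the Isolate* explicitly from Main(). A schematic of the resulting call shape, with illustrative names rather than the real d8 functions:

    // Sketch under assumed names: the isolate flows down as a parameter
    // instead of being fetched from thread-local state in each callee.
    static void Helper(v8::Isolate* isolate) {
      v8::Unlocker unlocker(isolate);  // no Isolate::GetCurrent() lookup
      // ... do work that does not need the V8 lock ...
    }

    int Run(v8::Isolate* isolate, int argc, char* argv[]) {
      Helper(isolate);  // every callee gets the same explicit isolate
      return 0;
    }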

5
deps/v8/src/d8.gyp

@@ -61,7 +61,8 @@
             'libraries': [ '-lreadline', ],
             'sources': [ 'd8-readline.cc' ],
           }],
-          [ 'OS!="win"', {
+          ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
+             or OS=="openbsd" or OS=="solaris" or OS=="android")', {
             'sources': [ 'd8-posix.cc', ]
           }],
           [ 'OS=="win"', {
@@ -98,7 +99,7 @@
             '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
           ],
           'action': [
-            '<(python)',
+            'python',
            '../tools/js2c.py',
            '<@(_outputs)',
            'D8',

28
deps/v8/src/d8.h

@@ -158,7 +158,7 @@ class SourceGroup {
   void End(int offset) { end_offset_ = offset; }
-  void Execute();
+  void Execute(Isolate* isolate);
 #ifndef V8_SHARED
   void StartExecuteInThread();
@@ -187,7 +187,7 @@ class SourceGroup {
 #endif  // V8_SHARED
   void ExitShell(int exit_code);
-  Handle<String> ReadFile(const char* name);
+  Handle<String> ReadFile(Isolate* isolate, const char* name);
   const char** argv_;
   int begin_offset_;
@@ -272,9 +272,9 @@ class Shell : public i::AllStatic {
                             bool report_exceptions);
   static const char* ToCString(const v8::String::Utf8Value& value);
   static void ReportException(TryCatch* try_catch);
-  static Handle<String> ReadFile(const char* name);
-  static Persistent<Context> CreateEvaluationContext();
-  static int RunMain(int argc, char* argv[]);
+  static Handle<String> ReadFile(Isolate* isolate, const char* name);
+  static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
+  static int RunMain(Isolate* isolate, int argc, char* argv[]);
   static int Main(int argc, char* argv[]);
   static void Exit(int exit_code);
@@ -310,9 +310,9 @@ class Shell : public i::AllStatic {
   static Handle<Value> DisableProfiler(const Arguments& args);
   static Handle<Value> Read(const Arguments& args);
   static Handle<Value> ReadBuffer(const Arguments& args);
-  static Handle<String> ReadFromStdin();
+  static Handle<String> ReadFromStdin(Isolate* isolate);
   static Handle<Value> ReadLine(const Arguments& args) {
-    return ReadFromStdin();
+    return ReadFromStdin(args.GetIsolate());
   }
   static Handle<Value> Load(const Arguments& args);
   static Handle<Value> ArrayBuffer(const Arguments& args);
@@ -365,7 +365,6 @@ class Shell : public i::AllStatic {
   static void AddOSMethods(Handle<ObjectTemplate> os_template);
-  static LineEditor* console;
   static const char* kPrompt;
   static ShellOptions options;
@@ -384,15 +383,18 @@ class Shell : public i::AllStatic {
   static Counter* GetCounter(const char* name, bool is_histogram);
   static void InstallUtilityScript();
 #endif  // V8_SHARED
-  static void Initialize();
-  static void RunShell();
+  static void Initialize(Isolate* isolate);
+  static void InitializeDebugger(Isolate* isolate);
+  static void RunShell(Isolate* isolate);
   static bool SetOptions(int argc, char* argv[]);
-  static Handle<ObjectTemplate> CreateGlobalTemplate();
+  static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
   static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
   static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
-  static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
+  static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
+                                                 Handle<Object> buffer,
                                                  int32_t size);
-  static Handle<Object> CreateExternalArray(Handle<Object> array,
+  static Handle<Object> CreateExternalArray(Isolate* isolate,
+                                            Handle<Object> array,
                                             Handle<Object> buffer,
                                             ExternalArrayType type,
                                             int32_t length,

2
deps/v8/src/date.js

@@ -107,7 +107,7 @@ function MakeDay(year, month, date) {
   }
   // Now we rely on year and month being SMIs.
-  return %DateMakeDay(year, month) + date - 1;
+  return %DateMakeDay(year | 0, month | 0) + date - 1;
 }

3
deps/v8/src/dateparser-inl.h

@@ -62,7 +62,8 @@ bool DateParser::Parse(Vector<Char> str,
 //     sss is in the range 000..999,
 //     hh is in the range 00..23,
 //     mm, ss, and sss default to 00 if missing, and
-//     timezone defaults to Z if missing.
+//     timezone defaults to Z if missing
+//       (following Safari, ISO actually demands local time).
 //  Extensions:
 //   We also allow sss to have more or less than three digits (but at
 //   least one).

121
deps/v8/src/debug-debugger.js

@@ -1306,9 +1306,12 @@ ProtocolMessage.prototype.setOption = function(name, value) {
 };
-ProtocolMessage.prototype.failed = function(message) {
+ProtocolMessage.prototype.failed = function(message, opt_details) {
   this.success = false;
   this.message = message;
+  if (IS_OBJECT(opt_details)) {
+    this.error_details = opt_details;
+  }
 };
@@ -1355,6 +1358,9 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
     if (this.message) {
       json.message = this.message;
     }
+    if (this.error_details) {
+      json.error_details = this.error_details;
+    }
   }
   json.running = this.running;
   return JSON.stringify(json);
 };
@@ -1427,6 +1433,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
         this.scopesRequest_(request, response);
       } else if (request.command == 'scope') {
         this.scopeRequest_(request, response);
+      } else if (request.command == 'setVariableValue') {
+        this.setVariableValueRequest_(request, response);
       } else if (request.command == 'evaluate') {
         this.evaluateRequest_(request, response);
       } else if (lol_is_enabled && request.command == 'getobj') {
@@ -1953,11 +1961,12 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
 };
-DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
+DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
+    function(scope_description) {
   // Get the frame for which the scope or scopes are requested.
   // With no frameNumber argument use the currently selected frame.
-  if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
-    frame_index = request.arguments.frameNumber;
+  if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
+    frame_index = scope_description.frameNumber;
     if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
       throw new Error('Invalid frame number');
     }
@@ -1971,13 +1980,13 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
 // Gets scope host object from request. It is either a function
 // ('functionHandle' argument must be specified) or a stack frame
 // ('frameNumber' may be specified and the current frame is taken by default).
-DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
-    function(request) {
-  if (request.arguments && "functionHandle" in request.arguments) {
-    if (!IS_NUMBER(request.arguments.functionHandle)) {
+DebugCommandProcessor.prototype.resolveScopeHolder_ =
+    function(scope_description) {
+  if (scope_description && "functionHandle" in scope_description) {
+    if (!IS_NUMBER(scope_description.functionHandle)) {
       throw new Error('Function handle must be a number');
     }
-    var function_mirror = LookupMirror(request.arguments.functionHandle);
+    var function_mirror = LookupMirror(scope_description.functionHandle);
     if (!function_mirror) {
       throw new Error('Failed to find function object by handle');
     }
@@ -1992,14 +2001,14 @@ DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
     }
     // Get the frame for which the scopes are requested.
-    var frame = this.frameForScopeRequest_(request);
+    var frame = this.resolveFrameFromScopeDescription_(scope_description);
     return frame;
   }
 }
 DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
-  var scope_holder = this.scopeHolderForScopeRequest_(request);
+  var scope_holder = this.resolveScopeHolder_(request.arguments);
   // Fill all scopes for this frame or function.
   var total_scopes = scope_holder.scopeCount();
@@ -2018,7 +2027,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
 DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
   // Get the frame or function for which the scope is requested.
-  var scope_holder = this.scopeHolderForScopeRequest_(request);
+  var scope_holder = this.resolveScopeHolder_(request.arguments);
   // With no scope argument just return top scope.
   var scope_index = 0;
@@ -2033,6 +2042,77 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
 };
+// Reads value from protocol description. Description may be in form of type
+// (for singletons), raw value (primitive types supported in JSON),
+// string value description plus type (for primitive values) or handle id.
+// Returns raw value or throws exception.
+DebugCommandProcessor.resolveValue_ = function(value_description) {
+  if ("handle" in value_description) {
+    var value_mirror = LookupMirror(value_description.handle);
+    if (!value_mirror) {
+      throw new Error("Failed to resolve value by handle, ' #" +
+          mapping.handle + "# not found");
+    }
+    return value_mirror.value();
+  } else if ("stringDescription" in value_description) {
+    if (value_description.type == BOOLEAN_TYPE) {
+      return Boolean(value_description.stringDescription);
+    } else if (value_description.type == NUMBER_TYPE) {
+      return Number(value_description.stringDescription);
+    } if (value_description.type == STRING_TYPE) {
+      return String(value_description.stringDescription);
+    } else {
+      throw new Error("Unknown type");
+    }
+  } else if ("value" in value_description) {
+    return value_description.value;
+  } else if (value_description.type == UNDEFINED_TYPE) {
+    return void 0;
+  } else if (value_description.type == NULL_TYPE) {
+    return null;
+  } else {
+    throw new Error("Failed to parse value description");
+  }
+};
+
+
+DebugCommandProcessor.prototype.setVariableValueRequest_ =
+    function(request, response) {
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  if (IS_UNDEFINED(request.arguments.name)) {
+    response.failed('Missing variable name');
+  }
+  var variable_name = request.arguments.name;
+
+  var scope_description = request.arguments.scope;
+
+  // Get the frame or function for which the scope is requested.
+  var scope_holder = this.resolveScopeHolder_(scope_description);
+
+  if (IS_UNDEFINED(scope_description.number)) {
+    response.failed('Missing scope number');
+  }
+  var scope_index = %ToNumber(scope_description.number);
+
+  var scope = scope_holder.scope(scope_index);
+
+  var new_value =
+      DebugCommandProcessor.resolveValue_(request.arguments.newValue);
+
+  scope.setVariableValue(variable_name, new_value);
+
+  var new_value_mirror = MakeMirror(new_value);
+
+  response.body = {
+    newValue: new_value_mirror
+  };
+};
+
+
 DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
   if (!request.arguments) {
     return response.failed('Missing arguments');
@@ -2387,8 +2467,17 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
   var new_source = request.arguments.new_source;
-  var result_description = Debug.LiveEdit.SetScriptSource(the_script,
-      new_source, preview_only, change_log);
+  var result_description;
+  try {
+    result_description = Debug.LiveEdit.SetScriptSource(the_script,
+        new_source, preview_only, change_log);
+  } catch (e) {
+    if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
+      response.failed(e.message, e.details);
+      return;
+    }
+    throw e;
+  }
   response.body = {change_log: change_log, result: result_description};
   if (!preview_only && !this.running_ && result_description.stack_modified) {
@@ -2663,3 +2752,7 @@ function ValueToProtocolValue_(value, mirror_serializer) {
   }
   return json;
 }
+
+Debug.TestApi = {
+  CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
+};

21
deps/v8/src/debug.cc

@@ -261,8 +261,12 @@ void BreakLocationIterator::Reset() {
   // Create relocation iterators for the two code objects.
   if (reloc_iterator_ != NULL) delete reloc_iterator_;
   if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
-  reloc_iterator_ = new RelocIterator(debug_info_->code());
-  reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
+  reloc_iterator_ = new RelocIterator(
+      debug_info_->code(),
+      ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
+  reloc_iterator_original_ = new RelocIterator(
+      debug_info_->original_code(),
+      ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
   // Position at the first break point.
   break_point_ = -1;
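The new second argument is a relocation-mode filter. Mode masks are one-hot bit masks (RelocInfo::ModeMask(mode) is essentially 1 << mode), so passing the complement makes the iterator visit every relocation entry except code-age sequences. Schematically, using the v8-internal API as it appears in this tree:

    // Assumed usage sketch: the mask excludes exactly one mode.
    int mask = ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE);
    for (RelocIterator it(code, mask); !it.done(); it.next()) {
      // Never yields CODE_AGE_SEQUENCE entries, so break-location
      // bookkeeping cannot land on code-aging patch sites.
    }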
@@ -782,9 +786,11 @@ bool Debug::CompileDebuggerScript(int index) {
       "error_loading_debugger", &computed_location,
       Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
   ASSERT(!isolate->has_pending_exception());
-  isolate->set_pending_exception(*exception);
-  MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
-  isolate->clear_pending_exception();
+  if (!exception.is_null()) {
+    isolate->set_pending_exception(*exception);
+    MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+    isolate->clear_pending_exception();
+  }
   return false;
 }
@@ -2285,7 +2291,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
   // Find the call address in the running code. This address holds the call to
   // either a DebugBreakXXX or to the debug break return entry code if the
   // break point is still active after processing the break point.
-  Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+  Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
   // Check if the location is at JS exit or debug break slot.
   bool at_js_return = false;
@@ -2374,7 +2380,7 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
 #endif
   // Find the call address in the running code.
-  Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+  Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
   // Check if the location is at JS return.
   RelocIterator it(debug_info->code());
@@ -2826,6 +2832,7 @@ void Debugger::OnScriptCollected(int id) {
   HandleScope scope(isolate_);
   // No more to do if not debugging.
+  if (isolate_->debug()->InDebugger()) return;
   if (!IsDebuggerActive()) return;
   if (!Debugger::EventActive(v8::ScriptCollected)) return;

1
deps/v8/src/debug.h

@@ -793,7 +793,6 @@ class Debugger {
   };
   void OnAfterCompile(Handle<Script> script,
                       AfterCompileFlags after_compile_flags);
-  void OnNewFunction(Handle<JSFunction> fun);
   void OnScriptCollected(int id);
   void ProcessDebugEvent(v8::DebugEvent event,
                          Handle<JSObject> event_data,

263
deps/v8/src/deoptimizer.cc

@@ -27,6 +27,7 @@
 #include "v8.h"
+#include "accessors.h"
 #include "codegen.h"
 #include "deoptimizer.h"
 #include "disasm.h"
@@ -40,8 +41,11 @@ namespace v8 {
 namespace internal {
 DeoptimizerData::DeoptimizerData() {
-  eager_deoptimization_entry_code_ = NULL;
-  lazy_deoptimization_entry_code_ = NULL;
+  eager_deoptimization_entry_code_entries_ = -1;
+  lazy_deoptimization_entry_code_entries_ = -1;
+  size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
+  eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -51,16 +55,18 @@ DeoptimizerData::DeoptimizerData() {
 DeoptimizerData::~DeoptimizerData() {
-  if (eager_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        eager_deoptimization_entry_code_);
-    eager_deoptimization_entry_code_ = NULL;
-  }
-  if (lazy_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        lazy_deoptimization_entry_code_);
-    lazy_deoptimization_entry_code_ = NULL;
+  delete eager_deoptimization_entry_code_;
+  eager_deoptimization_entry_code_ = NULL;
+  delete lazy_deoptimization_entry_code_;
+  lazy_deoptimization_entry_code_ = NULL;
+
+  DeoptimizingCodeListNode* current = deoptimizing_code_list_;
+  while (current != NULL) {
+    DeoptimizingCodeListNode* prev = current;
+    current = current->next();
+    delete prev;
   }
+  deoptimizing_code_list_ = NULL;
 }
@@ -95,6 +101,20 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
 }
+// No larger than 2K on all platforms
+static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
+
+
+size_t Deoptimizer::GetMaxDeoptTableSize() {
+  int entries_size =
+      Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
+  int commit_page_size = static_cast<int>(OS::CommitPageSize());
+  int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
+                    commit_page_size) + 1;
+  return static_cast<size_t>(commit_page_size * page_count);
+}
+
+
 Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   Deoptimizer* result = isolate->deoptimizer_data()->current_;
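GetMaxDeoptTableSize rounds the worst-case table (maximum entry count times the per-entry stub size, plus up to 2 KB of epilogue code) up to whole commit pages. A standalone re-run of that arithmetic; table_entry_size_ and the page size are architecture-dependent, so the concrete numbers below are assumptions for illustration:

    #include <cstdio>

    int main() {
      const int kMaxNumberOfEntries = 16384;
      const int kTableEntrySize = 12;         // assumed per-entry stub size
      const int kMaxEpilogueSize = 2 * 1024;  // kDeoptTableMaxEpilogueCodeSize
      const int kCommitPageSize = 4096;       // assumed OS::CommitPageSize()
      int entries_size = kMaxNumberOfEntries * kTableEntrySize;  // 196608
      int page_count =
          (kMaxEpilogueSize + entries_size - 1) / kCommitPageSize + 1;
      // (2048 + 196608 - 1) / 4096 = 48, +1 slack page = 49 pages.
      std::printf("%d bytes\n", kCommitPageSize * page_count);  // 200704
      return 0;
    }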
@@ -368,6 +388,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
       output_count_(0),
       jsframe_count_(0),
       output_(NULL),
+      deferred_arguments_objects_values_(0),
+      deferred_arguments_objects_(0),
       deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
     if (type == DEBUGGER) {
@@ -451,44 +473,45 @@ void Deoptimizer::DeleteFrameDescriptions() {
 }
-Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
+Address Deoptimizer::GetDeoptimizationEntry(int id,
+                                            BailoutType type,
+                                            GetEntryMode mode) {
   ASSERT(id >= 0);
-  if (id >= kNumberOfEntries) return NULL;
-  MemoryChunk* base = NULL;
+  if (id >= kMaxNumberOfEntries) return NULL;
+  VirtualMemory* base = NULL;
+  if (mode == ENSURE_ENTRY_CODE) {
+    EnsureCodeForDeoptimizationEntry(type, id);
+  } else {
+    ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
+  }
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
-    if (data->eager_deoptimization_entry_code_ == NULL) {
-      data->eager_deoptimization_entry_code_ = CreateCode(type);
-    }
     base = data->eager_deoptimization_entry_code_;
   } else {
-    if (data->lazy_deoptimization_entry_code_ == NULL) {
-      data->lazy_deoptimization_entry_code_ = CreateCode(type);
-    }
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->area_start()) + (id * table_entry_size_);
+      static_cast<Address>(base->address()) + (id * table_entry_size_);
 }
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  MemoryChunk* base = NULL;
+  VirtualMemory* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
+  Address base_casted = reinterpret_cast<Address>(base->address());
   if (base == NULL ||
-      addr < base->area_start() ||
-      addr >= base->area_start() +
-          (kNumberOfEntries * table_entry_size_)) {
+      addr < base->address() ||
+      addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->area_start()) % table_entry_size_);
-  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
+      static_cast<int>(addr - base_casted) % table_entry_size_);
+  return static_cast<int>(addr - base_casted) / table_entry_size_;
 }
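Because the table is a dense array of fixed-size entry stubs, entry address and bailout id convert with plain array arithmetic. The same computation stripped of the V8 types; the entry size and base address are assumptions:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kEntrySize = 12;       // assumed table_entry_size_
      uintptr_t base = 0x10000;        // assumed table base address
      int id = 7;
      uintptr_t entry = base + id * kEntrySize;   // GetDeoptimizationEntry
      assert((entry - base) % kEntrySize == 0);   // the ASSERT_EQ above
      int recovered = static_cast<int>((entry - base) / kEntrySize);
      assert(recovered == id);         // GetDeoptimizationId inverts it
      return 0;
    }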
@@ -512,7 +535,7 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
   shared->SourceCodePrint(&stream, -1);
   PrintF("[source:\n%s\n]", *stream.ToCString());
-  UNREACHABLE();
+  FATAL("unable to find pc offset during deoptimization");
   return -1;
 }
@@ -633,8 +656,21 @@ void Deoptimizer::DoComputeOutputFrames() {
 }
-void Deoptimizer::MaterializeHeapNumbers() {
+void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
   ASSERT_NE(DEBUGGER, bailout_type_);
+
+  // Handlify all argument object values before triggering any allocation.
+  List<Handle<Object> > values(deferred_arguments_objects_values_.length());
+  for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
+    values.Add(Handle<Object>(deferred_arguments_objects_values_[i]));
+  }
+
+  // Play it safe and clear all unhandlified values before we continue.
+  deferred_arguments_objects_values_.Clear();
+
+  // Materialize all heap numbers before looking at arguments because when the
+  // output frames are used to materialize arguments objects later on they need
+  // to already contain valid heap numbers.
   for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
     HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
     Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@@ -644,9 +680,55 @@ void Deoptimizer::MaterializeHeapNumbers() {
              d.value(),
              d.slot_address());
     }
     Memory::Object_at(d.slot_address()) = *num;
   }
+
+  // Materialize arguments objects one frame at a time.
+  for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
+    if (frame_index != 0) it->Advance();
+    JavaScriptFrame* frame = it->frame();
+    Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
+    Handle<JSObject> arguments;
+    for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
+      if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
+        ArgumentsObjectMaterializationDescriptor descriptor =
+            deferred_arguments_objects_.RemoveLast();
+        const int length = descriptor.arguments_length();
+        if (arguments.is_null()) {
+          if (frame->has_adapted_arguments()) {
+            // Use the arguments adapter frame we just built to materialize the
+            // arguments object. FunctionGetArguments can't throw an exception,
+            // so cast away the doubt with an assert.
+            arguments = Handle<JSObject>(JSObject::cast(
+                Accessors::FunctionGetArguments(*function,
+                                                NULL)->ToObjectUnchecked()));
+            values.RewindBy(length);
+          } else {
+            // Construct an arguments object and copy the parameters to a newly
+            // allocated arguments object backing store.
+            arguments =
+                isolate_->factory()->NewArgumentsObject(function, length);
+            Handle<FixedArray> array =
+                isolate_->factory()->NewFixedArray(length);
+            ASSERT(array->length() == length);
+            for (int i = length - 1; i >= 0 ; --i) {
+              array->set(i, *values.RemoveLast());
+            }
+            arguments->set_elements(*array);
+          }
+        }
+        frame->SetExpression(i, *arguments);
+        ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
+        if (FLAG_trace_deopt) {
+          PrintF("Materializing %sarguments object for %p: ",
+                 frame->has_adapted_arguments() ? "(adapted) " : "",
+                 reinterpret_cast<void*>(descriptor.slot_address()));
+          arguments->ShortPrint();
+          PrintF("\n");
+        }
+      }
+    }
+  }
 }
@@ -932,8 +1014,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     }
     case Translation::ARGUMENTS_OBJECT: {
-      // Use the arguments marker value as a sentinel and fill in the arguments
-      // object after the deoptimized frame is built.
+      int args_index = iterator->Next() + 1;  // Skip receiver.
+      int args_length = iterator->Next() - 1;  // Skip receiver.
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
                output_[frame_index]->GetTop() + output_offset,
@@ -941,9 +1023,20 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
         isolate_->heap()->arguments_marker()->ShortPrint();
         PrintF(" ; arguments object\n");
       }
+      // Use the arguments marker value as a sentinel and fill in the arguments
+      // object after the deoptimized frame is built.
      intptr_t value = reinterpret_cast<intptr_t>(
          isolate_->heap()->arguments_marker());
+      AddArgumentsObject(
+          output_[frame_index]->GetTop() + output_offset, args_length);
       output_[frame_index]->SetFrameSlot(output_offset, value);
+      // We save the tagged argument values on the side and materialize the
+      // actual arguments object after the deoptimized frame is built.
+      for (int i = 0; i < args_length; i++) {
+        unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i);
+        intptr_t input_value = input_->GetFrameSlot(input_offset);
+        AddArgumentsObjectValue(input_value);
+      }
       return;
     }
   }
@@ -1285,39 +1378,63 @@ Object* Deoptimizer::ComputeLiteral(int index) const {
 }
-void Deoptimizer::AddDoubleValue(intptr_t slot_address,
-                                 double value) {
+void Deoptimizer::AddArgumentsObject(intptr_t slot_address, int argc) {
+  ArgumentsObjectMaterializationDescriptor object_desc(
+      reinterpret_cast<Address>(slot_address), argc);
+  deferred_arguments_objects_.Add(object_desc);
+}
+
+
+void Deoptimizer::AddArgumentsObjectValue(intptr_t value) {
+  deferred_arguments_objects_values_.Add(reinterpret_cast<Object*>(value));
+}
+
+
+void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
   HeapNumberMaterializationDescriptor value_desc(
       reinterpret_cast<Address>(slot_address), value);
   deferred_heap_numbers_.Add(value_desc);
 }
-MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
+void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
+                                                   int max_entry_id) {
   // We cannot run this if the serializer is enabled because this will
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
   // isn't meant to be serialized at all.
   ASSERT(!Serializer::enabled());
+  ASSERT(type == EAGER || type == LAZY);
+  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+  int entry_count = (type == EAGER)
+      ? data->eager_deoptimization_entry_code_entries_
+      : data->lazy_deoptimization_entry_code_entries_;
+  if (max_entry_id < entry_count) return;
+  entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries),
+                    Deoptimizer::kMaxNumberOfEntries);
   MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
   masm.set_emit_debug_code(false);
-  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
+  GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
-  MemoryChunk* chunk =
-      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
-                                                            EXECUTABLE,
-                                                            NULL);
-  ASSERT(chunk->area_size() >= desc.instr_size);
-  if (chunk == NULL) {
-    V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+  VirtualMemory* memory = type == EAGER
+      ? data->eager_deoptimization_entry_code_
+      : data->lazy_deoptimization_entry_code_;
+  size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
+  ASSERT(static_cast<int>(table_size) >= desc.instr_size);
+  memory->Commit(memory->address(), table_size, true);
+  memcpy(memory->address(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(memory->address(), desc.instr_size);
+
+  if (type == EAGER) {
+    data->eager_deoptimization_entry_code_entries_ = entry_count;
+  } else {
+    data->lazy_deoptimization_entry_code_entries_ = entry_count;
   }
-  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->area_start(), desc.instr_size);
-  return chunk;
 }
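The regeneration guard above implements a doubling growth policy clamped to [kMinNumberOfEntries, kMaxNumberOfEntries], starting from the -1 sentinel set in DeoptimizerData. The policy in isolation, runnable as plain C++:

    #include <algorithm>
    #include <cstdio>

    // One growth step, mirroring EnsureCodeForDeoptimizationEntry.
    int Grow(int entry_count) {
      const int kMin = 64, kMax = 16384;
      return std::min(std::max(entry_count * 2, kMin), kMax);
    }

    int main() {
      int count = -1;  // no table generated yet
      for (int i = 0; i < 5; ++i) {
        count = Grow(count);
        std::printf("entries: %d\n", count);  // 64, 128, 256, 512, 1024
      }
      return 0;
    }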
@@ -1359,6 +1476,54 @@ void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
 }
+static Object* CutOutRelatedFunctionsList(Context* context,
+                                          Code* code,
+                                          Object* undefined) {
+  Object* result_list_head = undefined;
+  Object* head;
+  Object* current;
+  current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  JSFunction* prev = NULL;
+  while (current != undefined) {
+    JSFunction* func = JSFunction::cast(current);
+    current = func->next_function_link();
+    if (func->code() == code) {
+      func->set_next_function_link(result_list_head);
+      result_list_head = func;
+      if (prev) {
+        prev->set_next_function_link(current);
+      } else {
+        head = current;
+      }
+    } else {
+      prev = func;
+    }
+  }
+  if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) {
+    context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head);
+  }
+  return result_list_head;
+}
+
+
+void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
+                                                 Code* code) {
+  Context* context = function->context()->native_context();
+  SharedFunctionInfo* shared = function->shared();
+  Object* undefined = Isolate::Current()->heap()->undefined_value();
+  Object* current = CutOutRelatedFunctionsList(context, code, undefined);
+
+  while (current != undefined) {
+    JSFunction* func = JSFunction::cast(current);
+    current = func->next_function_link();
+    func->set_code(shared->code());
+    func->set_next_function_link(undefined);
+  }
+}
+
+
 FrameDescription::FrameDescription(uint32_t frame_size,
                                    JSFunction* function)
     : frame_size_(frame_size),
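CutOutRelatedFunctionsList is ordinary singly-linked-list surgery: unlink every node whose code matches and push it onto a result list (which reverses their order). The same operation on a plain C++ list, for reference; the pointer-to-pointer cursor is an equivalent formulation of the prev-tracking loop above:

    struct Node { int key; Node* next; };

    // Remove every node with the given key from *head and return them,
    // relinked, as a separate list.
    Node* CutOut(Node** head, int key) {
      Node* result = nullptr;
      Node** link = head;
      while (*link != nullptr) {
        Node* current = *link;
        if (current->key == key) {
          *link = current->next;    // unlink from the original list
          current->next = result;   // push onto the result list
          result = current;
        } else {
          link = &current->next;    // advance the cursor
        }
      }
      return result;
    }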
@@ -1570,8 +1735,10 @@ void Translation::StoreLiteral(int literal_id) {
 }
-void Translation::StoreArgumentsObject() {
+void Translation::StoreArgumentsObject(int args_index, int args_length) {
   buffer_->Add(ARGUMENTS_OBJECT, zone());
+  buffer_->Add(args_index, zone());
+  buffer_->Add(args_length, zone());
 }
@@ -1582,7 +1749,6 @@ void Translation::MarkDuplicate() {
 int Translation::NumberOfOperandsFor(Opcode opcode) {
   switch (opcode) {
-    case ARGUMENTS_OBJECT:
     case DUPLICATE:
       return 0;
     case GETTER_STUB_FRAME:
@@ -1600,6 +1766,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
     case BEGIN:
     case ARGUMENTS_ADAPTOR_FRAME:
     case CONSTRUCT_STUB_FRAME:
+    case ARGUMENTS_OBJECT:
       return 2;
     case JS_FRAME:
       return 3;

55
deps/v8/src/deoptimizer.h

@@ -57,6 +57,20 @@ class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
 };

+class ArgumentsObjectMaterializationDescriptor BASE_EMBEDDED {
+ public:
+  ArgumentsObjectMaterializationDescriptor(Address slot_address, int argc)
+      : slot_address_(slot_address), arguments_length_(argc) { }
+
+  Address slot_address() const { return slot_address_; }
+  int arguments_length() const { return arguments_length_; }
+
+ private:
+  Address slot_address_;
+  int arguments_length_;
+};
+
 class OptimizedFunctionVisitor BASE_EMBEDDED {
  public:
   virtual ~OptimizedFunctionVisitor() {}
@@ -86,8 +100,10 @@ class DeoptimizerData {
 #endif

  private:
-  MemoryChunk* eager_deoptimization_entry_code_;
-  MemoryChunk* lazy_deoptimization_entry_code_;
+  int eager_deoptimization_entry_code_entries_;
+  int lazy_deoptimization_entry_code_entries_;
+  VirtualMemory* eager_deoptimization_entry_code_;
+  VirtualMemory* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;

 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -152,6 +168,10 @@ class Deoptimizer : public Malloced {
   // execution returns.
   static void DeoptimizeFunction(JSFunction* function);

+  // Iterate over all the functions which share the same code object
+  // and make them use unoptimized version.
+  static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
+
   // Deoptimize all functions in the heap.
   static void DeoptimizeAll();
@@ -196,7 +216,7 @@ class Deoptimizer : public Malloced {
   ~Deoptimizer();

-  void MaterializeHeapNumbers();
+  void MaterializeHeapObjects(JavaScriptFrameIterator* it);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   void MaterializeHeapNumbersForDebuggerInspectableFrame(
       Address parameters_top,
@@ -208,7 +228,17 @@ class Deoptimizer : public Malloced {
   static void ComputeOutputFrames(Deoptimizer* deoptimizer);

-  static Address GetDeoptimizationEntry(int id, BailoutType type);
+  enum GetEntryMode {
+    CALCULATE_ENTRY_ADDRESS,
+    ENSURE_ENTRY_CODE
+  };
+
+  static Address GetDeoptimizationEntry(
+      int id,
+      BailoutType type,
+      GetEntryMode mode = ENSURE_ENTRY_CODE);
   static int GetDeoptimizationId(Address addr, BailoutType type);
   static int GetOutputInfo(DeoptimizationOutputData* data,
                            BailoutId node_id,
@@ -265,8 +295,11 @@ class Deoptimizer : public Malloced {
   int ConvertJSFrameIndexToFrameIndex(int jsframe_index);

+  static size_t GetMaxDeoptTableSize();
+
  private:
-  static const int kNumberOfEntries = 16384;
+  static const int kMinNumberOfEntries = 64;
+  static const int kMaxNumberOfEntries = 16384;

   Deoptimizer(Isolate* isolate,
               JSFunction* function,
@@ -305,9 +338,12 @@ class Deoptimizer : public Malloced {
   Object* ComputeLiteral(int index) const;

+  void AddArgumentsObject(intptr_t slot_address, int argc);
+  void AddArgumentsObjectValue(intptr_t value);
   void AddDoubleValue(intptr_t slot_address, double value);

-  static MemoryChunk* CreateCode(BailoutType type);
+  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+                                               int max_entry_id);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
@@ -340,6 +376,8 @@ class Deoptimizer : public Malloced {
   // Array of output frame descriptions.
   FrameDescription** output_;

+  List<Object*> deferred_arguments_objects_values_;
+  List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
   List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;

   static const int table_entry_size_;
@@ -499,9 +537,6 @@ class FrameDescription {
   intptr_t context_;
   StackFrame::Type type_;
   Smi* state_;
-#ifdef DEBUG
-  Code::Kind kind_;
-#endif

   // Continuation is the PC where the execution continues after
   // deoptimizing.
@@ -608,7 +643,7 @@ class Translation BASE_EMBEDDED {
   void StoreUint32StackSlot(int index);
   void StoreDoubleStackSlot(int index);
   void StoreLiteral(int literal_id);
-  void StoreArgumentsObject();
+  void StoreArgumentsObject(int args_index, int args_length);
   void MarkDuplicate();

   Zone* zone() const { return zone_; }
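The reworked header bounds the now lazily grown deopt table between kMinNumberOfEntries and kMaxNumberOfEntries, and GetDeoptimizationEntry gains a mode so callers can compute an address without forcing the code to exist. Because every entry is a fixed-size jump stub, id-to-address and address-to-id are plain stride arithmetic; a sketch with an assumed entry size (the real table_entry_size_ is defined per platform):

    #include <cassert>
    #include <cstdint>

    // Assumed constants for the sketch only: kMaxNumberOfEntries matches the
    // header above, kTableEntrySize is a hypothetical per-entry stub size.
    const int kMaxNumberOfEntries = 16384;
    const int kTableEntrySize = 12;

    // id -> address: fixed-size stubs make this a simple stride calculation.
    uintptr_t EntryAddress(uintptr_t base, int id) {
      assert(id >= 0 && id < kMaxNumberOfEntries);
      return base + static_cast<uintptr_t>(id) * kTableEntrySize;
    }

    // address -> id: the inverse mapping, in the spirit of
    // GetDeoptimizationId; returns -1 if addr is not a valid entry start.
    int EntryId(uintptr_t base, uintptr_t addr) {
      if (addr < base) return -1;
      uintptr_t offset = addr - base;
      if (offset % kTableEntrySize != 0) return -1;
      uintptr_t id = offset / kTableEntrySize;
      return id < static_cast<uintptr_t>(kMaxNumberOfEntries)
                 ? static_cast<int>(id) : -1;
    }

    int main() {
      uintptr_t base = 0x1000;
      assert(EntryAddress(base, 3) == base + 36);
      assert(EntryId(base, base + 36) == 3);
      assert(EntryId(base, base + 37) == -1);  // not on an entry boundary
      return 0;
    }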

9
deps/v8/src/elements-kind.cc

@@ -35,9 +35,14 @@ namespace v8 {
 namespace internal {

-void PrintElementsKind(FILE* out, ElementsKind kind) {
+const char* ElementsKindToString(ElementsKind kind) {
   ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
-  PrintF(out, "%s", accessor->name());
+  return accessor->name();
+}
+
+
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+  PrintF(out, "%s", ElementsKindToString(kind));
 }

8
deps/v8/src/elements-kind.h

@@ -77,6 +77,7 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
 const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
     FIRST_FAST_ELEMENTS_KIND + 1;

+const char* ElementsKindToString(ElementsKind kind);
 void PrintElementsKind(FILE* out, ElementsKind kind);

 ElementsKind GetInitialFastElementsKind();
@@ -109,6 +110,13 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
 }

+inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
+  return IsFastDoubleElementsKind(kind) ||
+      kind == EXTERNAL_DOUBLE_ELEMENTS ||
+      kind == EXTERNAL_FLOAT_ELEMENTS;
+}
+
 inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
   return kind == FAST_SMI_ELEMENTS ||
       kind == FAST_HOLEY_SMI_ELEMENTS ||

663
deps/v8/src/elements.cc

File diff suppressed because it is too large

43
deps/v8/src/elements.h

@@ -71,6 +71,39 @@ class ElementsAccessor {
       uint32_t key,
       FixedArrayBase* backing_store = NULL) = 0;

+  // Returns an element's attributes, or ABSENT if there is no such
+  // element. This method doesn't iterate up the prototype chain. The caller
+  // can optionally pass in the backing store to use for the check, which must
+  // be compatible with the ElementsKind of the ElementsAccessor. If
+  // backing_store is NULL, the holder->elements() is used as the backing store.
+  MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
+      Object* receiver,
+      JSObject* holder,
+      uint32_t key,
+      FixedArrayBase* backing_store = NULL) = 0;
+
+  // Returns an element's type, or NONEXISTENT if there is no such
+  // element. This method doesn't iterate up the prototype chain. The caller
+  // can optionally pass in the backing store to use for the check, which must
+  // be compatible with the ElementsKind of the ElementsAccessor. If
+  // backing_store is NULL, the holder->elements() is used as the backing store.
+  MUST_USE_RESULT virtual PropertyType GetType(
+      Object* receiver,
+      JSObject* holder,
+      uint32_t key,
+      FixedArrayBase* backing_store = NULL) = 0;
+
+  // Returns an element's accessors, or NULL if the element does not exist or
+  // is plain. This method doesn't iterate up the prototype chain. The caller
+  // can optionally pass in the backing store to use for the check, which must
+  // be compatible with the ElementsKind of the ElementsAccessor. If
+  // backing_store is NULL, the holder->elements() is used as the backing store.
+  MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
+      Object* receiver,
+      JSObject* holder,
+      uint32_t key,
+      FixedArrayBase* backing_store = NULL) = 0;
+
   // Modifies the length data property as specified for JSArrays and resizes the
   // underlying backing store accordingly. The method honors the semantics of
   // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
@@ -164,16 +197,6 @@ class ElementsAccessor {
   DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
 };

-void CopyObjectToObjectElements(FixedArray* from_obj,
-                                ElementsKind from_kind,
-                                uint32_t from_start,
-                                FixedArray* to_obj,
-                                ElementsKind to_kind,
-                                uint32_t to_start,
-                                int copy_size);
-
 } }  // namespace v8::internal

 #endif  // V8_ELEMENTS_H_
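All three accessors added to ElementsAccessor share one calling convention: an optional backing-store argument that defaults to NULL, meaning "use holder->elements()". A reduced sketch of that defaulting pattern, with hypothetical stand-in types rather than V8's real classes:

    #include <cstdio>

    // Hypothetical stand-ins for FixedArrayBase / JSObject.
    struct Store { int length; };
    struct Holder {
      Store own_store;
      Store* elements() { return &own_store; }
    };

    // Mirrors the convention documented above: backing_store == nullptr
    // means "query the holder's own elements".
    bool HasElement(Holder* holder, unsigned key,
                    Store* backing_store = nullptr) {
      if (backing_store == nullptr) backing_store = holder->elements();
      return key < static_cast<unsigned>(backing_store->length);
    }

    int main() {
      Holder h{{4}};
      Store other{10};
      std::printf("%d\n", HasElement(&h, 7));          // 0: out of range
      std::printf("%d\n", HasElement(&h, 7, &other));  // 1: explicit store
      return 0;
    }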

36
deps/v8/src/execution.cc

@@ -118,7 +118,7 @@ static Handle<Object> Invoke(bool is_construct,
     CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
   }

-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   value->Verify();
 #endif
@@ -211,6 +211,9 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
     Isolate* isolate = Isolate::Current();
     ASSERT(isolate->has_pending_exception());
     ASSERT(isolate->external_caught_exception());
+    if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
+      V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
+    }
     if (isolate->pending_exception() ==
         isolate->heap()->termination_exception()) {
       result = isolate->factory()->termination_exception();
@@ -427,25 +430,6 @@ void StackGuard::TerminateExecution() {
 }

-bool StackGuard::IsRuntimeProfilerTick() {
-  ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
-}
-
-void StackGuard::RequestRuntimeProfilerTick() {
-  // Ignore calls if we're not optimizing or if we can't get the lock.
-  if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
-    thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
-    if (thread_local_.postpone_interrupts_nesting_ == 0) {
-      thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
-      isolate_->heap()->SetStackLimits();
-    }
-    ExecutionAccess::Unlock(isolate_);
-  }
-}
-
 void StackGuard::RequestCodeReadyEvent() {
   ASSERT(FLAG_parallel_recompilation);
   if (ExecutionAccess::TryLock(isolate_)) {
@@ -937,18 +921,14 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
     }
     stack_guard->Continue(CODE_READY);
   }
-  if (!stack_guard->IsTerminateExecution()) {
+  if (!stack_guard->IsTerminateExecution() &&
+      !FLAG_manual_parallel_recompilation) {
     isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
   }

   isolate->counters()->stack_interrupts()->Increment();
-  // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
-  if (FLAG_count_based_interrupts ||
-      stack_guard->IsRuntimeProfilerTick()) {
-    isolate->counters()->runtime_profiler_ticks()->Increment();
-    stack_guard->Continue(RUNTIME_PROFILER_TICK);
-    isolate->runtime_profiler()->OptimizeNow();
-  }
+  isolate->counters()->runtime_profiler_ticks()->Increment();
+  isolate->runtime_profiler()->OptimizeNow();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
     DebugBreakHelper();

7
deps/v8/src/execution.h

@@ -41,9 +41,8 @@ enum InterruptFlag {
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
   TERMINATE = 1 << 4,
-  RUNTIME_PROFILER_TICK = 1 << 5,
-  GC_REQUEST = 1 << 6,
-  CODE_READY = 1 << 7
+  GC_REQUEST = 1 << 5,
+  CODE_READY = 1 << 6
 };
@@ -194,8 +193,6 @@ class StackGuard {
   void Interrupt();
   bool IsTerminateExecution();
   void TerminateExecution();
-  bool IsRuntimeProfilerTick();
-  void RequestRuntimeProfilerTick();
   bool IsCodeReadyEvent();
   void RequestCodeReadyEvent();
 #ifdef ENABLE_DEBUGGER_SUPPORT
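With RUNTIME_PROFILER_TICK gone (every stack-guard interrupt is now also a profiler tick), the remaining InterruptFlag values pack down by one bit. They stay single-bit so several pending requests can coexist in one word and are tested and cleared with ordinary bit arithmetic; a compilable sketch using the new layout (the first two enumerators are assumed from the surrounding enum, which this hunk does not show):

    #include <cassert>

    // One bit per pending interrupt request. INTERRUPT and DEBUGBREAK are
    // assumed values; the rest are exactly the layout after this change.
    enum InterruptFlag {
      INTERRUPT = 1 << 0,
      DEBUGBREAK = 1 << 1,
      DEBUGCOMMAND = 1 << 2,
      PREEMPT = 1 << 3,
      TERMINATE = 1 << 4,
      GC_REQUEST = 1 << 5,
      CODE_READY = 1 << 6
    };

    int main() {
      int flags = 0;
      flags |= GC_REQUEST | CODE_READY;   // two requests pending at once
      assert((flags & GC_REQUEST) != 0);  // test, as the Is...() helpers do
      flags &= ~GC_REQUEST;               // clear, as Continue() does
      assert((flags & GC_REQUEST) == 0 && (flags & CODE_READY) != 0);
      return 0;
    }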

5
deps/v8/src/extensions/externalize-string-extension.cc

@@ -93,7 +93,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
     return v8::ThrowException(v8::String::New(
         "externalizeString() can't externalize twice."));
   }
-  if (string->IsAsciiRepresentation() && !force_two_byte) {
+  if (string->IsOneByteRepresentation() && !force_two_byte) {
     char* data = new char[string->length()];
     String::WriteToFlat(*string, data, 0, string->length());
     SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
@@ -127,7 +127,8 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
     return v8::ThrowException(v8::String::New(
         "isAsciiString() requires a single string argument."));
   }
-  return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
+  return
+      Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
       v8::True() : v8::False();
 }

6
deps/v8/src/extensions/gc-extension.cc

@@ -40,7 +40,11 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
-  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+  if (args[0]->BooleanValue()) {
+    HEAP->CollectGarbage(NEW_SPACE, "gc extension");
+  } else {
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+  }
   return v8::Undefined();
 }
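In effect the gc extension now inspects its first argument: calling gc(true) from script requests only a new-space scavenge, while gc() with no argument (or a falsy one) keeps the previous full-heap collection.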

54
deps/v8/src/factory.cc

@@ -112,10 +112,11 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
 }

-Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
+Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
+                                                    int slack) {
   ASSERT(0 <= number_of_descriptors);
   CALL_HEAP_FUNCTION(isolate(),
-                     DescriptorArray::Allocate(number_of_descriptors),
+                     DescriptorArray::Allocate(number_of_descriptors, slack),
                      DescriptorArray);
 }
@@ -177,7 +178,7 @@ Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
 }

-Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+Handle<String> Factory::LookupAsciiSymbol(Handle<SeqOneByteString> string,
                                           int from,
                                           int length) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -199,7 +200,7 @@ Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
                                            PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateStringFromAscii(string, pretenure),
+      isolate()->heap()->AllocateStringFromOneByte(string, pretenure),
       String);
 }
@@ -221,12 +222,12 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
 }

-Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
-                                                  PretenureFlag pretenure) {
+Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
+                                                      PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateRawAsciiString(length, pretenure),
-      SeqAsciiString);
+      isolate()->heap()->AllocateRawOneByteString(length, pretenure),
+      SeqOneByteString);
 }
@@ -524,6 +525,12 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
 }

+Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
+                                               int new_length) {
+  CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
+}
+
 Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
     Handle<FixedDoubleArray> array) {
   CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
@@ -869,6 +876,13 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
 }

+Handle<JSObject> Factory::NewExternal(void* value) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateExternal(value),
+                     JSObject);
+}
+
 Handle<Code> Factory::NewCode(const CodeDesc& desc,
                               Code::Flags flags,
                               Handle<Object> self_ref,
@@ -936,6 +950,9 @@ Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
 Handle<JSArray> Factory::NewJSArray(int capacity,
                                     ElementsKind elements_kind,
                                     PretenureFlag pretenure) {
+  if (capacity != 0) {
+    elements_kind = GetHoleyElementsKind(elements_kind);
+  }
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->AllocateJSArrayAndStorage(
                          elements_kind,
@@ -954,6 +971,7 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
       isolate(),
       isolate()->heap()->AllocateJSArrayWithElements(*elements,
                                                      elements_kind,
+                                                     elements->length(),
                                                      pretenure),
       JSArray);
 }
@@ -1284,10 +1302,26 @@ Handle<JSFunction> Factory::CreateApiFunction(
   result->shared()->DontAdaptArguments();

   // Recursively copy parent templates' accessors, 'data' may be modified.
+  int max_number_of_additional_properties = 0;
+  FunctionTemplateInfo* info = *obj;
+  while (true) {
+    Object* props = info->property_accessors();
+    if (!props->IsUndefined()) {
+      Handle<Object> props_handle(props);
+      NeanderArray props_array(props_handle);
+      max_number_of_additional_properties += props_array.length();
+    }
+    Object* parent = info->parent_template();
+    if (parent->IsUndefined()) break;
+    info = FunctionTemplateInfo::cast(parent);
+  }
+
+  Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
+
   while (true) {
     Handle<Object> props = Handle<Object>(obj->property_accessors());
     if (!props->IsUndefined()) {
-      Map::CopyAppendCallbackDescriptors(map, props);
+      Map::AppendCallbackDescriptors(map, props);
     }
     Handle<Object> parent = Handle<Object>(obj->parent_template());
     if (parent->IsUndefined()) break;
@@ -1336,7 +1370,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
   // Check to see whether there is a matching element in the cache.
   Handle<MapCache> cache =
       Handle<MapCache>(MapCache::cast(context->map_cache()));
-  Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
+  Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
   if (result->IsMap()) return Handle<Map>::cast(result);
   // Create a new map and add it to the cache.
   Handle<Map> map =
@@ -1388,7 +1422,7 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
                                 bool* pending_exception) {
   // Configure the instance by adding the properties specified by the
   // instance template.
-  Handle<Object> instance_template = Handle<Object>(desc->instance_template());
+  Handle<Object> instance_template(desc->instance_template(), isolate());
   if (!instance_template->IsUndefined()) {
     Execution::ConfigureInstance(instance,
                                  instance_template,
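CreateApiFunction now walks the template chain twice: a first pass counts how many accessor descriptors the whole parent chain contributes, Map::EnsureDescriptorSlack reserves that much space once, and only then does the second pass append for real. A generic reserve-then-append sketch of the same two-pass idea, with a hypothetical TemplateInfo stand-in:

    #include <cstdio>
    #include <vector>

    // Hypothetical template-chain node carrying some number of accessors.
    struct TemplateInfo {
      int num_accessors;
      TemplateInfo* parent;  // nullptr at the root
    };

    int main() {
      TemplateInfo root{2, nullptr};
      TemplateInfo mid{3, &root};
      TemplateInfo leaf{1, &mid};

      // Pass 1: count contributions along the whole parent chain.
      int total = 0;
      for (TemplateInfo* info = &leaf; info != nullptr; info = info->parent) {
        total += info->num_accessors;
      }

      // Reserve once (the EnsureDescriptorSlack analogue), so pass 2
      // never reallocates while appending.
      std::vector<int> descriptors;
      descriptors.reserve(total);

      // Pass 2: append for real.
      for (TemplateInfo* info = &leaf; info != nullptr; info = info->parent) {
        for (int i = 0; i < info->num_accessors; ++i) {
          descriptors.push_back(info->num_accessors);
        }
      }
      std::printf("reserved %d, appended %zu\n", total, descriptors.size());
      return 0;
    }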

12
deps/v8/src/factory.h

@@ -66,7 +66,8 @@ class Factory {
   Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);

-  Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+  Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
+                                             int slack = 0);
   Handle<DeoptimizationInputData> NewDeoptimizationInputData(
       int deopt_entry_count,
       PretenureFlag pretenure);
@@ -81,7 +82,7 @@ class Factory {
   Handle<String> LookupSymbol(Vector<const char> str);
   Handle<String> LookupSymbol(Handle<String> str);
   Handle<String> LookupAsciiSymbol(Vector<const char> str);
-  Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
+  Handle<String> LookupAsciiSymbol(Handle<SeqOneByteString>,
                                    int from,
                                    int length);
   Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
@@ -129,7 +130,7 @@ class Factory {
   // Allocates and partially initializes an ASCII or TwoByte String. The
   // characters of the string are uninitialized. Currently used in regexp code
   // only, where they are pretenured.
-  Handle<SeqAsciiString> NewRawAsciiString(
+  Handle<SeqOneByteString> NewRawOneByteString(
       int length,
       PretenureFlag pretenure = NOT_TENURED);
   Handle<SeqTwoByteString> NewRawTwoByteString(
@@ -238,6 +239,9 @@ class Factory {
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);

+  Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
+                                        int new_length);
+
   Handle<FixedDoubleArray> CopyFixedDoubleArray(
       Handle<FixedDoubleArray> array);
@@ -324,6 +328,8 @@ class Factory {
   Handle<ScopeInfo> NewScopeInfo(int length);

+  Handle<JSObject> NewExternal(void* value);
+
   Handle<Code> NewCode(const CodeDesc& desc,
                        Code::Flags flags,
                        Handle<Object> self_reference,

48
deps/v8/src/flag-definitions.h

@@ -144,12 +144,16 @@ DEFINE_bool(harmony_modules, false,
 DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
 DEFINE_bool(harmony_collections, false,
             "enable harmony collections (sets, maps, and weak maps)")
+DEFINE_bool(harmony_observation, false,
+            "enable harmony object observation (implies harmony collections")
 DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
 DEFINE_implication(harmony, harmony_scoping)
 DEFINE_implication(harmony, harmony_modules)
 DEFINE_implication(harmony, harmony_proxies)
 DEFINE_implication(harmony, harmony_collections)
+DEFINE_implication(harmony, harmony_observation)
 DEFINE_implication(harmony_modules, harmony_scoping)
+DEFINE_implication(harmony_observation, harmony_collections)

 // Flags for experimental implementation features.
 DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@@ -177,6 +181,7 @@ DEFINE_int(max_inlined_nodes, 196,
 DEFINE_int(max_inlined_nodes_cumulative, 196,
            "maximum cumulative number of AST nodes considered for inlining")
 DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
 DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
             true,
             "crankshaft harvests type feedback from stub cache")
@@ -198,10 +203,12 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
 DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(use_osr, true, "use on-stack replacement")
-DEFINE_bool(array_bounds_checks_elimination, false,
+DEFINE_bool(array_bounds_checks_elimination, true,
             "perform array bounds checks elimination")
-DEFINE_bool(array_index_dehoisting, false,
+DEFINE_bool(array_index_dehoisting, true,
             "perform array index dehoisting")
+DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
+DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")

 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
@@ -219,7 +226,7 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference")
 DEFINE_bool(optimize_for_in, true,
             "optimize functions containing for-in loops")
 DEFINE_bool(opt_safe_uint32_operations, true,
-            "allow uint32 values on optimize frames if they are used only in"
+            "allow uint32 values on optimize frames if they are used only in "
             "safe operations")

 DEFINE_bool(parallel_recompilation, false,
@@ -227,6 +234,9 @@ DEFINE_bool(parallel_recompilation, false,
 DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
 DEFINE_int(parallel_recompilation_queue_length, 2,
            "the length of the parallel compilation queue")
+DEFINE_bool(manual_parallel_recompilation, false,
+            "disable automatic optimization")
+DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)

 // Experimental profiler changes.
 DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@@ -237,8 +247,6 @@ DEFINE_bool(self_optimization, false,
 DEFINE_bool(direct_self_opt, false,
             "call recompile stub directly when self-optimizing")
 DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(count_based_interrupts, false,
-            "trigger profiler ticks based on counting instead of timing")
 DEFINE_bool(interrupt_at_exit, false,
             "insert an interrupt check at function exit")
 DEFINE_bool(weighted_back_edges, false,
@@ -254,7 +262,6 @@ DEFINE_implication(experimental_profiler, watch_ic_patching)
 DEFINE_implication(experimental_profiler, self_optimization)
 // Not implying direct_self_opt here because it seems to be a bad idea.
 DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, count_based_interrupts)
 DEFINE_implication(experimental_profiler, interrupt_at_exit)
 DEFINE_implication(experimental_profiler, weighted_back_edges)
@@ -284,6 +291,13 @@ DEFINE_bool(enable_vfp2, true,
             "enable use of VFP2 instructions if available")
 DEFINE_bool(enable_armv7, true,
             "enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_bool(enable_sudiv, true,
+            "enable use of SDIV and UDIV instructions if available (ARM only)")
+DEFINE_bool(enable_movw_movt, false,
+            "enable loading 32-bit constant by means of movw/movt "
+            "instruction pairs (ARM only)")
+DEFINE_bool(enable_unaligned_accesses, true,
+            "enable unaligned accesses for ARMv7 (ARM only)")
 DEFINE_bool(enable_fpu, true,
             "enable use of MIPS FPU instructions if available (MIPS only)")
@@ -380,13 +394,21 @@ DEFINE_bool(trace_external_memory, false,
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 DEFINE_bool(flush_code, true,
-            "flush code that we expect not to use again before full gc")
+            "flush code that we expect not to use again (during full gc)")
+DEFINE_bool(flush_code_incrementally, true,
+            "flush code that we expect not to use again (incrementally)")
+DEFINE_bool(age_code, true,
+            "track un-executed functions to age code and flush only "
+            "old code")
 DEFINE_bool(incremental_marking, true, "use incremental marking")
 DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
 DEFINE_bool(trace_incremental_marking, false,
             "trace progress of the incremental marking")
 DEFINE_bool(track_gc_object_stats, false,
             "track object counts and memory usage")
+#ifdef VERIFY_HEAP
+DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
+#endif

 // v8.cc
 DEFINE_bool(use_idle_notification, true,
@@ -412,9 +434,14 @@ DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
 DEFINE_bool(compact_code_space, true,
             "Compact code space on full non-incremental collections")
+DEFINE_bool(incremental_code_compaction, true,
+            "Compact code space on full incremental collections")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
+DEFINE_bool(use_marking_progress_bar, true,
+            "Use a progress bar to scan large objects in increments when "
+            "incremental marking is active.")
 DEFINE_int(random_seed, 0,
            "Default seed for initializing random generator "
            "(0, the default, means to use system random).")
@@ -558,7 +585,6 @@ DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations")
 DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
 DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
 DEFINE_bool(code_stats, false, "report code statistics after GC")
-DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
 DEFINE_bool(verify_native_context_separation, false,
             "verify that code holds on to at most one native context after GC")
 DEFINE_bool(print_handles, false, "report handles after GC")
@@ -629,12 +655,14 @@ DEFINE_bool(prof_lazy, false,
 DEFINE_bool(prof_browser_mode, true,
             "Used with --prof, turns on browser-compatible mode for profiling.")
 DEFINE_bool(log_regexp, false, "Log regular expression execution.")
-DEFINE_bool(sliding_state_window, false,
-            "Update sliding state window counters.")
 DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
 DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
 DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
               "Specify the name of the file for fake gc mmap used in ll_prof")
+DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
+DEFINE_bool(log_timer_events, false,
+            "Time events including external callbacks.")
+DEFINE_implication(log_timer_events, log_internal_timer_events)

 //
 // Disassembler only flags
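The new harmony_observation flag slots into the existing implication chain: --harmony turns it on, and it in turn forces harmony_collections. A toy model of how such DEFINE_implication lines resolve when applied in definition order (not V8's actual flag machinery):

    #include <cassert>

    // Minimal model of DEFINE_implication: each line means "if source is
    // set, set target", applied in definition order, so a chain resolves
    // as long as the implying flag's rule appears first.
    struct Flags {
      bool harmony = false;
      bool harmony_observation = false;
      bool harmony_collections = false;
    };

    void EnforceImplications(Flags* f) {
      if (f->harmony) f->harmony_observation = true;        // harmony -> observation
      if (f->harmony_observation) f->harmony_collections = true;  // -> collections
    }

    int main() {
      Flags f;
      f.harmony = true;  // like passing --harmony on the command line
      EnforceImplications(&f);
      assert(f.harmony_observation && f.harmony_collections);
      return 0;
    }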

4
deps/v8/src/frames.cc

@@ -484,7 +484,7 @@ Address StackFrame::UnpaddedFP() const {
 Code* EntryFrame::unchecked_code() const {
-  return HEAP->raw_unchecked_js_entry_code();
+  return HEAP->js_entry_code();
 }
@@ -507,7 +507,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
 Code* EntryConstructFrame::unchecked_code() const {
-  return HEAP->raw_unchecked_js_construct_entry_code();
+  return HEAP->js_construct_entry_code();
 }

240
deps/v8/src/full-codegen.cc

@@ -86,6 +86,10 @@ void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
 }

+void BreakableStatementChecker::VisitModuleStatement(ModuleStatement* stmt) {
+}
+
 void BreakableStatementChecker::VisitBlock(Block* stmt) {
 }
@@ -466,9 +470,8 @@ void FullCodeGenerator::RecordTypeFeedbackCell(
 }

-void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
-  // The pc offset does not need to be encoded and packed together with a
-  // state.
+void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
+  // The pc offset does not need to be encoded and packed together with a state.
   ASSERT(masm_->pc_offset() > 0);
   BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
   stack_checks_.Add(entry, zone());
@@ -582,16 +585,137 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
 }

+void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
+  ASSERT(scope_->is_global_scope());
+
+  for (int i = 0; i < declarations->length(); i++) {
+    ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
+    if (declaration != NULL) {
+      ModuleLiteral* module = declaration->module()->AsModuleLiteral();
+      if (module != NULL) {
+        Comment cmnt(masm_, "[ Link nested modules");
+        Scope* scope = module->body()->scope();
+        Interface* interface = scope->interface();
+        ASSERT(interface->IsModule() && interface->IsFrozen());
+
+        interface->Allocate(scope->module_var()->index());
+
+        // Set up module context.
+        ASSERT(scope->interface()->Index() >= 0);
+        __ Push(Smi::FromInt(scope->interface()->Index()));
+        __ Push(scope->GetScopeInfo());
+        __ CallRuntime(Runtime::kPushModuleContext, 2);
+        StoreToFrameField(StandardFrameConstants::kContextOffset,
+                          context_register());
+
+        AllocateModules(scope->declarations());
+
+        // Pop module context.
+        LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+        // Update local stack frame context field.
+        StoreToFrameField(StandardFrameConstants::kContextOffset,
+                          context_register());
+      }
+    }
+  }
+}
+
+// Modules have their own local scope, represented by their own context.
+// Module instance objects have an accessor for every export that forwards
+// access to the respective slot from the module's context. (Exports that are
+// modules themselves, however, are simple data properties.)
+//
+// All modules have a _hosting_ scope/context, which (currently) is the
+// (innermost) enclosing global scope. To deal with recursion, nested modules
+// are hosted by the same scope as global ones.
+//
+// For every (global or nested) module literal, the hosting context has an
+// internal slot that points directly to the respective module context. This
+// enables quick access to (statically resolved) module members by 2-dimensional
+// access through the hosting context. For example,
+//
+//   module A {
+//     let x;
+//     module B { let y; }
+//   }
+//   module C { let z; }
+//
+// allocates contexts as follows:
+//
+// [header| .A | .B | .C | A | C ]  (global)
+//           |    |    |
+//           |    |    +-- [header| z ]  (module)
+//           |    |
+//           |    +------- [header| y ]  (module)
+//           |
+//           +------------ [header| x | B ]  (module)
+//
+// Here, .A, .B, .C are the internal slots pointing to the hosted module
+// contexts, whereas A, B, C hold the actual instance objects (note that every
+// module context also points to the respective instance object through its
+// extension slot in the header).
+//
+// To deal with arbitrary recursion and aliases between modules,
+// they are created and initialized in several stages. Each stage applies to
+// all modules in the hosting global scope, including nested ones.
+//
+// 1. Allocate: for each module _literal_, allocate the module contexts and
+//    respective instance object and wire them up. This happens in the
+//    PushModuleContext runtime function, as generated by AllocateModules
+//    (invoked by VisitDeclarations in the hosting scope).
+//
+// 2. Bind: for each module _declaration_ (i.e. literals as well as aliases),
+//    assign the respective instance object to respective local variables. This
+//    happens in VisitModuleDeclaration, and uses the instance objects created
+//    in the previous stage.
+//    For each module _literal_, this phase also constructs a module descriptor
+//    for the next stage. This happens in VisitModuleLiteral.
+//
+// 3. Populate: invoke the DeclareModules runtime function to populate each
+//    _instance_ object with accessors for it exports. This is generated by
+//    DeclareModules (invoked by VisitDeclarations in the hosting scope again),
+//    and uses the descriptors generated in the previous stage.
+//
+// 4. Initialize: execute the module bodies (and other code) in sequence. This
+//    happens by the separate statements generated for module bodies. To reenter
+//    the module scopes properly, the parser inserted ModuleStatements.
+
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
+  Handle<FixedArray> saved_modules = modules_;
+  int saved_module_index = module_index_;
   ZoneList<Handle<Object> >* saved_globals = globals_;
   ZoneList<Handle<Object> > inner_globals(10, zone());
   globals_ = &inner_globals;

+  if (scope_->num_modules() != 0) {
+    // This is a scope hosting modules. Allocate a descriptor array to pass
+    // to the runtime for initialization.
+    Comment cmnt(masm_, "[ Allocate modules");
+    ASSERT(scope_->is_global_scope());
+    modules_ =
+        isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
+    module_index_ = 0;
+
+    // Generate code for allocating all modules, including nested ones.
+    // The allocated contexts are stored in internal variables in this scope.
+    AllocateModules(declarations);
+  }
+
   AstVisitor::VisitDeclarations(declarations);
+
+  if (scope_->num_modules() != 0) {
+    // Initialize modules from descriptor array.
+    ASSERT(module_index_ == modules_->length());
+    DeclareModules(modules_);
+    modules_ = saved_modules;
+    module_index_ = saved_module_index;
+  }
+
   if (!globals_->is_empty()) {
     // Invoke the platform-dependent code generator to do the actual
-    // declaration the global functions and variables.
+    // declaration of the global functions and variables.
     Handle<FixedArray> array =
        isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
     for (int i = 0; i < globals_->length(); ++i)
@@ -604,19 +728,23 @@ void FullCodeGenerator::VisitDeclarations(

 void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
-  // Allocate a module context statically.
   Block* block = module->body();
   Scope* saved_scope = scope();
   scope_ = block->scope();
-  Interface* interface = module->interface();
-  Handle<JSModule> instance = interface->Instance();
+  Interface* interface = scope_->interface();

   Comment cmnt(masm_, "[ ModuleLiteral");
   SetStatementPosition(block);

+  ASSERT(!modules_.is_null());
+  ASSERT(module_index_ < modules_->length());
+  int index = module_index_++;
+
   // Set up module context.
-  __ Push(instance);
-  __ CallRuntime(Runtime::kPushModuleContext, 1);
+  ASSERT(interface->Index() >= 0);
+  __ Push(Smi::FromInt(interface->Index()));
+  __ Push(Smi::FromInt(0));
+  __ CallRuntime(Runtime::kPushModuleContext, 2);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());

   {
@@ -624,6 +752,11 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
     VisitDeclarations(scope_->declarations());
   }

+  // Populate the module description.
+  Handle<ModuleInfo> description =
+      ModuleInfo::Create(isolate(), interface, scope_);
+  modules_->set(index, *description);
+
   scope_ = saved_scope;
   // Pop module context.
   LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -644,8 +777,20 @@ void FullCodeGenerator::VisitModulePath(ModulePath* module) {
 }

-void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) {
-  // TODO(rossberg)
+void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
+  // TODO(rossberg): dummy allocation for now.
+  Scope* scope = module->body()->scope();
+  Interface* interface = scope_->interface();
+
+  ASSERT(interface->IsModule() && interface->IsFrozen());
+  ASSERT(!modules_.is_null());
+  ASSERT(module_index_ < modules_->length());
+  interface->Allocate(scope->module_var()->index());
+  int index = module_index_++;
+
+  Handle<ModuleInfo> description =
+      ModuleInfo::Create(isolate(), interface, scope_);
+  modules_->set(index, *description);
 }
@@ -904,37 +1049,28 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   // Push a block context when entering a block with block scoped variables.
   if (stmt->scope() != NULL) {
     scope_ = stmt->scope();
-    if (scope_->is_module_scope()) {
-      // If this block is a module body, then we have already allocated and
-      // initialized the declarations earlier.  Just push the context.
-      ASSERT(!scope_->interface()->Instance().is_null());
-      __ Push(scope_->interface()->Instance());
-      __ CallRuntime(Runtime::kPushModuleContext, 1);
-      StoreToFrameField(
-          StandardFrameConstants::kContextOffset, context_register());
-    } else {
-      { Comment cmnt(masm_, "[ Extend block context");
-        Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
-        int heap_slots =
-            scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
-        __ Push(scope_info);
-        PushFunctionArgumentForContextAllocation();
-        if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
-          FastNewBlockContextStub stub(heap_slots);
-          __ CallStub(&stub);
-        } else {
-          __ CallRuntime(Runtime::kPushBlockContext, 2);
-        }
-        // Replace the context stored in the frame.
-        StoreToFrameField(StandardFrameConstants::kContextOffset,
-                          context_register());
-      }
-      { Comment cmnt(masm_, "[ Declarations");
-        VisitDeclarations(scope_->declarations());
+    ASSERT(!scope_->is_module_scope());
+    { Comment cmnt(masm_, "[ Extend block context");
+      Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+      int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+      __ Push(scope_info);
+      PushFunctionArgumentForContextAllocation();
+      if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+        FastNewBlockContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kPushBlockContext, 2);
       }
+      // Replace the context stored in the frame.
+      StoreToFrameField(StandardFrameConstants::kContextOffset,
+                        context_register());
+    }
+    { Comment cmnt(masm_, "[ Declarations");
+      VisitDeclarations(scope_->declarations());
     }
   }

   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   VisitStatements(stmt->statements());
   scope_ = saved_scope;
@@ -951,6 +1087,26 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
 }

+void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
+  Comment cmnt(masm_, "[ Module context");
+
+  __ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
+  __ Push(Smi::FromInt(0));
+  __ CallRuntime(Runtime::kPushModuleContext, 2);
+  StoreToFrameField(
+      StandardFrameConstants::kContextOffset, context_register());
+
+  Scope* saved_scope = scope_;
+  scope_ = stmt->body()->scope();
+  VisitStatements(stmt->body()->statements());
+  scope_ = saved_scope;
+  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset,
+                    context_register());
+}
+
 void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
   Comment cmnt(masm_, "[ ExpressionStatement");
   SetStatementPosition(stmt);
@@ -1111,7 +1267,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   // Check stack before looping.
   PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
   __ bind(&stack_check);
-  EmitStackCheck(stmt, &body);
+  EmitBackEdgeBookkeeping(stmt, &body);
   __ jmp(&body);

   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1140,7 +1296,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   SetStatementPosition(stmt);

   // Check stack before looping.
-  EmitStackCheck(stmt, &body);
+  EmitBackEdgeBookkeeping(stmt, &body);

   __ bind(&test);
   VisitForControl(stmt->cond(),
@@ -1186,7 +1342,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   SetStatementPosition(stmt);

   // Check stack before looping.
-  EmitStackCheck(stmt, &body);
+  EmitBackEdgeBookkeeping(stmt, &body);

   __ bind(&test);
   if (stmt->cond() != NULL) {
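The EmitStackCheck/RecordStackCheck pair is renamed to say what it actually does: each loop back edge records a {BailoutId, pc offset} pair (RecordBackEdge), and the runtime later maps an interrupted pc back to the AST id used for on-stack replacement. A sketch of such a table and lookup, with hypothetical values:

    #include <cstdio>
    #include <vector>

    // One entry per loop back edge; mirrors BailoutEntry's
    // {ast id, pc offset} pair in shape only.
    struct BackEdgeEntry {
      int ast_id;
      unsigned pc_offset;
    };

    int main() {
      std::vector<BackEdgeEntry> back_edges;
      // Recorded while emitting code, in increasing pc order.
      back_edges.push_back({17, 0x40});
      back_edges.push_back({23, 0x9c});

      // Later: map an interrupted pc back to the AST id for OSR.
      unsigned pc = 0x9c;
      for (const BackEdgeEntry& e : back_edges) {
        if (e.pc_offset == pc) {
          std::printf("osr entry at ast id %d\n", e.ast_id);
        }
      }
      return 0;
    }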

19
deps/v8/src/full-codegen.h

@@ -396,9 +396,15 @@ class FullCodeGenerator: public AstVisitor {
   void VisitInDuplicateContext(Expression* expr);

   void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void DeclareModules(Handle<FixedArray> descriptions);
   void DeclareGlobals(Handle<FixedArray> pairs);
   int DeclareGlobalsFlags();

+  // Generate code to allocate all (including nested) modules and contexts.
+  // Because of recursive linking and the presence of module alias declarations,
+  // this has to be a separate pass _before_ populating or executing any module.
+  void AllocateModules(ZoneList<Declaration*>* declarations);
+
   // Try to perform a comparison as a fast inlined literal compare if
   // the operands allow it. Returns true if the compare operations
   // has been matched and all code generated; false otherwise.
@@ -442,14 +448,13 @@ class FullCodeGenerator: public AstVisitor {
   // neither a with nor a catch context.
   void EmitDebugCheckDeclarationContext(Variable* variable);

-  // Platform-specific code for checking the stack limit at the back edge of
-  // a loop.
   // This is meant to be called at loop back edges, |back_edge_target| is
   // the jump target of the back edge and is used to approximate the amount
   // of code inside the loop.
-  void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
-  // Record the OSR AST id corresponding to a stack check in the code.
-  void RecordStackCheck(BailoutId osr_ast_id);
+  void EmitBackEdgeBookkeeping(IterationStatement* stmt,
+                               Label* back_edge_target);
+  // Record the OSR AST id corresponding to a back edge in the code.
+  void RecordBackEdge(BailoutId osr_ast_id);
   // Emit a table of stack check ids and pcs into the code stream. Return
   // the offset of the start of the table.
   unsigned EmitStackCheckTable();
@@ -804,8 +809,12 @@ class FullCodeGenerator: public AstVisitor {
   NestedStatement* nesting_stack_;
   int loop_depth_;
   ZoneList<Handle<Object> >* globals_;
+  Handle<FixedArray> modules_;
+  int module_index_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
+  // TODO(svenpanne) Rename this to something like back_edges_ and rename
+  // related functions accordingly.
   ZoneList<BailoutEntry> stack_checks_;
   ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
   int ic_total_count_;

103
deps/v8/src/global-handles.cc

@@ -69,6 +69,7 @@ class GlobalHandles::Node {
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
     index_ = 0;
     independent_ = false;
+    partially_dependent_ = false;
     in_new_space_list_ = false;
     parameter_or_next_free_.next_free = NULL;
     callback_ = NULL;
@@ -89,6 +90,7 @@ class GlobalHandles::Node {
     object_ = object;
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
     independent_ = false;
+    partially_dependent_ = false;
     state_ = NORMAL;
     parameter_or_next_free_.parameter = NULL;
     callback_ = NULL;
@@ -154,6 +156,15 @@ class GlobalHandles::Node {
   }
   bool is_independent() const { return independent_; }

+  void MarkPartiallyDependent(GlobalHandles* global_handles) {
+    ASSERT(state_ != FREE);
+    if (global_handles->isolate()->heap()->InNewSpace(object_)) {
+      partially_dependent_ = true;
+    }
+  }
+  bool is_partially_dependent() const { return partially_dependent_; }
+  void clear_partially_dependent() { partially_dependent_ = false; }
+
   // In-new-space-list flag accessors.
   void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
   bool is_in_new_space_list() const { return in_new_space_list_; }
@@ -260,6 +271,7 @@ class GlobalHandles::Node {
   State state_ : 4;

   bool independent_ : 1;
+  bool partially_dependent_ : 1;
   bool in_new_space_list_ : 1;

   // Handle specific callback.
@@ -448,6 +460,16 @@ void GlobalHandles::MarkIndependent(Object** location) {
 }

+void GlobalHandles::MarkPartiallyDependent(Object** location) {
+  Node::FromLocation(location)->MarkPartiallyDependent(this);
+}
+
+
+bool GlobalHandles::IsIndependent(Object** location) {
+  return Node::FromLocation(location)->is_independent();
+}
+
 bool GlobalHandles::IsNearDeath(Object** location) {
   return Node::FromLocation(location)->IsNearDeath();
 }
@@ -462,6 +484,9 @@ void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
   Node::FromLocation(location)->set_wrapper_class_id(class_id);
 }

+uint16_t GlobalHandles::GetWrapperClassId(Object** location) {
+  return Node::FromLocation(location)->wrapper_class_id();
+}

 void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
   for (NodeIterator it(this); !it.done(); it.Advance()) {
@@ -493,8 +518,9 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
     if (node->IsStrongRetainer() ||
-        (node->IsWeakRetainer() && !node->is_independent())) {
+        (node->IsWeakRetainer() && !node->is_independent() &&
+         !node->is_partially_dependent())) {
       v->VisitPointer(node->location());
     }
   }
 }
@@ -505,8 +531,8 @@ void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
     ASSERT(node->is_in_new_space_list());
-    if (node->is_independent() && node->IsWeak() &&
-        f(isolate_->heap(), node->location())) {
+    if ((node->is_independent() || node->is_partially_dependent()) &&
+        node->IsWeak() && f(isolate_->heap(), node->location())) {
       node->MarkPending();
     }
   }
@@ -517,15 +543,61 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
     ASSERT(node->is_in_new_space_list());
-    if (node->is_independent() && node->IsWeakRetainer()) {
+    if ((node->is_independent() || node->is_partially_dependent()) &&
+        node->IsWeakRetainer()) {
       v->VisitPointer(node->location());
     }
   }
 }

+bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
+                                        WeakSlotCallbackWithHeap can_skip) {
+  int last = 0;
+  bool any_group_was_visited = false;
+  for (int i = 0; i < object_groups_.length(); i++) {
+    ObjectGroup* entry = object_groups_.at(i);
+    ASSERT(entry != NULL);
+
+    Object*** objects = entry->objects_;
+    bool group_should_be_visited = false;
+    for (size_t j = 0; j < entry->length_; j++) {
+      Object* object = *objects[j];
+      if (object->IsHeapObject()) {
+        if (!can_skip(isolate_->heap(), &object)) {
+          group_should_be_visited = true;
+          break;
+        }
+      }
+    }
+
+    if (!group_should_be_visited) {
+      object_groups_[last++] = entry;
+      continue;
+    }
+
+    // An object in the group requires visiting, so iterate over all
+    // objects in the group.
+    for (size_t j = 0; j < entry->length_; ++j) {
+      Object* object = *objects[j];
+      if (object->IsHeapObject()) {
+        v->VisitPointer(&object);
+        any_group_was_visited = true;
+      }
+    }
+
+    // Once the entire group has been iterated over, set the object
+    // group to NULL so it won't be processed again.
+    entry->Dispose();
+    object_groups_.at(i) = NULL;
+  }
+  object_groups_.Rewind(last);
+  return any_group_was_visited;
+}

 bool GlobalHandles::PostGarbageCollectionProcessing(
-    GarbageCollector collector) {
+    GarbageCollector collector, GCTracer* tracer) {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
@@ -539,7 +611,10 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
       // Skip dependent handles. Their weak callbacks might expect to be
       // called between two global garbage collection callbacks which
       // are not called for minor collections.
-      if (!node->is_independent()) continue;
+      if (!node->is_independent() && !node->is_partially_dependent()) {
+        continue;
+      }
+      node->clear_partially_dependent();
       if (node->PostGarbageCollectionProcessing(isolate_, this)) {
         if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of // Weak callback triggered another GC and another round of
@ -555,6 +630,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
} }
} else { } else {
for (NodeIterator it(this); !it.done(); it.Advance()) { for (NodeIterator it(this); !it.done(); it.Advance()) {
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) { if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) { if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above. // See the comment above.
@ -571,10 +647,17 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) { for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i]; Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list()); ASSERT(node->is_in_new_space_list());
if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) { if (node->IsRetainer()) {
new_space_nodes_[last++] = node; if (isolate_->heap()->InNewSpace(node->object())) {
new_space_nodes_[last++] = node;
tracer->increment_nodes_copied_in_new_space();
} else {
node->set_in_new_space_list(false);
tracer->increment_nodes_promoted();
}
} else { } else {
node->set_in_new_space_list(false); node->set_in_new_space_list(false);
tracer->increment_nodes_died_in_new_space();
} }
} }
new_space_nodes_.Rewind(last); new_space_nodes_.Rewind(last);
@ -602,7 +685,7 @@ void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) { void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) { for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) { if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
v->VisitEmbedderReference(it.node()->location(), v->VisitEmbedderReference(it.node()->location(),
it.node()->wrapper_class_id()); it.node()->wrapper_class_id());
} }
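
Note: the new partially_dependent_ bit changes which new-space handles the scavenger must still treat as strong roots. A minimal standalone model of that retention predicate (names simplified; this is a sketch of the logic above, not V8 code):

#include <cassert>
#include <iostream>

// Simplified stand-in for a GlobalHandles::Node during a scavenge.
struct Node {
  bool strong;               // held strongly (NORMAL state)
  bool weak;                 // has a weak callback (WEAK state)
  bool independent;          // embedder marked it independent
  bool partially_dependent;  // embedder marked it partially dependent
};

// Mirrors IterateNewSpaceStrongAndDependentRoots: a weak new-space handle
// is still scavenged as a strong root unless the embedder marked it
// independent or partially dependent, in which case a minor GC may
// reclaim the object without waiting for a full mark-compact.
bool MustVisitAsRoot(const Node& n) {
  return n.strong || (n.weak && !n.independent && !n.partially_dependent);
}

int main() {
  Node plain_weak = {false, true, false, false};
  Node partially = {false, true, false, true};
  assert(MustVisitAsRoot(plain_weak));  // kept alive across scavenges
  assert(!MustVisitAsRoot(partially));  // eligible for scavenge collection
  std::cout << "retention predicate matches the diff\n";
  return 0;
}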

23
deps/v8/src/global-handles.h

@@ -131,6 +131,7 @@ class GlobalHandles {
                               WeakReferenceCallback callback);
 
   static void SetWrapperClassId(Object** location, uint16_t class_id);
+  static uint16_t GetWrapperClassId(Object** location);
 
   // Returns the current number of weak handles.
   int NumberOfWeakHandles() { return number_of_weak_handles_; }
@@ -154,6 +155,11 @@ class GlobalHandles {
   // Clear the weakness of a global handle.
   void MarkIndependent(Object** location);
 
+  // Mark the reference to this object externally unreachable.
+  void MarkPartiallyDependent(Object** location);
+
+  static bool IsIndependent(Object** location);
+
   // Tells whether global handle is near death.
   static bool IsNearDeath(Object** location);
@@ -162,7 +168,8 @@ class GlobalHandles {
   // Process pending weak handles.
   // Returns true if next major GC is likely to collect more garbage.
-  bool PostGarbageCollectionProcessing(GarbageCollector collector);
+  bool PostGarbageCollectionProcessing(GarbageCollector collector,
+                                       GCTracer* tracer);
 
   // Iterates over all strong handles.
   void IterateStrongRoots(ObjectVisitor* v);
@@ -192,16 +199,22 @@ class GlobalHandles {
   // Iterates over strong and dependent handles. See the note above.
   void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
 
-  // Finds weak independent handles satisfying the callback predicate
-  // and marks them as pending. See the note above.
+  // Finds weak independent or partially independent handles satisfying
+  // the callback predicate and marks them as pending. See the note above.
   void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
 
-  // Iterates over weak independent handles. See the note above.
+  // Iterates over weak independent or partially independent handles.
+  // See the note above.
   void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
 
+  // Iterate over objects in object groups that have at least one object
+  // which requires visiting. The callback has to return true if objects
+  // can be skipped and false otherwise.
+  bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
+
   // Add an object group.
   // Should be only used in GC callback function before a collection.
-  // All groups are destroyed after a mark-compact collection.
+  // All groups are destroyed after a garbage collection.
   void AddObjectGroup(Object*** handles,
                       size_t length,
                       v8::RetainedObjectInfo* info);
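
Note: IterateObjectGroups visits a group as a unit once any member fails the can_skip test, then retires the group and compacts the survivors with the Rewind pattern. A toy re-creation of that control flow with standard containers (illustrative only, not V8 code):

#include <functional>
#include <iostream>
#include <vector>

struct Object { bool reachable; };
using Group = std::vector<Object*>;

// A group is visited as a unit if any member cannot be skipped; visited
// groups are dropped, the rest are compacted to the front (Rewind(last)).
bool IterateObjectGroups(std::vector<Group>& groups,
                         const std::function<bool(Object*)>& can_skip,
                         const std::function<void(Object*)>& visit) {
  size_t last = 0;
  bool any_visited = false;
  for (Group& group : groups) {
    bool should_visit = false;
    for (Object* obj : group) {
      if (!can_skip(obj)) { should_visit = true; break; }
    }
    if (!should_visit) {
      groups[last++] = group;  // keep for a later pass
      continue;
    }
    for (Object* obj : group) { visit(obj); any_visited = true; }
    // Visited groups are not copied forward, so they run only once.
  }
  groups.resize(last);  // the Rewind(last) step
  return any_visited;
}

int main() {
  Object a = {true}, b = {false};
  std::vector<Group> groups = {{&a}, {&b}};
  bool visited = IterateObjectGroups(
      groups,
      [](Object* o) { return o->reachable; },   // reachable => skippable
      [](Object* o) { o->reachable = true; });  // visiting marks it live
  std::cout << visited << " " << groups.size() << "\n";  // prints: 1 1
  return 0;
}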

15
deps/v8/src/globals.h

@@ -136,21 +136,6 @@ namespace internal {
 #endif
 #endif
 
-// Define unaligned read for the target architectures supporting it.
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#elif V8_TARGET_ARCH_ARM
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-#elif V8_TARGET_ARCH_MIPS
-#else
-#error Target architecture is not supported by v8
-#endif
-
 // Support for alternative bool type. This is only enabled if the code is
 // compiled with USE_MYBOOL defined. This catches some nasty type bugs.
 // For instance, 'bool b = "false";' results in b == true! This is a hidden
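
Note: the deleted V8_TARGET_CAN_READ_UNALIGNED block gated raw unaligned loads per architecture. The commit simply drops the macro; the sketch below is only an illustration of why such gating can be avoided: a memcpy through a local is well-defined everywhere and compilers lower it to a single load on targets that allow unaligned access.

#include <cstdint>
#include <cstring>
#include <iostream>

// Portable unaligned load: copying into a local avoids the undefined
// behavior of dereferencing a misaligned pointer.
inline uint32_t ReadUnalignedUInt32(const void* p) {
  uint32_t result;
  std::memcpy(&result, p, sizeof(result));
  return result;
}

int main() {
  unsigned char buffer[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
  // Read at offset 1, which is misaligned for uint32_t on most ABIs.
  std::cout << std::hex << ReadUnalignedUInt32(buffer + 1) << "\n";
  return 0;  // prints 12345678 on little-endian targets
}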

81
deps/v8/src/handles.cc

@@ -229,12 +229,12 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
 }
 
-Handle<Object> SetProperty(Handle<Object> object,
+Handle<Object> SetProperty(Isolate* isolate,
+                           Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
                            PropertyAttributes attributes,
                            StrictModeFlag strict_mode) {
-  Isolate* isolate = Isolate::Current();
   CALL_HEAP_FUNCTION(
       isolate,
       Runtime::SetObjectProperty(
@@ -593,6 +593,25 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
 }
 
+Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
+  Isolate* isolate = script->GetIsolate();
+  Handle<String> name_or_source_url_key =
+      isolate->factory()->LookupAsciiSymbol("nameOrSourceURL");
+  Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+  Handle<Object> property = GetProperty(script_wrapper,
+                                        name_or_source_url_key);
+  ASSERT(property->IsJSFunction());
+  Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+  bool caught_exception;
+  Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+                                             NULL, &caught_exception);
+  if (caught_exception) {
+    result = isolate->factory()->undefined_value();
+  }
+  return result;
+}
+
 static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
   int len = array->length();
   for (int i = 0; i < len; i++) {
@@ -705,24 +724,46 @@ Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
 }
 
+Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length) {
+  ASSERT(array->length() >= length);
+  if (array->length() == length) return array;
+
+  Handle<FixedArray> new_array =
+      array->GetIsolate()->factory()->NewFixedArray(length);
+  for (int i = 0; i < length; ++i) new_array->set(i, array->get(i));
+  return new_array;
+}
+
 Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
                                        bool cache_result) {
   Isolate* isolate = object->GetIsolate();
   if (object->HasFastProperties()) {
     if (object->map()->instance_descriptors()->HasEnumCache()) {
       int own_property_count = object->map()->EnumLength();
-
-      // Mark that we have an enum cache if we are allowed to cache it.
-      if (cache_result && own_property_count == Map::kInvalidEnumCache) {
-        int num_enum = object->map()->NumberOfDescribedProperties(DONT_ENUM);
-        object->map()->SetEnumLength(num_enum);
+      // If we have an enum cache, but the enum length of the given map is set
+      // to kInvalidEnumCache, this means that the map itself has never used the
+      // present enum cache. The first step to using the cache is to set the
+      // enum length of the map by counting the number of own descriptors that
+      // are not DONT_ENUM.
+      if (own_property_count == Map::kInvalidEnumCache) {
+        own_property_count = object->map()->NumberOfDescribedProperties(
+            OWN_DESCRIPTORS, DONT_ENUM);
+        if (cache_result) object->map()->SetEnumLength(own_property_count);
       }
 
       DescriptorArray* desc = object->map()->instance_descriptors();
-      Handle<FixedArray> keys(FixedArray::cast(desc->GetEnumCache()), isolate);
-      isolate->counters()->enum_cache_hits()->Increment();
-      return keys;
+      Handle<FixedArray> keys(desc->GetEnumCache(), isolate);
+
+      // In case the number of properties required in the enum are actually
+      // present, we can reuse the enum cache. Otherwise, this means that the
+      // enum cache was generated for a previous (smaller) version of the
+      // Descriptor Array. In that case we regenerate the enum cache.
+      if (own_property_count <= keys->length()) {
+        isolate->counters()->enum_cache_hits()->Increment();
+        return ReduceFixedArrayTo(keys, own_property_count);
+      }
     }
 
     Handle<Map> map(object->map());
@@ -734,8 +775,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
     }
 
     isolate->counters()->enum_cache_misses()->Increment();
-    int num_enum = map->NumberOfDescribedProperties(DONT_ENUM);
+    int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_ENUM);
 
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
     Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum);
@@ -743,10 +783,14 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
     Handle<DescriptorArray> descs =
         Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
 
+    int real_size = map->NumberOfOwnDescriptors();
+    int enum_size = 0;
     int index = 0;
+
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
       PropertyDetails details = descs->GetDetails(i);
       if (!details.IsDontEnum()) {
+        if (i < real_size) ++enum_size;
         storage->set(index, descs->GetKey(i));
         if (!indices.is_null()) {
           if (details.type() != FIELD) {
@@ -773,9 +817,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
                indices.is_null() ? Object::cast(Smi::FromInt(0))
                                  : Object::cast(*indices));
     if (cache_result) {
-      object->map()->SetEnumLength(index);
+      object->map()->SetEnumLength(enum_size);
     }
-    return storage;
+
+    return ReduceFixedArrayTo(storage, enum_size);
   } else {
     Handle<StringDictionary> dictionary(object->property_dictionary());
@@ -870,7 +915,7 @@ int Utf8LengthHelper(String* input,
   int total = 0;
   bool dummy;
   while (true) {
-    if (input->IsAsciiRepresentation()) {
+    if (input->IsOneByteRepresentation()) {
       *starts_with_surrogate = false;
       return total + to - from;
     }
@@ -903,14 +948,14 @@ int Utf8LengthHelper(String* input,
     } else {
       if (first_length > from) {
         // Left hand side is shorter.
-        if (first->IsAsciiRepresentation()) {
+        if (first->IsOneByteRepresentation()) {
           total += first_length - from;
           *starts_with_surrogate = false;
           starts_with_surrogate = &dummy;
           input = second;
           from = 0;
           to -= first_length;
-        } else if (second->IsAsciiRepresentation()) {
+        } else if (second->IsOneByteRepresentation()) {
           followed_by_surrogate = false;
           total += to - first_length;
           input = first;
12
deps/v8/src/handles.h

@@ -95,6 +95,13 @@ class Handle {
 };
 
+// Convenience wrapper.
+template<class T>
+inline Handle<T> handle(T* t, Isolate* isolate) {
+  return Handle<T>(t, isolate);
+}
+
 class DeferredHandles;
 class HandleScopeImplementer;
@@ -209,7 +216,8 @@ Handle<String> FlattenGetString(Handle<String> str);
 int Utf8Length(Handle<String> str);
 
-Handle<Object> SetProperty(Handle<Object> object,
+Handle<Object> SetProperty(Isolate* isolate,
+                           Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
                            PropertyAttributes attributes,
@@ -260,6 +268,7 @@ int GetScriptLineNumber(Handle<Script> script, int code_position);
 // The safe version does not make heap allocations but may work much slower.
 int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
 int GetScriptColumnNumber(Handle<Script> script, int code_position);
+Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script);
 
 // Computes the enumerable keys from interceptors. Used for debug mirrors and
 // by GetKeysInFixedArrayFor below.
@@ -276,6 +285,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
                                           KeyCollectionType type,
                                           bool* threw);
 Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
+Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length);
 Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
                                        bool cache_result);
47
deps/v8/src/heap-inl.h

@@ -85,13 +85,16 @@ void PromotionQueue::ActivateGuardIfOnTheSamePage() {
 MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
                                           PretenureFlag pretenure) {
   // Check for ASCII first since this is the common case.
-  if (String::IsAscii(str.start(), str.length())) {
+  const char* start = str.start();
+  int length = str.length();
+  int non_ascii_start = String::NonAsciiStart(start, length);
+  if (non_ascii_start >= length) {
     // If the string is ASCII, we do not need to convert the characters
     // since UTF8 is backwards compatible with ASCII.
-    return AllocateStringFromAscii(str, pretenure);
+    return AllocateStringFromOneByte(str, pretenure);
   }
   // Non-ASCII and we need to decode.
-  return AllocateStringFromUtf8Slow(str, pretenure);
+  return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
 }
 
@@ -106,12 +109,12 @@ MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
 MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
                                        uint32_t hash_field) {
-  if (str.length() > SeqAsciiString::kMaxLength) {
+  if (str.length() > SeqOneByteString::kMaxLength) {
     return Failure::OutOfMemoryException();
   }
   // Compute map and object size.
   Map* map = ascii_symbol_map();
-  int size = SeqAsciiString::SizeFor(str.length());
+  int size = SeqOneByteString::SizeFor(str.length());
 
   // Allocate string.
   Object* result;
@@ -131,7 +134,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
   ASSERT_EQ(size, answer->Size());
 
   // Fill in the characters.
-  memcpy(answer->address() + SeqAsciiString::kHeaderSize,
+  memcpy(answer->address() + SeqOneByteString::kHeaderSize,
          str.start(), str.length());
 
   return answer;
@@ -267,13 +270,6 @@ MaybeObject* Heap::AllocateRawMap() {
 #endif
   MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
   if (result->IsFailure()) old_gen_exhausted_ = true;
-#ifdef DEBUG
-  if (!result->IsFailure()) {
-    // Maps have their own alignment.
-    CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) ==
-          static_cast<intptr_t>(kHeapObjectTag));
-  }
-#endif
   return result;
 }
 
@@ -464,7 +460,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
     intptr_t change_in_bytes) {
   ASSERT(HasBeenSetUp());
   intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
-  if (change_in_bytes >= 0) {
+  if (change_in_bytes > 0) {
     // Avoid overflow.
     if (amount > amount_of_external_allocated_memory_) {
       amount_of_external_allocated_memory_ = amount;
@@ -611,7 +607,7 @@ void ExternalStringTable::Verify() {
     Object* obj = Object::cast(new_space_strings_[i]);
     // TODO(yangguo): check that the object is indeed an external string.
     ASSERT(heap_->InNewSpace(obj));
-    ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
+    ASSERT(obj != HEAP->the_hole_value());
     if (obj->IsExternalAsciiString()) {
       ExternalAsciiString* string = ExternalAsciiString::cast(obj);
       ASSERT(String::IsAscii(string->GetChars(), string->length()));
@@ -621,7 +617,7 @@ void ExternalStringTable::Verify() {
     Object* obj = Object::cast(old_space_strings_[i]);
     // TODO(yangguo): check that the object is indeed an external string.
     ASSERT(!heap_->InNewSpace(obj));
-    ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
+    ASSERT(obj != HEAP->the_hole_value());
     if (obj->IsExternalAsciiString()) {
       ExternalAsciiString* string = ExternalAsciiString::cast(obj);
       ASSERT(String::IsAscii(string->GetChars(), string->length()));
@@ -640,9 +636,11 @@ void ExternalStringTable::AddOldString(String* string) {
 void ExternalStringTable::ShrinkNewStrings(int position) {
   new_space_strings_.Rewind(position);
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
+#endif
 }
 
@@ -741,28 +739,15 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
 }
 
-LinearAllocationScope::LinearAllocationScope() {
-  HEAP->linear_allocation_scope_depth_++;
-}
-
-
-LinearAllocationScope::~LinearAllocationScope() {
-  HEAP->linear_allocation_scope_depth_--;
-  ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
-}
-
-
-#ifdef DEBUG
 void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
     if ((*current)->IsHeapObject()) {
       HeapObject* object = HeapObject::cast(*current);
-      ASSERT(HEAP->Contains(object));
-      ASSERT(object->map()->IsMap());
+      CHECK(HEAP->Contains(object));
+      CHECK(object->map()->IsMap());
     }
   }
 }
-#endif
 
 double GCTracer::SizeOfHeapObjects() {
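
Note: AllocateStringFromUtf8 now computes the offset of the first non-ASCII byte up front, so the one-byte fast path still applies to pure-ASCII input and the slow path can bulk-copy the ASCII prefix before decoding. A byte-at-a-time sketch of the NonAsciiStart idea (V8's real scanner also checks a machine word per iteration; this is a simplification):

#include <iostream>
#include <string>

// Return the offset of the first byte with the high bit set, or length
// if the buffer is pure ASCII.
int NonAsciiStart(const char* chars, int length) {
  for (int i = 0; i < length; ++i) {
    if (static_cast<unsigned char>(chars[i]) > 0x7F) return i;
  }
  return length;
}

int main() {
  std::string ascii = "plain";
  std::string mixed = "caf\xC3\xA9";  // UTF-8 encoding of "cafe" + accent
  // non_ascii_start >= length means the one-byte fast path applies.
  std::cout << NonAsciiStart(ascii.data(), static_cast<int>(ascii.size()))
            << "\n";  // prints: 5
  // Otherwise the decoder can still memcpy the first 3 ASCII bytes.
  std::cout << NonAsciiStart(mixed.data(), static_cast<int>(mixed.size()))
            << "\n";  // prints: 3
  return 0;
}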

43
deps/v8/src/heap-profiler.cc

@@ -65,23 +65,29 @@ void HeapProfiler::TearDown() {
 }
 
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
-                                         int type,
-                                         v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+    const char* name,
+    int type,
+    v8::ActivityControl* control,
+    v8::HeapProfiler::ObjectNameResolver* resolver) {
   ASSERT(Isolate::Current()->heap_profiler() != NULL);
   return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
                                                                type,
-                                                               control);
+                                                               control,
+                                                               resolver);
 }
 
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
-                                         int type,
-                                         v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+    String* name,
+    int type,
+    v8::ActivityControl* control,
+    v8::HeapProfiler::ObjectNameResolver* resolver) {
   ASSERT(Isolate::Current()->heap_profiler() != NULL);
   return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
                                                                type,
-                                                               control);
+                                                               control,
+                                                               resolver);
 }
 
@@ -122,16 +128,18 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
 }
 
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
-                                             int type,
-                                             v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+    const char* name,
+    int type,
+    v8::ActivityControl* control,
+    v8::HeapProfiler::ObjectNameResolver* resolver) {
   HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
   HeapSnapshot* result =
       snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
   bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
-      HeapSnapshotGenerator generator(result, control);
+      HeapSnapshotGenerator generator(result, control, resolver);
       generation_completed = generator.GenerateSnapshot();
       break;
     }
@@ -147,10 +155,13 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
 }
 
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
-                                             int type,
-                                             v8::ActivityControl* control) {
-  return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+    String* name,
+    int type,
+    v8::ActivityControl* control,
+    v8::HeapProfiler::ObjectNameResolver* resolver) {
+  return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
+                          resolver);
 }
 
 void HeapProfiler::StartHeapObjectsTrackingImpl() {
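
Note: the resolver threaded through every TakeSnapshot overload lets the embedder (here, Node) attach human-readable names to global objects in heap snapshots. A standalone model of that hook; the interface and names below are illustrative stand-ins, not the exact v8-profiler.h declarations:

#include <iostream>
#include <string>

struct Object { int id; };

// The embedder implements this; returning NULL means "no special name".
class ObjectNameResolver {
 public:
  virtual ~ObjectNameResolver() {}
  virtual const char* GetName(const Object& object) = 0;
};

class NodeContextResolver : public ObjectNameResolver {
 public:
  virtual const char* GetName(const Object& object) {
    return object.id == 42 ? "Node.js global" : NULL;
  }
};

// The snapshot generator consults the resolver before falling back to a
// generic label, mirroring how the resolver is passed into
// HeapSnapshotGenerator above.
std::string NameEntry(const Object& o, ObjectNameResolver* resolver) {
  if (resolver != NULL) {
    const char* name = resolver->GetName(o);
    if (name != NULL) return name;  // embedder-provided label
  }
  return "Object";  // generic fallback, as without a resolver
}

int main() {
  NodeContextResolver resolver;
  Object global = {42}, other = {7};
  std::cout << NameEntry(global, &resolver) << "\n";  // Node.js global
  std::cout << NameEntry(other, NULL) << "\n";        // Object
  return 0;
}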

32
deps/v8/src/heap-profiler.h

@@ -51,12 +51,16 @@ class HeapProfiler {
   static size_t GetMemorySizeUsedByProfiler();
 
-  static HeapSnapshot* TakeSnapshot(const char* name,
-                                    int type,
-                                    v8::ActivityControl* control);
-  static HeapSnapshot* TakeSnapshot(String* name,
-                                    int type,
-                                    v8::ActivityControl* control);
+  static HeapSnapshot* TakeSnapshot(
+      const char* name,
+      int type,
+      v8::ActivityControl* control,
+      v8::HeapProfiler::ObjectNameResolver* resolver);
+  static HeapSnapshot* TakeSnapshot(
+      String* name,
+      int type,
+      v8::ActivityControl* control,
+      v8::HeapProfiler::ObjectNameResolver* resolver);
   static void StartHeapObjectsTracking();
   static void StopHeapObjectsTracking();
@@ -81,12 +85,16 @@ class HeapProfiler {
  private:
   HeapProfiler();
   ~HeapProfiler();
-  HeapSnapshot* TakeSnapshotImpl(const char* name,
-                                 int type,
-                                 v8::ActivityControl* control);
-  HeapSnapshot* TakeSnapshotImpl(String* name,
-                                 int type,
-                                 v8::ActivityControl* control);
+  HeapSnapshot* TakeSnapshotImpl(
+      const char* name,
+      int type,
+      v8::ActivityControl* control,
+      v8::HeapProfiler::ObjectNameResolver* resolver);
+  HeapSnapshot* TakeSnapshotImpl(
+      String* name,
+      int type,
+      v8::ActivityControl* control,
+      v8::HeapProfiler::ObjectNameResolver* resolver);
   void ResetSnapshots();
 
   void StartHeapObjectsTrackingImpl();

706
deps/v8/src/heap.cc

File diff suppressed because it is too large

177
deps/v8/src/heap.h

@@ -154,7 +154,9 @@ namespace internal {
   V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)  \
   V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)        \
   V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)              \
-  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
+  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)              \
+  V(JSObject, observation_state, ObservationState)                          \
+  V(Map, external_map, ExternalMap)
 
 #define ROOT_LIST(V)                                  \
   STRONG_ROOT_LIST(V)                                 \
@@ -176,6 +178,7 @@ namespace internal {
   V(constructor_symbol, "constructor")                                  \
   V(code_symbol, ".code")                                               \
   V(result_symbol, ".result")                                           \
+  V(dot_for_symbol, ".for.")                                            \
   V(catch_var_symbol, ".catch-var")                                     \
   V(empty_symbol, "")                                                   \
   V(eval_symbol, "eval")                                                \
@@ -283,14 +286,6 @@ class StoreBufferRebuilder {
 
-// The all static Heap captures the interface to the global object heap.
-// All JavaScript contexts by this process share the same object heap.
-
-#ifdef DEBUG
-class HeapDebugUtils;
-#endif
-
 // A queue of objects promoted during scavenge. Each object is accompanied
 // by its size to avoid dereferencing a map pointer for scanning.
 class PromotionQueue {
@@ -486,6 +481,9 @@ class Heap {
   // Returns the amount of executable memory currently committed for the heap.
   intptr_t CommittedMemoryExecutable();
 
+  // Returns the amount of physical memory currently committed for the heap.
+  size_t CommittedPhysicalMemory();
+
   // Returns the available bytes in space w/o growing.
   // Heap doesn't guarantee that it can allocate an object that requires
   // all available bytes. Check MaxHeapObjectSize() instead.
@@ -508,6 +506,24 @@ class Heap {
   MapSpace* map_space() { return map_space_; }
   CellSpace* cell_space() { return cell_space_; }
   LargeObjectSpace* lo_space() { return lo_space_; }
+  PagedSpace* paged_space(int idx) {
+    switch (idx) {
+      case OLD_POINTER_SPACE:
+        return old_pointer_space();
+      case OLD_DATA_SPACE:
+        return old_data_space();
+      case MAP_SPACE:
+        return map_space();
+      case CELL_SPACE:
+        return cell_space();
+      case CODE_SPACE:
+        return code_space();
+      case NEW_SPACE:
+      case LO_SPACE:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
 
   bool always_allocate() { return always_allocate_scope_depth_ != 0; }
   Address always_allocate_scope_depth_address() {
@@ -560,6 +576,7 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
       FixedArrayBase* array_base,
       ElementsKind elements_kind,
+      int length,
       PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates and initializes a new global object based on a constructor.
@@ -642,6 +659,9 @@ class Heap {
   // Allocates a serialized scope info.
   MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
 
+  // Allocates an External object for v8's external API.
+  MUST_USE_RESULT MaybeObject* AllocateExternal(void* value);
+
   // Allocates an empty PolymorphicCodeCache.
   MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@@ -657,6 +677,9 @@ class Heap {
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
+  // For use during bootup.
+  void RepairFreeListsAfterBoot();
+
   // Allocates and fully initializes a String. There are two String
   // encodings: ASCII and two byte. One should choose between the three string
   // allocation functions based on the encoding of the string buffer used to
@@ -675,7 +698,7 @@ class Heap {
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
+  MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte(
       Vector<const char> str,
       PretenureFlag pretenure = NOT_TENURED);
   MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
@@ -683,6 +706,7 @@ class Heap {
       PretenureFlag pretenure = NOT_TENURED);
   MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
       Vector<const char> str,
+      int non_ascii_start,
       PretenureFlag pretenure = NOT_TENURED);
   MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
       Vector<const uc16> str,
@@ -718,7 +742,7 @@ class Heap {
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
+  MUST_USE_RESULT MaybeObject* AllocateRawOneByteString(
       int length,
       PretenureFlag pretenure = NOT_TENURED);
   MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
@@ -1013,9 +1037,8 @@ class Heap {
     return LookupSymbol(CStrVector(str));
   }
   MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
-  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
-                                                 int from,
-                                                 int length);
+  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(
+      Handle<SeqOneByteString> string, int from, int length);
   bool LookupSymbolIfExists(String* str, String** symbol);
   bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
@@ -1081,7 +1104,10 @@ class Heap {
   void EnsureHeapIsIterable();
 
   // Notify the heap that a context has been disposed.
-  int NotifyContextDisposed() { return ++contexts_disposed_; }
+  int NotifyContextDisposed() {
+    flush_monomorphic_ics_ = true;
+    return ++contexts_disposed_;
+  }
 
   // Utility to invoke the scavenger. This is needed in test code to
   // ensure correct callback for weak global handles.
@@ -1239,13 +1265,15 @@ class Heap {
     return &native_contexts_list_;
   }
 
+#ifdef VERIFY_HEAP
+  // Verify the heap is in its normal state before or after a GC.
+  void Verify();
+#endif
+
 #ifdef DEBUG
   void Print();
   void PrintHandles();
 
-  // Verify the heap is in its normal state before or after a GC.
-  void Verify();
-
   void OldPointerSpaceCheckStoreBuffer();
   void MapSpaceCheckStoreBuffer();
   void LargeObjectSpaceCheckStoreBuffer();
@@ -1253,10 +1281,23 @@ class Heap {
   // Report heap statistics.
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
+#endif
+
+  // Zapping is needed for verify heap, and always done in debug builds.
+  static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+    return true;
+#else
+#ifdef VERIFY_HEAP
+    return FLAG_verify_heap;
+#else
+    return false;
+#endif
+#endif
+  }
 
   // Fill in bogus values in from space
   void ZapFromSpace();
-#endif
 
   // Print short heap statistics.
   void PrintShortHeapStatistics();
@@ -1309,20 +1350,9 @@ class Heap {
   // Commits from space if it is uncommitted.
   void EnsureFromSpaceIsCommitted();
 
-  // Support for partial snapshots. After calling this we can allocate a
-  // certain number of bytes using only linear allocation (with a
-  // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
-  // or causing a GC. It returns true if space was reserved or false if a GC is
-  // needed. For paged spaces the space requested must include the space wasted
-  // at the end of each page when allocating linearly.
-  void ReserveSpace(
-      int new_space_size,
-      int pointer_space_size,
-      int data_space_size,
-      int code_space_size,
-      int map_space_size,
-      int cell_space_size,
-      int large_object_size);
+  // Support for partial snapshots. After calling this we have a linear
+  // space to write objects in each space.
+  void ReserveSpace(int* sizes, Address* addresses);
 
   //
   // Support for the API.
@@ -1418,6 +1448,10 @@ class Heap {
   STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
   STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
 
+  // Generated code can embed direct references to non-writable roots if
+  // they are in new space.
+  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
   MUST_USE_RESULT MaybeObject* Uint32ToString(
@@ -1489,13 +1523,6 @@ class Heap {
   void ClearNormalizedMapCaches();
 
-  // Clears the cache of ICs related to this map.
-  void ClearCacheOnMap(Map* map) {
-    if (FLAG_cleanup_code_caches_at_gc) {
-      map->ClearCodeCache(this);
-    }
-  }
-
   GCTracer* tracer() { return tracer_; }
 
   // Returns the size of objects residing in non new spaces.
@@ -1616,6 +1643,8 @@ class Heap {
     global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
   }
 
+  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
+
   intptr_t amount_of_external_allocated_memory() {
     return amount_of_external_allocated_memory_;
   }
@@ -1701,6 +1730,8 @@ class Heap {
   int global_ic_age_;
 
+  bool flush_monomorphic_ics_;
+
   int scan_on_scavenge_pages_;
 
 #if defined(V8_TARGET_ARCH_X64)
@@ -1754,8 +1785,6 @@ class Heap {
   // Do we expect to be able to handle allocation failure at this
   // time?
   bool disallow_allocation_failure_;
-
-  HeapDebugUtils* debug_utils_;
 #endif  // DEBUG
 
   // Indicates that the new space should be kept small due to high promotion
@@ -1872,7 +1901,6 @@ class Heap {
   bool PerformGarbageCollection(GarbageCollector collector,
                                 GCTracer* tracer);
 
-
   inline void UpdateOldSpaceLimits();
 
   // Allocate an uninitialized object in map space. The behavior is identical
@@ -1899,9 +1927,9 @@ class Heap {
   void CreateFixedStubs();
 
-  MaybeObject* CreateOddball(const char* to_string,
-                             Object* to_number,
-                             byte kind);
+  MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
+                                             Object* to_number,
+                                             byte kind);
 
   // Allocate a JSArray with no elements
   MUST_USE_RESULT MaybeObject* AllocateJSArray(
@@ -2131,7 +2159,6 @@ class Heap {
   friend class GCTracer;
   friend class DisallowAllocationFailure;
   friend class AlwaysAllocateScope;
-  friend class LinearAllocationScope;
   friend class Page;
   friend class Isolate;
   friend class MarkCompactCollector;
@@ -2198,14 +2225,6 @@ class AlwaysAllocateScope {
 };
 
-class LinearAllocationScope {
- public:
-  inline LinearAllocationScope();
-  inline ~LinearAllocationScope();
-};
-
-
-#ifdef DEBUG
 // Visitor class to verify interior pointers in spaces that do not contain
 // or care about intergenerational references. All heap object pointers have to
 // point into the heap to a location that has a map pointer at its first word.
@@ -2215,7 +2234,6 @@ class VerifyPointersVisitor: public ObjectVisitor {
  public:
   inline void VisitPointers(Object** start, Object** end);
 };
-#endif
 
 // Space iterator for iterating over all spaces of the heap.
@@ -2374,7 +2392,7 @@ class KeyedLookupCache {
 };
 
-// Cache for mapping (array, property name) into descriptor index.
+// Cache for mapping (map, property name) into descriptor index.
 // The cache contains both positive and negative results.
 // Descriptor index equals kNotFound means the property is absent.
 // Cleared at startup and prior to any gc.
@@ -2382,21 +2400,21 @@ class DescriptorLookupCache {
  public:
   // Lookup descriptor index for (map, name).
   // If absent, kAbsent is returned.
-  int Lookup(DescriptorArray* array, String* name) {
+  int Lookup(Map* source, String* name) {
     if (!StringShape(name).IsSymbol()) return kAbsent;
-    int index = Hash(array, name);
+    int index = Hash(source, name);
     Key& key = keys_[index];
-    if ((key.array == array) && (key.name == name)) return results_[index];
+    if ((key.source == source) && (key.name == name)) return results_[index];
     return kAbsent;
   }
 
   // Update an element in the cache.
-  void Update(DescriptorArray* array, String* name, int result) {
+  void Update(Map* source, String* name, int result) {
     ASSERT(result != kAbsent);
     if (StringShape(name).IsSymbol()) {
-      int index = Hash(array, name);
+      int index = Hash(source, name);
       Key& key = keys_[index];
-      key.array = array;
+      key.source = source;
       key.name = name;
       results_[index] = result;
     }
@@ -2410,26 +2428,26 @@ class DescriptorLookupCache {
  private:
   DescriptorLookupCache() {
     for (int i = 0; i < kLength; ++i) {
-      keys_[i].array = NULL;
+      keys_[i].source = NULL;
       keys_[i].name = NULL;
       results_[i] = kAbsent;
     }
   }
 
-  static int Hash(DescriptorArray* array, String* name) {
+  static int Hash(Object* source, String* name) {
     // Uses only lower 32 bits if pointers are larger.
-    uint32_t array_hash =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array))
+    uint32_t source_hash =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source))
             >> kPointerSizeLog2;
     uint32_t name_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
            >> kPointerSizeLog2;
-    return (array_hash ^ name_hash) % kLength;
+    return (source_hash ^ name_hash) % kLength;
   }
 
   static const int kLength = 64;
   struct Key {
-    DescriptorArray* array;
+    Map* source;
     String* name;
   };
@@ -2531,6 +2549,18 @@ class GCTracer BASE_EMBEDDED {
     promoted_objects_size_ += object_size;
   }
 
+  void increment_nodes_died_in_new_space() {
+    nodes_died_in_new_space_++;
+  }
+
+  void increment_nodes_copied_in_new_space() {
+    nodes_copied_in_new_space_++;
+  }
+
+  void increment_nodes_promoted() {
+    nodes_promoted_++;
+  }
+
  private:
   // Returns a string matching the collector.
   const char* CollectorString();
@@ -2575,6 +2605,15 @@ class GCTracer BASE_EMBEDDED {
   // Size of objects promoted during the current collection.
   intptr_t promoted_objects_size_;
 
+  // Number of nodes that died in the new space.
+  int nodes_died_in_new_space_;
+
+  // Number of nodes copied within the new space.
+  int nodes_copied_in_new_space_;
+
+  // Number of nodes promoted to the old space.
+  int nodes_promoted_;
+
   // Incremental marking steps counters.
   int steps_count_;
   double steps_took_;
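
Note: DescriptorLookupCache is a direct-mapped cache (one entry per bucket, no chaining) whose key changed from a DescriptorArray pointer to a Map pointer; a colliding Update simply overwrites the slot. A self-contained sketch of that cache shape, with void pointers standing in for Map and String (an illustration, not the V8 class):

#include <cstdint>
#include <iostream>

class PairCache {
 public:
  static const int kLength = 64;
  static const int kAbsent = -2;

  PairCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  // Miss on either pointer mismatch; there is no probing.
  int Lookup(const void* source, const void* name) {
    int index = Hash(source, name);
    if (keys_[index].source == source && keys_[index].name == name) {
      return results_[index];
    }
    return kAbsent;
  }

  // Collisions just evict the previous entry.
  void Update(const void* source, const void* name, int result) {
    int index = Hash(source, name);
    keys_[index].source = source;
    keys_[index].name = name;
    results_[index] = result;
  }

 private:
  static int Hash(const void* source, const void* name) {
    // Drop low alignment bits, then mix the two pointers, as above.
    uint32_t a =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source) >> 2);
    uint32_t b =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name) >> 2);
    return static_cast<int>((a ^ b) % kLength);
  }

  struct Key { const void* source; const void* name; };
  Key keys_[kLength];
  int results_[kLength];
};

int main() {
  PairCache cache;
  int map_a = 0, name_x = 0;
  std::cout << cache.Lookup(&map_a, &name_x) << "\n";  // -2 (kAbsent)
  cache.Update(&map_a, &name_x, 3);
  std::cout << cache.Lookup(&map_a, &name_x) << "\n";  // 3
  return 0;
}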

502
deps/v8/src/hydrogen-instructions.cc

@@ -85,6 +85,81 @@ void HValue::AssumeRepresentation(Representation r) {
 }
 
+void HValue::InferRepresentation(HInferRepresentation* h_infer) {
+  ASSERT(CheckFlag(kFlexibleRepresentation));
+  Representation new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+  new_rep = RepresentationFromUses();
+  UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HValue::RepresentationFromUses() {
+  if (HasNoUses()) return Representation::None();
+
+  // Array of use counts for each representation.
+  int use_count[Representation::kNumRepresentations] = { 0 };
+
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    Representation rep = use->observed_input_representation(it.index());
+    if (rep.IsNone()) continue;
+    if (FLAG_trace_representation) {
+      PrintF("#%d %s is used by #%d %s as %s%s\n",
+             id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
+             (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+    }
+    use_count[rep.kind()] += use->LoopWeight();
+  }
+  if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
+  int tagged_count = use_count[Representation::kTagged];
+  int double_count = use_count[Representation::kDouble];
+  int int32_count = use_count[Representation::kInteger32];
+
+  if (tagged_count > 0) return Representation::Tagged();
+  if (double_count > 0) return Representation::Double();
+  if (int32_count > 0) return Representation::Integer32();
+
+  return Representation::None();
+}
+
+
+void HValue::UpdateRepresentation(Representation new_rep,
+                                  HInferRepresentation* h_infer,
+                                  const char* reason) {
+  Representation r = representation();
+  if (new_rep.is_more_general_than(r)) {
+    // When an HConstant is marked "not convertible to integer", then
+    // never try to represent it as an integer.
+    if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
+      new_rep = Representation::Tagged();
+      if (FLAG_trace_representation) {
+        PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
+               " (%s want i)\n",
+               id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+      }
+    } else {
+      if (FLAG_trace_representation) {
+        PrintF("Changing #%d %s representation %s -> %s based on %s\n",
+               id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+      }
+    }
+    ChangeRepresentation(new_rep);
+    AddDependantsToWorklist(h_infer);
+  }
+}
+
+
+void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    h_infer->AddToWorklist(it.value());
+  }
+  for (int i = 0; i < OperandCount(); ++i) {
+    h_infer->AddToWorklist(OperandAt(i));
+  }
+}
+
+
 static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
   if (result > kMaxInt) {
     *overflow = true;
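
Note: RepresentationFromUses tallies loop-weighted votes per representation, then picks by generality (Tagged over Double over Integer32) rather than by vote count. A toy version of that decision, with simplified types standing in for the Hydrogen classes:

#include <iostream>
#include <vector>

enum Representation {
  kNone, kInteger32, kDouble, kTagged, kNumRepresentations
};

struct Use { Representation observed; int loop_weight; };

// Each use reports the representation it observed, weighted by loop
// depth; the value adopts the most general representation any use
// asked for, mirroring the priority order in the function above.
Representation RepresentationFromUses(const std::vector<Use>& uses) {
  int use_count[kNumRepresentations] = { 0 };
  for (const Use& use : uses) {
    if (use.observed == kNone) continue;
    use_count[use.observed] += use.loop_weight;
  }
  if (use_count[kTagged] > 0) return kTagged;
  if (use_count[kDouble] > 0) return kDouble;
  if (use_count[kInteger32] > 0) return kInteger32;
  return kNone;
}

int main() {
  // Two heavily weighted int32 uses still lose to a single double use:
  // generality, not vote count, decides.
  std::vector<Use> uses = {{kInteger32, 8}, {kInteger32, 8}, {kDouble, 1}};
  std::cout << (RepresentationFromUses(uses) == kDouble) << "\n";  // 1
  return 0;
}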
@ -301,6 +376,7 @@ HUseListNode* HUseListNode::tail() {
bool HValue::CheckUsesForFlag(Flag f) { bool HValue::CheckUsesForFlag(Flag f) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) { for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false; if (!it.value()->CheckFlag(f)) return false;
} }
return true; return true;
@ -707,7 +783,7 @@ void HCallGlobal::PrintDataTo(StringStream* stream) {
void HCallKnownGlobal::PrintDataTo(StringStream* stream) { void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("o ", target()->shared()->DebugName()); stream->Add("%o ", target()->shared()->DebugName());
stream->Add("#%d", argument_count()); stream->Add("#%d", argument_count());
} }
@ -764,6 +840,24 @@ void HReturn::PrintDataTo(StringStream* stream) {
} }
Representation HBranch::observed_input_representation(int index) {
static const ToBooleanStub::Types tagged_types(
ToBooleanStub::UNDEFINED |
ToBooleanStub::NULL_TYPE |
ToBooleanStub::SPEC_OBJECT |
ToBooleanStub::STRING);
if (expected_input_types_.ContainsAnyOf(tagged_types)) {
return Representation::Tagged();
} else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
return Representation::Double();
} else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
return Representation::Integer32();
} else {
return Representation::None();
}
}
void HCompareMap::PrintDataTo(StringStream* stream) { void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream); value()->PrintNameTo(stream);
stream->Add(" (%p)", *map()); stream->Add(" (%p)", *map());
@ -859,16 +953,6 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
} }
HValue* HConstant::Canonicalize() {
return HasNoUses() ? NULL : this;
}
HValue* HTypeof::Canonicalize() {
return HasNoUses() ? NULL : this;
}
HValue* HBitwise::Canonicalize() { HValue* HBitwise::Canonicalize() {
if (!representation().IsInteger32()) return this; if (!representation().IsInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x. // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
@ -1058,6 +1142,13 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
} }
void HLoadElements::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
typecheck()->PrintNameTo(stream);
}
void HCheckMaps::PrintDataTo(StringStream* stream) { void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream); value()->PrintNameTo(stream);
stream->Add(" [%p", *map_set()->first()); stream->Add(" [%p", *map_set()->first());
@ -1342,15 +1433,11 @@ void HPhi::InitRealUses(int phi_id) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) { for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value(); HValue* value = it.value();
if (!value->IsPhi()) { if (!value->IsPhi()) {
Representation rep = value->ObservedInputRepresentation(it.index()); Representation rep = value->observed_input_representation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight(); non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) { if (FLAG_trace_representation) {
PrintF("%d %s is used by %d %s as %s\n", PrintF("#%d Phi is used by real #%d %s as %s\n",
this->id(), id(), value->id(), value->Mnemonic(), rep.Mnemonic());
this->Mnemonic(),
value->id(),
value->Mnemonic(),
rep.Mnemonic());
} }
} }
} }
@ -1359,11 +1446,8 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) { void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) { if (FLAG_trace_representation) {
PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n", PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
this->id(), id(), other->id(),
this->Mnemonic(),
other->id(),
other->Mnemonic(),
other->non_phi_uses_[Representation::kInteger32], other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble], other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]); other->non_phi_uses_[Representation::kTagged]);
@@ -1382,9 +1466,20 @@ void HPhi::AddIndirectUsesTo(int* dest) {
 }
 
 
-void HPhi::ResetInteger32Uses() {
-  non_phi_uses_[Representation::kInteger32] = 0;
-  indirect_uses_[Representation::kInteger32] = 0;
+void HSimulate::MergeInto(HSimulate* other) {
+  for (int i = 0; i < values_.length(); ++i) {
+    HValue* value = values_[i];
+    if (HasAssignedIndexAt(i)) {
+      other->AddAssignedValue(GetAssignedIndexAt(i), value);
+    } else {
+      if (other->pop_count_ > 0) {
+        other->pop_count_--;
+      } else {
+        other->AddPushedValue(value);
+      }
+    }
+  }
+  other->pop_count_ += pop_count();
 }
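
The merge rule reads more easily outside the IR classes: forwarded slot assignments always survive, while each pushed value first cancels one of the other simulate's pending pops. A toy model with plain containers (types and names hypothetical, not V8's):

    #include <map>
    #include <vector>

    struct SimDelta {
      std::map<int, int> assigned;  // environment slot -> assigned value
      std::vector<int> pushed;      // values pushed on the expression stack
      int pop_count = 0;            // pops this simulate still has to do

      // Fold this delta into 'other', mirroring HSimulate::MergeInto.
      void MergeInto(SimDelta* other) {
        for (const auto& kv : assigned) {
          other->assigned[kv.first] = kv.second;  // assignments forwarded
        }
        for (int value : pushed) {
          if (other->pop_count > 0) {
            other->pop_count--;              // a push cancels a pending pop
          } else {
            other->pushed.push_back(value);  // otherwise it survives
          }
        }
        other->pop_count += pop_count;       // leftover pops accumulate
      }
    };
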
@@ -1393,7 +1488,7 @@ void HSimulate::PrintDataTo(StringStream* stream) {
   if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
   if (values_.length() > 0) {
     if (pop_count_ > 0) stream->Add(" /");
-    for (int i = 0; i < values_.length(); ++i) {
+    for (int i = values_.length() - 1; i >= 0; --i) {
       if (i > 0) stream->Add(",");
       if (HasAssignedIndexAt(i)) {
         stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
@@ -1432,7 +1527,6 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
     : handle_(handle),
       has_int32_value_(false),
       has_double_value_(false) {
-  set_representation(r);
   SetFlag(kUseGVN);
   if (handle_->IsNumber()) {
     double n = handle_->Number();
@@ -1441,6 +1535,16 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
     double_value_ = n;
     has_double_value_ = true;
   }
+  if (r.IsNone()) {
+    if (has_int32_value_) {
+      r = Representation::Integer32();
+    } else if (has_double_value_) {
+      r = Representation::Double();
+    } else {
+      r = Representation::Tagged();
+    }
+  }
+  set_representation(r);
 }
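
The added fallback is a three-step ladder: a constant that round-trips through int32 defaults to Integer32, any other numeric constant to Double, and everything else to Tagged. The same decision as a free function (enum and name hypothetical):

    #include <cstdint>

    enum class Rep { Integer32, Double, Tagged };

    // Pick the cheapest representation a constant's value supports:
    // int32 round-trip -> Integer32, other numbers -> Double, else Tagged.
    Rep DefaultRepresentationFor(double n, bool is_number) {
      if (!is_number) return Rep::Tagged;
      bool fits_int32 = n >= INT32_MIN && n <= INT32_MAX &&
                        n == static_cast<double>(static_cast<int32_t>(n));
      return fits_int32 ? Rep::Integer32 : Rep::Double;
    }
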
@@ -1539,6 +1643,60 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
 }
 
 
+void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
+  ASSERT(CheckFlag(kFlexibleRepresentation));
+  Representation new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+  // When the operation has information about its own output type, don't look
+  // at uses.
+  if (!observed_output_representation_.IsNone()) return;
+  new_rep = RepresentationFromUses();
+  UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HBinaryOperation::RepresentationFromInputs() {
+  // Determine the worst case of observed input representations and
+  // the currently assumed output representation.
+  Representation rep = representation();
+  if (observed_output_representation_.is_more_general_than(rep)) {
+    rep = observed_output_representation_;
+  }
+  for (int i = 1; i <= 2; ++i) {
+    Representation input_rep = observed_input_representation(i);
+    if (input_rep.is_more_general_than(rep)) rep = input_rep;
+  }
+  // If any of the actual input representations is more general than what we
+  // have so far but not Tagged, use that representation instead.
+  Representation left_rep = left()->representation();
+  Representation right_rep = right()->representation();
+  if (left_rep.is_more_general_than(rep) &&
+      left()->CheckFlag(kFlexibleRepresentation)) {
+    rep = left_rep;
+  }
+  if (right_rep.is_more_general_than(rep) &&
+      right()->CheckFlag(kFlexibleRepresentation)) {
+    rep = right_rep;
+  }
+  return rep;
+}
+
+
+void HBinaryOperation::AssumeRepresentation(Representation r) {
+  set_observed_input_representation(r, r);
+  HValue::AssumeRepresentation(r);
+}
+
+
+void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
+  ASSERT(CheckFlag(kFlexibleRepresentation));
+  Representation new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+  // Do not care about uses.
+}
+
+
 Range* HBitwise::InferRange(Zone* zone) {
   if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
   const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
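
HBinaryOperation::RepresentationFromInputs above is essentially a join over the small ordering None < Integer32 < Double < Tagged implied by is_more_general_than: start from the current assumption and widen by every observation. In miniature (enum values assumed to follow that order):

    #include <algorithm>
    #include <initializer_list>

    // Assumed ordering: each value is more general than the ones before it.
    enum Rep { kNone, kInteger32, kDouble, kTagged };

    // Widening join: keep the most general representation seen so far.
    Rep Join(Rep current, std::initializer_list<Rep> observed) {
      for (Rep r : observed) current = std::max(current, r);
      return current;
    }

    // e.g. Join(kInteger32, {kNone, kDouble}) == kDouble
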
@@ -1615,7 +1773,7 @@ Range* HShl::InferRange(Zone* zone) {
 }
 
 
-Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
+Range* HLoadKeyed::InferRange(Zone* zone) {
   switch (elements_kind()) {
     case EXTERNAL_PIXEL_ELEMENTS:
       return new(zone) Range(0, 255);
@@ -1670,9 +1828,19 @@ void HGoto::PrintDataTo(StringStream* stream) {
 }
 
 
-void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
-  input_representation_ = r;
-  if (r.IsDouble()) {
+void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
+  Representation rep = Representation::None();
+  Representation left_rep = left()->representation();
+  Representation right_rep = right()->representation();
+  bool observed_integers =
+      observed_input_representation(0).IsInteger32() &&
+      observed_input_representation(1).IsInteger32();
+  bool inputs_are_not_doubles =
+      !left_rep.IsDouble() && !right_rep.IsDouble();
+  if (observed_integers && inputs_are_not_doubles) {
+    rep = Representation::Integer32();
+  } else {
+    rep = Representation::Double();
    // According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
    // and !=) have special handling of undefined, e.g. undefined == undefined
    // is 'true'. Relational comparisons have a different semantic, first
@@ -1689,9 +1857,8 @@ void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
     if (!Token::IsOrderedRelationalCompareOp(token_)) {
       SetFlag(kDeoptimizeOnUndefined);
     }
-  } else {
-    ASSERT(r.IsInteger32());
   }
+  ChangeRepresentation(rep);
 }
@@ -1842,11 +2009,25 @@ void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
 }
 
 
-void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
+void HLoadKeyed::PrintDataTo(StringStream* stream) {
+  if (!is_external()) {
+    elements()->PrintNameTo(stream);
+  } else {
+    ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+           elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+    elements()->PrintNameTo(stream);
+    stream->Add(".");
+    stream->Add(ElementsKindToString(elements_kind()));
+  }
   stream->Add("[");
   key()->PrintNameTo(stream);
-  stream->Add("] ");
+  if (IsDehoisted()) {
+    stream->Add(" + %d] ", index_offset());
+  } else {
+    stream->Add("] ");
+  }
   dependency()->PrintNameTo(stream);
   if (RequiresHoleCheck()) {
     stream->Add(" check_hole");
@@ -1854,29 +2035,26 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
 }
 
 
-bool HLoadKeyedFastElement::RequiresHoleCheck() {
+bool HLoadKeyed::RequiresHoleCheck() const {
   if (IsFastPackedElementsKind(elements_kind())) {
     return false;
   }
 
+  if (IsFastDoubleElementsKind(elements_kind())) {
+    return true;
+  }
+
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
-    if (!use->IsChange()) return true;
+    if (!use->IsChange()) {
+      return true;
+    }
   }
 
   return false;
 }
 
 
-void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
-  elements()->PrintNameTo(stream);
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  stream->Add("] ");
-  dependency()->PrintNameTo(stream);
-}
-
-
 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1889,21 +2067,22 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
   // Recognize generic keyed loads that use property name generated
   // by for-in statement as a key and rewrite them into fast property load
   // by index.
-  if (key()->IsLoadKeyedFastElement()) {
-    HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
-    if (key_load->object()->IsForInCacheArray()) {
+  if (key()->IsLoadKeyed()) {
+    HLoadKeyed* key_load = HLoadKeyed::cast(key());
+    if (key_load->elements()->IsForInCacheArray()) {
       HForInCacheArray* names_cache =
-          HForInCacheArray::cast(key_load->object());
+          HForInCacheArray::cast(key_load->elements());
       if (names_cache->enumerable() == object()) {
         HForInCacheArray* index_cache =
             names_cache->index_cache();
         HCheckMapValue* map_check =
             new(block()->zone()) HCheckMapValue(object(), names_cache->map());
-        HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
+        HInstruction* index = new(block()->zone()) HLoadKeyed(
            index_cache,
            key_load->key(),
-            key_load->key());
+            key_load->key(),
+            key_load->elements_kind());
        map_check->InsertBefore(this);
        index->InsertBefore(this);
        HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
@@ -1918,56 +2097,6 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
 }
 
 
-void HLoadKeyedSpecializedArrayElement::PrintDataTo(
-    StringStream* stream) {
-  external_pointer()->PrintNameTo(stream);
-  stream->Add(".");
-  switch (elements_kind()) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      stream->Add("byte");
-      break;
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      stream->Add("u_byte");
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      stream->Add("short");
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      stream->Add("u_short");
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-      stream->Add("int");
-      break;
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      stream->Add("u_int");
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      stream->Add("float");
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      stream->Add("double");
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-      stream->Add("pixel");
-      break;
-    case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  stream->Add("] ");
-  dependency()->PrintNameTo(stream);
-}
-
-
 void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add(".");
@@ -1994,20 +2123,25 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
 }
 
 
-void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  stream->Add("] = ");
-  value()->PrintNameTo(stream);
-}
-
-
-void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
-  elements()->PrintNameTo(stream);
+void HStoreKeyed::PrintDataTo(StringStream* stream) {
+  if (!is_external()) {
+    elements()->PrintNameTo(stream);
+  } else {
+    elements()->PrintNameTo(stream);
+    stream->Add(".");
+    stream->Add(ElementsKindToString(elements_kind()));
+    ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+           elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  }
   stream->Add("[");
   key()->PrintNameTo(stream);
-  stream->Add("] = ");
+  if (IsDehoisted()) {
+    stream->Add(" + %d] = ", index_offset());
+  } else {
+    stream->Add("] = ");
+  }
   value()->PrintNameTo(stream);
 }
@@ -2021,56 +2155,6 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
 }
 
 
-void HStoreKeyedSpecializedArrayElement::PrintDataTo(
-    StringStream* stream) {
-  external_pointer()->PrintNameTo(stream);
-  stream->Add(".");
-  switch (elements_kind()) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      stream->Add("byte");
-      break;
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      stream->Add("u_byte");
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      stream->Add("short");
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      stream->Add("u_short");
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-      stream->Add("int");
-      break;
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      stream->Add("u_int");
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      stream->Add("float");
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      stream->Add("double");
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-      stream->Add("pixel");
-      break;
-    case FAST_SMI_ELEMENTS:
-    case FAST_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  stream->Add("] = ");
-  value()->PrintNameTo(stream);
-}
-
-
 void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   ElementsKind from_kind = original_map()->elements_kind();
@@ -2090,7 +2174,7 @@ void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
 }
 
 
-bool HLoadGlobalCell::RequiresHoleCheck() {
+bool HLoadGlobalCell::RequiresHoleCheck() const {
   if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
@@ -2361,10 +2445,10 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
 }
 
 
-bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
-  // If value was loaded from unboxed double backing store or
-  // converted from an integer then we don't have to canonicalize it.
-  if (value()->IsLoadKeyedFastDoubleElement() ||
+bool HStoreKeyed::NeedsCanonicalization() {
+  // If value is an integer or comes from the result of a keyed load
+  // then it will be a non-hole value: no need for canonicalization.
+  if (value()->IsLoadKeyed() ||
       (value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
     return false;
   }
@@ -2547,7 +2631,41 @@ void HBitwise::PrintDataTo(StringStream* stream) {
 }
 
 
-Representation HPhi::InferredRepresentation() {
+void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
+  ASSERT(CheckFlag(kFlexibleRepresentation));
+  // If there are non-Phi uses, and all of them have observed the same
+  // representation, then that's what this Phi is going to use.
+  Representation new_rep = RepresentationObservedByAllNonPhiUses();
+  if (!new_rep.IsNone()) {
+    UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
+    return;
+  }
+  new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+  new_rep = RepresentationFromUses();
+  UpdateRepresentation(new_rep, h_infer, "uses");
+  new_rep = RepresentationFromUseRequirements();
+  UpdateRepresentation(new_rep, h_infer, "use requirements");
+}
+
+
+Representation HPhi::RepresentationObservedByAllNonPhiUses() {
+  int non_phi_use_count = 0;
+  for (int i = Representation::kInteger32;
+       i < Representation::kNumRepresentations; ++i) {
+    non_phi_use_count += non_phi_uses_[i];
+  }
+  if (non_phi_use_count <= 1) return Representation::None();
+  for (int i = 0; i < Representation::kNumRepresentations; ++i) {
+    if (non_phi_uses_[i] == non_phi_use_count) {
+      return Representation::FromKind(static_cast<Representation::Kind>(i));
+    }
+  }
+  return Representation::None();
+}
+
+
+Representation HPhi::RepresentationFromInputs() {
   bool double_occurred = false;
   bool int32_occurred = false;
   for (int i = 0; i < OperandCount(); ++i) {
@@ -2556,6 +2674,7 @@ Representation HPhi::InferredRepresentation() {
       HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
       if (hint_value != NULL) {
         Representation hint = hint_value->representation();
+        if (hint.IsTagged()) return hint;
         if (hint.IsDouble()) double_occurred = true;
        if (hint.IsInteger32()) int32_occurred = true;
       }
@@ -2574,7 +2693,9 @@ Representation HPhi::InferredRepresentation() {
           return Representation::Tagged();
         }
       } else {
-        return Representation::Tagged();
+        if (value->IsPhi() && !IsConvertibleToInteger()) {
+          return Representation::Tagged();
+        }
       }
     }
   }
@@ -2587,6 +2708,37 @@ Representation HPhi::InferredRepresentation() {
 }
 
 
+Representation HPhi::RepresentationFromUseRequirements() {
+  Representation all_uses_require = Representation::None();
+  bool all_uses_require_the_same = true;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    // We check for observed_input_representation elsewhere.
+    Representation use_rep =
+        it.value()->RequiredInputRepresentation(it.index());
+    // No useful info from this use -> look at the next one.
+    if (use_rep.IsNone()) {
+      continue;
+    }
+    if (use_rep.Equals(all_uses_require)) {
+      continue;
+    }
+    // This use's representation contradicts what we've seen so far.
+    if (!all_uses_require.IsNone()) {
+      ASSERT(!use_rep.Equals(all_uses_require));
+      all_uses_require_the_same = false;
+      break;
+    }
+    // Otherwise, initialize observed representation.
+    all_uses_require = use_rep;
+  }
+  if (all_uses_require_the_same) {
+    return all_uses_require;
+  }
+  return Representation::None();
+}
+
+
 // Node-specific verification code is only included in debug mode.
 #ifdef DEBUG
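
The use-requirements scan above is a standard "unanimous or bail" fold: the first informative requirement seeds the answer, and any later disagreement collapses it back to None. The same shape in isolation (hypothetical types, not V8's):

    #include <vector>

    enum class Rep { None, Integer32, Double, Tagged };

    // Return the one representation every use requires, or None when the
    // uses are silent or disagree.
    Rep UnanimousRequirement(const std::vector<Rep>& required_by_uses) {
      Rep agreed = Rep::None;
      for (Rep r : required_by_uses) {
        if (r == Rep::None) continue;   // this use gives no useful info
        if (agreed == Rep::None) {
          agreed = r;                   // first informative requirement
        } else if (r != agreed) {
          return Rep::None;             // contradiction: no unanimous answer
        }
      }
      return agreed;
    }
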
@@ -2625,12 +2777,6 @@ void HCheckFunction::Verify() {
   ASSERT(HasNoUses());
 }
 
-void HCheckPrototypeMaps::Verify() {
-  HInstruction::Verify();
-  ASSERT(HasNoUses());
-}
-
-
 #endif
 
 } }  // namespace v8::internal

Some files were not shown because too many files changed in this diff
