
v8: upgrade to v3.11.10

v0.9.1-release
Bert Belder
parent commit 50464cd4f4
 deps/v8/AUTHORS                                   |    1
 deps/v8/ChangeLog                                 |  311
 deps/v8/DEPS                                      |   27
 deps/v8/Makefile                                  |   69
 deps/v8/SConstruct                                |    4
 deps/v8/build/armu.gypi                           |   36
 deps/v8/build/common.gypi                         |  268
 deps/v8/build/gyp_v8                              |    2
 deps/v8/build/mipsu.gypi                          |   33
 deps/v8/build/standalone.gypi                     |   12
 deps/v8/include/v8-profiler.h                     |   85
 deps/v8/include/v8.h                              |  251
 deps/v8/samples/lineprocessor.cc                  |    6
 deps/v8/samples/samples.gyp                       |    8
 deps/v8/samples/shell.cc                          |   25
 deps/v8/src/SConscript                            |    1
 deps/v8/src/allocation-inl.h                      |    4
 deps/v8/src/allocation.h                          |   14
 deps/v8/src/api.cc                                |  175
 deps/v8/src/api.h                                 |   11
 deps/v8/src/apiutils.h                            |    9
 deps/v8/src/arguments.h                           |   13
 deps/v8/src/arm/builtins-arm.cc                   |    9
 deps/v8/src/arm/code-stubs-arm.cc                 |  146
 deps/v8/src/arm/codegen-arm.cc                    |    4
 deps/v8/src/arm/debug-arm.cc                      |    4
 deps/v8/src/arm/full-codegen-arm.cc               |  293
 deps/v8/src/arm/ic-arm.cc                         |   61
 deps/v8/src/arm/lithium-arm.cc                    |  210
 deps/v8/src/arm/lithium-arm.h                     |  116
 deps/v8/src/arm/lithium-codegen-arm.cc            |  548
 deps/v8/src/arm/lithium-codegen-arm.h             |   41
 deps/v8/src/arm/lithium-gap-resolver-arm.cc       |    4
 deps/v8/src/arm/macro-assembler-arm.cc            |  127
 deps/v8/src/arm/macro-assembler-arm.h             |   27
 deps/v8/src/arm/regexp-macro-assembler-arm.cc     |  244
 deps/v8/src/arm/regexp-macro-assembler-arm.h      |   24
 deps/v8/src/arm/simulator-arm.h                   |   12
 deps/v8/src/arm/stub-cache-arm.cc                 |  342
 deps/v8/src/array.js                              |  166
 deps/v8/src/assembler.cc                          |   95
 deps/v8/src/assembler.h                           |    7
 deps/v8/src/ast.cc                                |  269
 deps/v8/src/ast.h                                 |   81
 deps/v8/src/bootstrapper.cc                       |   38
 deps/v8/src/builtins.cc                           |  153
 deps/v8/src/builtins.h                            |    1
 deps/v8/src/bytecodes-irregexp.h                  |   35
 deps/v8/src/code-stubs.cc                         |   51
 deps/v8/src/code-stubs.h                          |    1
 deps/v8/src/codegen.h                             |    6
 deps/v8/src/compiler-intrinsics.h                 |   17
 deps/v8/src/compiler.cc                           |   21
 deps/v8/src/contexts.h                            |   22
 deps/v8/src/conversions-inl.h                     |    4
 deps/v8/src/d8.cc                                 |  245
 deps/v8/src/d8.h                                  |    6
 deps/v8/src/d8.js                                 |    2
 deps/v8/src/dateparser-inl.h                      |    3
 deps/v8/src/debug-agent.cc                        |   38
 deps/v8/src/debug-debugger.js                     |   57
 deps/v8/src/debug.cc                              |   86
 deps/v8/src/debug.h                               |   55
 deps/v8/src/deoptimizer.cc                        |   68
 deps/v8/src/deoptimizer.h                         |   24
 deps/v8/src/double.h                              |    6
 deps/v8/src/elements-kind.cc                      |  134
 deps/v8/src/elements-kind.h                       |  221
 deps/v8/src/elements.cc                           |  681
 deps/v8/src/elements.h                            |   65
 deps/v8/src/extensions/externalize-string-extension.cc |    7
 deps/v8/src/extensions/gc-extension.cc            |    5
 deps/v8/src/factory.cc                            |   80
 deps/v8/src/factory.h                             |   30
 deps/v8/src/flag-definitions.h                    |   16
 deps/v8/src/frames.cc                             |   54
 deps/v8/src/frames.h                              |   12
 deps/v8/src/full-codegen.cc                       |  141
 deps/v8/src/full-codegen.h                        |   54
 deps/v8/src/func-name-inferrer.cc                 |   15
 deps/v8/src/func-name-inferrer.h                  |   10
 deps/v8/src/handles.cc                            |    6
 deps/v8/src/hashmap.h                             |  102
 deps/v8/src/heap-inl.h                            |   27
 deps/v8/src/heap-profiler.cc                      |   50
 deps/v8/src/heap-profiler.h                       |   13
 deps/v8/src/heap.cc                               |  325
 deps/v8/src/heap.h                                |   59
 deps/v8/src/hydrogen-instructions.cc              |  249
 deps/v8/src/hydrogen-instructions.h               |  466
 deps/v8/src/hydrogen.cc                           | 2015
 deps/v8/src/hydrogen.h                            |  147
 deps/v8/src/ia32/assembler-ia32.h                 |    3
 deps/v8/src/ia32/builtins-ia32.cc                 |   20
 deps/v8/src/ia32/code-stubs-ia32.cc               |  150
 deps/v8/src/ia32/codegen-ia32.cc                  |   22
 deps/v8/src/ia32/debug-ia32.cc                    |   39
 deps/v8/src/ia32/deoptimizer-ia32.cc              |   98
 deps/v8/src/ia32/frames-ia32.h                    |    6
 deps/v8/src/ia32/full-codegen-ia32.cc             |  307

deps/v8/AUTHORS | 1

@@ -23,6 +23,7 @@ Daniel James <dnljms@gmail.com>
 Dineel D Sule <dsule@codeaurora.org>
 Erich Ocean <erich.ocean@me.com>
 Fedor Indutny <fedor@indutny.com>
+Filipe David Manana <fdmanana@gmail.com>
 Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
 Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>

deps/v8/ChangeLog | 311

@@ -1,3 +1,314 @@
2012-06-13: Version 3.11.10
Implemented heap profiler memory usage reporting.
Preserved error message during finally block in try..finally.
(Chromium issue 129171)
Fixed EnsureCanContainElements to properly handle double values.
(issue 2170)
Improved heuristics to keep objects in fast mode with inherited
constructors.
Performance and stability improvements on all platforms.
2012-06-06: Version 3.11.9
Implemented ES5-conformant semantics for inherited setters and read-only
properties. Currently behind --es5_readonly flag, because it breaks
WebKit bindings.
Exposed last seen heap object id via v8 public api.
Performance and stability improvements on all platforms.
2012-05-31: Version 3.11.8
Avoid overdeep recursion in regexp where a guarded expression with a
minimum repetition count is inside another quantifier.
(Chromium issue 129926)
Fixed missing write barrier in store field stub.
(issues 2143, 1465, Chromium issue 129355)
Proxies: Fixed receiver for setters inherited from proxies.
Proxies: Fixed ToStringArray function so that it does not reject some
keys.
(issue 1543)
Performance and stability improvements on all platforms.
2012-05-29: Version 3.11.7
Get better function names in stack traces.
Performance and stability improvements on all platforms.
2012-05-24: Version 3.11.6
Fixed RegExp.prototype.toString for incompatible receivers
(issue 1981).
Performance and stability improvements on all platforms.
2012-05-23: Version 3.11.5
Performance and stability improvements on all platforms.
2012-05-22: Version 3.11.4
Some cleanup to common.gypi. This fixes some host/target combinations
that weren't working in the Make build on Mac.
Handle EINTR in socket functions and continue incomplete sends.
(issue 2098)
Fixed python deprecations. (issue 1391)
Made socket send and receive more robust and return 0 on failure.
(Chromium issue 15719)
Fixed GCC 4.7 (C++11) compilation. (issue 2136)
Set '-m32' option for host and target platforms
Performance and stability improvements on all platforms.
2012-05-18: Version 3.11.3
Disable optimization for functions that have scopes that cannot be
reconstructed from the context chain. (issue 2071)
Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
Correctly check for native error objects. (Chromium issue 2138)
Performance and stability improvements on all platforms.
2012-05-16: Version 3.11.2
Revert r11496. (Chromium issue 128146)
Implement map collection for incremental marking. (issue 1465)
Add toString method to CallSite (which describes a frame of the
stack trace).
2012-05-15: Version 3.11.1
Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
Fix freebsd build. (V8 issue 2126)
Performance and stability improvements on all platforms.
2012-05-11: Version 3.11.0
Fixed compose-discard crasher from r11524 (issue 2123).
Activated new global semantics by default. Global variables can
now shadow properties of the global object (ES5.1 erratum).
Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
transitioning (Chromium issue 117409).
Made Error.prototype.name writable again, as required by the spec and
the web (Chromium issue 69187).
Implemented map collection with incremental marking (issue 1465).
Regexp: Fixed overflow in min-match-length calculation
(Chromium issue 126412).
MIPS: Fixed illegal instruction use on Loongson in code for
Math.random() (issue 2115).
Fixed crash bug in VisitChoice (Chromium issue 126272).
Fixed unsigned-Smi check in MappedArgumentsLookup
(Chromium issue 126414).
Fixed LiveEdit for function with no locals (issue 825).
Fixed register clobbering in LoadIC for interceptors
(Chromium issue 125988).
Implemented clearing of CompareICs (issue 2102).
Performance and stability improvements on all platforms.
2012-05-03: Version 3.10.8
Enabled MIPS cross-compilation.
Ensured reload of elements pointer in StoreFastDoubleElement stub.
(Chromium issue 125515)
Fixed corner cases in truncation behavior when storing to
TypedArrays. (issue 2110)
Fixed failure to properly recognize and report out-of-memory
conditions when allocating code space pages. (Chromium issue
118625)
Fixed idle notifications to perform a round of incremental GCs
after context disposal. (issue 2107)
Fixed preparser for try statement. (issue 2109)
Performance and stability improvements on all platforms.
2012-04-30: Version 3.10.7
Performance and stability improvements on all platforms.
2012-04-26: Version 3.10.6
Fixed some bugs in accessing details of the last regexp match.
Fixed source property of empty RegExp objects. (issue 1982)
Enabled inlining some V8 API functions.
Performance and stability improvements on all platforms.
2012-04-23: Version 3.10.5
Put new global var semantics behind a flag until WebKit tests are
cleaned up.
Enabled stepping into callback passed to builtins.
(Chromium issue 109564)
Performance and stability improvements on all platforms.
2012-04-19: Version 3.10.4
Fixed issues when stressing compaction with WeakMaps.
Fixed missing GVN flag for new-space promotion. (Chromium issue 123919)
Simplify invocation sequence at monomorphic function invocation sites.
(issue 2079)
Performance and stability improvements on all platforms.
2012-04-17: Version 3.10.3
Fixed several bugs in heap profiles (including issue 2078).
Throw syntax errors on illegal escape sequences.
Implemented rudimentary module linking (behind --harmony flag)
Implemented ES5 erratum: Global declarations should shadow
inherited properties.
Made handling of const more consistent when combined with 'eval'
and 'with'.
Fixed V8 on MinGW-x64 (issue 2026).
Performance and stability improvements on all platforms.
2012-04-13: Version 3.10.2
Fixed native ARM build (issues 1744, 539)
Return LOOKUP variable instead of CONTEXT for non-context allocated
outer scope parameters (Chromium issue 119609).
Fixed regular and ElementsKind transitions interfering with each other
(Chromium issue 122271).
Improved performance of keyed loads/stores which have a HeapNumber
index (issues 1388, 1295).
Fixed WeakMap processing for evacuation candidates (issue 2060).
Bailout on possible direct eval calls (Chromium issue 122681).
Do not assume that names of function expressions are context-allocated
(issue 2051).
Performance and stability improvements on all platforms.
2012-04-10: Version 3.10.1
Fixed bug with arguments object in inlined functions (issue 2045).
Fixed performance bug with lazy initialization (Chromium issue
118686).
Added support for Mac OS X 64bit builds with GYP.
(Patch contributed by Filipe David Manana <fdmanana@gmail.com>)
Fixed bug with hidden properties (issue 2034).
Fixed a performance bug when reloading pages (Chromium issue 117767,
V8 issue 1902).
Fixed bug when optimizing throw in top-level code (issue 2054).
Fixed two bugs with array literals (issue 2055, Chromium issue 121407).
Fixed bug with Math.min/Math.max with NaN inputs (issue 2056).
Fixed a bug with the new runtime profiler (Chromium issue 121147).
Fixed compilation of V8 using uClibc.
Optimized boot-up memory use.
Optimized regular expressions.
2012-03-30: Version 3.10.0
Fixed store IC writability check in strict mode
(Chromium issue 120099).
Resynchronize timers if the Windows system time was changed.
(Chromium issue 119815)
Removed "-mfloat-abi=hard" from host compiler cflags when building for
hardfp ARM
(https://code.google.com/p/chrome-os-partner/issues/detail?id=8539)
Fixed edge case for case independent regexp character classes
(issue 2032).
Reset function info counters after context disposal.
(Chromium issue 117767, V8 issue 1902)
Fixed missing write barrier in CopyObjectToObjectElements.
(Chromium issue 119926)
Fixed missing bounds check in HasElementImpl.
(Chromium issue 119925)
Performance and stability improvements on all platforms.
 2012-03-23: Version 3.9.24

         Activated count-based profiler for ARM.

deps/v8/DEPS | 27

@@ -0,0 +1,27 @@
# Note: The buildbots evaluate this file with CWD set to the parent
# directory and assume that the root of the checkout is in ./v8/, so
# all paths in here must match this assumption.

deps = {
  # Remember to keep the revision in sync with the Makefile.
  "v8/build/gyp":
    "http://gyp.googlecode.com/svn/trunk@1282",
}

deps_os = {
  "win": {
    "v8/third_party/cygwin":
      "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
    "v8/third_party/python_26":
      "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
  }
}

hooks = [
  {
    # A change to a .gyp, .gypi, or to GYP itself should run the generator.
    "pattern": ".",
    "action": ["python", "v8/build/gyp_v8"],
  },
]

deps/v8/Makefile | 69

@@ -150,21 +150,21 @@ $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
 $(ARCHES): $(addprefix $$@.,$(MODES))

 # Defines how to build a particular target (e.g. ia32.release).
-$(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
-	@$(MAKE) -C "$(OUTDIR)" -f Makefile-$(basename $@) \
+$(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
 	        CXX="$(CXX)" LINK="$(LINK)" \
 	        BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
 	                    python -c "print raw_input().capitalize()") \
 	        builddir="$(shell pwd)/$(OUTDIR)/$@"

-native: $(OUTDIR)/Makefile-native
-	@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
+native: $(OUTDIR)/Makefile.native
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
 	        CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
 	        builddir="$(shell pwd)/$(OUTDIR)/$@"

 # TODO(jkummerow): add "android.debug" when we need it.
-android android.release: $(OUTDIR)/Makefile-android
-	@$(MAKE) -C "$(OUTDIR)" -f Makefile-android \
+android android.release: $(OUTDIR)/Makefile.android
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile.android \
 	        CXX="$(ANDROID_TOOL_PREFIX)-g++" \
 	        AR="$(ANDROID_TOOL_PREFIX)-ar" \
 	        RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \

@@ -197,55 +197,41 @@ native.check: native
 	        --arch-and-mode=. $(TESTFLAGS)

 # Clean targets. You can clean each architecture individually, or everything.
-$(addsuffix .clean,$(ARCHES)):
-	rm -f $(OUTDIR)/Makefile-$(basename $@)
+$(addsuffix .clean,$(ARCHES)) android.clean:
+	rm -f $(OUTDIR)/Makefile.$(basename $@)
 	rm -rf $(OUTDIR)/$(basename $@).release
 	rm -rf $(OUTDIR)/$(basename $@).debug
-	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
+	find $(OUTDIR) -regex '.*\(host\|target\).$(basename $@)\.mk' -delete

 native.clean:
-	rm -f $(OUTDIR)/Makefile-native
+	rm -f $(OUTDIR)/Makefile.native
 	rm -rf $(OUTDIR)/native
-	find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
+	find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete

-android.clean:
-	rm -f $(OUTDIR)/Makefile-android
-	rm -rf $(OUTDIR)/android.release
-	find $(OUTDIR) -regex '.*\(host\|target\)-android\.mk' -delete
-
-clean: $(addsuffix .clean,$(ARCHES)) native.clean
+clean: $(addsuffix .clean,$(ARCHES)) native.clean android.clean

 # GYP file generation targets.
-$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
-	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=ia32 \
-	              -S-ia32 $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-x64: $(GYPFILES) $(ENVFILE)
-	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
-	              -S-x64 $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) build/armu.gypi
-	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-	              -S-arm $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-mips: $(GYPFILES) $(ENVFILE) build/mipsu.gypi
-	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Ibuild/mipsu.gypi \
-	              -S-mips $(GYPFLAGS)
+MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
+$(MAKEFILES): $(GYPFILES) $(ENVFILE)
+	GYP_GENERATORS=make \
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. \
+	              -Dv8_target_arch=$(subst .,,$(suffix $@)) \
+	              -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)

-$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
+$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
+	GYP_GENERATORS=make \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
+	              -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)

-$(OUTDIR)/Makefile-android: $(GYPFILES) $(ENVFILE) build/android.gypi \
+$(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
                             must-set-ANDROID_NDK_ROOT
+	GYP_GENERATORS=make \
 	CC="${ANDROID_TOOL_PREFIX}-gcc" \
+	CXX="${ANDROID_TOOL_PREFIX}-g++" \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-	              -S-android $(GYPFLAGS)
+	              -S.android $(GYPFLAGS)

 must-set-ANDROID_NDK_ROOT:
 ifndef ANDROID_NDK_ROOT

@@ -261,9 +247,10 @@ $(ENVFILE): $(ENVFILE).new
 # Stores current GYPFLAGS in a file.
 $(ENVFILE).new:
-	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new;
+	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new; \
+	    echo "CXX=$(CXX)" >> $(ENVFILE).new

 # Dependencies.
 dependencies:
 	svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
-	    --revision 1026
+	    --revision 1282

deps/v8/SConstruct | 4

@@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
   'os:linux': {
     'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
     'library:shared': {
-      'CPPDEFINES': ['V8_SHARED'],
+      'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
       'LIBS': ['pthread']
     }
   },
   'os:macos': {
     'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
     'library:shared': {
-      'CPPDEFINES': ['V8_SHARED']
+      'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
     }
   },
   'os:freebsd': {

deps/v8/build/armu.gypi | 36

@@ -1,36 +0,0 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'target_arch': 'ia32',
'v8_target_arch': 'arm',
'armv7': 1,
'arm_neon': 0,
'arm_fpu': 'vfpv3',
},
}

deps/v8/build/common.gypi | 268

@@ -110,133 +110,117 @@
       ['v8_enable_gdbjit==1', {
         'defines': ['ENABLE_GDB_JIT_INTERFACE',],
       }],
-      ['OS!="mac"', {
-        # TODO(mark): The OS!="mac" conditional is temporary. It can be
-        # removed once the Mac Chromium build stops setting target_arch to
-        # ia32 and instead sets it to mac. Other checks in this file for
-        # OS=="mac" can be removed at that time as well. This can be cleaned
-        # up once http://crbug.com/44205 is fixed.
-        'conditions': [
-          ['v8_target_arch=="arm"', {
-            'defines': [
-              'V8_TARGET_ARCH_ARM',
-            ],
-            'conditions': [
-              [ 'v8_can_use_unaligned_accesses=="true"', {
-                'defines': [
-                  'CAN_USE_UNALIGNED_ACCESSES=1',
-                ],
-              }],
-              [ 'v8_can_use_unaligned_accesses=="false"', {
-                'defines': [
-                  'CAN_USE_UNALIGNED_ACCESSES=0',
-                ],
-              }],
-              [ 'v8_can_use_vfp_instructions=="true"', {
-                'defines': [
-                  'CAN_USE_VFP_INSTRUCTIONS',
-                ],
-              }],
-              [ 'v8_use_arm_eabi_hardfloat=="true"', {
-                'defines': [
-                  'USE_EABI_HARDFLOAT=1',
-                  'CAN_USE_VFP_INSTRUCTIONS',
-                ],
-                'cflags': [
-                  '-mfloat-abi=hard',
-                ],
-              }, {
-                'defines': [
-                  'USE_EABI_HARDFLOAT=0',
-                ],
-              }],
-              # The ARM assembler assumes the host is 32 bits,
-              # so force building 32-bit host tools.
-              ['host_arch=="x64" or OS=="android"', {
-                'target_conditions': [
-                  ['_toolset=="host"', {
-                    'cflags': ['-m32'],
-                    'ldflags': ['-m32'],
-                  }],
-                ],
-              }],
-            ],
-          }],
-          ['v8_target_arch=="ia32"', {
-            'defines': [
-              'V8_TARGET_ARCH_IA32',
-            ],
-          }],
-          ['v8_target_arch=="mips"', {
-            'defines': [
-              'V8_TARGET_ARCH_MIPS',
-            ],
-            'conditions': [
-              [ 'target_arch=="mips"', {
-                'target_conditions': [
-                  ['_toolset=="target"', {
-                    'cflags': ['-EL'],
-                    'ldflags': ['-EL'],
-                    'conditions': [
-                      [ 'v8_use_mips_abi_hardfloat=="true"', {
-                        'cflags': ['-mhard-float'],
-                        'ldflags': ['-mhard-float'],
-                      }, {
-                        'cflags': ['-msoft-float'],
-                        'ldflags': ['-msoft-float'],
-                      }],
-                      ['mips_arch_variant=="mips32r2"', {
-                        'cflags': ['-mips32r2', '-Wa,-mips32r2'],
-                      }],
-                      ['mips_arch_variant=="loongson"', {
-                        'cflags': ['-mips3', '-Wa,-mips3'],
-                      }, {
-                        'cflags': ['-mips32', '-Wa,-mips32'],
-                      }],
-                    ],
-                  }],
-                ],
-              }],
-              [ 'v8_can_use_fpu_instructions=="true"', {
-                'defines': [
-                  'CAN_USE_FPU_INSTRUCTIONS',
-                ],
-              }],
-              [ 'v8_use_mips_abi_hardfloat=="true"', {
-                'defines': [
-                  '__mips_hard_float=1',
-                  'CAN_USE_FPU_INSTRUCTIONS',
-                ],
-              }, {
-                'defines': [
-                  '__mips_soft_float=1'
-                ],
-              }],
-              ['mips_arch_variant=="mips32r2"', {
-                'defines': ['_MIPS_ARCH_MIPS32R2',],
-              }],
-              ['mips_arch_variant=="loongson"', {
-                'defines': ['_MIPS_ARCH_LOONGSON',],
-              }],
-              # The MIPS assembler assumes the host is 32 bits,
-              # so force building 32-bit host tools.
-              ['host_arch=="x64"', {
-                'target_conditions': [
-                  ['_toolset=="host"', {
-                    'cflags': ['-m32'],
-                    'ldflags': ['-m32'],
-                  }],
-                ],
-              }],
-            ],
-          }],
-          ['v8_target_arch=="x64"', {
-            'defines': [
-              'V8_TARGET_ARCH_X64',
-            ],
-          }],
-        ],
-      }],
+      ['v8_target_arch=="arm"', {
+        'defines': [
+          'V8_TARGET_ARCH_ARM',
+        ],
+        'conditions': [
+          [ 'v8_can_use_unaligned_accesses=="true"', {
+            'defines': [
+              'CAN_USE_UNALIGNED_ACCESSES=1',
+            ],
+          }],
+          [ 'v8_can_use_unaligned_accesses=="false"', {
+            'defines': [
+              'CAN_USE_UNALIGNED_ACCESSES=0',
+            ],
+          }],
+          [ 'v8_can_use_vfp_instructions=="true"', {
+            'defines': [
+              'CAN_USE_VFP_INSTRUCTIONS',
+            ],
+          }],
+          [ 'v8_use_arm_eabi_hardfloat=="true"', {
+            'defines': [
+              'USE_EABI_HARDFLOAT=1',
+              'CAN_USE_VFP_INSTRUCTIONS',
+            ],
+            'target_conditions': [
+              ['_toolset=="target"', {
+                'cflags': ['-mfloat-abi=hard',],
+              }],
+            ],
+          }, {
+            'defines': [
+              'USE_EABI_HARDFLOAT=0',
+            ],
+          }],
+        ],
+      }],  # v8_target_arch=="arm"
+      ['v8_target_arch=="ia32"', {
+        'defines': [
+          'V8_TARGET_ARCH_IA32',
+        ],
+      }],  # v8_target_arch=="ia32"
+      ['v8_target_arch=="mips"', {
+        'defines': [
+          'V8_TARGET_ARCH_MIPS',
+        ],
+        'variables': {
+          'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
+        },
+        'conditions': [
+          ['mipscompiler=="yes"', {
+            'target_conditions': [
+              ['_toolset=="target"', {
+                'cflags': ['-EL'],
+                'ldflags': ['-EL'],
+                'conditions': [
+                  [ 'v8_use_mips_abi_hardfloat=="true"', {
+                    'cflags': ['-mhard-float'],
+                    'ldflags': ['-mhard-float'],
+                  }, {
+                    'cflags': ['-msoft-float'],
+                    'ldflags': ['-msoft-float'],
+                  }],
+                  ['mips_arch_variant=="mips32r2"', {
+                    'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+                  }],
+                  ['mips_arch_variant=="loongson"', {
+                    'cflags': ['-mips3', '-Wa,-mips3'],
+                  }, {
+                    'cflags': ['-mips32', '-Wa,-mips32'],
+                  }],
+                ],
+              }],
+            ],
+          }],
+          [ 'v8_can_use_fpu_instructions=="true"', {
+            'defines': [
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }],
+          [ 'v8_use_mips_abi_hardfloat=="true"', {
+            'defines': [
+              '__mips_hard_float=1',
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }, {
+            'defines': [
+              '__mips_soft_float=1'
+            ],
+          }],
+          ['mips_arch_variant=="mips32r2"', {
+            'defines': ['_MIPS_ARCH_MIPS32R2',],
+          }],
+          ['mips_arch_variant=="loongson"', {
+            'defines': ['_MIPS_ARCH_LOONGSON',],
+          }],
+        ],
+      }],  # v8_target_arch=="mips"
+      ['v8_target_arch=="x64"', {
+        'defines': [
+          'V8_TARGET_ARCH_X64',
+        ],
+        'xcode_settings': {
+          'ARCHS': [ 'x86_64' ],
+        },
+        'msvs_settings': {
+          'VCLinkerTool': {
+            'StackReserveSize': '2097152',
+          },
+        },
+      }],  # v8_target_arch=="x64"
       ['v8_use_liveobjectlist=="true"', {
         'defines': [
           'ENABLE_DEBUGGER_SUPPORT',
@@ -254,6 +238,11 @@
       'defines': [
         'WIN32',
       ],
+      'msvs_configuration_attributes': {
+        'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
+        'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
+        'CharacterSet': '1',
+      },
     }],
     ['OS=="win" and v8_enable_prof==1', {
       'msvs_settings': {
@@ -262,20 +251,9 @@
         },
       },
     }],
-    ['OS=="win" and v8_target_arch=="x64"', {
-      'msvs_settings': {
-        'VCLinkerTool': {
-          'StackReserveSize': '2097152',
-        },
-      },
-    }],
     ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
        or OS=="netbsd"', {
       'conditions': [
-        [ 'target_arch=="ia32"', {
-          'cflags': [ '-m32' ],
-          'ldflags': [ '-m32' ],
-        }],
         [ 'v8_no_strict_aliasing==1', {
           'cflags': [ '-fno-strict-aliasing' ],
         }],
@@ -284,6 +262,41 @@
     ['OS=="solaris"', {
       'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
     }],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mips")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
'target_conditions': [
['_toolset=="host"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
['_toolset=="target"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
],
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
   ],  # conditions
   'configurations': {
     'Debug': {
@@ -310,14 +323,8 @@
         },
       },
       'conditions': [
-        ['OS=="freebsd" or OS=="openbsd"', {
-          'cflags': [ '-I/usr/local/include' ],
-        }],
-        ['OS=="netbsd"', {
-          'cflags': [ '-I/usr/pkg/include' ],
-        }],
         ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-          'cflags': [ '-Wno-unused-parameter',
+          'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                       '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
         }],
       ],
@@ -345,12 +352,6 @@
         }],
       ],
     }],
-    ['OS=="freebsd" or OS=="openbsd"', {
-      'cflags': [ '-I/usr/local/include' ],
-    }],
-    ['OS=="netbsd"', {
-      'cflags': [ '-I/usr/pkg/include' ],
-    }],
     ['OS=="mac"', {
       'xcode_settings': {
         'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
@@ -363,11 +364,6 @@
       },
     }],  # OS=="mac"
     ['OS=="win"', {
-      'msvs_configuration_attributes': {
-        'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
-        'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
-        'CharacterSet': '1',
-      },
       'msvs_settings': {
         'VCCLCompilerTool': {
           'Optimization': '2',

deps/v8/build/gyp_v8 | 2

@@ -1,6 +1,6 @@
 #!/usr/bin/python
 #
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:

deps/v8/build/mipsu.gypi | 33

@@ -1,33 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'target_arch': 'ia32',
'v8_target_arch': 'mips',
},
}

deps/v8/build/standalone.gypi | 12

@@ -37,8 +37,9 @@
   'variables': {
     'variables': {
       'conditions': [
-        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-          # This handles the Linux platforms we generally deal with.
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
+           OS=="netbsd" or OS=="mac"', {
+          # This handles the Unix platforms we generally deal with.
           # Anything else gets passed through, which probably won't work
           # very well; such hosts should pass an explicit target_arch
           # to gyp.

@@ -46,7 +47,8 @@
           '<!(uname -m | sed -e "s/i.86/ia32/;\
               s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
         }, {
-          # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
+          # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
+          # OS!="netbsd" and OS!="mac"
           'host_arch%': 'ia32',
         }],
       ],

@@ -71,6 +73,10 @@
         'want_separate_host_toolset': 0,
       }],
     ],
+    # Default ARM variable settings.
+    'armv7%': 1,
+    'arm_neon%': 0,
+    'arm_fpu%': 'vfpv3',
   },
   'target_defaults': {
     'default_configuration': 'Debug',

deps/v8/include/v8-profiler.h | 85

@@ -64,6 +64,7 @@
  */
 namespace v8 {

+typedef uint32_t SnapshotObjectId;

 /**
  * CpuProfileNode represents a node in a call graph.
@@ -274,7 +275,7 @@ class V8EXPORT HeapGraphNode {
    * Returns node id. For the same heap object, the id remains the same
    * across all snapshots.
    */
-  uint64_t GetId() const;
+  SnapshotObjectId GetId() const;

   /** Returns node's own size, in bytes. */
   int GetSelfSize() const;
@@ -338,7 +339,7 @@ class V8EXPORT HeapSnapshot {
   const HeapGraphNode* GetRoot() const;

   /** Returns a node by its id. */
-  const HeapGraphNode* GetNodeById(uint64_t id) const;
+  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;

   /** Returns total nodes count in the snapshot. */
   int GetNodesCount() const;
@@ -346,6 +347,9 @@ class V8EXPORT HeapSnapshot {
   /** Returns a node by index. */
   const HeapGraphNode* GetNode(int index) const;

+  /** Returns a max seen JS object Id. */
+  SnapshotObjectId GetMaxSnapshotJSObjectId() const;
+
   /**
    * Deletes the snapshot and removes it from HeapProfiler's list.
    * All pointers to nodes, edges and paths previously returned become
@@ -364,16 +368,20 @@ class V8EXPORT HeapSnapshot {
    * with the following structure:
    *
-   *  {
-   *    snapshot: {title: "...", uid: nnn},
-   *    nodes: [
-   *      meta-info (JSON string),
-   *      nodes themselves
-   *    ],
-   *    strings: [strings]
-   *  }
+   *  {
+   *    snapshot: {
+   *      title: "...",
+   *      uid: nnn,
+   *      meta: { meta-info },
+   *      node_count: nnn,
+   *      edge_count: nnn
+   *    },
+   *    nodes: [nodes array],
+   *    edges: [edges array],
+   *    strings: [strings array]
+   *  }
    *
-   * Outgoing node links are stored after each node. Nodes reference strings
-   * and other nodes by their indexes in corresponding arrays.
+   * Nodes reference strings, other nodes, and edges by their indexes
+   * in corresponding arrays.
    */
  void Serialize(OutputStream* stream, SerializationFormat format) const;
};
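Usage note (not part of this commit's diff): a minimal sketch of driving the serializer described above from an embedder. The FileOutputStream class and its fwrite-based sink are illustrative assumptions, not v8 API.

// Sketch: serialize a heap snapshot to JSON with the API shown above.
#include <cstdio>
#include "v8-profiler.h"

class FileOutputStream : public v8::OutputStream {
 public:
  explicit FileOutputStream(FILE* out) : out_(out) {}
  virtual void EndOfStream() { fflush(out_); }
  virtual WriteResult WriteAsciiChunk(char* data, int size) {
    // Keep streaming as long as the whole chunk reaches the file.
    size_t written = fwrite(data, 1, size, out_);
    return written == static_cast<size_t>(size) ? kContinue : kAbort;
  }
 private:
  FILE* out_;
};

void DumpHeapSnapshot(FILE* out) {
  const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
      v8::String::New("dump"), v8::HeapSnapshot::kFull);
  FileOutputStream stream(out);
  snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
}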
@@ -404,6 +412,19 @@ class V8EXPORT HeapProfiler {
   /** Returns a profile by uid. */
   static const HeapSnapshot* FindSnapshot(unsigned uid);
/**
* Returns SnapshotObjectId for a heap object referenced by |value| if
* it has been seen by the heap profiler, kUnknownObjectId otherwise.
*/
static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
/**
* A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
* it in case heap profiler cannot find id for the object passed as
* parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
*/
static const SnapshotObjectId kUnknownObjectId = 0;
   /**
    * Takes a heap snapshot and returns it. Title may be an empty string.
    * See HeapSnapshot::Type for types description.

@@ -413,6 +434,34 @@
       HeapSnapshot::Type type = HeapSnapshot::kFull,
       ActivityControl* control = NULL);
/**
* Starts tracking of heap objects population statistics. After calling
* this method, all heap objects relocations done by the garbage collector
* are being registered.
*/
static void StartHeapObjectsTracking();
/**
* Adds a new time interval entry to the aggregated statistics array. The
* time interval entry contains information on the current heap objects
* population size. The method also updates aggregated statistics and
* reports updates for all previous time intervals via the OutputStream
* object. Updates on each time interval are provided as a stream of the
* HeapStatsUpdate structure instances.
* The return value of the function is the last seen heap object Id.
*
* StartHeapObjectsTracking must be called before the first call to this
* method.
*/
static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
/**
* Stops tracking of heap objects population statistics, cleans up all
* collected data. StartHeapObjectsTracking must be called again prior to
* calling PushHeapObjectsStats next time.
*/
static void StopHeapObjectsTracking();
   /**
    * Deletes all snapshots taken. All previously returned pointers to
    * snapshots and their contents become invalid after this call.

@@ -433,6 +482,9 @@ class V8EXPORT HeapProfiler {
   /** Returns the number of currently existing persistent handles. */
   static int GetPersistentHandleCount();
/** Returns memory used for profiler internal data and snapshots. */
static size_t GetMemorySizeUsedByProfiler();
 };

@@ -510,6 +562,19 @@ class V8EXPORT RetainedObjectInfo {  // NOLINT
 };
/**
* A struct for exporting HeapStats data from V8, using "push" model.
* See HeapProfiler::PushHeapObjectsStats.
*/
struct HeapStatsUpdate {
HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
: index(index), count(count), size(size) { }
uint32_t index; // Index of the time interval that was changed.
uint32_t count; // New value of count field for the interval with this index.
uint32_t size; // New value of size field for the interval with this index.
};
}  // namespace v8
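Usage note (not part of this commit's diff): a sketch of the new "push" heap-stats API end to end, assuming the embedder only wants to log updates. The StatsStream class and printf logging are illustrative; the profiler calls are the ones declared above.

// Sketch: consume HeapStatsUpdate records pushed by the profiler.
#include <cstdio>
#include "v8-profiler.h"

class StatsStream : public v8::OutputStream {
 public:
  virtual void EndOfStream() {}
  virtual WriteResult WriteAsciiChunk(char* data, int size) { return kAbort; }
  virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
                                          int count) {
    // One HeapStatsUpdate per time interval whose count/size changed.
    for (int i = 0; i < count; ++i) {
      printf("interval %u: count=%u size=%u\n",
             data[i].index, data[i].count, data[i].size);
    }
    return kContinue;
  }
};

void LogHeapStats() {
  v8::HeapProfiler::StartHeapObjectsTracking();
  StatsStream stream;
  // Returns the last seen heap object id; per-interval updates arrive
  // through WriteHeapStatsChunk above.
  v8::SnapshotObjectId last = v8::HeapProfiler::PushHeapObjectsStats(&stream);
  (void) last;
  v8::HeapProfiler::StopHeapObjectsTracking();
}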

deps/v8/include/v8.h | 251

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,11 +62,13 @@
 #else  // _WIN32

-// Setup for Linux shared library export. There is no need to distinguish
-// between building or using the V8 shared library, but we should not
-// export symbols when we are building a static library.
+// Setup for Linux shared library export.
 #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#ifdef BUILDING_V8_SHARED
 #define V8EXPORT __attribute__ ((visibility("default")))
+#else
+#define V8EXPORT
+#endif
 #else  // defined(__GNUC__) && (__GNUC__ >= 4)
 #define V8EXPORT
 #endif  // defined(__GNUC__) && (__GNUC__ >= 4)
@@ -98,6 +100,7 @@ class Function;
 class Date;
 class ImplementationUtilities;
 class Signature;
+class AccessorSignature;
 template <class T> class Handle;
 template <class T> class Local;
 template <class T> class Persistent;
@@ -107,6 +110,7 @@ class Data;
 class AccessorInfo;
 class StackTrace;
 class StackFrame;
+class Isolate;

 namespace internal {
@@ -862,13 +866,13 @@ class Value : public Data {
    * Returns true if this value is the undefined value. See ECMA-262
    * 4.3.10.
    */
-  V8EXPORT bool IsUndefined() const;
+  inline bool IsUndefined() const;

   /**
    * Returns true if this value is the null value. See ECMA-262
    * 4.3.11.
    */
-  V8EXPORT bool IsNull() const;
+  inline bool IsNull() const;

   /**
    * Returns true if this value is true.
@@ -982,7 +986,11 @@ class Value : public Data {
   V8EXPORT bool StrictEquals(Handle<Value> that) const;

  private:
+  inline bool QuickIsUndefined() const;
+  inline bool QuickIsNull() const;
   inline bool QuickIsString() const;
+  V8EXPORT bool FullIsUndefined() const;
+  V8EXPORT bool FullIsNull() const;
   V8EXPORT bool FullIsString() const;
 };
@@ -1079,6 +1087,7 @@ class String : public Primitive {
    * A zero length string.
    */
   V8EXPORT static v8::Local<v8::String> Empty();
+  inline static v8::Local<v8::String> Empty(Isolate* isolate);

   /**
    * Returns true if the string is external
@@ -1236,8 +1245,7 @@ class String : public Primitive {
    * this function should not otherwise delete or modify the resource. Neither
    * should the underlying buffer be deallocated or modified except through the
    * destructor of the external string resource.
-   */
-  V8EXPORT static Local<String> NewExternal(
+   */ V8EXPORT static Local<String> NewExternal(
       ExternalAsciiStringResource* resource);

   /**
@@ -1968,10 +1976,13 @@ class Arguments {
   inline Local<Object> Holder() const;
   inline bool IsConstructCall() const;
   inline Local<Value> Data() const;
+  inline Isolate* GetIsolate() const;

  private:
-  static const int kDataIndex = 0;
-  static const int kCalleeIndex = -1;
-  static const int kHolderIndex = -2;
+  static const int kIsolateIndex = 0;
+  static const int kDataIndex = -1;
+  static const int kCalleeIndex = -2;
+  static const int kHolderIndex = -3;

   friend class ImplementationUtilities;
   inline Arguments(internal::Object** implicit_args,
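Usage note (not part of this commit's diff): what the new kIsolateIndex slot enables inside a callback. The Count function and the per-isolate counter stored via SetData are hypothetical embedder code.

// Sketch: fetch the isolate from Arguments instead of thread-local state.
#include "v8.h"

static v8::Handle<v8::Value> Count(const v8::Arguments& args) {
  v8::Isolate* isolate = args.GetIsolate();  // avoids Isolate::GetCurrent()
  int* calls = static_cast<int*>(isolate->GetData());
  if (calls != NULL) ++(*calls);             // counter installed via SetData
  return v8::Undefined(isolate);
}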
@@ -1993,9 +2004,11 @@ class V8EXPORT AccessorInfo {
  public:
   inline AccessorInfo(internal::Object** args)
       : args_(args) { }
+  inline Isolate* GetIsolate() const;
   inline Local<Value> Data() const;
   inline Local<Object> This() const;
   inline Local<Object> Holder() const;
+
  private:
   internal::Object** args_;
 };
@@ -2277,7 +2290,8 @@ class V8EXPORT FunctionTemplate : public Template {
                    AccessorSetter setter,
                    Handle<Value> data,
                    AccessControl settings,
-                   PropertyAttribute attributes);
+                   PropertyAttribute attributes,
+                   Handle<AccessorSignature> signature);
   void SetNamedInstancePropertyHandler(NamedPropertyGetter getter,
                                        NamedPropertySetter setter,
                                        NamedPropertyQuery query,
@@ -2335,13 +2349,20 @@ class V8EXPORT ObjectTemplate : public Template {
    * cross-context access.
    * \param attribute The attributes of the property for which an accessor
    * is added.
+   * \param signature The signature describes valid receivers for the accessor
+   * and is used to perform implicit instance checks against them. If the
+   * receiver is incompatible (i.e. is not an instance of the constructor as
+   * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+   * thrown and no callback is invoked.
    */
   void SetAccessor(Handle<String> name,
                    AccessorGetter getter,
                    AccessorSetter setter = 0,
                    Handle<Value> data = Handle<Value>(),
                    AccessControl settings = DEFAULT,
-                   PropertyAttribute attribute = None);
+                   PropertyAttribute attribute = None,
+                   Handle<AccessorSignature> signature =
+                       Handle<AccessorSignature>());

   /**
    * Sets a named property handler on the object template.
@@ -2445,8 +2466,8 @@ class V8EXPORT ObjectTemplate : public Template {

 /**
- * A Signature specifies which receivers and arguments a function can
- * legally be called with.
+ * A Signature specifies which receivers and arguments are valid
+ * parameters to a function.
  */
 class V8EXPORT Signature : public Data {
  public:

@@ -2459,6 +2480,19 @@ class V8EXPORT Signature : public Data {
 };
/**
* An AccessorSignature specifies which receivers are valid parameters
* to an accessor callback.
*/
class V8EXPORT AccessorSignature : public Data {
public:
static Local<AccessorSignature> New(Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>());
private:
AccessorSignature();
};
 /**
  * A utility for determining the type of objects based on the template
  * they were constructed from.
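Usage note (not part of this commit's diff): wiring an AccessorSignature into ObjectTemplate::SetAccessor, assuming a FunctionTemplate-based API object. XGetter, InstallAccessor, and the constant getter body are illustrative assumptions.

// Sketch: limit an accessor to receivers created from fun_templ.
#include "v8.h"
using namespace v8;

static Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) {
  return Integer::New(42);
}

void InstallAccessor(Handle<FunctionTemplate> fun_templ) {
  // Receivers failing fun_templ->HasInstance() now get an implicit
  // TypeError instead of reaching XGetter with a foreign holder.
  Handle<AccessorSignature> sig = AccessorSignature::New(fun_templ);
  fun_templ->InstanceTemplate()->SetAccessor(
      String::New("x"), XGetter, 0, Handle<Value>(), DEFAULT, None, sig);
}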
@@ -2552,6 +2586,11 @@ Handle<Primitive> V8EXPORT Null();
 Handle<Boolean> V8EXPORT True();
 Handle<Boolean> V8EXPORT False();
inline Handle<Primitive> Undefined(Isolate* isolate);
inline Handle<Primitive> Null(Isolate* isolate);
inline Handle<Boolean> True(Isolate* isolate);
inline Handle<Boolean> False(Isolate* isolate);
 /**
  * A set of constraints that specifies the limits of the runtime's memory use.
@@ -2802,13 +2841,13 @@ class V8EXPORT Isolate {
   /**
    * Associate embedder-specific data with the isolate
    */
-  void SetData(void* data);
+  inline void SetData(void* data);

   /**
-   * Retrive embedder-specific data from the isolate.
+   * Retrieve embedder-specific data from the isolate.
    * Returns NULL if SetData has never been called.
    */
-  void* GetData();
+  inline void* GetData();

  private:
   Isolate();
@@ -3153,7 +3192,8 @@ class V8EXPORT V8 {
    * that is kept alive by JavaScript objects.
    * \returns the adjusted value.
    */
-  static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+  static intptr_t AdjustAmountOfExternalAllocatedMemory(
+      intptr_t change_in_bytes);
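Usage note (not part of this commit's diff): with the widened intptr_t signature, external allocations larger than 2 GB no longer overflow the accounting on 64-bit hosts. The two callbacks are hypothetical embedder code.

// Sketch: report externally held memory to the GC heuristics.
#include "v8.h"

void OnExternalBufferAllocated(size_t byte_length) {
  v8::V8::AdjustAmountOfExternalAllocatedMemory(
      static_cast<intptr_t>(byte_length));
}

void OnExternalBufferFreed(size_t byte_length) {
  v8::V8::AdjustAmountOfExternalAllocatedMemory(
      -static_cast<intptr_t>(byte_length));
}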
   /**
    * Suspends recording of tick samples in the profiler.
@@ -3735,6 +3775,12 @@ class V8EXPORT Locker {
 };
/**
* A struct for exporting HeapStats data from V8, using "push" model.
*/
struct HeapStatsUpdate;
 /**
  * An interface for exporting data from V8, using "push" model.
  */
@@ -3760,6 +3806,14 @@
    * will not be called in case writing was aborted.
    */
   virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
/**
* Writes the next chunk of heap stats data into the stream. Writing
* can be stopped by returning kAbort as function result. EndOfStream
* will not be called in case writing was aborted.
*/
virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
return kAbort;
};
 };
@@ -3848,18 +3902,6 @@ const uintptr_t kEncodablePointerMask =
     PlatformSmiTagging::kEncodablePointerMask;
 const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
template <size_t ptr_size> struct InternalConstants;
// Internal constants for 32-bit systems.
template <> struct InternalConstants<4> {
static const int kStringResourceOffset = 3 * kApiPointerSize;
};
// Internal constants for 64-bit systems.
template <> struct InternalConstants<8> {
static const int kStringResourceOffset = 3 * kApiPointerSize;
};
 /**
  * This class exports constants and functionality from within v8 that
  * is necessary to implement inline functions in the v8 api. Don't
@@ -3871,18 +3913,31 @@ class Internals {
   // the implementation of v8.
   static const int kHeapObjectMapOffset = 0;
   static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
-  static const int kStringResourceOffset =
-      InternalConstants<kApiPointerSize>::kStringResourceOffset;
+  static const int kStringResourceOffset = 3 * kApiPointerSize;

+  static const int kOddballKindOffset = 3 * kApiPointerSize;
   static const int kForeignAddressOffset = kApiPointerSize;
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;

+  static const int kIsolateStateOffset = 0;
+  static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
+  static const int kIsolateRootsOffset = 3 * kApiPointerSize;
+  static const int kUndefinedValueRootIndex = 5;
+  static const int kNullValueRootIndex = 7;
+  static const int kTrueValueRootIndex = 8;
+  static const int kFalseValueRootIndex = 9;
+  static const int kEmptySymbolRootIndex = 128;
+
   static const int kJSObjectType = 0xaa;
   static const int kFirstNonstringType = 0x80;
+  static const int kOddballType = 0x82;
   static const int kForeignType = 0x85;

+  static const int kUndefinedOddballKind = 5;
+  static const int kNullOddballKind = 3;
+
   static inline bool HasHeapObjectTag(internal::Object* value) {
     return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
             kHeapObjectTag);
@@ -3902,6 +3957,11 @@ class Internals {
     return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
   }
static inline int GetOddballKind(internal::Object* obj) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
   static inline void* GetExternalPointerFromSmi(internal::Object* value) {
     const uintptr_t address = reinterpret_cast<uintptr_t>(value);
     return reinterpret_cast<void*>(address >> kPointerToSmiShift);
@@ -3922,6 +3982,28 @@ class Internals {
     return representation == kExternalTwoByteRepresentationTag;
   }
static inline bool IsInitialized(v8::Isolate* isolate) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateStateOffset;
return *reinterpret_cast<int*>(addr) == 1;
}
static inline void SetEmbedderData(v8::Isolate* isolate, void* data) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
*reinterpret_cast<void**>(addr) = data;
}
static inline void* GetEmbedderData(v8::Isolate* isolate) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
return *reinterpret_cast<void**>(addr);
}
static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
   template <typename T>
   static inline T ReadField(Object* ptr, int offset) {
     uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
@@ -4048,6 +4130,11 @@ Local<Value> Arguments::Data() const {
 }
Isolate* Arguments::GetIsolate() const {
return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
}
 bool Arguments::IsConstructCall() const {
   return is_construct_call_;
 }
@@ -4160,6 +4247,15 @@ String* String::Cast(v8::Value* value) {
 }
Local<String> String::Empty(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return Empty();
S* slot = I::GetRoot(isolate, I::kEmptySymbolRootIndex);
return Local<String>(reinterpret_cast<String*>(slot));
}
 String::ExternalStringResource* String::GetExternalStringResource() const {
   typedef internal::Object O;
   typedef internal::Internals I;
@@ -4178,6 +4274,42 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
 }
bool Value::IsUndefined() const {
#ifdef V8_ENABLE_CHECKS
return FullIsUndefined();
#else
return QuickIsUndefined();
#endif
}
bool Value::QuickIsUndefined() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
}
bool Value::IsNull() const {
#ifdef V8_ENABLE_CHECKS
return FullIsNull();
#else
return QuickIsNull();
#endif
}
bool Value::QuickIsNull() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kNullOddballKind);
}
 bool Value::IsString() const {
 #ifdef V8_ENABLE_CHECKS
   return FullIsString();
@@ -4283,6 +4415,11 @@ External* External::Cast(v8::Value* value) {
 }
Isolate* AccessorInfo::GetIsolate() const {
return *reinterpret_cast<Isolate**>(&args_[-3]);
}
Local<Value> AccessorInfo::Data() const { Local<Value> AccessorInfo::Data() const {
return Local<Value>(reinterpret_cast<Value*>(&args_[-2])); return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
} }
@ -4298,6 +4435,54 @@ Local<Object> AccessorInfo::Holder() const {
} }
Handle<Primitive> Undefined(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return Undefined();
S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
}
Handle<Primitive> Null(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return Null();
S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
}
Handle<Boolean> True(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return True();
S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
}
Handle<Boolean> False(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return False();
S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
}
void Isolate::SetData(void* data) {
typedef internal::Internals I;
I::SetEmbedderData(this, data);
}
void* Isolate::GetData() {
typedef internal::Internals I;
return I::GetEmbedderData(this);
}
/** /**
* \example shell.cc * \example shell.cc
* A simple shell that takes a list of expressions on the * A simple shell that takes a list of expressions on the
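
The additions above give embedders isolate-aware fast paths: Undefined(),
Null(), True(), False() and String::Empty() can read the requested singleton
straight out of the isolate's root array via Internals::GetRoot, and
Isolate::SetData()/GetData() store an embedder pointer at a fixed offset
inside the isolate, so none of them need the Isolate::Current() TLS lookup.
A minimal usage sketch; PerIsolateState and the function names are
hypothetical:

#include <v8.h>

struct PerIsolateState { int counter; };  // embedder-defined

static void AttachState(v8::Isolate* isolate) {
  isolate->SetData(new PerIsolateState());  // stored on the isolate itself
}

static v8::Handle<v8::Primitive> DefaultValue(v8::Isolate* isolate) {
  PerIsolateState* state =
      static_cast<PerIsolateState*>(isolate->GetData());
  // No TLS lookup: the singleton is read from the isolate's root array.
  if (state != NULL && state->counter > 0) {
    return v8::Null(isolate);
  }
  return v8::Undefined(isolate);
}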

6  deps/v8/samples/lineprocessor.cc

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -434,9 +434,9 @@ v8::Handle<v8::String> ReadLine() {
   }
   if (res == NULL) {
     v8::Handle<v8::Primitive> t = v8::Undefined();
-    return reinterpret_cast<v8::Handle<v8::String>&>(t);
+    return v8::Handle<v8::String>(v8::String::Cast(*t));
   }
-  // remove newline char
+  // Remove newline char
   for (char* pos = buffer; *pos != '\0'; pos++) {
     if (*pos == '\n') {
       *pos = '\0';

8  deps/v8/samples/samples.gyp

@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -48,6 +48,12 @@
       'sources': [
         'process.cc',
       ],
+    },
+    {
+      'target_name': 'lineprocessor',
+      'sources': [
+        'lineprocessor.cc',
+      ],
     }
   ],
 }

25  deps/v8/samples/shell.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,17 +67,20 @@ static bool run_shell;
 int main(int argc, char* argv[]) {
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   run_shell = (argc == 1);
-  v8::HandleScope handle_scope;
-  v8::Persistent<v8::Context> context = CreateShellContext();
-  if (context.IsEmpty()) {
-    printf("Error creating context\n");
-    return 1;
+  int result;
+  {
+    v8::HandleScope handle_scope;
+    v8::Persistent<v8::Context> context = CreateShellContext();
+    if (context.IsEmpty()) {
+      printf("Error creating context\n");
+      return 1;
+    }
+    context->Enter();
+    result = RunMain(argc, argv);
+    if (run_shell) RunShell(context);
+    context->Exit();
+    context.Dispose();
   }
-  context->Enter();
-  int result = RunMain(argc, argv);
-  if (run_shell) RunShell(context);
-  context->Exit();
-  context.Dispose();
   v8::V8::Dispose();
   return result;
 }
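
The reshuffle above is a lifetime fix: handle_scope and the context handle now
die at the end of the inner block, so no local handles still reference the
heap when v8::V8::Dispose() runs. The same pattern in isolation (RunEmbedder
and the script body are illustrative):

#include <v8.h>

static int RunEmbedder() {
  int result;
  {
    v8::HandleScope handle_scope;            // destroyed at the closing brace
    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope context_scope(context);
    result = 0;  // ... compile and run scripts here ...
    context.Dispose();
  }
  v8::V8::Dispose();                         // safe: no live handles remain
  return result;
}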

1  deps/v8/src/SConscript

@@ -68,6 +68,7 @@ SOURCES = {
     diy-fp.cc
     dtoa.cc
     elements.cc
+    elements-kind.cc
     execution.cc
     factory.cc
     flags.cc

4  deps/v8/src/allocation-inl.h

@@ -34,12 +34,12 @@ namespace v8 {
 namespace internal {

-void* PreallocatedStorage::New(size_t size) {
+void* PreallocatedStorageAllocationPolicy::New(size_t size) {
   return Isolate::Current()->PreallocatedStorageNew(size);
 }

-void PreallocatedStorage::Delete(void* p) {
+void PreallocatedStorageAllocationPolicy::Delete(void* p) {
   return Isolate::Current()->PreallocatedStorageDelete(p);
 }

14  deps/v8/src/allocation.h

@@ -104,7 +104,7 @@ char* StrNDup(const char* str, int n);
 // and free. Used as the default policy for lists.
 class FreeStoreAllocationPolicy {
  public:
-  INLINE(static void* New(size_t size)) { return Malloced::New(size); }
+  INLINE(void* New(size_t size)) { return Malloced::New(size); }
   INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
 };
@@ -117,12 +117,6 @@ class PreallocatedStorage {
   explicit PreallocatedStorage(size_t size);
   size_t size() { return size_; }
-
-  // TODO(isolates): Get rid of these-- we'll have to change the allocator
-  //                 interface to include a pointer to an isolate to do this
-  //                 efficiently.
-  static inline void* New(size_t size);
-  static inline void Delete(void* p);

  private:
   size_t size_;
   PreallocatedStorage* previous_;
@@ -137,6 +131,12 @@
 };

+struct PreallocatedStorageAllocationPolicy {
+  INLINE(void* New(size_t size));
+  INLINE(static void Delete(void* ptr));
+};
+
 } }  // namespace v8::internal

 #endif  // V8_ALLOCATION_H_

175  deps/v8/src/api.cc

@@ -512,6 +512,16 @@ void RegisteredExtension::Register(RegisteredExtension* that) {
 }

+void RegisteredExtension::UnregisterAll() {
+  RegisteredExtension* re = first_extension_;
+  while (re != NULL) {
+    RegisteredExtension* next = re->next();
+    delete re;
+    re = next;
+  }
+}
+
 void RegisterExtension(Extension* that) {
   RegisteredExtension* extension = new RegisteredExtension(that);
   RegisteredExtension::Register(extension);
@@ -980,6 +990,12 @@ Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
 }

+Local<AccessorSignature> AccessorSignature::New(
+    Handle<FunctionTemplate> receiver) {
+  return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
+}
+
 Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
   Handle<FunctionTemplate> types[1] = { type };
   return TypeSwitch::New(1, types);
@@ -1047,7 +1063,8 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
     AccessorSetter setter,
     v8::Handle<Value> data,
     v8::AccessControl settings,
-    v8::PropertyAttribute attributes) {
+    v8::PropertyAttribute attributes,
+    v8::Handle<AccessorSignature> signature) {
   i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
   ASSERT(getter != NULL);
   SET_FIELD_WRAPPED(obj, set_getter, getter);
@@ -1059,6 +1076,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
   if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
   if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
   obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+  if (!signature.IsEmpty()) {
+    obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
+  }
   return obj;
 }
@@ -1069,7 +1089,8 @@ void FunctionTemplate::AddInstancePropertyAccessor(
     AccessorSetter setter,
     v8::Handle<Value> data,
     v8::AccessControl settings,
-    v8::PropertyAttribute attributes) {
+    v8::PropertyAttribute attributes,
+    v8::Handle<AccessorSignature> signature) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate,
                   "v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
@@ -1078,9 +1099,9 @@ void FunctionTemplate::AddInstancePropertyAccessor(
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
-                                                    getter, setter, data,
-                                                    settings, attributes);
+  i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
+                                                    settings, attributes,
+                                                    signature);
   i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
   if (list->IsUndefined()) {
     list = NeanderArray().value();
@@ -1265,7 +1286,8 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
                                  AccessorSetter setter,
                                  v8::Handle<Value> data,
                                  AccessControl settings,
-                                 PropertyAttribute attribute) {
+                                 PropertyAttribute attribute,
+                                 v8::Handle<AccessorSignature> signature) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
   ENTER_V8(isolate);
@@ -1279,7 +1301,8 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
                                       setter,
                                       data,
                                       settings,
-                                      attribute);
+                                      attribute,
+                                      signature);
 }
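
AccessorSignature, threaded through MakeAccessorInfo() above, lets a
template-declared accessor record which FunctionTemplate its receiver must
come from, so V8 can reject calls where the accessor has been moved onto an
unrelated object. A sketch of the wiring (the template name and getter are
illustrative, not part of the patch):

#include <v8.h>

static v8::Handle<v8::Value> GetX(v8::Local<v8::String> name,
                                  const v8::AccessorInfo& info) {
  return v8::Integer::New(0);  // placeholder getter body
}

static void InstallAccessor(v8::Handle<v8::FunctionTemplate> point_templ) {
  // The accessor is only valid on instances created from point_templ.
  v8::Handle<v8::AccessorSignature> sig =
      v8::AccessorSignature::New(point_templ);
  point_templ->InstanceTemplate()->SetAccessor(
      v8::String::New("x"), GetX, 0, v8::Handle<v8::Value>(),
      v8::DEFAULT, v8::None, sig);
}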
@@ -2091,17 +2114,21 @@ bool StackFrame::IsConstructor() const {
 // --- D a t a ---

-bool Value::IsUndefined() const {
+bool Value::FullIsUndefined() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
     return false;
   }
-  return Utils::OpenHandle(this)->IsUndefined();
+  bool result = Utils::OpenHandle(this)->IsUndefined();
+  ASSERT_EQ(result, QuickIsUndefined());
+  return result;
 }

-bool Value::IsNull() const {
+bool Value::FullIsNull() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
-  return Utils::OpenHandle(this)->IsNull();
+  bool result = Utils::OpenHandle(this)->IsNull();
+  ASSERT_EQ(result, QuickIsNull());
+  return result;
 }
@@ -2799,9 +2826,13 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);

-  // When turning on access checks for a global object deoptimize all functions
-  // as optimized code does not always handle access checks.
-  i::Deoptimizer::DeoptimizeGlobalObject(*self);
+  // When deleting a property on the global object using ForceDelete
+  // deoptimize all functions as optimized code does not check for the hole
+  // value with DontDelete properties. We have to deoptimize all contexts
+  // because of possible cross-context inlined functions.
+  if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
+    i::Deoptimizer::DeoptimizeAll();
+  }

   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
@@ -3061,9 +3092,10 @@ bool Object::SetAccessor(Handle<String> name,
   ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
-                                                     getter, setter, data,
-                                                     settings, attributes);
+  v8::Handle<AccessorSignature> signature;
+  i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data,
+                                                     settings, attributes,
+                                                     signature);
   bool fast = Utils::OpenHandle(this)->HasFastProperties();
   i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
   if (result.is_null() || result->IsUndefined()) return false;
@@ -4612,7 +4644,9 @@ void* External::Value() const {
 Local<String> v8::String::Empty() {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
+  if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
+    return v8::Local<String>();
+  }
   LOG_API(isolate, "String::Empty()");
   return Utils::ToLocal(isolate->factory()->empty_symbol());
 }
@@ -5020,7 +5054,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  if (!self->HasFastElements()) {
+  if (!self->HasFastObjectElements()) {
     return Local<Object>();
   }
   i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -5198,7 +5232,7 @@ void V8::AddImplicitReferences(Persistent<Object> parent,
 }

-int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
     return 0;
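
Widening AdjustAmountOfExternalAllocatedMemory() from int to intptr_t lets
64-bit embedders report more than 2 GB of externally held memory without
overflowing the counter that drives GC heuristics. A sketch of the usual
bookkeeping pattern (the helper names are illustrative):

#include <cstdlib>
#include <v8.h>

static void* AllocateBacking(size_t bytes) {
  void* data = std::malloc(bytes);
  if (data != NULL) {
    v8::V8::AdjustAmountOfExternalAllocatedMemory(
        static_cast<intptr_t>(bytes));
  }
  return data;
}

static void FreeBacking(void* data, size_t bytes) {
  std::free(data);
  v8::V8::AdjustAmountOfExternalAllocatedMemory(
      -static_cast<intptr_t>(bytes));  // intptr_t keeps >2GB totals exact
}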
@@ -5378,17 +5412,6 @@ void Isolate::Exit() {
 }

-void Isolate::SetData(void* data) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->SetData(data);
-}
-
-void* Isolate::GetData() {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  return isolate->GetData();
-}
-
 String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
     : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
@@ -5988,7 +6011,7 @@ Handle<Value> HeapGraphEdge::GetName() const {
 const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
-  const i::HeapEntry* from = ToInternal(this)->From();
+  const i::HeapEntry* from = ToInternal(this)->from();
   return reinterpret_cast<const HeapGraphNode*>(from);
 }
@@ -6022,7 +6045,7 @@ Handle<String> HeapGraphNode::GetName() const {
 }

-uint64_t HeapGraphNode::GetId() const {
+SnapshotObjectId HeapGraphNode::GetId() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
   return ToInternal(this)->id();
@@ -6036,13 +6059,6 @@ int HeapGraphNode::GetSelfSize() const {
 }

-int HeapGraphNode::GetRetainedSize() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
-  return ToInternal(this)->retained_size();
-}
-
 int HeapGraphNode::GetChildrenCount() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@@ -6054,29 +6070,7 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
   return reinterpret_cast<const HeapGraphEdge*>(
-      &ToInternal(this)->children()[index]);
-}
-
-int HeapGraphNode::GetRetainersCount() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
-  return ToInternal(this)->retainers().length();
-}
-
-const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
-  return reinterpret_cast<const HeapGraphEdge*>(
-      ToInternal(this)->retainers()[index]);
-}
-
-const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
-  return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
+      ToInternal(this)->children()[index]);
 }
@@ -6137,18 +6131,18 @@ const HeapGraphNode* HeapSnapshot::GetRoot() const {
 }

-const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
+const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
   return reinterpret_cast<const HeapGraphNode*>(
-      ToInternal(this)->GetEntryById(static_cast<i::SnapshotObjectId>(id)));
+      ToInternal(this)->GetEntryById(id));
 }

 int HeapSnapshot::GetNodesCount() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
-  return ToInternal(this)->entries()->length();
+  return ToInternal(this)->entries().length();
 }
@@ -6156,7 +6150,14 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
   return reinterpret_cast<const HeapGraphNode*>(
-      ToInternal(this)->entries()->at(index));
+      &ToInternal(this)->entries().at(index));
+}
+
+SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
+  return ToInternal(this)->max_snapshot_js_object_id();
 }
@@ -6201,6 +6202,14 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
 }

+SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
+  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+  return i::HeapProfiler::GetSnapshotObjectId(obj);
+}
+
 const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
                                                HeapSnapshot::Type type,
                                                ActivityControl* control) {
@@ -6220,6 +6229,27 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
 }

+void HeapProfiler::StartHeapObjectsTracking() {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
+  i::HeapProfiler::StartHeapObjectsTracking();
+}
+
+void HeapProfiler::StopHeapObjectsTracking() {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
+  i::HeapProfiler::StopHeapObjectsTracking();
+}
+
+SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
+  return i::HeapProfiler::PushHeapObjectsStats(stream);
+}
+
 void HeapProfiler::DeleteAllSnapshots() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
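
The three new HeapProfiler entry points expose live-object tracking:
GetSnapshotObjectId() maps a handle to the stable id it will have in heap
snapshots, Start/StopHeapObjectsTracking() bracket the period during which
ids are kept up to date, and PushHeapObjectsStats() streams interim counts.
A minimal sketch, assuming the profiler header is available (TrackObject is
illustrative):

#include <v8-profiler.h>

static void TrackObject(v8::Handle<v8::Value> suspect) {
  v8::HeapProfiler::StartHeapObjectsTracking();
  // ... run scripts; `suspect` keeps the same id across GCs ...
  v8::SnapshotObjectId id =
      v8::HeapProfiler::GetSnapshotObjectId(suspect);
  v8::HeapProfiler::StopHeapObjectsTracking();
  (void)id;  // correlate with node ids in a later snapshot
}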
@@ -6240,6 +6270,11 @@ int HeapProfiler::GetPersistentHandleCount() {
 }

+size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
+  return i::HeapProfiler::GetMemorySizeUsedByProfiler();
+}
+
 v8::Testing::StressType internal::Testing::stress_type_ =
     v8::Testing::kStressTypeOpt;
@@ -6267,7 +6302,11 @@ static void SetFlagsFromString(const char* flags) {
 void Testing::PrepareStressRun(int run) {
   static const char* kLazyOptimizations =
-      "--prepare-always-opt --nolimit-inlining --noalways-opt";
+      "--prepare-always-opt "
+      "--max-inlined-source-size=999999 "
+      "--max-inlined-nodes=999999 "
+      "--max-inlined-nodes-cumulative=999999 "
+      "--noalways-opt";
   static const char* kForcedOptimizations = "--always-opt";

   // If deoptimization stressed turn on frequent deoptimization. If no value

11  deps/v8/src/api.h

@@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)

 v8::internal::Object* NeanderObject::get(int offset) {
-  ASSERT(value()->HasFastElements());
+  ASSERT(value()->HasFastObjectElements());
   return v8::internal::FixedArray::cast(value()->elements())->get(offset);
 }

 void NeanderObject::set(int offset, v8::internal::Object* value) {
-  ASSERT(value_->HasFastElements());
+  ASSERT(value_->HasFastObjectElements());
   v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
 }
@@ -146,6 +146,7 @@ class RegisteredExtension {
  public:
   explicit RegisteredExtension(Extension* extension);
   static void Register(RegisteredExtension* that);
+  static void UnregisterAll();
   Extension* extension() { return extension_; }
   RegisteredExtension* next() { return next_; }
   RegisteredExtension* next_auto() { return next_auto_; }
@@ -199,6 +200,8 @@ class Utils {
       v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
   static inline Local<Signature> ToLocal(
       v8::internal::Handle<v8::internal::SignatureInfo> obj);
+  static inline Local<AccessorSignature> AccessorSignatureToLocal(
+      v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
   static inline Local<TypeSwitch> ToLocal(
       v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
@@ -232,6 +235,8 @@
       OpenHandle(const v8::Context* context);
   static inline v8::internal::Handle<v8::internal::SignatureInfo>
       OpenHandle(const v8::Signature* sig);
+  static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
+      OpenHandle(const v8::AccessorSignature* sig);
   static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
       OpenHandle(const v8::TypeSwitch* that);
   static inline v8::internal::Handle<v8::internal::Foreign>
@@ -275,6 +280,7 @@ MAKE_TO_LOCAL(ToLocal, Foreign, External)
 MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
 MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
 MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
+MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
 MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
 MAKE_TO_LOCAL(MessageToLocal, Object, Message)
 MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
@@ -299,6 +305,7 @@ MAKE_OPEN_HANDLE(Template, TemplateInfo)
 MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
 MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
 MAKE_OPEN_HANDLE(Signature, SignatureInfo)
+MAKE_OPEN_HANDLE(AccessorSignature, FunctionTemplateInfo)
 MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
 MAKE_OPEN_HANDLE(Data, Object)
 MAKE_OPEN_HANDLE(RegExp, JSRegExp)

9  deps/v8/src/apiutils.h

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,14 +40,17 @@ class ImplementationUtilities {
   }

   // Packs additional parameters for the NewArguments function. |implicit_args|
-  // is a pointer to the last element of 3-elements array controlled by GC.
+  // is a pointer to the last element of 4-elements array controlled by GC.
   static void PrepareArgumentsData(internal::Object** implicit_args,
+                                   internal::Isolate* isolate,
                                    internal::Object* data,
                                    internal::JSFunction* callee,
                                    internal::Object* holder) {
     implicit_args[v8::Arguments::kDataIndex] = data;
     implicit_args[v8::Arguments::kCalleeIndex] = callee;
     implicit_args[v8::Arguments::kHolderIndex] = holder;
+    implicit_args[v8::Arguments::kIsolateIndex] =
+        reinterpret_cast<internal::Object*>(isolate);
   }

   static v8::Arguments NewArguments(internal::Object** implicit_args,
@@ -55,6 +58,8 @@ class ImplementationUtilities {
                                     bool is_construct_call) {
     ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
     ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
+    // The implicit isolate argument is not tagged and looks like a SMI.
+    ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi());

     return v8::Arguments(implicit_args, argv, argc, is_construct_call);
   }

13  deps/v8/src/arguments.h

@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,9 +91,11 @@ class CustomArguments : public Relocatable {
                   Object* data,
                   Object* self,
                   JSObject* holder) : Relocatable(isolate) {
-    values_[2] = self;
-    values_[1] = holder;
-    values_[0] = data;
+    ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
+    values_[3] = self;
+    values_[2] = holder;
+    values_[1] = data;
+    values_[0] = reinterpret_cast<Object*>(isolate);
   }

   inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
@@ -106,8 +108,9 @@ class CustomArguments : public Relocatable {
   void IterateInstance(ObjectVisitor* v);
   Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
+
  private:
-  Object* values_[3];
+  Object* values_[4];
 };
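
With the extra implicit slot above, every Arguments object now carries its
isolate, which is what the new Arguments::GetIsolate() accessor in v8.h
reads. Callbacks can therefore avoid the Isolate::Current() TLS lookup
entirely (Echo is an illustrative callback, not part of the patch):

#include <v8.h>

static v8::Handle<v8::Value> Echo(const v8::Arguments& args) {
  v8::Isolate* isolate = args.GetIsolate();   // no TLS lookup
  if (args.Length() < 1) return v8::Undefined(isolate);
  return args[0];
}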

9  deps/v8/src/arm/builtins-arm.cc

@@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
                                  Label* gc_required) {
   const int initial_capacity = JSArray::kPreallocatedArrayElements;
   STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);

   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -208,7 +208,8 @@ static void AllocateJSArray(MacroAssembler* masm,
                             bool fill_with_hole,
                             Label* gc_required) {
   // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
+  __ LoadInitialArrayMap(array_function, scratch2,
+                         elements_array_storage, fill_with_hole);

   if (FLAG_debug_code) {  // Assert that array size is not zero.
     __ tst(array_size, array_size);
@@ -440,10 +441,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ b(call_generic_code);

   __ bind(&not_double);
-  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
   // r3: JSArray
   __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                          FAST_ELEMENTS,
                                          r2,
                                          r9,

146  deps/v8/src/arm/code-stubs-arm.cc

@@ -3737,9 +3737,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   // Compute the return address in lr to return to after the jump below. Pc is
   // already at '+ 8' from the current instruction but return is after three
   // instructions so add another 4 to pc to get the return address.
-  masm->add(lr, pc, Operand(4));
-  __ str(lr, MemOperand(sp, 0));
-  masm->Jump(r5);
+  {
+    // Prevent literal pool emission before return address.
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    masm->add(lr, pc, Operand(4));
+    __ str(lr, MemOperand(sp, 0));
+    masm->Jump(r5);
+  }

   if (always_allocate) {
     // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
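
This is the recurring shape of the ARM fixes in this file: any instruction
sequence whose spacing is load-bearing (here lr is computed as pc + 4, the
address just past the jump) must stop the assembler from spilling a literal
pool into the middle of it. The pattern, reduced to its core in the stubs'
own macro shorthand:

{
  // RAII scope: no constant-pool emission between these instructions.
  Assembler::BlockConstPoolScope block_const_pool(masm);
  masm->add(lr, pc, Operand(4));   // return address = instruction after Jump
  __ str(lr, MemOperand(sp, 0));
  masm->Jump(r5);
}  // pool emission is allowed again from here on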
@@ -3956,14 +3960,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
   __ jmp(&invoke);
-  __ bind(&handler_entry);
-  handler_offset_ = handler_entry.pos();
-  // Caught exception: Store result (exception) in the pending exception
-  // field in the JSEnv and return a failure sentinel. Coming in here the
-  // fp will be invalid because the PushTryHandler below sets it to 0 to
-  // signal the existence of the JSEntry frame.
-  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                       isolate)));
+
+  // Block literal pool emission whilst taking the position of the handler
+  // entry. This avoids making the assumption that literal pools are always
+  // emitted after an instruction is emitted, rather than before.
+  {
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    __ bind(&handler_entry);
+    handler_offset_ = handler_entry.pos();
+    // Caught exception: Store result (exception) in the pending exception
+    // field in the JSEnv and return a failure sentinel. Coming in here the
+    // fp will be invalid because the PushTryHandler below sets it to 0 to
+    // signal the existence of the JSEntry frame.
+    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                         isolate)));
+  }
   __ str(r0, MemOperand(ip));
   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);
@@ -4006,9 +4017,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Branch and link to JSEntryTrampoline. We don't use the double underscore
   // macro for the add instruction because we don't want the coverage tool
-  // inserting instructions here after we read the pc.
-  __ mov(lr, Operand(pc));
-  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // inserting instructions here after we read the pc. We block literal pool
+  // emission for the same reason.
+  {
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    __ mov(lr, Operand(pc));
+    masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  }

   // Unlink this frame from the handler chain.
   __ PopTryHandler();
@@ -4824,27 +4839,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);

   // Isolates: note we add an additional parameter here (isolate pointer).
-  const int kRegExpExecuteArguments = 8;
+  const int kRegExpExecuteArguments = 9;
   const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);

   // Stack pointer now points to cell where return address is to be written.
   // Arguments are before that on the stack or in registers.

-  // Argument 8 (sp[16]): Pass current isolate address.
+  // Argument 9 (sp[20]): Pass current isolate address.
   __ mov(r0, Operand(ExternalReference::isolate_address()));
-  __ str(r0, MemOperand(sp, 4 * kPointerSize));
+  __ str(r0, MemOperand(sp, 5 * kPointerSize));

-  // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
+  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
   __ mov(r0, Operand(1));
-  __ str(r0, MemOperand(sp, 3 * kPointerSize));
+  __ str(r0, MemOperand(sp, 4 * kPointerSize));

-  // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
+  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
   __ mov(r0, Operand(address_of_regexp_stack_memory_address));
   __ ldr(r0, MemOperand(r0, 0));
   __ mov(r2, Operand(address_of_regexp_stack_memory_size));
   __ ldr(r2, MemOperand(r2, 0));
   __ add(r0, r0, Operand(r2));
+  __ str(r0, MemOperand(sp, 3 * kPointerSize));
+
+  // Argument 6: Set the number of capture registers to zero to force global
+  // regexps to behave as non-global. This does not affect non-global regexps.
+  __ mov(r0, Operand(0));
   __ str(r0, MemOperand(sp, 2 * kPointerSize));

   // Argument 5 (sp[4]): static offsets vector buffer.
@@ -4893,7 +4913,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check the result.
   Label success;
-  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  __ cmp(r0, Operand(1));
+  // We expect exactly one result since we force the called regexp to behave
+  // as non-global.
   __ b(eq, &success);
   Label failure;
   __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
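
After this change the native call passes nine arguments; the stack slots
visible in the hunk above lay out as follows (arguments 1-4 travel in
registers, and kPointerSize is 4 on ARM):

// sp[20]  argument 9: isolate address
// sp[16]  argument 8: direct_call flag (1 = called from JavaScript)
// sp[12]  argument 7: high end of the backtracking stack
// sp[8]   argument 6: 0 capture registers, forces one-shot (non-global)
// sp[4]   argument 5: static offsets vector buffer
// The stub then treats a return value of exactly 1 as success.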
@@ -5169,9 +5191,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
     __ b(ne, &call);
     // Patch the receiver on the stack with the global receiver object.
-    __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-    __ str(r2, MemOperand(sp, argc_ * kPointerSize));
+    __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
+    __ str(r3, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
@@ -5179,9 +5201,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   // r1: pushed function (to be verified)
   __ JumpIfSmi(r1, &non_function);
   // Get the map of the function object.
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
   __ b(ne, &slow);

+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
   // Fast-case: Invoke the function now.
   // r1: pushed function
   ParameterCount actual(argc_);
@@ -5205,8 +5231,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case. MegamorphicSentinel is an immortal immovable
+    // object (undefined) so no write barrier is needed.
+    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+              masm->isolate()->heap()->undefined_value());
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+  }
   // Check for function proxy.
-  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
   __ b(ne, &non_function);
   __ push(r1);  // put proxy as additional argument
   __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
@@ -5873,36 +5908,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   // r2: result string length
   __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
   __ cmp(r2, Operand(r4, ASR, 1));
+  // Return original string.
   __ b(eq, &return_r0);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ b(hi, &runtime);
+  // Shorter than original string's length: an actual substring.

-  Label result_longer_than_two;
-  // Check for special case of two character ASCII string, in which case
-  // we do a lookup in the symbol table first.
-  __ cmp(r2, Operand(2));
-  __ b(gt, &result_longer_than_two);
-  __ b(lt, &runtime);
-
-  __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
-
-  // Get the two characters forming the sub string.
-  __ add(r0, r0, Operand(r3));
-  __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
-  __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to lookup two character string in symbol table.
-  Label make_two_character_string;
-  StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
-  __ jmp(&return_r0);
-
-  // r2: result string length.
-  // r3: two characters combined into halfword in little endian byte order.
-  __ bind(&make_two_character_string);
-  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
-  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
-  __ jmp(&return_r0);
-
-  __ bind(&result_longer_than_two);
   // Deal with different string types: update the index if necessary
   // and put the underlying string into r5.
   // r0: original string
@@ -6816,6 +6827,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                     Register target) {
   __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                     RelocInfo::CODE_TARGET));
+
+  // Prevent literal pool emission during calculation of return address.
+  Assembler::BlockConstPoolScope block_const_pool(masm);
+
   // Push return address (accessible to GC through exit frame pc).
   // Note that using pc with str is deprecated.
   Label start;
@@ -7106,8 +7121,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
   { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
-  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+  // and ElementsTransitionGenerator::GenerateSmiToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
   { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@@ -7176,8 +7191,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
   // forth between a compare instructions (a nop in this position) and the
   // real branch when we start and stop incremental heap marking.
   // See RecordWriteStub::Patch for details.
-  __ b(&skip_to_incremental_noncompacting);
-  __ b(&skip_to_incremental_compacting);
+  {
+    // Block literal pool emission, as the position of these two instructions
+    // is assumed by the patching code.
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    __ b(&skip_to_incremental_noncompacting);
+    __ b(&skip_to_incremental_compacting);
+  }

   if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
     __ RememberedSetHelper(object_,
@@ -7370,9 +7390,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   Label fast_elements;

   __ CheckFastElements(r2, r5, &double_elements);
-  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
   __ JumpIfSmi(r0, &smi_element);
-  __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+  __ CheckFastSmiElements(r2, r5, &fast_elements);

   // Store into the array literal requires a elements transition. Call into
   // the runtime.
@@ -7384,7 +7404,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ Push(r5, r4);
   __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

-  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
   __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -7395,8 +7415,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
                       EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   __ Ret();

-  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-  // FAST_ELEMENTS, and value is Smi.
+  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+  // and value is Smi.
   __ bind(&smi_element);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
   __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));

4  deps/v8/src/arm/codegen-arm.cc

@@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 // -------------------------------------------------------------------------
 // Code generators

-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0    : value
@@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
 }

-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+void ElementsTransitionGenerator::GenerateSmiToDouble(
     MacroAssembler* masm, Label* fail) {
   // ----------- S t a t e -------------
   //  -- r0    : value

4  deps/v8/src/arm/debug-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -125,6 +125,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
                                    Assembler::kDebugBreakSlotInstructions);
 }

+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
 #define __ ACCESS_MASM(masm)

293  deps/v8/src/arm/full-codegen-arm.cc

@ -73,9 +73,6 @@ class JumpPatchSite BASE_EMBEDDED {
Assembler::BlockConstPoolScope block_const_pool(masm_); Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_); __ bind(&patch_site_);
__ cmp(reg, Operand(reg)); __ cmp(reg, Operand(reg));
// Don't use b(al, ...) as that might emit the constant pool right after the
// branch. After patching when the branch is no longer unconditional
// execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched. __ b(eq, target); // Always taken before patched.
} }
@ -90,6 +87,8 @@ class JumpPatchSite BASE_EMBEDDED {
} }
void EmitPatchInfo() { void EmitPatchInfo() {
// Block literal pool emission whilst recording patch site information.
Assembler::BlockConstPoolScope block_const_pool(masm_);
if (patch_site_.is_bound()) { if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg; Register reg;
@ -112,13 +111,6 @@ class JumpPatchSite BASE_EMBEDDED {
}; };
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 24;
}
// Generate code for a JS function. On entry to the function the receiver // Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual // and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the // argument count matches the formal parameter count expected by the
@ -275,11 +267,11 @@ void FullCodeGenerator::Generate() {
// For named function expressions, declare the function name as a // For named function expressions, declare the function name as a
// constant. // constant.
if (scope()->is_function_scope() && scope()->function() != NULL) { if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableProxy* proxy = scope()->function(); VariableDeclaration* function = scope()->function();
ASSERT(proxy->var()->mode() == CONST || ASSERT(function->proxy()->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY); function->proxy()->var()->mode() == CONST_HARMONY);
ASSERT(proxy->var()->location() != Variable::UNALLOCATED); ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
EmitDeclaration(proxy, proxy->var()->mode(), NULL); VisitVariableDeclaration(function);
} }
VisitDeclarations(scope()->declarations()); VisitDeclarations(scope()->declarations());
} }
@ -351,6 +343,8 @@ static const int kBackEdgeDistanceDivisor = 142;
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) { Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check"); Comment cmnt(masm_, "[ Stack check");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok; Label ok;
if (FLAG_count_based_interrupts) { if (FLAG_count_based_interrupts) {
@@ -789,62 +783,52 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }


-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+    __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+    __ Check(ne, "Declaration in with context.");
+    __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+    __ Check(ne, "Declaration in catch context.");
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      globals_->Add(variable->name(), zone());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value(),
+                    zone());
       break;

     case Variable::PARAMETER:
     case Variable::LOCAL:
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ str(result_register(), StackOperand(variable));
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, StackOperand(variable));
       }
       break;

     case Variable::CONTEXT:
-      // The variable in the decl always resides in the current function
-      // context.
-      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-      if (FLAG_debug_code) {
-        // Check that we're not inside a with or catch context.
-        __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
-        __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
-        __ Check(ne, "Declaration in with context.");
-        __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
-        __ Check(ne, "Declaration in catch context.");
-      }
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ str(result_register(), ContextOperand(cp, variable->index()));
-        int offset = Context::SlotOffset(variable->index());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(cp,
-                                  offset,
-                                  result_register(),
-                                  r2,
-                                  kLRHasBeenSaved,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, ContextOperand(cp, variable->index()));
         // No write barrier since the_hole_value is in old space.
@@ -853,13 +837,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       break;

     case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Declaration");
+      Comment cmnt(masm_, "[ VariableDeclaration");
       __ mov(r2, Operand(variable->name()));
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
+      ASSERT(mode == VAR || mode == LET ||
+             mode == CONST || mode == CONST_HARMONY);
       PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
           ? READ_ONLY : NONE;
       __ mov(r1, Operand(Smi::FromInt(attr)));
@@ -867,11 +849,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      if (function != NULL) {
-        __ Push(cp, r2, r1);
-        // Push initial value for function declaration.
-        VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      if (hole_init) {
         __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
         __ Push(cp, r2, r1, r0);
       } else {
@@ -885,6 +863,122 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }
void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ str(result_register(), StackOperand(variable));
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
offset,
result_register(),
r2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
break;
}
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r2, Operand(variable->name()));
__ mov(r1, Operand(Smi::FromInt(NONE)));
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
}
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
Handle<JSModule> instance = declaration->module()->interface()->Instance();
ASSERT(!instance.is_null());
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
globals_->Add(variable->name(), zone());
globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(r1, Operand(instance));
__ str(r1, ContextOperand(cp, variable->index()));
Visit(declaration->module());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED:
// TODO(rossberg)
break;
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
// TODO(rossberg)
}
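VisitVariableDeclaration above pre-stores the hole sentinel into const, CONST_HARMONY and let slots, so a read of the binding before its initializer has run can be recognized at runtime. A rough model of that hole-checked load, with hypothetical types (the real code stores the heap's the_hole value into the stack or context slot):

    #include <stdexcept>

    struct Binding {
      static void* const kTheHole;  // Unique sentinel, never a real value.
      void* slot = kTheHole;        // State right after the declaration.

      void* Load() const {
        if (slot == kTheHole) {
          // Use before initialization, e.g. '{ f(); let x = 1; }'.
          throw std::runtime_error("ReferenceError: uninitialized binding");
        }
        return slot;
      }
    };

    static char hole_marker;
    void* const Binding::kTheHole = &hole_marker;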
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -1511,7 +1605,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   // Mark all computed expressions that are bound to a key that
   // is shadowed by a later occurrence of the same key. For the
   // marked expressions, no store code is emitted.
-  expr->CalculateEmitStore();
+  expr->CalculateEmitStore(zone());

   AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
@@ -1609,7 +1703,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
@@ -1630,8 +1724,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
-           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
            FLAG_smi_only_arrays);
     FastCloneShallowArrayStub::Mode mode = has_fast_elements
         ? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1659,7 +1752,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     }
     VisitForAccumulatorValue(subexpr);

-    if (constant_elements_kind == FAST_ELEMENTS) {
+    if (IsFastObjectElementsKind(constant_elements_kind)) {
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ ldr(r6, MemOperand(sp));  // Copy of array literal.
       __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
@@ -2271,6 +2364,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  if (!Serializer::enabled()) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ mov(r2, Operand(cell));
+  }
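The new RECORD_CALL_TARGET block allocates one JSGlobalPropertyCell per call site, seeded with an "uninitialized" sentinel, and hands it to the stub in r2 so the unoptimized code can record who gets called there. A rough model of how such a feedback cell typically evolves; the state names here are illustrative, not V8's:

    // A call-site feedback cell: starts unknown, remembers a single
    // (monomorphic) target, and gives up once it sees a second one.
    enum class FeedbackState { kUninitialized, kMonomorphic, kMegamorphic };

    struct CallFeedbackCell {
      FeedbackState state = FeedbackState::kUninitialized;
      void* target = nullptr;  // The one callee seen so far, if any.

      void Record(void* callee) {
        switch (state) {
          case FeedbackState::kUninitialized:
            state = FeedbackState::kMonomorphic;
            target = callee;
            break;
          case FeedbackState::kMonomorphic:
            if (target != callee) {  // Second distinct callee: give up.
              state = FeedbackState::kMegamorphic;
              target = nullptr;
            }
            break;
          case FeedbackState::kMegamorphic:
            break;  // Nothing more to learn.
        }
      }
    };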
   CallFunctionStub stub(arg_count, flags);
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
@@ -3564,7 +3669,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
   __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
-  __ add(string_length, string_length, Operand(scratch1));
+  __ add(string_length, string_length, Operand(scratch1), SetCC);
   __ b(vs, &bailout);
   __ cmp(element, elements_end);
   __ b(lt, &loop);
@@ -3601,7 +3706,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ b(ne, &bailout);
   __ tst(scratch2, Operand(0x80000000));
   __ b(ne, &bailout);
-  __ add(string_length, string_length, Operand(scratch2));
+  __ add(string_length, string_length, Operand(scratch2), SetCC);
   __ b(vs, &bailout);
   __ SmiUntag(string_length);
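The two SetCC fixes above are genuine bug fixes: the following "b vs" is meant to bail out on signed overflow of the accumulated string length, but on ARM the V flag is only updated when the preceding add sets the condition codes. Without SetCC the branch tested a stale flag and the bailout could never fire. The same checked-add idea in portable C++ (a sketch; CheckedAdd is not a V8 helper):

    #include <cstdint>

    // Equivalent of "add Rd, Rn, Rm, SetCC; b vs, bailout": an addition
    // whose overflow must be observed, not ignored.
    bool CheckedAdd(int32_t a, int32_t b, int32_t* out) {
    #if defined(__GNUC__) || defined(__clang__)
      return !__builtin_add_overflow(a, b, out);  // false on overflow
    #else
      int64_t wide = static_cast<int64_t>(a) + b;
      if (wide != static_cast<int32_t>(wide)) return false;
      *out = static_cast<int32_t>(wide);
      return true;
    #endif
    }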
@@ -4357,7 +4462,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope()) {
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code. Pass a smi sentinel and let the runtime look up the empty
@@ -4388,14 +4494,55 @@ void FullCodeGenerator::EnterFinallyBlock() {
   ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
   STATIC_ASSERT(kSmiTag == 0);
   __ add(r1, r1, Operand(r1));  // Convert to smi.
+
+  // Store result register while executing finally block.
+  __ push(r1);
+
+  // Store pending message while executing finally block.
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(ip, Operand(pending_message_obj));
+  __ ldr(r1, MemOperand(ip));
+  __ push(r1);
+
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ mov(ip, Operand(has_pending_message));
+  __ ldr(r1, MemOperand(ip));
+  __ push(r1);
+
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ mov(ip, Operand(pending_message_script));
+  __ ldr(r1, MemOperand(ip));
   __ push(r1);
 }


 void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(r1));
+  // Restore pending message from stack.
+  __ pop(r1);
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ mov(ip, Operand(pending_message_script));
+  __ str(r1, MemOperand(ip));
+
+  __ pop(r1);
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ mov(ip, Operand(has_pending_message));
+  __ str(r1, MemOperand(ip));
+
+  __ pop(r1);
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(ip, Operand(pending_message_obj));
+  __ str(r1, MemOperand(ip));
+
   // Restore result register from stack.
   __ pop(r1);
+
   // Uncook return address and return.
   __ pop(result_register());
   ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
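EnterFinallyBlock now saves the isolate's pending-message state (object, flag, script) around the finally code, and ExitFinallyBlock restores it. Because the values travel on the machine stack, the restores must mirror the pushes in reverse order: the script reference is pushed last and popped first. The same discipline in miniature, with stand-in types for the three external references:

    #include <stack>

    struct PendingMessage { void* obj; bool has_message; void* script; };

    void Save(std::stack<void*>* s, const PendingMessage& m) {
      s->push(m.obj);
      s->push(reinterpret_cast<void*>(m.has_message));
      s->push(m.script);                     // Pushed last...
    }

    void Restore(std::stack<void*>* s, PendingMessage* m) {
      m->script = s->top(); s->pop();        // ...popped first.
      m->has_message = s->top() != nullptr; s->pop();
      m->obj = s->top(); s->pop();
    }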

61
deps/v8/src/arm/ic-arm.cc

@@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
   // Must return the modified receiver in r0.
   if (!FLAG_trace_elements_transitions) {
     Label fail;
-    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
     __ mov(r0, r2);
     __ Ret();
     __ bind(&fail);
@@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
   __ b(ne, &non_double_value);

-  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
   // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                          FAST_DOUBLE_ELEMENTS,
                                          receiver_map,
                                          r4,
                                          &slow);
   ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);

   __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                          FAST_ELEMENTS,
                                          receiver_map,
                                          r4,
                                          &slow);
   ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }


-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address cmp_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1729,34 +1729,31 @@ void PatchInlinedSmiCode(Address address) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
-  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
-            Assembler::GetRm(instr_at_patch).code());
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   cmp rx, rx
+  //   b eq/ne, <target>
+  // to
+  //   tst rx, #kSmiTagMask
+  //   b ne/eq, <target>
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Assembler::GetRn(instr_at_patch);
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+              Assembler::GetRm(instr_at_patch).code());
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
+    patcher.masm()->cmp(reg, reg);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::GetCondition(branch_instr) == eq) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b eq, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b ne, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(ne);
   } else {
     ASSERT(Assembler::GetCondition(branch_instr) == ne);
-    // This is patching a "jump if smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b ne, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b eq, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(eq);
   }
 }
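The rewrite folds the old duplicated patching paths into one CodePatcher block and, via the new InlinedSmiCheck argument, can also disable a site again. The invariant it relies on: a smi is tagged with a zero low bit, so "tst rx, #kSmiTagMask" sets Z exactly for smis, while the placeholder "cmp rx, rx" always sets Z, turning the following conditional branch into a statically taken or skipped jump. A small sketch of 32-bit smi tagging (helper names are illustrative):

    #include <cstdint>

    // 32-bit V8 stores small integers shifted left one bit, so the low
    // bit (kSmiTagMask) is 0 for smis and 1 for heap object pointers.
    constexpr intptr_t kSmiTagMask = 1;

    inline bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }
    inline intptr_t SmiFromInt(int32_t i) {
      return static_cast<intptr_t>(i) << 1;
    }
    inline int32_t SmiToInt(intptr_t smi) {
      return static_cast<int32_t>(smi >> 1);
    }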

210
deps/v8/src/arm/lithium-arm.cc

@@ -108,22 +108,17 @@ void LInstruction::PrintTo(StringStream* stream) {
 }

-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }

-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }

@@ -416,9 +411,9 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph)
     : spill_slot_count_(0),
       info_(info),
       graph_(graph),
-      instructions_(32),
-      pointer_maps_(8),
-      inlined_closures_(1) {
+      instructions_(32, graph->zone()),
+      pointer_maps_(8, graph->zone()),
+      inlined_closures_(1, graph->zone()) {
 }

@@ -432,9 +427,9 @@ int LChunk::GetNextSpillIndex(bool is_double) {
 LOperand* LChunk::GetNextSpillSlot(bool is_double) {
   int index = GetNextSpillIndex(is_double);
   if (is_double) {
-    return LDoubleStackSlot::Create(index);
+    return LDoubleStackSlot::Create(index, zone());
   } else {
-    return LStackSlot::Create(index);
+    return LStackSlot::Create(index, zone());
   }
 }

@@ -479,23 +474,23 @@ void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
-    instructions_.Add(gap);
+    instructions_.Add(gap, zone());
     index = instructions_.length();
-    instructions_.Add(instr);
+    instructions_.Add(instr, zone());
   } else {
     index = instructions_.length();
-    instructions_.Add(instr);
-    instructions_.Add(gap);
+    instructions_.Add(instr, zone());
+    instructions_.Add(gap, zone());
   }
   if (instr->HasPointerMap()) {
-    pointer_maps_.Add(instr->pointer_map());
+    pointer_maps_.Add(instr->pointer_map(), zone());
     instr->pointer_map()->set_lithium_position(index);
   }
 }


 LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
-  return LConstantOperand::Create(constant->id());
+  return LConstantOperand::Create(constant->id(), zone());
 }

@@ -534,7 +529,8 @@ int LChunk::NearestGapPos(int index) const {
 void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
-  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+  GetGapAt(index)->GetOrCreateParallelMove(
+      LGap::START, zone())->AddMove(from, to, zone());
 }
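Most of the churn in this file threads a Zone* into every list append and placement new. A zone is a bump-pointer arena owned by the compilation: allocation is a pointer increment, nothing is freed individually, and the whole arena is dropped when the chunk is done. A minimal sketch of the idea (not V8's actual Zone):

    #include <cstddef>
    #include <new>
    #include <vector>

    // Minimal bump-pointer arena in the spirit of v8::internal::Zone:
    // individual objects are never freed, the whole arena is.
    class Zone {
     public:
      void* New(size_t size) {
        size = (size + 7) & ~size_t{7};  // 8-byte alignment
        if (position_ + size > limit_) Grow(size);
        void* result = position_;
        position_ += size;
        return result;
      }
      ~Zone() { for (char* s : segments_) delete[] s; }
     private:
      void Grow(size_t min) {
        size_t n = min < 8192 ? 8192 : min;
        char* seg = new char[n];
        segments_.push_back(seg);
        position_ = seg;
        limit_ = seg + n;
      }
      char* position_ = nullptr;
      char* limit_ = nullptr;
      std::vector<char*> segments_;
    };

    // Matches the "new(zone()) LFoo(...)" spelling used throughout the diff.
    inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }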
@@ -732,22 +728,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 }

-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
-    LInstruction* instr, int ast_id) {
-  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-  instruction_pending_deoptimization_environment_ = instr;
-  pending_deoptimization_ast_id_ = ast_id;
-  return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
-  instruction_pending_deoptimization_environment_ = NULL;
-  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
@@ -760,8 +740,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
   if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
-    instr = SetInstructionPendingDeoptimizationEnvironment(
-        instr, sim->ast_id());
+    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+    instruction_pending_deoptimization_environment_ = instr;
+    pending_deoptimization_ast_id_ = sim->ast_id();
   }

   // If instruction does not have side-effects lazy deoptimization
@@ -779,15 +761,9 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 }

-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  instr->MarkAsSaveDoubles();
-  return instr;
-}
-
-
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new(zone()) LPointerMap(position_));
+  instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
   return instr;
 }

@@ -1010,7 +986,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
       hydrogen_env->parameter_count(),
       argument_count_,
       value_count,
-      outer);
+      outer,
+      zone());
   int argument_index = *argument_index_accumulator;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
@@ -1295,6 +1272,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
+  if (instr->HasNoUses()) return NULL;
   LOperand* value = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new(zone()) LBitNotI(value));
 }
@@ -1319,6 +1297,76 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 }
bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
uint32_t divisor_abs = abs(divisor);
// Dividing by 0, 1, and powers of 2 is easy.
  // Note that IsPowerOf2(0) returns true.
ASSERT(IsPowerOf2(0) == true);
if (IsPowerOf2(divisor_abs)) return true;
// We have magic numbers for a few specific divisors.
// Details and proofs can be found in:
// - Hacker's Delight, Henry S. Warren, Jr.
// - The PowerPC Compiler Writer’s Guide
// and probably many others.
//
// We handle
// <divisor with magic numbers> * <power of 2>
// but not
// <divisor with magic numbers> * <other divisor with magic numbers>
int32_t power_of_2_factor =
CompilerIntrinsics::CountTrailingZeros(divisor_abs);
DivMagicNumbers magic_numbers =
DivMagicNumberFor(divisor_abs >> power_of_2_factor);
if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
return false;
}
HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
// A value with an integer representation does not need to be transformed.
if (dividend->representation().IsInteger32()) {
return dividend;
// A change from an integer32 can be replaced by the integer32 value.
} else if (dividend->IsChange() &&
HChange::cast(dividend)->from().IsInteger32()) {
return HChange::cast(dividend)->value();
}
return NULL;
}
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
// Only optimize when we have magic numbers for the divisor.
  // The standard integer division routine is usually slower than transitioning
// to VFP.
if (divisor->IsConstant() &&
HConstant::cast(divisor)->HasInteger32Value()) {
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
}
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegisterOrConstant(right);
LOperand* remainder = TempRegister();
ASSERT(right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value() &&
HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
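For divisors that are not powers of two, "magic numbers" replace the division with a multiply-high plus small corrections, per the Hacker's Delight reference cited above. A worked example for d == 3, where the magic constant is 0x55555556 and the shift is zero; this mirrors the smull/sign-bit sequence the ARM code generator emits later in this commit:

    #include <cassert>
    #include <cstdint>

    // Truncating signed division by 3 without a divide instruction,
    // using the magic number from Hacker's Delight (M = 0x55555556, s = 0).
    int32_t DivBy3(int32_t n) {
      int64_t prod = static_cast<int64_t>(n) * 0x55555556LL;
      int32_t hi = static_cast<int32_t>(prod >> 32);  // high 32 bits (s == 0)
      return hi + (static_cast<uint32_t>(n) >> 31);   // add 1 for negative n
    }

    int main() {
      assert(DivBy3(7) == 2);
      assert(DivBy3(-7) == -2);  // truncates toward zero
      assert(DivBy3(42) == 14);
      return 0;
    }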
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
@@ -1612,7 +1660,8 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
 LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
   LOperand* object = UseFixed(instr->value(), r0);
-  LDateField* result = new LDateField(object, FixedTemp(r1), instr->index());
+  LDateField* result =
+      new(zone()) LDateField(object, FixedTemp(r1), instr->index());
   return MarkAsCall(DefineFixed(result, r0), instr);
 }

@@ -1661,10 +1710,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   } else {
     ASSERT(to.IsInteger32());
     LOperand* value = UseRegisterAtStart(instr->value());
-    bool needs_check = !instr->value()->type().IsSmi();
     LInstruction* res = NULL;
-    if (!needs_check) {
-      res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
+    if (instr->value()->type().IsSmi()) {
+      res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
     } else {
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
@@ -1753,9 +1801,9 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
 }

-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckMap(value);
+  LInstruction* result = new(zone()) LCheckMaps(value);
   return AssignEnvironment(result);
 }

@@ -2037,8 +2085,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
-      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+  ElementsKind from_kind = instr->original_map()->elements_kind();
+  ElementsKind to_kind = instr->transitioned_map()->elements_kind();
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
     LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
@@ -2059,16 +2108,28 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
-
-  LOperand* obj = needs_write_barrier
-      ? UseTempRegister(instr->object())
-      : UseRegisterAtStart(instr->object());
+  bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = instr->is_in_object()
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }

   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());

-  return new(zone()) LStoreNamedField(obj, val);
+  // We need a temporary register for write barrier of the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp);
 }
@@ -2111,7 +2172,8 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
 LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
-  LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister());
+  LAllocateObject* result =
+      new(zone()) LAllocateObject(TempRegister(), TempRegister());
   return AssignPointerMap(DefineAsRegister(result));
 }

@@ -2242,9 +2304,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
     LInstruction* result = new(zone()) LLazyBailout;
     result = AssignEnvironment(result);
+    // Store the lazy deopt environment with the instruction if needed. Right
+    // now it is only used for LInstanceOfKnownGlobal.
     instruction_pending_deoptimization_environment_->
-        set_deoptimization_environment(result->environment());
-    ClearInstructionPendingDeoptimizationEnvironment();
+        SetDeferredLazyDeoptimizationEnvironment(result->environment());
+    instruction_pending_deoptimization_environment_ = NULL;
+    pending_deoptimization_ast_id_ = AstNode::kNoNumber;
     return result;
   }

@@ -2271,8 +2336,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
                                                undefined,
                                                instr->call_kind(),
                                                instr->is_construct());
-  if (instr->arguments() != NULL) {
-    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  if (instr->arguments_var() != NULL) {
+    inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
   }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2281,10 +2346,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+  HEnvironment* env = current_block_->last_environment();
+
+  if (instr->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    argument_count_ -= argument_count;
+  }
+
   HEnvironment* outer = current_block_->last_environment()->
       DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
-  return NULL;
+
+  return pop;
 }

116
deps/v8/src/arm/lithium-arm.h

@@ -72,7 +72,7 @@ class LCodeGen;
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
   V(CheckNonSmi)                                \
-  V(CheckMap)                                   \
+  V(CheckMaps)                                  \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
   V(ClampDToUint8)                              \
@@ -132,6 +132,7 @@ class LCodeGen;
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
+  V(MathFloorOfDiv)                             \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -179,7 +180,8 @@ class LCodeGen;
   V(CheckMapValue)                              \
   V(LoadFieldByIndex)                           \
   V(DateField)                                  \
-  V(WrapReceiver)
+  V(WrapReceiver)                               \
+  V(Drop)

 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -203,15 +205,14 @@ class LInstruction: public ZoneObject {
   LInstruction()
       :  environment_(NULL),
          hydrogen_value_(NULL),
-         is_call_(false),
-         is_save_doubles_(false) { }
+         is_call_(false) { }
   virtual ~LInstruction() { }

   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);

   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -246,22 +247,12 @@ class LInstruction: public ZoneObject {
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }

-  void set_deoptimization_environment(LEnvironment* env) {
-    deoptimization_environment_.set(env);
-  }
-  LEnvironment* deoptimization_environment() const {
-    return deoptimization_environment_.get();
-  }
-  bool HasDeoptimizationEnvironment() const {
-    return deoptimization_environment_.is_set();
-  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }

   void MarkAsCall() { is_call_ = true; }
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }

   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
-  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }

   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -282,9 +273,7 @@ class LInstruction: public ZoneObject {
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
-  SetOncePointer<LEnvironment> deoptimization_environment_;
   bool is_call_;
-  bool is_save_doubles_;
 };

@@ -306,9 +295,6 @@ class LTemplateInstruction: public LInstruction {
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }

-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -347,8 +333,10 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
     LAST_INNER_POSITION = AFTER
   };

-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
-    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
     return parallel_moves_[pos];
   }

@@ -534,9 +522,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
  public:
-  LArgumentsElements() { }
-
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };

@@ -582,6 +569,21 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
 };
class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
LOperand* temp = NULL) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
 class LMulI: public LTemplateInstruction<1, 2, 1> {
  public:
   LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -834,6 +836,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
   DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)

   Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
+  LEnvironment* lazy_deopt_env_;
 };

@@ -1227,6 +1238,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };

@@ -1243,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };


 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
-                                    LOperand* key) {
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
   }
@@ -1263,6 +1275,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };

@@ -1378,6 +1391,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 };
class LDrop: public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
int count() const { return count_; }
DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
private:
int count_;
};
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1460,6 +1486,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
   virtual void PrintDataTo(StringStream* stream);

   int arity() const { return hydrogen()->argument_count() - 1; }
+  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };

@@ -1659,11 +1686,12 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
 };


-class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
  public:
-  LStoreNamedField(LOperand* obj, LOperand* val) {
+  LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
     inputs_[0] = obj;
     inputs_[1] = val;
+    temps_[0] = temp;
   }

   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
@@ -1717,6 +1745,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };

@@ -1739,6 +1768,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
+
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };

@@ -1781,6 +1813,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };

@@ -1889,14 +1922,14 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
 };


-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMap(LOperand* value) {
+  explicit LCheckMaps(LOperand* value) {
     inputs_[0] = value;
   }

-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
 };

@@ -2236,9 +2269,11 @@ class LChunk: public ZoneObject {
   }

   void AddInlinedClosure(Handle<JSFunction> closure) {
-    inlined_closures_.Add(closure);
+    inlined_closures_.Add(closure, zone());
   }

+  Zone* zone() const { return graph_->zone(); }
+
  private:
   int spill_slot_count_;
   CompilationInfo* info_;
@@ -2255,7 +2290,7 @@ class LChunkBuilder BASE_EMBEDDED {
       : chunk_(NULL),
         info_(info),
         graph_(graph),
-        zone_(graph->isolate()->zone()),
+        zone_(graph->zone()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2274,6 +2309,10 @@ class LChunkBuilder BASE_EMBEDDED {
   HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
 #undef DECLARE_DO

+  static bool HasMagicNumberForDivisor(int32_t divisor);
+  static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
+  static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
  private:
   enum Status {
     UNUSED,
@@ -2369,11 +2408,6 @@ class LChunkBuilder BASE_EMBEDDED {
                               LInstruction* instr,
                               HInstruction* hinstr,
                               CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
-  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
-      LInstruction* instr, int ast_id);
-  void ClearInstructionPendingDeoptimizationEnvironment();

   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                   int* argument_index_accumulator);

548
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -571,6 +571,9 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
+  // Block literal pool emission to ensure nop indicating no inlined smi code
+  // is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ Call(code, mode);
@@ -631,14 +634,15 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
         ++jsframe_count;
       }
     }
-    Translation translation(&translations_, frame_count, jsframe_count);
+    Translation translation(&translations_, frame_count, jsframe_count,
+                            zone());
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
     environment->Register(deoptimization_index,
                           translation.index(),
                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
-    deoptimizations_.Add(environment);
+    deoptimizations_.Add(environment, zone());
   }
 }
@@ -670,7 +674,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
     // jump entry if this is the case.
     if (deopt_jump_table_.is_empty() ||
         (deopt_jump_table_.last().address != entry)) {
-      deopt_jump_table_.Add(JumpTableEntry(entry));
+      deopt_jump_table_.Add(JumpTableEntry(entry), zone());
     }
     __ b(cc, &deopt_jump_table_.last().label);
   }
@@ -715,7 +719,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
   for (int i = 0; i < deoptimization_literals_.length(); ++i) {
     if (deoptimization_literals_[i].is_identical_to(literal)) return i;
   }
-  deoptimization_literals_.Add(literal);
+  deoptimization_literals_.Add(literal, zone());
   return result;
 }
@@ -761,14 +765,14 @@ void LCodeGen::RecordSafepoint(
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
+      safepoint.DefinePointerSlot(pointer->index(), zone());
     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
     }
   }
   if (kind & Safepoint::kWithRegisters) {
     // Register cp always contains a pointer to the context.
-    safepoint.DefinePointerRegister(cp);
+    safepoint.DefinePointerRegister(cp, zone());
   }
 }
@@ -780,7 +784,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
-  LPointerMap empty_pointers(RelocInfo::kNoPosition);
+  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
   RecordSafepoint(&empty_pointers, deopt_mode);
 }
@@ -1034,6 +1038,100 @@ void LCodeGen::DoModI(LModI* instr) {
 }
void LCodeGen::EmitSignedIntegerDivisionByConstant(
Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment) {
ASSERT(!AreAliased(dividend, scratch, ip));
ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
uint32_t divisor_abs = abs(divisor);
int32_t power_of_2_factor =
CompilerIntrinsics::CountTrailingZeros(divisor_abs);
switch (divisor_abs) {
case 0:
DeoptimizeIf(al, environment);
return;
case 1:
if (divisor > 0) {
__ Move(result, dividend);
} else {
__ rsb(result, dividend, Operand(0), SetCC);
DeoptimizeIf(vs, environment);
}
// Compute the remainder.
__ mov(remainder, Operand(0));
return;
default:
if (IsPowerOf2(divisor_abs)) {
// Branch and condition free code for integer division by a power
// of two.
int32_t power = WhichPowerOf2(divisor_abs);
if (power > 1) {
__ mov(scratch, Operand(dividend, ASR, power - 1));
}
__ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
__ mov(result, Operand(scratch, ASR, power));
// Negate if necessary.
// We don't need to check for overflow because the case '-1' is
// handled separately.
if (divisor < 0) {
ASSERT(divisor != -1);
__ rsb(result, result, Operand(0));
}
// Compute the remainder.
if (divisor > 0) {
__ sub(remainder, dividend, Operand(result, LSL, power));
} else {
__ add(remainder, dividend, Operand(result, LSL, power));
}
return;
} else {
// Use magic numbers for a few specific divisors.
// Details and proofs can be found in:
// - Hacker's Delight, Henry S. Warren, Jr.
// - The PowerPC Compiler Writer’s Guide
// and probably many others.
//
// We handle
// <divisor with magic numbers> * <power of 2>
// but not
// <divisor with magic numbers> * <other divisor with magic numbers>
DivMagicNumbers magic_numbers =
DivMagicNumberFor(divisor_abs >> power_of_2_factor);
// Branch and condition free code for integer division by a power
// of two.
const int32_t M = magic_numbers.M;
const int32_t s = magic_numbers.s + power_of_2_factor;
__ mov(ip, Operand(M));
__ smull(ip, scratch, dividend, ip);
if (M < 0) {
__ add(scratch, scratch, Operand(dividend));
}
if (s > 0) {
__ mov(scratch, Operand(scratch, ASR, s));
}
__ add(result, scratch, Operand(dividend, LSR, 31));
if (divisor < 0) __ rsb(result, result, Operand(0));
// Compute the remainder.
__ mov(ip, Operand(divisor));
// This sequence could be replaced with 'mls' when
// it gets implemented.
__ mul(scratch, result, ip);
__ sub(remainder, dividend, scratch);
}
}
}
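The power-of-two branch above computes a truncating division: an arithmetic shift alone would round toward minus infinity, so the code first adds (2^power - 1) to negative dividends, extracting that bias from the sign bits via the ASR/LSR pair. The equivalent in portable C++ (a sketch, not a V8 helper; power must be between 1 and 31):

    #include <cassert>
    #include <cstdint>

    // Truncating (round-toward-zero) signed division by 2^power via shifts,
    // with the same sign correction the assembly performs.
    int32_t DivByPowerOf2(int32_t n, int power) {
      int32_t sign = n >> 31;  // 0 for n >= 0, -1 for n < 0
      int32_t bias =
          static_cast<int32_t>(static_cast<uint32_t>(sign) >> (32 - power));
      return (n + bias) >> power;  // bias is 0 or 2^power - 1
    }

    int main() {
      assert(DivByPowerOf2(-7, 1) == -3);  // -7 / 2 truncates to -3, not -4
      assert(DivByPowerOf2(7, 2) == 1);
      return 0;
    }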
 void LCodeGen::DoDivI(LDivI* instr) {
   class DeferredDivI: public LDeferredCode {
    public:
@@ -1096,7 +1194,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
   // Call the stub. The numbers in r0 and r1 have
   // to be tagged to Smis. If that is not possible, deoptimize.
-  DeferredDivI* deferred = new DeferredDivI(this, instr);
+  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);

   __ TrySmiTag(left, &deoptimize, scratch);
   __ TrySmiTag(right, &deoptimize, scratch);
@@ -1115,6 +1213,34 @@ void LCodeGen::DoDivI(LDivI* instr) {
 }
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register result = ToRegister(instr->result());
const Register left = ToRegister(instr->InputAt(0));
const Register remainder = ToRegister(instr->TempAt(0));
const Register scratch = scratch0();
// We only optimize this for division by constants, because the standard
  // integer division routine is usually slower than transitioning to VFP.
// This could be optimized on processors with SDIV available.
ASSERT(instr->InputAt(1)->IsConstantOperand());
int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
if (divisor < 0) {
__ cmp(left, Operand(0));
DeoptimizeIf(eq, instr->environment());
}
EmitSignedIntegerDivisionByConstant(result,
left,
divisor,
remainder,
scratch,
instr->environment());
// We operated a truncating division. Correct the result if necessary.
__ cmp(remainder, Operand(0));
__ teq(remainder, Operand(divisor), ne);
__ sub(result, result, Operand(1), LeaveCC, mi);
}
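EmitSignedIntegerDivisionByConstant yields a truncating quotient, and the cmp/teq/sub(mi) tail above converts it to the floor that HMathFloorOfDiv requires: subtract one exactly when the remainder is nonzero and its sign differs from the divisor's (the conditional teq leaves that sign difference in the N flag). The same correction in portable C++ (illustrative helper, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Floor division derived from C++'s truncating '/': subtract 1 when
    // the remainder is nonzero and its sign differs from the divisor's.
    int32_t FloorDiv(int32_t dividend, int32_t divisor) {
      int32_t q = dividend / divisor;
      int32_t r = dividend - q * divisor;
      if (r != 0 && ((r ^ divisor) < 0)) --q;  // signs differ: round down
      return q;
    }

    int main() {
      assert(FloorDiv(7, 2) == 3);
      assert(FloorDiv(-7, 2) == -4);  // truncation alone would give -3
      assert(FloorDiv(7, -2) == -4);
      return 0;
    }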
 template<int T>
 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
                                       Token::Value op) {
@@ -1562,6 +1688,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));

   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  // Block literal pool emission to ensure nop indicating no inlined smi code
+  // is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
@@ -2174,7 +2303,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};

DeferredInstanceOfKnownGlobal* deferred;
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

Label done, false_result;
Register object = ToRegister(instr->InputAt(0));
@@ -2193,20 +2322,25 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label cache_miss;
Register map = temp;
__ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
{
// Block constant pool emission to ensure the positions of instructions are
// as expected by the patcher. See InstanceofStub::Generate().
Assembler::BlockConstPoolScope block_const_pool(masm());
__ bind(deferred->map_check());  // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
__ mov(result, Operand(factory()->the_hole_value()));
}
__ b(&done);

// The inlined call site cache did not match. Check null and string before
@@ -2267,8 +2401,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

// Put the result value into the result register slot and
// restore all registers.
@@ -2438,12 +2571,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsFound() && lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2455,9 +2588,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
} else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
// Negative lookup.
// Check prototypes.
HeapObject* current = HeapObject::cast((*type)->prototype());
Heap* heap = type->GetHeap();
while (current != heap->null_value()) {
Handle<HeapObject> link(current);
__ LoadHeapObject(result, link);
__ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
__ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map())));
DeoptimizeIf(ne, env);
current = HeapObject::cast(current->map()->prototype());
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
@@ -2465,43 +2612,45 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register object_map = scratch0();

int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(al, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
Label done;
__ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
__ CompareMap(
object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment());
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
} else {
Label next;
__ b(ne, &next);
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
__ b(&done);
__ bind(&next);
}
}
if (need_generic) {
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
__ bind(&done);
}
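The rewritten loop is the classic polymorphic-inline-cache shape: compare the receiver's map against each expected map in turn; on the last case either deoptimize or fall through to the generic IC. A rough C++ analogy with hypothetical stand-in types (not the V8 API):

    #include <cstddef>
    #include <vector>

    struct Map;                            // stand-in for v8::internal::Map
    typedef long (*LoadFn)(void* object);  // fast-path field load

    struct PolymorphicCase { const Map* map; LoadFn load; };

    long LoadPolymorphic(void* object, const Map* object_map,
                         const std::vector<PolymorphicCase>& cases,
                         bool need_generic, LoadFn generic, void (*deopt)()) {
      for (size_t i = 0; i < cases.size(); ++i) {
        const bool last = (i + 1 == cases.size());
        if (object_map == cases[i].map) return cases[i].load(object);
        // Last known map failed, no generic fallback: bail out (assumed not
        // to return), mirroring DeoptimizeIf(ne, ...).
        if (last && !need_generic) deopt();
      }
      return generic(object);  // the LoadIC_Initialize call analogue
    }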
@@ -2579,8 +2728,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ cmp(scratch, Operand(GetInitialFastElementsKind()));
__ b(lt, &fail);
__ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ b(le, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2627,13 +2778,20 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ ldr(result, FieldMemOperand(scratch, offset));

// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ tst(result, Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
DeoptimizeIf(eq, instr->environment());
}
}
}
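The smi fast path above works because of pointer tagging: small integers carry a 0 in the low bit, while heap objects (including the hole) carry 1, so a single tst against kSmiTagMask decides. Sketch with the tagging scheme spelled out (31-bit payload assumed, as on ARM):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap pointer

    inline bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }
    inline intptr_t SmiTag(int32_t value) {
      return static_cast<intptr_t>(value) << 1;  // tag bit ends up 0
    }
    inline int32_t SmiUntag(intptr_t value) {
      return static_cast<int32_t>(value >> 1);
    }

    int main() {
      assert(IsSmi(SmiTag(42)) && SmiUntag(SmiTag(42)) == 42);
      intptr_t heap_object_like = 0x1235;  // low bit set: not a smi
      assert(!IsSmi(heap_object_like));
      return 0;
    }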
@@ -2659,18 +2817,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}

Operand operand = key_is_constant
? Operand(((constant_key + instr->additional_index()) << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << shift_size)));
}

if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}

__ vldr(result, elements, 0);
}
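Holes in a FAST_DOUBLE backing store are encoded as one specific NaN bit pattern, which is why loading just the upper word and comparing against kHoleNanUpper32 suffices. Sketch below; the constant is assumed for illustration, the real one lives in v8's headers:

    #include <cstdint>
    #include <cstring>

    const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed hole-NaN upper word

    bool IsTheHoleNan(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // analogue of the ldr above
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }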
@@ -2692,26 +2853,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;

if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << shift_size)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, result.low());
} else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer,
(constant_key << shift_size) + additional_offset)
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@@ -2739,9 +2907,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2764,16 +2935,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());

if (instr->hydrogen()->from_inlined()) {
__ sub(result, sp, Operand(2 * kPointerSize));
} else {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
__ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ mov(result, fp, LeaveCC, ne);
__ mov(result, scratch, LeaveCC, eq);
}
}
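In the non-inlined path the code walks one frame up and checks the caller's context slot for the arguments-adaptor marker. A simplified C++ picture of that walk, with a hypothetical frame layout and marker value:

    #include <cstdint>

    struct Frame {
      Frame* caller_fp;  // slot at StandardFrameConstants::kCallerFPOffset
      intptr_t context;  // slot at StandardFrameConstants::kContextOffset
    };

    // Hypothetical smi-tagged StackFrame::ARGUMENTS_ADAPTOR marker value.
    const intptr_t kArgumentsAdaptorMarker = 4 << 1;

    Frame* ArgumentsFrame(Frame* fp) {
      Frame* caller = fp->caller_fp;
      // Adapted: the real arguments live in the adaptor frame below.
      return caller->context == kArgumentsAdaptorMarker ? caller : fp;
    }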
@@ -2882,7 +3057,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ b(ne, &loop);

__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
@@ -2907,6 +3082,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
void LCodeGen::DoDrop(LDrop* instr) {
__ Drop(instr->count());
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2953,7 +3133,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind,
R1State r1_state) {
bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
function->shared()->formal_parameter_count() == arity;
@@ -2961,7 +3142,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordPosition(pointers->position());

if (can_invoke_directly) {
if (r1_state == R1_UNINITIALIZED) {
__ LoadHeapObject(r1, function);
}

// Change context if needed.
bool change_context =
(info()->closure()->context() != function->context()) ||
@@ -3000,7 +3184,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
CallKnownFunction(instr->function(),
instr->arity(),
instr,
CALL_AS_METHOD,
R1_UNINITIALIZED);
}
@@ -3109,7 +3294,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input = ToRegister(instr->InputAt(0));
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
@@ -3286,7 +3471,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
LRandom* instr_;
};

DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);

// Having marked this instruction as a call we can use any
// registers.
@@ -3424,13 +3609,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(instr->HasPointerMap());

if (instr->known_function().is_null()) {
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
CallKnownFunction(instr->known_function(),
instr->arity(),
instr,
CALL_AS_METHOD,
R1_CONTAINS_TARGET);
}
}
@@ -3485,7 +3678,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
CallKnownFunction(instr->target(),
instr->arity(),
instr,
CALL_AS_FUNCTION,
R1_UNINITIALIZED);
}
@@ -3515,6 +3712,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (!instr->transition().is_null()) {
__ mov(scratch, Operand(instr->transition()));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the map field.
__ RecordWriteField(object,
HeapObject::kMapOffset,
scratch,
temp,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
}

// Do the store.
@@ -3583,10 +3792,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
(ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+ FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
if (instr->additional_index() != 0) {
__ add(scratch,
scratch,
Operand(instr->additional_index() << kPointerSizeLog2));
}
__ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
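The folded offset in the constant-key path is plain arithmetic over the array header; spelled out with illustrative constants (4-byte pointers and an assumed two-word FixedArray header):

    // Illustrative only: mirrors (constant_key + additional_index)
    // * kPointerSize + FixedArray::kHeaderSize from the store above.
    const int kPointerSize = 4;           // ARM32 pointer width
    const int kFixedArrayHeaderSize = 8;  // assumed map + length words

    int KeyedStoreOffset(int constant_key, int additional_index) {
      return (constant_key + additional_index) * kPointerSize +
             kFixedArrayHeaderSize;
    }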
@@ -3615,7 +3830,6 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
Register scratch = scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;

// Calculate the effective address of the slot in the array to store the
// double value.
@@ -3629,7 +3843,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
Operand operand = key_is_constant
? Operand((constant_key << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@@ -3638,14 +3852,16 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}

if (instr->NeedsCanonicalization()) {
// Check for NaN. All NaNs must be canonicalized.
__ VFPCompareAndSetFlags(value, value);
// Only load canonical NaN if the comparison above set the overflow.
__ Vmov(value,
FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
vs);
}

__ vstr(value, scratch, instr->additional_index() << shift_size);
}
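Canonicalization exists so that no stored NaN can alias the hole pattern: the VFP self-compare raises the overflow condition exactly for NaNs, and the conditional vmov replaces them with one canonical quiet NaN. Equivalent C++, with an assumed canonical bit pattern:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    double CanonicalizeNaN(double value) {
      if (std::isnan(value)) {  // VFPCompareAndSetFlags(value, value) -> vs
        const uint64_t kCanonicalNan = 0x7FF8000000000000ULL;  // quiet NaN
        std::memcpy(&value, &kCanonicalNan, sizeof(value));
      }
      return value;
    }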
@@ -3666,25 +3882,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;

if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant ? Operand(constant_key << shift_size)
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), scratch0(), additional_offset);
} else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vstr(value, scratch0(), additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer,
((constant_key + instr->additional_index())
<< shift_size))
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3703,7 +3927,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3740,20 +3967,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));

if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
@@ -3787,7 +4016,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};

DeferredStringCharCodeAt* deferred =
new(zone()) DeferredStringCharCodeAt(this, instr);

StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
@@ -3842,7 +4071,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};

DeferredStringCharFromCode* deferred =
new(zone()) DeferredStringCharFromCode(this, instr);

ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -3916,7 +4145,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
Register src = ToRegister(instr->InputAt(0));
Register dst = ToRegister(instr->result());

DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(dst, src, SetCC);
__ b(vs, deferred->entry());
__ bind(deferred->exit());
@@ -3987,7 +4216,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));

DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
@@ -4189,7 +4418,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);

DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

// Optimistically untag the input.
// If the input is a HeapObject, SmiUntag will set the carry flag.
@@ -4338,14 +4567,22 @@ void LCodeGen::DoCheckMapCommon(Register reg,
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register scratch = scratch0();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);

Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
__ b(eq, &success);
}
Handle<Map> map = map_set->last();
DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
__ bind(&success);
}
@@ -4441,7 +4678,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
LAllocateObject* instr_;
};

DeferredAllocateObject* deferred =
new(zone()) DeferredAllocateObject(this, instr);

Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
@@ -4464,6 +4702,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
deferred->entry(),
TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
Label is_in_new_space;
__ JumpIfInNewSpace(result, scratch, &is_in_new_space);
__ Abort("Allocated object is not in new-space");
__ bind(&is_in_new_space);
}
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(map, constructor);
@@ -4482,14 +4728,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
__ str(scratch, FieldMemOperand(result, property_offset));
}
}
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@@ -4497,9 +4743,9 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
__ mov(result, Operand(0));

PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ mov(r0, Operand(Smi::FromInt(instance_size)));
__ push(r0);
CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -4511,8 +4757,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -4633,9 +4880,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ str(r2, FieldMemOperand(result, total_offset + 4));
}
} else if (elements->IsFixedArray()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value(fast_elements->get(i));
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
@@ -4659,6 +4907,24 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
int size = instr->hydrogen()->total_size();
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
// Load the map's "bit field 2".
__ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
__ cmp(r2, Operand(boilerplate_elements_kind));
DeoptimizeIf(ne, instr->environment());
}
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
@@ -4923,6 +5189,8 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
int current_pc = masm()->pc_offset();
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
// Block literal pool emission for the duration of the padding.
Assembler::BlockConstPoolScope block_const_pool(masm());
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
@@ -4954,7 +5222,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register strict = scratch0();
__ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
__ Push(object, key, strict);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
@@ -4967,7 +5235,7 @@ void LCodeGen::DoIn(LIn* instr) {
Register obj = ToRegister(instr->object());
Register key = ToRegister(instr->key());
__ Push(key, obj);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
@@ -5017,7 +5285,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());

41
deps/v8/src/arm/lithium-codegen-arm.h

@@ -43,22 +43,26 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
Zone* zone)
: chunk_(chunk),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4, zone),
deopt_jump_table_(4, zone),
deoptimization_literals_(8, zone),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(zone),
deferred_(8, zone),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
safepoints_(zone),
zone_(zone),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -71,6 +75,7 @@ class LCodeGen BASE_EMBEDDED {
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -176,7 +181,7 @@ class LCodeGen BASE_EMBEDDED {
void Abort(const char* format, ...);
void Comment(const char* format, ...);

void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -215,12 +220,18 @@ class LCodeGen BASE_EMBEDDED {
int argc,
LInstruction* instr);
enum R1State {
R1_UNINITIALIZED,
R1_CONTAINS_TARGET
};
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind,
R1State r1_state);

void LoadHeapObject(Register result, Handle<HeapObject> object);
@@ -308,7 +319,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env);

// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@@ -317,6 +329,17 @@ class LCodeGen BASE_EMBEDDED {
Register source,
int* offset);
// Emit optimized code for integer division.
// Inputs are signed.
// All registers are clobbered.
// If 'remainder' is no_reg, it is not computed.
void EmitSignedIntegerDivisionByConstant(Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment);
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
@@ -349,6 +372,8 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
Zone* zone_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;

4
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -36,7 +36,7 @@ namespace internal {
static const Register kSavedValueRegister = { 9 };

LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL) { }
@@ -79,7 +79,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}

127
deps/v8/src/arm/macro-assembler-arm.cc

@@ -1868,10 +1868,12 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
@@ -1879,22 +1881,25 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}


void MacroAssembler::CheckFastSmiElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(hi, fail);
}
@@ -1995,24 +2000,27 @@ void MacroAssembler::CompareMap(Register obj,
Label* early_success,
CompareMapMode mode) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMap(scratch, map, early_success, mode);
}


void MacroAssembler::CompareMap(Register obj_map,
Handle<Map> map,
Label* early_success,
CompareMapMode mode) {
cmp(obj_map, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind);
if (!current_map) break;
b(eq, early_success);
cmp(obj_map, Operand(Handle<Map>(current_map)));
}
}
}
}
@@ -2865,28 +2873,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

// Check that the function's map is the same as the expected cached map.
ldr(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
cmp(map_in_out, scratch);
b(ne, no_map_match);

// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
ldr(map_in_out, FieldMemOperand(scratch, offset));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
@@ -3710,22 +3728,35 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
}


#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register reg6) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();

RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
if (reg2.is_valid()) regs |= reg2.bit();
if (reg3.is_valid()) regs |= reg3.bit();
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
int n_of_non_aliasing_regs = NumRegs(regs);

return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif
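The new AreAliased replaces O(n²) pairwise checks with set arithmetic: OR every valid register's bit into a mask and compare the mask's population count against the number of valid registers; any alias collapses two bits into one. Self-contained sketch over plain register codes:

    #include <bitset>
    #include <vector>

    // Register codes >= 0; -1 plays the role of no_reg (invalid).
    bool AreAliased(const std::vector<int>& reg_codes) {
      unsigned mask = 0;
      int n_valid = 0;
      for (size_t i = 0; i < reg_codes.size(); ++i) {
        if (reg_codes[i] < 0) continue;  // skip no_reg slots
        ++n_valid;
        mask |= 1u << reg_codes[i];      // reg.bit() analogue
      }
      // NumRegs(regs) analogue: population count of the combined mask.
      return std::bitset<32>(mask).count() != static_cast<size_t>(n_valid);
    }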
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
masm_(NULL, address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.

27
deps/v8/src/arm/macro-assembler-arm.h

@@ -85,7 +85,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
#endif
// MacroAssembler implements a collection of frequently used macros.
@@ -505,7 +512,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out,
bool can_have_holes);

void LoadGlobalFunction(int index, Register function);
@@ -795,9 +803,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
Register scratch,
Label* fail);

// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
@@ -823,6 +831,13 @@ class MacroAssembler: public Assembler {
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMap(Register obj_map,
Handle<Map> map,
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
@@ -1321,7 +1336,6 @@ class MacroAssembler: public Assembler {
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1351,7 +1365,6 @@ class CodePatcher {
int size_;  // Number of bytes of the expected patch size.
MacroAssembler masm_;  // Macro assembler used to generate the code.
};

// -----------------------------------------------------------------------------

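The new debug-only AreAliased detects register aliasing by comparing the number of valid registers against the population count of their combined bit mask, as the macro-assembler-arm.cc tail earlier in this diff shows (n_of_valid_regs vs. NumRegs(regs)). A self-contained sketch of the same idea, with a simplified Reg type standing in for V8's Register:

    #include <cassert>

    // Each register maps to one bit; `code` < 0 marks "no_reg".
    struct Reg { int code; bool is_valid() const { return code >= 0; } };

    static int PopCount(unsigned mask) {
      int n = 0;
      for (; mask != 0; mask &= mask - 1) n++;  // clear lowest set bit
      return n;
    }

    bool AreAliased(Reg r1, Reg r2, Reg r3 = {-1}, Reg r4 = {-1}) {
      int n_valid = r1.is_valid() + r2.is_valid() +
                    r3.is_valid() + r4.is_valid();
      unsigned mask = 0;
      if (r1.is_valid()) mask |= 1u << r1.code;
      if (r2.is_valid()) mask |= 1u << r2.code;
      if (r3.is_valid()) mask |= 1u << r3.code;
      if (r4.is_valid()) mask |= 1u << r4.code;
      // Any duplicate collapses two contributions into one mask bit,
      // so the counts disagree exactly when registers alias.
      return n_valid != PopCount(mask);
    }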
244
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -43,45 +43,49 @@ namespace internal {
 #ifndef V8_INTERPRETED_REGEXP
 /*
  * This assembler uses the following register assignment convention
+ * - r4 : Temporarily stores the index of capture start after a matching pass
+ *        for a global regexp.
  * - r5 : Pointer to current code object (Code*) including heap object tag.
  * - r6 : Current position in input, as negative offset from end of string.
  *        Please notice that this is the byte offset, not the character offset!
  * - r7 : Currently loaded character. Must be loaded using
  *        LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : points to tip of backtrack stack
+ * - r8 : Points to tip of backtrack stack
  * - r9 : Unused, might be used by C code and expected unchanged.
  * - r10 : End of input (points to byte after last character in input).
  * - r11 : Frame pointer. Used to access arguments, local variables and
  *         RegExp registers.
  * - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : points to tip of C stack.
+ * - r13/sp : Points to tip of C stack.
  *
  * The remaining registers are free for computations.
  * Each call to a public method should retain this convention.
  *
  * The stack will have the following structure:
- *  - fp[52]  Isolate* isolate   (Address of the current isolate)
- *  - fp[48]  direct_call        (if 1, direct call from JavaScript code,
+ *  - fp[56]  Isolate* isolate   (address of the current isolate)
+ *  - fp[52]  direct_call        (if 1, direct call from JavaScript code,
  *                                if 0, call through the runtime system).
- *  - fp[44]  stack_area_base    (High end of the memory area to use as
+ *  - fp[48]  stack_area_base    (high end of the memory area to use as
  *                                backtracking stack).
+ *  - fp[44]  capture array size (may fit multiple sets of matches)
 *  - fp[40]  int* capture_array (int[num_saved_registers_], for output).
 *  - fp[36]  secondary link/return address used by native call.
 *  --- sp when called ---
 *  - fp[32]  return address (lr).
 *  - fp[28]  old frame pointer (r11).
 *  - fp[0..24]  backup of registers r4..r10.
 *  --- frame pointer ----
- *  - fp[-4]  end of input       (Address of end of string).
- *  - fp[-8]  start of input     (Address of first character in string).
+ *  - fp[-4]  end of input       (address of end of string).
+ *  - fp[-8]  start of input     (address of first character in string).
 *  - fp[-12] start index        (character index of start).
 *  - fp[-16] void* input_string (location of a handle containing the string).
- *  - fp[-20] Offset of location before start of input (effectively character
+ *  - fp[-20] success counter    (only for global regexps to count matches).
+ *  - fp[-24] Offset of location before start of input (effectively character
 *            position -1). Used to initialize capture registers to a
 *            non-position.
- *  - fp[-24] At start           (if 1, we are starting at the start of the
+ *  - fp[-28] At start           (if 1, we are starting at the start of the
 *            string, otherwise 0)
- *  - fp[-28] register 0         (Only positions must be stored in the first
+ *  - fp[-32] register 0         (Only positions must be stored in the first
 *  -         register 1          num_saved_registers_ registers)
 *  - ...
 *  -         register num_registers-1

@@ -115,8 +119,10 @@ namespace internal {
 RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
     Mode mode,
-    int registers_to_save)
-    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+    int registers_to_save,
+    Zone* zone)
+    : NativeRegExpMacroAssembler(zone),
+      masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),

@@ -197,9 +203,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
 void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+  __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
   __ cmp(r0, Operand(0, RelocInfo::NONE));
-  BranchOrBacktrack(eq, &not_at_start);
+  BranchOrBacktrack(ne, &not_at_start);

   // If we did, are we still at the start of the input?
   __ ldr(r1, MemOperand(frame_pointer(), kInputStart));

@@ -212,9 +218,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
 void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+  __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
   __ cmp(r0, Operand(0, RelocInfo::NONE));
-  BranchOrBacktrack(eq, on_not_at_start);
+  BranchOrBacktrack(ne, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
   __ add(r0, end_of_input_address(), Operand(current_input_offset()));

@@ -432,16 +438,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
 }

-void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
-                                                     int reg2,
-                                                     Label* on_not_equal) {
-  __ ldr(r0, register_location(reg1));
-  __ ldr(r1, register_location(reg2));
-  __ cmp(r0, r1);
-  BranchOrBacktrack(ne, on_not_equal);
-}

 void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
                                                 Label* on_not_equal) {
   __ cmp(current_character(), Operand(c));

@@ -452,8 +448,12 @@ void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
 void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
                                                      uint32_t mask,
                                                      Label* on_equal) {
-  __ and_(r0, current_character(), Operand(mask));
-  __ cmp(r0, Operand(c));
+  if (c == 0) {
+    __ tst(current_character(), Operand(mask));
+  } else {
+    __ and_(r0, current_character(), Operand(mask));
+    __ cmp(r0, Operand(c));
+  }
   BranchOrBacktrack(eq, on_equal);
 }

@@ -461,8 +461,12 @@ void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
 void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
                                                         unsigned mask,
                                                         Label* on_not_equal) {
-  __ and_(r0, current_character(), Operand(mask));
-  __ cmp(r0, Operand(c));
+  if (c == 0) {
+    __ tst(current_character(), Operand(mask));
+  } else {
+    __ and_(r0, current_character(), Operand(mask));
+    __ cmp(r0, Operand(c));
+  }
   BranchOrBacktrack(ne, on_not_equal);
 }

@@ -480,6 +484,44 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
 }

+void RegExpMacroAssemblerARM::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
+  __ sub(r0, current_character(), Operand(from));
+  __ cmp(r0, Operand(to - from));
+  BranchOrBacktrack(ls, on_in_range);  // Unsigned lower-or-same condition.
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  __ sub(r0, current_character(), Operand(from));
+  __ cmp(r0, Operand(to - from));
+  BranchOrBacktrack(hi, on_not_in_range);  // Unsigned higher condition.
+}
+
+
+void RegExpMacroAssemblerARM::CheckBitInTable(
+    Handle<ByteArray> table,
+    Label* on_bit_set) {
+  __ mov(r0, Operand(table));
+  if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+    __ and_(r1, current_character(), Operand(kTableSize - 1));
+    __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ add(r1,
+           current_character(),
+           Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+  }
+  __ ldrb(r0, MemOperand(r0, r1));
+  __ cmp(r0, Operand(0));
+  BranchOrBacktrack(ne, on_bit_set);
+}

 bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
   // Range checks (c in min..max) are generally implemented by an unsigned

@@ -609,6 +651,7 @@ void RegExpMacroAssemblerARM::Fail() {
 Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+  Label return_r0;
   // Finalize code - write the entry point code now we know how many
   // registers we need.

@@ -632,8 +675,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // Set frame pointer in space for it if this is not a direct call
   // from generated code.
   __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
+  __ mov(r0, Operand(0, RelocInfo::NONE));
+  __ push(r0);  // Make room for success counter and initialize it to 0.
   __ push(r0);  // Make room for "position - 1" constant (value is irrelevant).
-  __ push(r0);  // Make room for "at start" constant (value is irrelevant).
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
   Label stack_ok;

@@ -652,13 +696,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
   __ mov(r0, Operand(EXCEPTION));
-  __ jmp(&exit_label_);
+  __ jmp(&return_r0);

   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(r0);
   __ cmp(r0, Operand(0, RelocInfo::NONE));
   // If returned value is non-zero, we exit with the returned value as result.
-  __ b(ne, &exit_label_);
+  __ b(ne, &return_r0);

   __ bind(&stack_ok);

@@ -679,41 +723,45 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // position registers.
   __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));

-  // Determine whether the start index is zero, that is at the start of the
-  // string, and store that value in a local variable.
-  __ cmp(r1, Operand(0));
-  __ mov(r1, Operand(1), LeaveCC, eq);
-  __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-  __ str(r1, MemOperand(frame_pointer(), kAtStart));
+  // Initialize code pointer register
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+
+  Label load_char_start_regexp, start_regexp;
+  // Load newline if index is at start, previous character otherwise.
+  __ cmp(r1, Operand(0, RelocInfo::NONE));
+  __ b(ne, &load_char_start_regexp);
+  __ mov(current_character(), Operand('\n'), LeaveCC, eq);
+  __ jmp(&start_regexp);
+
+  // Global regexp restarts matching here.
+  __ bind(&load_char_start_regexp);
+  // Load previous char as initial value of current character register.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&start_regexp);

   // Initialize on-stack registers.
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
     // Fill saved registers with initial value = start offset - 1
+    if (num_saved_registers_ > 8) {
       // Address of register 0.
       __ add(r1, frame_pointer(), Operand(kRegisterZero));
       __ mov(r2, Operand(num_saved_registers_));
       Label init_loop;
       __ bind(&init_loop);
       __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
       __ sub(r2, r2, Operand(1), SetCC);
       __ b(ne, &init_loop);
+    } else {
+      for (int i = 0; i < num_saved_registers_; i++) {
+        __ str(r0, register_location(i));
+      }
+    }
   }

   // Initialize backtrack stack pointer.
   __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-  // Initialize code pointer register
-  __ mov(code_pointer(), Operand(masm_->CodeObject()));
-  // Load previous char as initial value of current character register.
-  Label at_start;
-  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
-  __ cmp(r0, Operand(0, RelocInfo::NONE));
-  __ b(ne, &at_start);
-  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
-  __ jmp(&start_label_);
-  __ bind(&at_start);
-  __ mov(current_character(), Operand('\n'));
-  __ jmp(&start_label_);
+
+  __ jmp(&start_label_);

   // Exit code:
   if (success_label_.is_linked()) {

@@ -740,6 +788,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
     for (int i = 0; i < num_saved_registers_; i += 2) {
       __ ldr(r2, register_location(i));
       __ ldr(r3, register_location(i + 1));
+      if (i == 0 && global_with_zero_length_check()) {
+        // Keep capture start in r4 for the zero-length check later.
+        __ mov(r4, r2);
+      }
       if (mode_ == UC16) {
         __ add(r2, r1, Operand(r2, ASR, 1));
         __ add(r3, r1, Operand(r3, ASR, 1));

@@ -751,10 +803,58 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
       __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
       }
     }
-    __ mov(r0, Operand(SUCCESS));
+
+    if (global()) {
+      // Restart matching if the regular expression is flagged as global.
+      __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+      __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
+      __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
+      // Increment success counter.
+      __ add(r0, r0, Operand(1));
+      __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+      // Capture results have been stored, so the number of remaining global
+      // output registers is reduced by the number of stored captures.
+      __ sub(r1, r1, Operand(num_saved_registers_));
+      // Check whether we have enough room for another set of capture results.
+      __ cmp(r1, Operand(num_saved_registers_));
+      __ b(lt, &return_r0);
+
+      __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
+      // Advance the location for output.
+      __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
+      __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
+
+      // Prepare r0 to initialize registers with its value in the next run.
+      __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+      if (global_with_zero_length_check()) {
+        // Special case for zero-length matches.
+        // r4: capture start index
+        __ cmp(current_input_offset(), r4);
+        // Not a zero-length match, restart.
+        __ b(ne, &load_char_start_regexp);
+        // Offset from the end is zero if we already reached the end.
+        __ cmp(current_input_offset(), Operand(0));
+        __ b(eq, &exit_label_);
+        // Advance current position after a zero-length match.
+        __ add(current_input_offset(),
+               current_input_offset(),
+               Operand((mode_ == UC16) ? 2 : 1));
+      }
+
+      __ b(&load_char_start_regexp);
+    } else {
+      __ mov(r0, Operand(SUCCESS));
+    }
   }

   // Exit and return r0
   __ bind(&exit_label_);
+  if (global()) {
+    __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+  }
+
+  __ bind(&return_r0);
   // Skip sp past regexp registers and local variables.
   __ mov(sp, frame_pointer());
   // Restore registers r4..r11 and return (restoring lr to pc).

@@ -776,7 +876,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   __ cmp(r0, Operand(0, RelocInfo::NONE));
   // If returning non-zero, we should end execution with the given
   // result as return value.
-  __ b(ne, &exit_label_);
+  __ b(ne, &return_r0);

   // String might have moved: Reload end of string from frame.
   __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));

@@ -813,7 +913,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   __ bind(&exit_with_exception);
   // Exit with Result EXCEPTION(-1) to signal thrown exception.
   __ mov(r0, Operand(EXCEPTION));
-  __ jmp(&exit_label_);
+  __ jmp(&return_r0);
 }

 CodeDesc code_desc;

@@ -968,8 +1068,9 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
 }

-void RegExpMacroAssemblerARM::Succeed() {
+bool RegExpMacroAssemblerARM::Succeed() {
   __ jmp(&success_label_);
+  return global();
 }

@@ -1261,8 +1362,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
                                                             int characters) {
   Register offset = current_input_offset();
   if (cp_offset != 0) {
-    __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
-    offset = r0;
+    // r4 is not being used to store the capture start index at this point.
+    __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = r4;
   }
   // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
   // and the operating system running on the target allow it.

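Two of the new regexp primitives above are worth unpacking. CheckCharacterInRange folds the two-sided range test into a single unsigned comparison (the subtraction wraps around for characters below `from`, echoing the "Range checks (c in min..max)" comment in the diff), and CheckBitInTable classifies a character with one byte-table load. A minimal C++ sketch of both ideas, assuming the 128-entry table implied by kTableSize/kTableMask:

    #include <cstdint>

    // One unsigned compare replaces two signed ones: if c < from, the
    // subtraction wraps and the result exceeds (to - from).
    bool InRange(uint16_t c, uint16_t from, uint16_t to) {
      return static_cast<uint16_t>(c - from) <=
             static_cast<uint16_t>(to - from);
    }

    // Byte-table character class: entry i is nonzero iff characters whose
    // low bits equal i belong to the class. The character is masked first,
    // as in CheckBitInTable above.
    const int kTableSize = 128;   // assumed from the masking in the diff
    const int kTableMask = kTableSize - 1;

    bool BitInTable(const uint8_t table[kTableSize], uint16_t c) {
      return table[c & kTableMask] != 0;
    }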
24
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -45,7 +45,7 @@ class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
 #else  // V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
  public:
-  RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
+  RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
   virtual ~RegExpMacroAssemblerARM();
   virtual int stack_limit_slack();
   virtual void AdvanceCurrentPosition(int by);

@@ -70,7 +70,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
                                                Label* on_no_match);
-  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
   virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(unsigned c,
                                          unsigned mask,

@@ -79,6 +78,14 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
                                               uc16 minus,
                                               uc16 mask,
                                               Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
   // Checks whether the given offset from the current position is before
   // the end of the string.
   virtual void CheckPosition(int cp_offset, Label* on_outside_input);

@@ -105,7 +112,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   virtual void ReadStackPointerFromRegister(int reg);
   virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
-  virtual void Succeed();
+  virtual bool Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);

@@ -129,7 +136,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
   // Stack parameters placed by caller.
   static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
-  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;

@@ -141,10 +149,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   static const int kInputString = kStartIndex - kPointerSize;
   // When adding local variables remember to push space for them in
   // the frame in GetCode.
-  static const int kInputStartMinusOne = kInputString - kPointerSize;
-  static const int kAtStart = kInputStartMinusOne - kPointerSize;
+  static const int kSuccessfulCaptures = kInputString - kPointerSize;
+  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kAtStart - kPointerSize;
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;

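Note how the local-variable offsets chain downward from kInputString, so inserting kSuccessfulCaptures renumbers every slot below it without touching the other definitions (which is why the fp[-20]/fp[-24] comments moved in the .cc above). A compile-time sketch of the new layout, reconstructed from the stack-layout comment in the .cc diff and assuming 4-byte pointers:

    // Slots below the frame pointer, chained so one insertion
    // renumbers the rest.
    const int kPointerSize = 4;  // ARM word size assumed here
    const int kInputEnd = -kPointerSize;                           // fp[-4]
    const int kInputStart = kInputEnd - kPointerSize;              // fp[-8]
    const int kStartIndex = kInputStart - kPointerSize;            // fp[-12]
    const int kInputString = kStartIndex - kPointerSize;           // fp[-16]
    const int kSuccessfulCaptures = kInputString - kPointerSize;   // fp[-20], new
    const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;  // fp[-24]
    const int kRegisterZero = kInputStartMinusOne - kPointerSize;  // fp[-28]

    static_assert(kSuccessfulCaptures == -20,
                  "success counter sits at fp[-20]");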
12
deps/v8/src/arm/simulator-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -49,16 +49,16 @@ namespace internal {
   (entry(p0, p1, p2, p3, p4))

 typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
-                                  void*, int*, Address, int, Isolate*);
+                                  void*, int*, int, Address, int, Isolate*);

 // Call the generated regexp code directly. The code at the entry address
 // should act as a function matching the type arm_regexp_matcher.
 // The fifth argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
   (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
-      p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+      p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))

 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   reinterpret_cast<TryCatch*>(try_catch_address)

@@ -401,9 +401,9 @@ class Simulator {
   reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))

-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
   Simulator::current(Isolate::Current())->Call( \
-      entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+      entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)

 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   try_catch_address == NULL ? \

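The extra int threaded through arm_regexp_matcher and CALL_GENERATED_REGEXP_CODE sits between the capture array and the stack base; given the new kNumOutputRegisters frame slot above, it is presumably the size of the output array, which the global-regexp loop consumes one capture set at a time. A hypothetical mirror of the widened signature (the real String*/byte*/Address parameter types are V8-internal):

    // Hypothetical stand-in for the widened entry-point signature.
    typedef int (*RegexpMatcher)(const void* subject, int start_index,
                                 const char* input_start,
                                 const char* input_end,
                                 void* return_slot,  // dummy ExitFrame slot
                                 int* output,        // capture array
                                 int output_size,    // new in this revision
                                 const char* stack_base,
                                 int direct_call,
                                 void* isolate);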
342
deps/v8/src/arm/stub-cache-arm.cc

@ -435,22 +435,59 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object, Handle<JSObject> object,
int index, int index,
Handle<Map> transition, Handle<Map> transition,
Handle<String> name,
Register receiver_reg, Register receiver_reg,
Register name_reg, Register name_reg,
Register scratch, Register scratch1,
Register scratch2,
Label* miss_label) { Label* miss_label) {
// r0 : value // r0 : value
Label exit; Label exit;
LookupResult lookup(masm->isolate());
object->Lookup(*name, &lookup);
if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
// In sloppy mode, we could just return the value and be done. However, we
// might be in strict mode, where we have to throw. Since we cannot tell,
// go into slow case unconditionally.
__ jmp(miss_label);
return;
}
// Check that the map of the object hasn't changed. // Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP; : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label, __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, mode); DO_SMI_CHECK, mode);
// Perform global security token check if needed. // Perform global security token check if needed.
if (object->IsJSGlobalProxy()) { if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label); __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
// Check that we are allowed to write this.
if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
JSObject* holder;
if (lookup.IsFound()) {
holder = lookup.holder();
} else {
// Find the top object.
holder = *object;
do {
holder = JSObject::cast(holder->GetPrototype());
} while (holder->GetPrototype()->IsJSObject());
}
// We need an extra register, push
__ push(name_reg);
Label miss_pop, done_check;
CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, &miss_pop);
__ jmp(&done_check);
__ bind(&miss_pop);
__ pop(name_reg);
__ jmp(miss_label);
__ bind(&done_check);
__ pop(name_reg);
} }
// Stub never generated for non-global objects that require access // Stub never generated for non-global objects that require access
@ -473,10 +510,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
} }
if (!transition.is_null()) { if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is // Update the map of the object.
// needed because the map is never in new space. __ mov(scratch1, Operand(transition));
__ mov(ip, Operand(transition)); __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
__ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
name_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
} }
// Adjust for the number of properties stored in the object. Even in the // Adjust for the number of properties stored in the object. Even in the
@ -498,15 +545,16 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ RecordWriteField(receiver_reg, __ RecordWriteField(receiver_reg,
offset, offset,
name_reg, name_reg,
scratch, scratch1,
kLRHasNotBeenSaved, kLRHasNotBeenSaved,
kDontSaveFPRegs); kDontSaveFPRegs);
} else { } else {
// Write to the properties array. // Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize; int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array // Get the properties array
__ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); __ ldr(scratch1,
__ str(r0, FieldMemOperand(scratch, offset)); FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(r0, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi. // Skip updating write barrier if storing a smi.
__ JumpIfSmi(r0, &exit); __ JumpIfSmi(r0, &exit);
@ -514,7 +562,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address. // Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return. // Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, r0); __ mov(name_reg, r0);
__ RecordWriteField(scratch, __ RecordWriteField(scratch1,
offset, offset,
name_reg, name_reg,
receiver_reg, receiver_reg,
@ -582,6 +630,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(holder); __ push(holder);
__ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
__ push(scratch); __ push(scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address()));
__ push(scratch);
} }
@ -596,7 +646,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref = ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly), ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate()); masm->isolate());
__ mov(r0, Operand(5)); __ mov(r0, Operand(6));
__ mov(r1, Operand(ref)); __ mov(r1, Operand(ref));
CEntryStub stub(1); CEntryStub stub(1);
@ -604,9 +654,9 @@ static void CompileCallLoadPropertyWithInterceptor(
} }
static const int kFastApiCallArguments = 3; static const int kFastApiCallArguments = 4;
// Reserves space for the extra arguments to FastHandleApiCall in the // Reserves space for the extra arguments to API function in the
// caller's frame. // caller's frame.
// //
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall. // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
@ -632,7 +682,8 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[0] : holder (set by CheckPrototypes) // -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee JS function // -- sp[4] : callee JS function
// -- sp[8] : call data // -- sp[8] : call data
// -- sp[12] : last JS argument // -- sp[12] : isolate
// -- sp[16] : last JS argument
// -- ... // -- ...
// -- sp[(argc + 3) * 4] : first JS argument // -- sp[(argc + 3) * 4] : first JS argument
// -- sp[(argc + 4) * 4] : receiver // -- sp[(argc + 4) * 4] : receiver
@ -642,7 +693,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ LoadHeapObject(r5, function); __ LoadHeapObject(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset)); __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects. // Pass the additional arguments.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data()); Handle<Object> call_data(api_call_info->data());
if (masm->isolate()->heap()->InNewSpace(*call_data)) { if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@ -651,13 +702,15 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else { } else {
__ Move(r6, call_data); __ Move(r6, call_data);
} }
// Store JS function and call data. __ mov(r7, Operand(ExternalReference::isolate_address()));
__ stm(ib, sp, r5.bit() | r6.bit()); // Store JS function, call data and isolate.
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
// r2 points to call data as expected by Arguments // Prepare arguments.
// (refer to layout above). __ add(r2, sp, Operand(3 * kPointerSize));
__ add(r2, sp, Operand(2 * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
const int kApiStackSpace = 4; const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL); FrameScope frame_scope(masm, StackFrame::MANUAL);
@ -666,9 +719,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// r0 = v8::Arguments& // r0 = v8::Arguments&
// Arguments is after the return address. // Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize)); __ add(r0, sp, Operand(1 * kPointerSize));
// v8::Arguments::implicit_args = data // v8::Arguments::implicit_args_
__ str(r2, MemOperand(r0, 0 * kPointerSize)); __ str(r2, MemOperand(r0, 0 * kPointerSize));
// v8::Arguments::values = last argument // v8::Arguments::values_
__ add(ip, r2, Operand(argc * kPointerSize)); __ add(ip, r2, Operand(argc * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize)); __ str(ip, MemOperand(r0, 1 * kPointerSize));
// v8::Arguments::length_ = argc // v8::Arguments::length_ = argc
@ -845,7 +898,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference( __ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall), ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()), masm->isolate()),
5); 6);
// Restore the name_ register. // Restore the name_ register.
__ pop(name_); __ pop(name_);
// Leave the internal frame. // Leave the internal frame.
@ -1204,7 +1257,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
} else { } else {
__ Move(scratch3, Handle<Object>(callback->data())); __ Move(scratch3, Handle<Object>(callback->data()));
} }
__ Push(reg, scratch3, name_reg); __ Push(reg, scratch3);
__ mov(scratch3, Operand(ExternalReference::isolate_address()));
__ Push(scratch3, name_reg);
__ mov(r0, sp); // r0 = Handle<String> __ mov(r0, sp); // r0 = Handle<String>
const int kApiStackSpace = 1; const int kApiStackSpace = 1;
@ -1216,7 +1271,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
__ str(scratch2, MemOperand(sp, 1 * kPointerSize)); __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
const int kStackUnwindSpace = 4; const int kStackUnwindSpace = 5;
Address getter_address = v8::ToCData<Address>(callback->getter()); Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address); ApiFunction fun(getter_address);
ExternalReference ref = ExternalReference ref =
@ -1252,8 +1307,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
compile_followup_inline = true; compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS && } else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) { lookup->GetCallbackObject()->IsAccessorInfo()) {
compile_followup_inline = AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL; compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
} }
} }
@ -1344,20 +1400,19 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (!receiver.is(holder_reg)) { if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg)); ASSERT(scratch1.is(holder_reg));
__ Push(receiver, holder_reg); __ Push(receiver, holder_reg);
__ ldr(scratch3,
FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
__ Push(scratch3, scratch2, name_reg);
} else { } else {
__ push(receiver); __ push(receiver);
__ ldr(scratch3, __ push(holder_reg);
FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
__ Push(holder_reg, scratch3, scratch2, name_reg);
} }
__ ldr(scratch3,
FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
__ mov(scratch1, Operand(ExternalReference::isolate_address()));
__ Push(scratch3, scratch1, scratch2, name_reg);
ExternalReference ref = ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty), ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
masm()->isolate()); masm()->isolate());
__ TailCallExternalReference(ref, 5, 1); __ TailCallExternalReference(ref, 6, 1);
} }
} else { // !compile_followup_inline } else { // !compile_followup_inline
// Call the runtime system to load the interceptor. // Call the runtime system to load the interceptor.
@ -1371,7 +1426,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
ExternalReference ref = ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
masm()->isolate()); masm()->isolate());
__ TailCallExternalReference(ref, 5, 1); __ TailCallExternalReference(ref, 6, 1);
} }
} }
@ -1575,16 +1630,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object); __ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out. // In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object); __ bind(&not_fast_object);
__ CheckFastSmiOnlyElements(r3, r7, &call_builtin); __ CheckFastSmiElements(r3, r7, &call_builtin);
// edx: receiver // edx: receiver
// r3: map // r3: map
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS, FAST_ELEMENTS,
r3, r3,
r7, r7,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
r7,
&call_builtin); &call_builtin);
__ mov(r2, receiver); __ mov(r2, receiver);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ bind(&fast_object); __ bind(&fast_object);
} else { } else {
__ CheckFastObjectElements(r3, r3, &call_builtin); __ CheckFastObjectElements(r3, r3, &call_builtin);
@ -1739,7 +1807,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// We can't address the last element in one operation. Compute the more // We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on. // expensive shift first, and use an offset later on.
__ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6); __ cmp(r0, r6);
__ b(eq, &call_builtin); __ b(eq, &call_builtin);
@ -1747,7 +1815,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole. // Fill with the hole.
__ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Drop(argc + 1); __ Drop(argc + 1);
__ Ret(); __ Ret();
@ -2539,7 +2607,13 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// ----------------------------------- // -----------------------------------
Label miss; Label miss;
GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss); GenerateStoreField(masm(),
object,
index,
transition,
name,
r1, r2, r3, r4,
&miss);
__ bind(&miss); __ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET); __ Jump(ic, RelocInfo::CODE_TARGET);
@ -2594,6 +2668,51 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
} }
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> receiver,
Handle<JSFunction> setter,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the map of the object hasn't changed.
__ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
ALLOW_ELEMENT_TRANSITION_MAPS);
{
FrameScope scope(masm(), StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(r0);
// Call the JavaScript getter with the receiver and the value on the stack.
__ Push(r1, r0);
ParameterCount actual(1);
__ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
// We have to return the passed value, not the return value of the setter.
__ pop(r0);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(CALLBACKS, name);
}
Handle<Code> StoreStubCompiler::CompileStoreInterceptor( Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> receiver, Handle<JSObject> receiver,
Handle<String> name) { Handle<String> name) {
@ -2761,6 +2880,44 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
} }
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the maps haven't changed.
__ JumpIfSmi(r0, &miss);
CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
{
FrameScope scope(masm(), StackFrame::INTERNAL);
// Call the JavaScript getter with the receiver on the stack.
__ push(r0);
ParameterCount actual(0);
__ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(CALLBACKS, name);
}
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object, Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder, Handle<JSObject> holder,
Handle<JSFunction> value, Handle<JSFunction> value,
@ -3085,7 +3242,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// r3 is used as scratch register. r1 and r2 keep their values if a jump to // r3 is used as scratch register. r1 and r2 keep their values if a jump to
// the miss label is generated. // the miss label is generated.
GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss); GenerateStoreField(masm(),
object,
index,
transition,
name,
r2, r1, r3, r4,
&miss);
__ bind(&miss); __ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, r3, r4); __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@ -3366,8 +3529,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS: case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE(); UNREACHABLE();
@ -3377,6 +3543,44 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
} }
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
Register scratch1,
DwVfpRegister double_scratch0,
Label* fail) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
// range.
__ JumpIfSmi(key, &key_ok);
__ CheckMap(key,
scratch0,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
double_scratch0.low(),
double_scratch0,
scratch0,
scratch1,
kCheckForInexactConversion);
__ b(ne, fail);
__ vmov(scratch0, double_scratch0.low());
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ bind(&key_ok);
} else {
// Check that the key is a smi.
__ JumpIfNotSmi(key, fail);
}
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray( void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm, MacroAssembler* masm,
ElementsKind elements_kind) { ElementsKind elements_kind) {
@ -3393,8 +3597,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// This stub is meant to be tail-jumped to, the receiver must already // This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi. // have been verified by the caller to not be a smi.
// Check that the key is a smi. // Check that the key is a smi or a heap number convertible to a smi.
__ JumpIfNotSmi(key, &miss_force_generic); GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r3: elements array // r3: elements array
@ -3453,8 +3657,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} }
break; break;
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS: case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE(); UNREACHABLE();
@ -3724,8 +3931,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// This stub is meant to be tail-jumped to, the receiver must already // This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi. // have been verified by the caller to not be a smi.
// Check that the key is a smi. // Check that the key is a smi or a heap number convertible to a smi.
__ JumpIfNotSmi(key, &miss_force_generic); GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -3794,8 +4001,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
} }
break; break;
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS: case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3858,8 +4068,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3998,8 +4211,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -4050,8 +4266,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(r0, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic);

   // Get the elements array.
   __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -4102,8 +4318,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);

   // Get the elements array.
   __ ldr(elements_reg,
@@ -4178,10 +4394,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);

-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+  if (IsFastSmiElementsKind(elements_kind)) {
     __ JumpIfNotSmi(value_reg, &transition_elements_kind);
   }
@@ -4209,7 +4425,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
                 DONT_DO_SMI_CHECK);

   __ bind(&finish_store);
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+  if (IsFastSmiElementsKind(elements_kind)) {
     __ add(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4219,7 +4435,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
            Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
     __ str(value_reg, MemOperand(scratch));
   } else {
-    ASSERT(elements_kind == FAST_ELEMENTS);
+    ASSERT(IsFastObjectElementsKind(elements_kind));
     __ add(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4345,7 +4561,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);

   __ ldr(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
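Note: the recurring change in these stubs swaps a bare JumpIfNotSmi for GenerateSmiKeyCheck, so the fast paths also accept heap-number keys whose value round-trips exactly to a smi. A minimal sketch of that accept/reject rule in plain C++ rather than generated ARM code; the helper name and kSmiMaxValue constant are illustrative assumptions, not V8 API:

#include <cmath>

// Illustrative: 31-bit smi payload, as on 32-bit ARM.
const int kSmiMaxValue = 0x3FFFFFFF;

// Sketch: a key is usable by the fast stub if it is (or exactly
// converts to) a small integer; anything else falls through to the
// generic IC, the "miss_force_generic" path above.
bool KeyIsConvertibleToSmi(double key) {
  double integer_part;
  if (std::modf(key, &integer_part) != 0.0) return false;  // fractional key
  return integer_part >= -kSmiMaxValue - 1 &&
         integer_part <= kSmiMaxValue;  // fits in the smi range
}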

166
deps/v8/src/array.js

@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -465,15 +465,19 @@ function ArrayPush() {
 }
 
+// Returns an array containing the array elements of the object followed
+// by the array elements of each argument in order. See ECMA-262,
+// section 15.4.4.7.
 function ArrayConcat(arg1) {  // length == 1
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
     throw MakeTypeError("called_on_null_or_undefined",
                         ["Array.prototype.concat"]);
   }
 
+  var array = ToObject(this);
   var arg_count = %_ArgumentsLength();
   var arrays = new InternalArray(1 + arg_count);
-  arrays[0] = this;
+  arrays[0] = array;
   for (var i = 0; i < arg_count; i++) {
     arrays[i + 1] = %_Arguments(i);
   }
@@ -1027,13 +1031,28 @@ function ArrayFilter(f, receiver) {
   var result = new $Array();
   var accumulator = new InternalArray();
   var accumulator_length = 0;
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (%_CallFunction(receiver, element, i, array, f)) {
-        accumulator[accumulator_length++] = element;
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        if (%_CallFunction(receiver, element, i, array, f)) {
+          accumulator[accumulator_length++] = element;
+        }
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        if (%_CallFunction(receiver, element, i, array, f)) {
+          accumulator[accumulator_length++] = element;
+        }
       }
     }
+    // End of duplicate.
   }
   %MoveArrayContents(accumulator, result);
   return result;
@@ -1059,12 +1078,24 @@ function ArrayForEach(f, receiver) {
   } else if (!IS_SPEC_OBJECT(receiver)) {
     receiver = ToObject(receiver);
   }
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      %_CallFunction(receiver, element, i, array, f);
+
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        %_CallFunction(receiver, element, i, array, f);
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        %_CallFunction(receiver, element, i, array, f);
+      }
     }
+    // End of duplicate.
   }
 }
@@ -1091,11 +1122,24 @@ function ArraySome(f, receiver) {
     receiver = ToObject(receiver);
   }
 
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (%_CallFunction(receiver, element, i, array, f)) return true;
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        if (%_CallFunction(receiver, element, i, array, f)) return true;
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        if (%_CallFunction(receiver, element, i, array, f)) return true;
+      }
     }
+    // End of duplicate.
   }
   return false;
 }
@@ -1121,11 +1165,24 @@ function ArrayEvery(f, receiver) {
     receiver = ToObject(receiver);
   }
 
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (!%_CallFunction(receiver, element, i, array, f)) return false;
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        if (!%_CallFunction(receiver, element, i, array, f)) return false;
+      }
    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        if (!%_CallFunction(receiver, element, i, array, f)) return false;
+      }
+    }
+    // End of duplicate.
   }
   return true;
 }
@@ -1152,11 +1209,24 @@ function ArrayMap(f, receiver) {
   var result = new $Array();
   var accumulator = new InternalArray(length);
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      accumulator[i] = %_CallFunction(receiver, element, i, array, f);
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        accumulator[i] = %_CallFunction(receiver, element, i, array, f);
+      }
     }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        accumulator[i] = %_CallFunction(receiver, element, i, array, f);
+      }
+    }
+    // End of duplicate.
   }
   %MoveArrayContents(accumulator, result);
   return result;
@@ -1311,11 +1381,27 @@ function ArrayReduce(callback, current) {
   }
 
   var receiver = %GetDefaultReceiver(callback);
-  for (; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      current = %_CallFunction(receiver, current, element, i, array, callback);
+
+  if (%DebugCallbackSupportsStepping(callback)) {
+    for (; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(callback);
+        current =
+            %_CallFunction(receiver, current, element, i, array, callback);
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        current =
+            %_CallFunction(receiver, current, element, i, array, callback);
+      }
     }
+    // End of duplicate.
   }
   return current;
 }
@@ -1348,11 +1434,27 @@ function ArrayReduceRight(callback, current) {
   }
 
   var receiver = %GetDefaultReceiver(callback);
-  for (; i >= 0; i--) {
-    if (i in array) {
-      var element = array[i];
-      current = %_CallFunction(receiver, current, element, i, array, callback);
+
+  if (%DebugCallbackSupportsStepping(callback)) {
+    for (; i >= 0; i--) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(callback);
+        current =
+            %_CallFunction(receiver, current, element, i, array, callback);
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (; i >= 0; i--) {
+      if (i in array) {
+        var element = array[i];
+        current =
+            %_CallFunction(receiver, current, element, i, array, callback);
+      }
    }
+    // End of duplicate.
  }
   return current;
 }
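Note: each of these array builtins now carries two copies of its loop, selected once per call. The point is that %DebugPrepareStepInIfStepping must run before every callback invocation while a debugger may step in, but the common case should not pay that cost on every iteration. A hedged C++ analogue of the loop-splitting pattern; the names are hypothetical, not V8 API:

#include <cstdio>
#include <vector>

void PrepareStepIn() { /* hypothetical per-call debugger hook */ }
void Visit(int x) { std::printf("%d\n", x); }

// Hoist the invariant "is the debugger stepping?" test out of the hot
// loop by duplicating the loop body, exactly as the builtins above do.
void ForEach(const std::vector<int>& xs, bool debugger_stepping) {
  if (debugger_stepping) {
    for (int x : xs) {
      PrepareStepIn();  // extra work only on the debug path
      Visit(x);
    }
  } else {
    // Duplicate of the previous loop sans the debugger hook, so the
    // common case pays nothing for debug support.
    for (int x : xs) Visit(x);
  }
}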

95
deps/v8/src/assembler.cc

@@ -99,21 +99,7 @@ struct DoubleConstant BASE_EMBEDDED {
   double the_hole_nan;
 };
 
-struct InitializeDoubleConstants {
-  static void Construct(DoubleConstant* double_constants) {
-    double_constants->min_int = kMinInt;
-    double_constants->one_half = 0.5;
-    double_constants->minus_zero = -0.0;
-    double_constants->uint8_max_value = 255;
-    double_constants->zero = 0.0;
-    double_constants->canonical_non_hole_nan = OS::nan_value();
-    double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
-    double_constants->negative_infinity = -V8_INFINITY;
-  }
-};
-
-static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
-    double_constants = LAZY_INSTANCE_INITIALIZER;
+static DoubleConstant double_constants;
 
 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@@ -726,6 +712,18 @@ void RelocInfo::Verify() {
 // -----------------------------------------------------------------------------
 // Implementation of ExternalReference
 
+void ExternalReference::SetUp() {
+  double_constants.min_int = kMinInt;
+  double_constants.one_half = 0.5;
+  double_constants.minus_zero = -0.0;
+  double_constants.uint8_max_value = 255;
+  double_constants.zero = 0.0;
+  double_constants.canonical_non_hole_nan = OS::nan_value();
+  double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
+  double_constants.negative_infinity = -V8_INFINITY;
+}
+
 ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
     : address_(Redirect(isolate, Builtins::c_function_address(id))) {}
@@ -957,51 +955,66 @@ ExternalReference ExternalReference::scheduled_exception_address(
 }
 
+ExternalReference ExternalReference::address_of_pending_message_obj(
+    Isolate* isolate) {
+  return ExternalReference(isolate->pending_message_obj_address());
+}
+
+ExternalReference ExternalReference::address_of_has_pending_message(
+    Isolate* isolate) {
+  return ExternalReference(isolate->has_pending_message_address());
+}
+
+ExternalReference ExternalReference::address_of_pending_message_script(
+    Isolate* isolate) {
+  return ExternalReference(isolate->pending_message_script_address());
+}
+
 ExternalReference ExternalReference::address_of_min_int() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->min_int));
+  return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
 }
 
 ExternalReference ExternalReference::address_of_one_half() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->one_half));
+  return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
 }
 
 ExternalReference ExternalReference::address_of_minus_zero() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->minus_zero));
+  return ExternalReference(
+      reinterpret_cast<void*>(&double_constants.minus_zero));
 }
 
 ExternalReference ExternalReference::address_of_zero() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->zero));
+  return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
 }
 
 ExternalReference ExternalReference::address_of_uint8_max_value() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->uint8_max_value));
+  return ExternalReference(
+      reinterpret_cast<void*>(&double_constants.uint8_max_value));
 }
 
 ExternalReference ExternalReference::address_of_negative_infinity() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->negative_infinity));
+  return ExternalReference(
+      reinterpret_cast<void*>(&double_constants.negative_infinity));
 }
 
 ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->canonical_non_hole_nan));
+  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
 }
 
 ExternalReference ExternalReference::address_of_the_hole_nan() {
-  return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->the_hole_nan));
+  return ExternalReference(
+      reinterpret_cast<void*>(&double_constants.the_hole_nan));
 }
@@ -1138,6 +1151,12 @@ ExternalReference ExternalReference::math_log_double_function(
 }
 
+ExternalReference ExternalReference::page_flags(Page* page) {
+  return ExternalReference(reinterpret_cast<Address>(page) +
+                           MemoryChunk::kFlagsOffset);
+}
+
 // Helper function to compute x^y, where y is known to be an
 // integer. Uses binary decomposition to limit the number of
 // multiplications; see the discussion in "Hacker's Delight" by Henry
@@ -1158,6 +1177,20 @@ double power_double_int(double x, int y) {
 double power_double_double(double x, double y) {
+#ifdef __MINGW64_VERSION_MAJOR
+  // MinGW64 has a custom implementation for pow. This handles certain
+  // special cases that are different.
+  if ((x == 0.0 || isinf(x)) && isfinite(y)) {
+    double f;
+    if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+  }
+
+  if (x == 2.0) {
+    int y_int = static_cast<int>(y);
+    if (y == y_int) return ldexp(1.0, y_int);
+  }
+#endif
+
   // The checks for special cases can be dropped in ia32 because it has already
   // been done in generated code before bailing out here.
   if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
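Note: the double_constants change replaces a LazyInstance with a plain static struct filled in by an explicit ExternalReference::SetUp() call, so generated code can embed field addresses with no lazy-init guard on each access. A minimal sketch of that pattern under illustrative names, not the actual V8 declarations:

// Plain static POD: zero-initialized at load time, no constructor runs.
struct Constants {
  double one_half;
  double minus_zero;
};
static Constants g_constants;

// Called exactly once during VM setup, before any code embeds a
// pointer such as &g_constants.one_half into generated machine code.
void SetUpConstants() {
  g_constants.one_half = 0.5;
  g_constants.minus_zero = -0.0;
}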

7
deps/v8/src/assembler.h

@@ -539,6 +539,8 @@ class ExternalReference BASE_EMBEDDED {
     DIRECT_GETTER_CALL
   };
 
+  static void SetUp();
+
   typedef void* ExternalReferenceRedirector(void* original, Type type);
 
   ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
@@ -638,6 +640,9 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference handle_scope_level_address();
 
   static ExternalReference scheduled_exception_address(Isolate* isolate);
+  static ExternalReference address_of_pending_message_obj(Isolate* isolate);
+  static ExternalReference address_of_has_pending_message(Isolate* isolate);
+  static ExternalReference address_of_pending_message_script(Isolate* isolate);
 
   // Static variables containing common double constants.
   static ExternalReference address_of_min_int();
@@ -654,6 +659,8 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference math_tan_double_function(Isolate* isolate);
   static ExternalReference math_log_double_function(Isolate* isolate);
 
+  static ExternalReference page_flags(Page* page);
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT

269
deps/v8/src/ast.cc

@@ -242,8 +242,11 @@ bool IsEqualNumber(void* first, void* second) {
 }
 
-void ObjectLiteral::CalculateEmitStore() {
-  ZoneHashMap table(Literal::Match);
+void ObjectLiteral::CalculateEmitStore(Zone* zone) {
+  ZoneAllocationPolicy allocator(zone);
+
+  ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
+                    allocator);
   for (int i = properties()->length() - 1; i >= 0; i--) {
     ObjectLiteral::Property* property = properties()->at(i);
     Literal* literal = property->key();
@@ -252,23 +255,23 @@ void ObjectLiteral::CalculateEmitStore() {
     // If the key of a computed property is in the table, do not emit
     // a store for the property later.
     if (property->kind() == ObjectLiteral::Property::COMPUTED &&
-        table.Lookup(literal, hash, false) != NULL) {
+        table.Lookup(literal, hash, false, allocator) != NULL) {
       property->set_emit_store(false);
     } else {
       // Add key to the table.
-      table.Lookup(literal, hash, true);
+      table.Lookup(literal, hash, true, allocator);
     }
   }
 }
 
-void TargetCollector::AddTarget(Label* target) {
+void TargetCollector::AddTarget(Label* target, Zone* zone) {
   // Add the label to the collector, but discard duplicates.
   int length = targets_.length();
   for (int i = 0; i < length; i++) {
     if (targets_[i] == target) return;
   }
-  targets_.Add(target);
+  targets_.Add(target, zone);
 }
@@ -397,7 +400,8 @@ bool FunctionDeclaration::IsInlineable() const {
 // ----------------------------------------------------------------------------
 // Recording of type feedback
 
-void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+                                  Zone* zone) {
   // Record type feedback from the oracle in the AST.
   is_uninitialized_ = oracle->LoadIsUninitialized(this);
   if (is_uninitialized_) return;
@@ -421,15 +425,17 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
     is_string_access_ = true;
   } else if (is_monomorphic_) {
-    receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this));
+    receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
+                        zone);
   } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
-    receiver_types_.Reserve(kMaxKeyedPolymorphism);
+    receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
     oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
   }
 }
 
-void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+                                    Zone* zone) {
   Property* prop = target()->AsProperty();
   ASSERT(prop != NULL);
   is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
@@ -441,22 +447,23 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
     oracle->StoreReceiverTypes(this, name, &receiver_types_);
   } else if (is_monomorphic_) {
     // Record receiver type for monomorphic keyed stores.
-    receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
+    receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
   } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
-    receiver_types_.Reserve(kMaxKeyedPolymorphism);
+    receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
     oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
   }
 }
 
-void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+                                        Zone* zone) {
   is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
   receiver_types_.Clear();
   if (is_monomorphic_) {
     // Record receiver type for monomorphic keyed stores.
-    receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
+    receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
   } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
-    receiver_types_.Reserve(kMaxKeyedPolymorphism);
+    receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
     oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
   }
 }
@@ -507,7 +514,6 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
       // We don't know the target.
       return false;
     case MAP_TRANSITION:
-    case ELEMENTS_TRANSITION:
     case CONSTANT_TRANSITION:
     case NULL_DESCRIPTOR:
       // Perhaps something interesting is up in the prototype chain...
@@ -784,7 +790,7 @@ bool RegExpCapture::IsAnchoredAtEnd() {
 // output formats are alike.
 class RegExpUnparser: public RegExpVisitor {
  public:
-  RegExpUnparser();
+  explicit RegExpUnparser(Zone* zone);
   void VisitCharacterRange(CharacterRange that);
   SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
 #define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
@@ -794,10 +800,11 @@ class RegExpUnparser: public RegExpVisitor {
   StringStream* stream() { return &stream_; }
   HeapStringAllocator alloc_;
   StringStream stream_;
+  Zone* zone_;
 };
 
-RegExpUnparser::RegExpUnparser() : stream_(&alloc_) {
+RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) {
 }
@@ -837,9 +844,9 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
   if (that->is_negated())
     stream()->Add("^");
   stream()->Add("[");
-  for (int i = 0; i < that->ranges()->length(); i++) {
+  for (int i = 0; i < that->ranges(zone_)->length(); i++) {
     if (i > 0) stream()->Add(" ");
-    VisitCharacterRange(that->ranges()->at(i));
+    VisitCharacterRange(that->ranges(zone_)->at(i));
   }
   stream()->Add("]");
   return NULL;
@@ -941,8 +948,8 @@ void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
 }
 
-SmartArrayPointer<const char> RegExpTree::ToString() {
-  RegExpUnparser unparser;
+SmartArrayPointer<const char> RegExpTree::ToString(Zone* zone) {
+  RegExpUnparser unparser(zone);
   Accept(&unparser, NULL);
   return unparser.ToString();
 }
@@ -962,6 +969,14 @@ RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
 }
 
+static int IncreaseBy(int previous, int increase) {
+  if (RegExpTree::kInfinity - previous < increase) {
+    return RegExpTree::kInfinity;
+  } else {
+    return previous + increase;
+  }
+}
+
 RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
     : nodes_(nodes) {
   ASSERT(nodes->length() > 1);
@@ -969,13 +984,10 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
   max_match_ = 0;
   for (int i = 0; i < nodes->length(); i++) {
     RegExpTree* node = nodes->at(i);
-    min_match_ += node->min_match();
+    int node_min_match = node->min_match();
+    min_match_ = IncreaseBy(min_match_, node_min_match);
     int node_max_match = node->max_match();
-    if (kInfinity - max_match_ < node_max_match) {
-      max_match_ = kInfinity;
-    } else {
-      max_match_ += node->max_match();
-    }
+    max_match_ = IncreaseBy(max_match_, node_max_match);
   }
 }
@@ -993,138 +1005,78 @@ CaseClause::CaseClause(Isolate* isolate,
 }
 
-#define INCREASE_NODE_COUNT(NodeType) \
+#define REGULAR_NODE(NodeType) \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
     increase_node_count(); \
   }
+#define DONT_OPTIMIZE_NODE(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+    add_flag(kDontOptimize); \
+    add_flag(kDontInline); \
+    add_flag(kDontSelfOptimize); \
+  }
+#define DONT_INLINE_NODE(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+    add_flag(kDontInline); \
+  }
+#define DONT_SELFOPTIMIZE_NODE(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+    add_flag(kDontSelfOptimize); \
+  }
 
-INCREASE_NODE_COUNT(VariableDeclaration)
-INCREASE_NODE_COUNT(FunctionDeclaration)
-INCREASE_NODE_COUNT(ModuleDeclaration)
-INCREASE_NODE_COUNT(ImportDeclaration)
-INCREASE_NODE_COUNT(ExportDeclaration)
-INCREASE_NODE_COUNT(ModuleLiteral)
-INCREASE_NODE_COUNT(ModuleVariable)
-INCREASE_NODE_COUNT(ModulePath)
-INCREASE_NODE_COUNT(ModuleUrl)
-INCREASE_NODE_COUNT(Block)
-INCREASE_NODE_COUNT(ExpressionStatement)
-INCREASE_NODE_COUNT(EmptyStatement)
-INCREASE_NODE_COUNT(IfStatement)
-INCREASE_NODE_COUNT(ContinueStatement)
-INCREASE_NODE_COUNT(BreakStatement)
-INCREASE_NODE_COUNT(ReturnStatement)
-INCREASE_NODE_COUNT(Conditional)
-INCREASE_NODE_COUNT(Literal)
-INCREASE_NODE_COUNT(ObjectLiteral)
-INCREASE_NODE_COUNT(Assignment)
-INCREASE_NODE_COUNT(Throw)
-INCREASE_NODE_COUNT(Property)
-INCREASE_NODE_COUNT(UnaryOperation)
-INCREASE_NODE_COUNT(CountOperation)
-INCREASE_NODE_COUNT(BinaryOperation)
-INCREASE_NODE_COUNT(CompareOperation)
-INCREASE_NODE_COUNT(ThisFunction)
-INCREASE_NODE_COUNT(Call)
-INCREASE_NODE_COUNT(CallNew)
-
-#undef INCREASE_NODE_COUNT
-
-void AstConstructionVisitor::VisitWithStatement(WithStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-void AstConstructionVisitor::VisitSwitchStatement(SwitchStatement* node) {
-  increase_node_count();
-  add_flag(kDontInline);
-}
-
-void AstConstructionVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-void AstConstructionVisitor::VisitWhileStatement(WhileStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-void AstConstructionVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-void AstConstructionVisitor::VisitTryFinallyStatement(
-    TryFinallyStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-void AstConstructionVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-void AstConstructionVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);
-}
-
-void AstConstructionVisitor::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-void AstConstructionVisitor::VisitVariableProxy(VariableProxy* node) {
-  increase_node_count();
-  // In theory, we'd have to add:
-  // if(node->var()->IsLookupSlot()) { add_flag(kDontInline); }
-  // However, node->var() is usually not bound yet at VariableProxy creation
-  // time, and LOOKUP variables only result from constructs that cannot
-  // be inlined anyway.
-}
-
-void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
-}
-
-void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
-}
-
+REGULAR_NODE(VariableDeclaration)
+REGULAR_NODE(FunctionDeclaration)
+REGULAR_NODE(Block)
+REGULAR_NODE(ExpressionStatement)
+REGULAR_NODE(EmptyStatement)
+REGULAR_NODE(IfStatement)
+REGULAR_NODE(ContinueStatement)
+REGULAR_NODE(BreakStatement)
+REGULAR_NODE(ReturnStatement)
+REGULAR_NODE(SwitchStatement)
+REGULAR_NODE(Conditional)
+REGULAR_NODE(Literal)
+REGULAR_NODE(ObjectLiteral)
+REGULAR_NODE(Assignment)
+REGULAR_NODE(Throw)
+REGULAR_NODE(Property)
+REGULAR_NODE(UnaryOperation)
+REGULAR_NODE(CountOperation)
+REGULAR_NODE(BinaryOperation)
+REGULAR_NODE(CompareOperation)
+REGULAR_NODE(ThisFunction)
+REGULAR_NODE(Call)
+REGULAR_NODE(CallNew)
+// In theory, for VariableProxy we'd have to add:
+// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
+// But node->var() is usually not bound yet at VariableProxy creation time, and
+// LOOKUP variables only result from constructs that cannot be inlined anyway.
+REGULAR_NODE(VariableProxy)
+
+DONT_OPTIMIZE_NODE(ModuleDeclaration)
+DONT_OPTIMIZE_NODE(ImportDeclaration)
+DONT_OPTIMIZE_NODE(ExportDeclaration)
+DONT_OPTIMIZE_NODE(ModuleLiteral)
+DONT_OPTIMIZE_NODE(ModuleVariable)
+DONT_OPTIMIZE_NODE(ModulePath)
+DONT_OPTIMIZE_NODE(ModuleUrl)
+DONT_OPTIMIZE_NODE(WithStatement)
+DONT_OPTIMIZE_NODE(TryCatchStatement)
+DONT_OPTIMIZE_NODE(TryFinallyStatement)
+DONT_OPTIMIZE_NODE(DebuggerStatement)
+DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
+
+DONT_INLINE_NODE(FunctionLiteral)
+DONT_INLINE_NODE(RegExpLiteral)  // TODO(1322): Allow materialized literals.
+DONT_INLINE_NODE(ArrayLiteral)  // TODO(1322): Allow materialized literals.
+
+DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
+DONT_SELFOPTIMIZE_NODE(WhileStatement)
+DONT_SELFOPTIMIZE_NODE(ForStatement)
+DONT_SELFOPTIMIZE_NODE(ForInStatement)
 
 void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
   increase_node_count();
@@ -1142,6 +1094,11 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
   }
 }
 
+#undef REGULAR_NODE
+#undef DONT_OPTIMIZE_NODE
+#undef DONT_INLINE_NODE
+#undef DONT_SELFOPTIMIZE_NODE
+
 Handle<String> Literal::ToString() {
   if (handle_->IsString()) return Handle<String>::cast(handle_);
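Note: IncreaseBy replaces the inline overflow check with a shared saturating add, and the RegExpAlternative constructor now clamps min_match_ as well as max_match_. A small standalone demo of the saturation rule, with a stand-in constant for RegExpTree::kInfinity:

#include <cassert>

const int kInfinity = 1 << 30;  // stand-in for RegExpTree::kInfinity

int IncreaseBy(int previous, int increase) {
  // Clamp instead of overflowing: once a match length reaches
  // "infinity" it stays there.
  if (kInfinity - previous < increase) return kInfinity;
  return previous + increase;
}

int main() {
  assert(IncreaseBy(3, 4) == 7);                      // ordinary addition
  assert(IncreaseBy(kInfinity - 1, 5) == kInfinity);  // saturates
  assert(IncreaseBy(kInfinity, 1) == kInfinity);      // stays saturated
  return 0;
}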

81
deps/v8/src/ast.h

@@ -266,16 +266,17 @@ class Statement: public AstNode {
 class SmallMapList {
  public:
   SmallMapList() {}
-  explicit SmallMapList(int capacity) : list_(capacity) {}
+  SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
 
-  void Reserve(int capacity) { list_.Reserve(capacity); }
+  void Reserve(int capacity, Zone* zone) { list_.Reserve(capacity, zone); }
   void Clear() { list_.Clear(); }
+  void Sort() { list_.Sort(); }
 
   bool is_empty() const { return list_.is_empty(); }
   int length() const { return list_.length(); }
 
-  void Add(Handle<Map> handle) {
-    list_.Add(handle.location());
+  void Add(Handle<Map> handle, Zone* zone) {
+    list_.Add(handle.location(), zone);
   }
 
   Handle<Map> at(int i) const {
@@ -415,13 +416,15 @@ class Block: public BreakableStatement {
  public:
   DECLARE_NODE_TYPE(Block)
 
-  void AddStatement(Statement* statement) { statements_.Add(statement); }
+  void AddStatement(Statement* statement, Zone* zone) {
+    statements_.Add(statement, zone);
+  }
 
   ZoneList<Statement*>* statements() { return &statements_; }
   bool is_initializer_block() const { return is_initializer_block_; }
 
-  Scope* block_scope() const { return block_scope_; }
-  void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; }
+  Scope* scope() const { return scope_; }
+  void set_scope(Scope* scope) { scope_ = scope; }
 
  protected:
   template<class> friend class AstNodeFactory;
@@ -429,17 +432,18 @@ class Block: public BreakableStatement {
   Block(Isolate* isolate,
         ZoneStringList* labels,
         int capacity,
-        bool is_initializer_block)
+        bool is_initializer_block,
+        Zone* zone)
       : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
-        statements_(capacity),
+        statements_(capacity, zone),
         is_initializer_block_(is_initializer_block),
-        block_scope_(NULL) {
+        scope_(NULL) {
   }
 
  private:
   ZoneList<Statement*> statements_;
   bool is_initializer_block_;
-  Scope* block_scope_;
+  Scope* scope_;
 };
@@ -594,7 +598,7 @@ class Module: public AstNode {
   Interface* interface() const { return interface_; }
 
  protected:
-  Module() : interface_(Interface::NewModule()) {}
+  explicit Module(Zone* zone) : interface_(Interface::NewModule(zone)) {}
   explicit Module(Interface* interface) : interface_(interface) {}
 
  private:
@@ -607,6 +611,7 @@ class ModuleLiteral: public Module {
   DECLARE_NODE_TYPE(ModuleLiteral)
 
   Block* body() const { return body_; }
+  Handle<Context> context() const { return context_; }
 
  protected:
   template<class> friend class AstNodeFactory;
@@ -618,6 +623,7 @@ class ModuleLiteral: public Module {
 
  private:
   Block* body_;
+  Handle<Context> context_;
 };
@@ -647,8 +653,9 @@ class ModulePath: public Module {
  protected:
   template<class> friend class AstNodeFactory;
 
-  ModulePath(Module* module, Handle<String> name)
-      : module_(module),
+  ModulePath(Module* module, Handle<String> name, Zone* zone)
+      : Module(zone),
+        module_(module),
         name_(name) {
   }
@@ -667,7 +674,8 @@ class ModuleUrl: public Module {
  protected:
   template<class> friend class AstNodeFactory;
 
-  explicit ModuleUrl(Handle<String> url) : url_(url) {
+  ModuleUrl(Handle<String> url, Zone* zone)
+      : Module(zone), url_(url) {
   }
 
  private:
@@ -1095,12 +1103,12 @@ class IfStatement: public Statement {
 // stack in the compiler; this should probably be reworked.
 class TargetCollector: public AstNode {
  public:
-  TargetCollector() : targets_(0) { }
+  explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
 
   // Adds a jump target to the collector. The collector stores a pointer not
   // a copy of the target to make binding work, so make sure not to pass in
   // references to something on the stack.
-  void AddTarget(Label* target);
+  void AddTarget(Label* target, Zone* zone);
 
   // Virtual behaviour. TargetCollectors are never part of the AST.
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
@@ -1358,7 +1366,7 @@ class ObjectLiteral: public MaterializedLiteral {
   // Mark all computed expressions that are bound to a key that
   // is shadowed by a later occurrence of the same key. For the
   // marked expressions, no store code is emitted.
-  void CalculateEmitStore();
+  void CalculateEmitStore(Zone* zone);
 
   enum Flags {
     kNoFlags = 0,
@@ -1523,7 +1531,7 @@ class Property: public Expression {
   bool IsFunctionPrototype() const { return is_function_prototype_; }
 
   // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
   bool IsArrayLength() { return is_array_length_; }
@@ -1796,7 +1804,7 @@ class CountOperation: public Expression {
   virtual void MarkAsStatement() { is_prefix_ = true; }
 
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* znoe);
 
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@@ -1949,7 +1957,7 @@ class Assignment: public Expression {
   void mark_block_end() { block_end_ = true; }
 
   // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@@ -2208,8 +2216,8 @@ class RegExpTree: public ZoneObject {
   // Returns the interval of registers used for captures within this
   // expression.
   virtual Interval CaptureRegisters() { return Interval::Empty(); }
-  virtual void AppendToText(RegExpText* text);
-  SmartArrayPointer<const char> ToString();
+  virtual void AppendToText(RegExpText* text, Zone* zone);
+  SmartArrayPointer<const char> ToString(Zone* zone);
 #define MAKE_ASTYPE(Name) \
   virtual RegExp##Name* As##Name(); \
   virtual bool Is##Name();
@@ -2294,7 +2302,7 @@ class CharacterSet BASE_EMBEDDED {
   explicit CharacterSet(ZoneList<CharacterRange>* ranges)
       : ranges_(ranges),
         standard_set_type_(0) {}
-  ZoneList<CharacterRange>* ranges();
+  ZoneList<CharacterRange>* ranges(Zone* zone);
   uc16 standard_set_type() { return standard_set_type_; }
   void set_standard_set_type(uc16 special_set_type) {
     standard_set_type_ = special_set_type;
@@ -2325,11 +2333,11 @@ class RegExpCharacterClass: public RegExpTree {
   virtual bool IsTextElement() { return true; }
   virtual int min_match() { return 1; }
   virtual int max_match() { return 1; }
-  virtual void AppendToText(RegExpText* text);
+  virtual void AppendToText(RegExpText* text, Zone* zone);
   CharacterSet character_set() { return set_; }
   // TODO(lrn): Remove need for complex version if is_standard that
   // recognizes a mangled standard set and just do { return set_.is_special(); }
-  bool is_standard();
+  bool is_standard(Zone* zone);
   // Returns a value representing the standard character set if is_standard()
   // returns true.
   // Currently used values are:
@@ -2342,7 +2350,7 @@ class RegExpCharacterClass: public RegExpTree {
   // . : non-unicode non-newline
   // * : All characters
   uc16 standard_type() { return set_.standard_set_type(); }
-  ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
+  ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
   bool is_negated() { return is_negated_; }
 
  private:
@@ -2362,7 +2370,7 @@ class RegExpAtom: public RegExpTree {
   virtual bool IsTextElement() { return true; }
   virtual int min_match() { return data_.length(); }
   virtual int max_match() { return data_.length(); }
-  virtual void AppendToText(RegExpText* text);
+  virtual void AppendToText(RegExpText* text, Zone* zone);
   Vector<const uc16> data() { return data_; }
   int length() { return data_.length(); }
 
  private:
@@ -2372,7 +2380,7 @@ class RegExpAtom: public RegExpTree {
 
 class RegExpText: public RegExpTree {
  public:
-  RegExpText() : elements_(2), length_(0) {}
+  explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
   virtual void* Accept(RegExpVisitor* visitor, void* data);
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success);
@@ -2381,9 +2389,9 @@ class RegExpText: public RegExpTree {
   virtual bool IsTextElement() { return true; }
   virtual int min_match() { return length_; }
   virtual int max_match() { return length_; }
-  virtual void AppendToText(RegExpText* text);
-  void AddElement(TextElement elm) {
-    elements_.Add(elm);
+  virtual void AppendToText(RegExpText* text, Zone* zone);
+  void AddElement(TextElement elm, Zone* zone) {
+    elements_.Add(elm, zone);
     length_ += elm.length();
   }
   ZoneList<TextElement>* elements() { return &elements_; }
@@ -2691,20 +2699,21 @@ class AstNodeFactory BASE_EMBEDDED {
   }
 
   ModulePath* NewModulePath(Module* origin, Handle<String> name) {
-    ModulePath* module = new(zone_) ModulePath(origin, name);
+    ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
     VISIT_AND_RETURN(ModulePath, module)
   }
 
   ModuleUrl* NewModuleUrl(Handle<String> url) {
-    ModuleUrl* module = new(zone_) ModuleUrl(url);
+    ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
     VISIT_AND_RETURN(ModuleUrl, module)
   }
 
   Block* NewBlock(ZoneStringList* labels,
                   int capacity,
-                  bool is_initializer_block) {
+                  bool is_initializer_block,
+                  Zone* zone) {
     Block* block = new(zone_) Block(
-        isolate_, labels, capacity, is_initializer_block);
+        isolate_, labels, capacity, is_initializer_block, zone);
     VISIT_AND_RETURN(Block, block)
   }

38
deps/v8/src/bootstrapper.cc

@@ -484,8 +484,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
     global_context()->set_initial_object_prototype(*prototype);
     SetPrototype(object_fun, prototype);
 
-    object_function_map->
-        set_instance_descriptors(heap->empty_descriptor_array());
+    object_function_map->set_instance_descriptors(
+        heap->empty_descriptor_array());
   }
 
   // Allocate the empty function as the prototype for function ECMAScript
@@ -516,12 +516,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   function_instance_map_writable_prototype_->set_prototype(*empty_function);
 
   // Allocate the function map first and then patch the prototype later
-  Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
-      function_without_prototype_map);
-  empty_fm->set_instance_descriptors(
-      function_without_prototype_map->instance_descriptors());
-  empty_fm->set_prototype(global_context()->object_function()->prototype());
-  empty_function->set_map(*empty_fm);
+  Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
+  empty_function_map->set_prototype(
+      global_context()->object_function()->prototype());
+  empty_function->set_map(*empty_function_map);
   return empty_function;
 }
@@ -1011,7 +1009,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
     proto_map->set_prototype(global_context()->initial_object_prototype());
     Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
     proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
-                                 heap->empty_string());
+                                 heap->query_colon_symbol());
     proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
                                  heap->false_value());
     proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
@@ -1094,7 +1092,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
     // Check the state of the object.
     ASSERT(result->HasFastProperties());
-    ASSERT(result->HasFastElements());
+    ASSERT(result->HasFastObjectElements());
 #endif
   }
@@ -1187,7 +1185,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
     // Check the state of the object.
     ASSERT(result->HasFastProperties());
-    ASSERT(result->HasFastElements());
+    ASSERT(result->HasFastObjectElements());
 #endif
   }
@@ -1634,10 +1632,11 @@ bool Genesis::InstallNatives() {
   // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
   // transition easy to trap. Moreover, they rarely are smi-only.
   MaybeObject* maybe_map =
-      array_function->initial_map()->CopyDropTransitions();
+      array_function->initial_map()->CopyDropTransitions(
+          DescriptorArray::MAY_BE_SHARED);
   Map* new_map;
-  if (!maybe_map->To<Map>(&new_map)) return false;
-  new_map->set_elements_kind(FAST_ELEMENTS);
+  if (!maybe_map->To(&new_map)) return false;
+  new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
   array_function->set_initial_map(new_map);
 
   // Make "length" magic on instances.
@@ -2094,14 +2093,10 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
     Handle<JSFunction> function
         = Handle<JSFunction>(JSFunction::cast(function_object));
     builtins->set_javascript_builtin(id, *function);
-    Handle<SharedFunctionInfo> shared
-        = Handle<SharedFunctionInfo>(function->shared());
-    if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+    if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) {
       return false;
     }
-    // Set the code object on the function object.
-    function->ReplaceCode(function->shared()->code());
-    builtins->set_javascript_builtin_code(id, shared->code());
+    builtins->set_javascript_builtin_code(id, function->shared()->code());
   }
   return true;
 }
@@ -2159,7 +2154,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
     Handle<DescriptorArray> descs =
        Handle<DescriptorArray>(from->map()->instance_descriptors());
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
-      PropertyDetails details = PropertyDetails(descs->GetDetails(i));
+      PropertyDetails details = descs->GetDetails(i);
       switch (details.type()) {
         case FIELD: {
           HandleScope inner;
@@ -2197,7 +2192,6 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
           break;
         }
         case MAP_TRANSITION:
-        case ELEMENTS_TRANSITION:
         case CONSTANT_TRANSITION:
         case NULL_DESCRIPTOR:
           // Ignore non-properties.

153
deps/v8/src/builtins.cc

@ -200,9 +200,12 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_elements(heap->empty_fixed_array()); array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) { if (!FLAG_smi_only_arrays) {
Context* global_context = isolate->context()->global_context(); Context* global_context = isolate->context()->global_context();
if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS && if (array->GetElementsKind() == GetInitialFastElementsKind() &&
!global_context->object_js_array_map()->IsUndefined()) { !global_context->js_array_maps()->IsUndefined()) {
array->set_map(Map::cast(global_context->object_js_array_map())); FixedArray* map_array =
FixedArray::cast(global_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
} }
} }
} else { } else {
@ -222,6 +225,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj; if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
} }
ElementsKind elements_kind = array->GetElementsKind();
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
MaybeObject* maybe_array =
array->TransitionElementsKind(elements_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
// We do not use SetContent to skip the unnecessary elements type check. // We do not use SetContent to skip the unnecessary elements type check.
array->set_elements(FixedArray::cast(fixed_array)); array->set_elements(FixedArray::cast(fixed_array));
array->set_length(Smi::cast(obj)); array->set_length(Smi::cast(obj));
@@ -250,7 +260,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
 // Allocate an appropriately typed elements array.
 MaybeObject* maybe_elms;
 ElementsKind elements_kind = array->GetElementsKind();
-if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+if (IsFastDoubleElementsKind(elements_kind)) {
 maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
 number_of_elements);
 } else {
@@ -261,13 +271,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
 // Fill in the content
 switch (array->GetElementsKind()) {
-case FAST_SMI_ONLY_ELEMENTS: {
+case FAST_HOLEY_SMI_ELEMENTS:
+case FAST_SMI_ELEMENTS: {
 FixedArray* smi_elms = FixedArray::cast(elms);
 for (int index = 0; index < number_of_elements; index++) {
 smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
 }
 break;
 }
+case FAST_HOLEY_ELEMENTS:
 case FAST_ELEMENTS: {
 AssertNoAllocation no_gc;
 WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -277,6 +289,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
 }
 break;
 }
+case FAST_HOLEY_DOUBLE_ELEMENTS:
 case FAST_DOUBLE_ELEMENTS: {
 FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
 for (int index = 0; index < number_of_elements; index++) {
@@ -412,7 +425,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
 HeapObject* elms = array->elements();
 Map* map = elms->map();
 if (map == heap->fixed_array_map()) {
-if (args == NULL || array->HasFastElements()) return elms;
+if (args == NULL || array->HasFastObjectElements()) return elms;
 if (array->HasFastDoubleElements()) {
 ASSERT(elms == heap->empty_fixed_array());
 MaybeObject* maybe_transition =
@@ -422,7 +435,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
 }
 } else if (map == heap->fixed_cow_array_map()) {
 MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
-if (args == NULL || array->HasFastElements() ||
+if (args == NULL || array->HasFastObjectElements() ||
 maybe_writable_result->IsFailure()) {
 return maybe_writable_result;
 }
@@ -516,8 +529,8 @@ BUILTIN(ArrayPush) {
 }
 FixedArray* new_elms = FixedArray::cast(obj);
-CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-new_elms, FAST_ELEMENTS, 0, len);
+ElementsKind kind = array->GetElementsKind();
+CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
 FillWithHoles(heap, new_elms, new_length, capacity);
 elms = new_elms;
@@ -588,7 +601,7 @@ BUILTIN(ArrayShift) {
 }
 FixedArray* elms = FixedArray::cast(elms_obj);
 JSArray* array = JSArray::cast(receiver);
-ASSERT(array->HasFastTypeElements());
+ASSERT(array->HasFastSmiOrObjectElements());
 int len = Smi::cast(array->length())->value();
 if (len == 0) return heap->undefined_value();
@@ -630,7 +643,7 @@ BUILTIN(ArrayUnshift) {
 }
 FixedArray* elms = FixedArray::cast(elms_obj);
 JSArray* array = JSArray::cast(receiver);
-ASSERT(array->HasFastTypeElements());
+ASSERT(array->HasFastSmiOrObjectElements());
 int len = Smi::cast(array->length())->value();
 int to_add = args.length() - 1;
@@ -652,8 +665,8 @@ BUILTIN(ArrayUnshift) {
 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
 }
 FixedArray* new_elms = FixedArray::cast(obj);
-CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-new_elms, FAST_ELEMENTS, to_add, len);
+ElementsKind kind = array->GetElementsKind();
+CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
 FillWithHoles(heap, new_elms, new_length, capacity);
 elms = new_elms;
 array->set_elements(elms);
@@ -682,7 +695,7 @@ BUILTIN(ArraySlice) {
 int len = -1;
 if (receiver->IsJSArray()) {
 JSArray* array = JSArray::cast(receiver);
-if (!array->HasFastTypeElements() ||
+if (!array->HasFastSmiOrObjectElements() ||
 !IsJSArrayFastElementMovingAllowed(heap, array)) {
 return CallJsBuiltin(isolate, "ArraySlice", args);
 }
@@ -698,7 +711,7 @@ BUILTIN(ArraySlice) {
 bool is_arguments_object_with_fast_elements =
 receiver->IsJSObject()
 && JSObject::cast(receiver)->map() == arguments_map
-&& JSObject::cast(receiver)->HasFastTypeElements();
+&& JSObject::cast(receiver)->HasFastSmiOrObjectElements();
 if (!is_arguments_object_with_fast_elements) {
 return CallJsBuiltin(isolate, "ArraySlice", args);
 }
@@ -763,9 +776,9 @@ BUILTIN(ArraySlice) {
 JSArray* result_array;
 if (!maybe_array->To(&result_array)) return maybe_array;
-CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
+CopyObjectToObjectElements(elms, elements_kind, k,
 FixedArray::cast(result_array->elements()),
-FAST_ELEMENTS, 0, result_len);
+elements_kind, 0, result_len);
 return result_array;
 }
@@ -786,7 +799,7 @@ BUILTIN(ArraySplice) {
 }
 FixedArray* elms = FixedArray::cast(elms_obj);
 JSArray* array = JSArray::cast(receiver);
-ASSERT(array->HasFastTypeElements());
+ASSERT(array->HasFastSmiOrObjectElements());
 int len = Smi::cast(array->length())->value();
@@ -837,9 +850,9 @@ BUILTIN(ArraySplice) {
 {
 // Fill newly created array.
-CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
+CopyObjectToObjectElements(elms, elements_kind, actual_start,
 FixedArray::cast(result_array->elements()),
-FAST_ELEMENTS, 0, actual_delete_count);
+elements_kind, 0, actual_delete_count);
 }
 int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@@ -888,12 +901,13 @@ BUILTIN(ArraySplice) {
 {
 // Copy the part before actual_start as is.
-CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-new_elms, FAST_ELEMENTS, 0, actual_start);
+ElementsKind kind = array->GetElementsKind();
+CopyObjectToObjectElements(elms, kind, 0,
+new_elms, kind, 0, actual_start);
 const int to_copy = len - actual_delete_count - actual_start;
-CopyObjectToObjectElements(elms, FAST_ELEMENTS,
+CopyObjectToObjectElements(elms, kind,
 actual_start + actual_delete_count,
-new_elms, FAST_ELEMENTS,
+new_elms, kind,
 actual_start + item_count, to_copy);
 }
@@ -940,11 +954,12 @@ BUILTIN(ArrayConcat) {
 // and calculating total length.
 int n_arguments = args.length();
 int result_len = 0;
-ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
+ElementsKind elements_kind = GetInitialFastElementsKind();
 for (int i = 0; i < n_arguments; i++) {
 Object* arg = args[i];
-if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
-|| JSArray::cast(arg)->GetPrototype() != array_proto) {
+if (!arg->IsJSArray() ||
+!JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
+JSArray::cast(arg)->GetPrototype() != array_proto) {
 return CallJsBuiltin(isolate, "ArrayConcat", args);
 }
@@ -961,8 +976,18 @@ BUILTIN(ArrayConcat) {
 return CallJsBuiltin(isolate, "ArrayConcat", args);
 }
-if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
-elements_kind = FAST_ELEMENTS;
+if (!JSArray::cast(arg)->HasFastSmiElements()) {
+if (IsFastSmiElementsKind(elements_kind)) {
+if (IsFastHoleyElementsKind(elements_kind)) {
+elements_kind = FAST_HOLEY_ELEMENTS;
+} else {
+elements_kind = FAST_ELEMENTS;
+}
+}
+}
+if (JSArray::cast(arg)->HasFastHoleyElements()) {
+elements_kind = GetHoleyElementsKind(elements_kind);
 }
 }
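The loop above computes a join over all arguments: a single non-smi element anywhere forces object elements, and a single holey argument forces a holey result. A minimal sketch of that widening rule, with two booleans standing in for V8's ElementsKind values (the names here are illustrative, not V8 API):

  #include <cassert>

  struct Kind {
    bool objects;  // false while only Smis have been observed
    bool holey;    // true once any argument had holes
  };

  // Join the accumulated result kind with one more argument's kind.
  static Kind Widen(Kind acc, Kind arg) {
    return Kind{acc.objects || arg.objects, acc.holey || arg.holey};
  }

  int main() {
    Kind acc{false, false};                // like GetInitialFastElementsKind()
    acc = Widen(acc, Kind{false, false});  // [1, 2]    -> still packed smi
    acc = Widen(acc, Kind{true, false});   // ['a']     -> packed objects
    acc = Widen(acc, Kind{false, true});   // [1, , 3]  -> holey
    assert(acc.objects && acc.holey);      // i.e. FAST_HOLEY_ELEMENTS
    return 0;
  }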
@@ -982,8 +1007,8 @@ BUILTIN(ArrayConcat) {
 JSArray* array = JSArray::cast(args[i]);
 int len = Smi::cast(array->length())->value();
 FixedArray* elms = FixedArray::cast(array->elements());
-CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-result_elms, FAST_ELEMENTS,
+CopyObjectToObjectElements(elms, elements_kind, 0,
+result_elms, elements_kind,
 start_pos, len);
 start_pos += len;
 }
@@ -1103,7 +1128,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
 CustomArguments custom(isolate);
 v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
-data_obj, *function, raw_holder);
+isolate, data_obj, *function, raw_holder);
 v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
 custom.end(),
@@ -1143,68 +1168,6 @@ BUILTIN(HandleApiCallConstruct) {
 }
-#ifdef DEBUG
-static void VerifyTypeCheck(Handle<JSObject> object,
-Handle<JSFunction> function) {
-ASSERT(function->shared()->IsApiFunction());
-FunctionTemplateInfo* info = function->shared()->get_api_func_data();
-if (info->signature()->IsUndefined()) return;
-SignatureInfo* signature = SignatureInfo::cast(info->signature());
-Object* receiver_type = signature->receiver();
-if (receiver_type->IsUndefined()) return;
-FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
-ASSERT(object->IsInstanceOf(type));
-}
-#endif
-BUILTIN(FastHandleApiCall) {
-ASSERT(!CalledAsConstructor(isolate));
-Heap* heap = isolate->heap();
-const bool is_construct = false;
-// We expect four more arguments: callback, function, call data, and holder.
-const int args_length = args.length() - 4;
-ASSERT(args_length >= 0);
-Object* callback_obj = args[args_length];
-v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
-&args[args_length + 1],
-&args[0] - 1,
-args_length - 1,
-is_construct);
-#ifdef DEBUG
-VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
-Utils::OpenHandle(*new_args.Callee()));
-#endif
-HandleScope scope(isolate);
-Object* result;
-v8::Handle<v8::Value> value;
-{
-// Leaving JavaScript.
-VMState state(isolate, EXTERNAL);
-ExternalCallbackScope call_scope(isolate,
-v8::ToCData<Address>(callback_obj));
-v8::InvocationCallback callback =
-v8::ToCData<v8::InvocationCallback>(callback_obj);
-value = callback(new_args);
-}
-if (value.IsEmpty()) {
-result = heap->undefined_value();
-} else {
-result = *reinterpret_cast<Object**>(*value);
-}
-RETURN_IF_SCHEDULED_EXCEPTION(isolate);
-return result;
-}
 // Helper function to handle calls to non-function objects created through the
 // API. The object can be called as either a constructor (using new) or just as
 // a function (without new).
@@ -1243,7 +1206,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
 CustomArguments custom(isolate);
 v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
-call_data->data(), constructor, obj);
+isolate, call_data->data(), constructor, obj);
 v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
 custom.end(),
 &args[0] - 1,

1
deps/v8/src/builtins.h

@@ -56,7 +56,6 @@ enum BuiltinExtraArguments {
 V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
 \
 V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
-V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
 V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
 V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
 V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \

35
deps/v8/src/bytecodes-irregexp.h

@@ -72,24 +72,23 @@ V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
 V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
 V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
 V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
-V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 addr32 */ \
-V(CHECK_LT, 32, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_GT, 33, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_NOT_BACK_REF, 34, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_BACK_REF_NO_CASE, 35, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_REGS_EQUAL, 36, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
-V(LOOKUP_MAP1, 37, 12) /* bc8 pad8 start16 bit_map_addr32 addr32 */ \
-V(LOOKUP_MAP2, 38, 96) /* bc8 pad8 start16 half_nibble_map_addr32* */ \
-V(LOOKUP_MAP8, 39, 96) /* bc8 pad8 start16 byte_map addr32* */ \
-V(LOOKUP_HI_MAP8, 40, 96) /* bc8 start24 byte_map_addr32 addr32* */ \
-V(CHECK_REGISTER_LT, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_GE, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_NOT_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_GREEDY, 46, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */ \
-V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24 */
+V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \
+V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
+V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
+V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
+V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
+V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
+V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
+V(CHECK_REGISTER_LT, 40, 12) /* bc8 reg_idx24 value32 addr32 */ \
+V(CHECK_REGISTER_GE, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
+V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_AT_START, 43, 8) /* bc8 pad24 addr32 */ \
+V(CHECK_NOT_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
+V(CHECK_GREEDY, 45, 8) /* bc8 pad24 addr32 */ \
+V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32 */ \
+V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24 */
 #define DECLARE_BYTECODES(name, code, length) \
 static const int BC_##name = code;
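The declared lengths can be sanity-checked against the layout comments. For the new CHECK_BIT_IN_TABLE entry, `bc8 pad24 addr32 bits128` must total the declared 24 bytes; the 128-bit table provides one membership bit per possible 7-bit character value:

  #include <cassert>

  int main() {
    const int bc8 = 1;       // one-byte bytecode tag
    const int pad24 = 3;     // padding to a 4-byte boundary
    const int addr32 = 4;    // 32-bit branch target
    const int bits128 = 16;  // 128-bit membership bit table
    assert(bc8 + pad24 + addr32 + bits128 == 24);  // matches V(..., 34, 24)
    return 0;
  }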

51
deps/v8/src/code-stubs.cc

@@ -73,21 +73,12 @@ SmartArrayPointer<const char> CodeStub::GetName() {
 void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
-code->set_major_key(MajorKey());
 Isolate* isolate = masm->isolate();
 SmartArrayPointer<const char> name = GetName();
 PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
 GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
 Counters* counters = isolate->counters();
 counters->total_stubs_code_size()->Increment(code->instruction_size());
-#ifdef ENABLE_DISASSEMBLER
-if (FLAG_print_code_stubs) {
-code->Disassemble(*name);
-PrintF("\n");
-}
-#endif
 }
@@ -125,8 +116,16 @@ Handle<Code> CodeStub::GetCode() {
 GetICState());
 Handle<Code> new_object = factory->NewCode(
 desc, flags, masm.CodeObject(), NeedsImmovableCode());
-RecordCodeGeneration(*new_object, &masm);
+new_object->set_major_key(MajorKey());
 FinishCode(new_object);
+RecordCodeGeneration(*new_object, &masm);
+#ifdef ENABLE_DISASSEMBLER
+if (FLAG_print_code_stubs) {
+new_object->Disassemble(*GetName());
+PrintF("\n");
+}
+#endif
 if (UseSpecialCache()) {
 AddToSpecialCache(new_object);
@@ -263,10 +262,13 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
 void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
 switch (elements_kind_) {
 case FAST_ELEMENTS:
-case FAST_SMI_ONLY_ELEMENTS:
+case FAST_HOLEY_ELEMENTS:
+case FAST_SMI_ELEMENTS:
+case FAST_HOLEY_SMI_ELEMENTS:
 KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
 break;
 case FAST_DOUBLE_ELEMENTS:
+case FAST_HOLEY_DOUBLE_ELEMENTS:
 KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
 break;
 case EXTERNAL_BYTE_ELEMENTS:
@@ -293,7 +295,9 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
 void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
 switch (elements_kind_) {
 case FAST_ELEMENTS:
-case FAST_SMI_ONLY_ELEMENTS: {
+case FAST_HOLEY_ELEMENTS:
+case FAST_SMI_ELEMENTS:
+case FAST_HOLEY_SMI_ELEMENTS: {
 KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
 is_js_array_,
 elements_kind_,
@@ -301,6 +305,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
 }
 break;
 case FAST_DOUBLE_ELEMENTS:
+case FAST_HOLEY_DOUBLE_ELEMENTS:
 KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
 is_js_array_,
 grow_mode_);
@@ -431,24 +436,32 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
 void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
 Label fail;
+ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
 if (!FLAG_trace_elements_transitions) {
-if (to_ == FAST_ELEMENTS) {
-if (from_ == FAST_SMI_ONLY_ELEMENTS) {
-ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
-} else if (from_ == FAST_DOUBLE_ELEMENTS) {
+if (IsFastSmiOrObjectElementsKind(to_)) {
+if (IsFastSmiOrObjectElementsKind(from_)) {
+ElementsTransitionGenerator::
+GenerateMapChangeElementsTransition(masm);
+} else if (IsFastDoubleElementsKind(from_)) {
+ASSERT(!IsFastSmiElementsKind(to_));
 ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
 } else {
 UNREACHABLE();
 }
 KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
 is_jsarray_,
-FAST_ELEMENTS,
+to_,
 grow_mode_);
-} else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
-ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+} else if (IsFastSmiElementsKind(from_) &&
+IsFastDoubleElementsKind(to_)) {
+ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
 KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
 is_jsarray_,
 grow_mode_);
+} else if (IsFastDoubleElementsKind(from_)) {
+ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
+ElementsTransitionGenerator::
+GenerateMapChangeElementsTransition(masm);
 } else {
 UNREACHABLE();
 }

1
deps/v8/src/code-stubs.h

@@ -498,6 +498,7 @@ class ICCompareStub: public CodeStub {
 virtual void FinishCode(Handle<Code> code) {
 code->set_compare_state(state_);
+code->set_compare_operation(op_);
 }
 virtual CodeStub::Major MajorKey() { return CompareIC; }

6
deps/v8/src/codegen.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
 class ElementsTransitionGenerator : public AllStatic {
 public:
-static void GenerateSmiOnlyToObject(MacroAssembler* masm);
-static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
+static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
+static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
 static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
 private:

17
deps/v8/src/compiler-intrinsics.h

@@ -40,6 +40,9 @@ class CompilerIntrinsics {
 // Returns number of zero bits following most significant 1 bit.
 // Undefined for zero value.
 INLINE(static int CountLeadingZeros(uint32_t value));
+// Returns the number of bits set.
+INLINE(static int CountSetBits(uint32_t value));
 };
 #ifdef __GNUC__
@@ -51,6 +54,10 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
 return __builtin_clz(value);
 }
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+return __builtin_popcount(value);
+}
 #elif defined(_MSC_VER)
 #pragma intrinsic(_BitScanForward)
@@ -68,6 +75,16 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
 return 31 - static_cast<int>(result);
 }
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+// Manually count set bits.
+value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+return value;
+}
 #else
 #error Unsupported compiler
 #endif
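The MSVC fallback above is the classic branch-free parallel bit count: each step sums adjacent fields of twice the width. A standalone check that it agrees with GCC's __builtin_popcount (compile with GCC or Clang; sample values are arbitrary):

  #include <cassert>
  #include <cstdint>

  static int CountSetBits(uint32_t value) {
    // Pairwise sums of 1-bit fields, then 2-, 4-, 8-, and 16-bit fields.
    value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
    value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
    value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
    value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
    value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
    return static_cast<int>(value);
  }

  int main() {
    const uint32_t samples[] = {0u, 1u, 0x80000000u, 0xffffffffu, 0x12345678u};
    for (uint32_t v : samples) {
      assert(CountSetBits(v) == __builtin_popcount(v));
    }
    return 0;
  }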

21
deps/v8/src/compiler.cc

@@ -294,8 +294,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
 }
 Handle<Context> global_context(info->closure()->context()->global_context());
-TypeFeedbackOracle oracle(code, global_context, info->isolate());
-HGraphBuilder builder(info, &oracle);
+TypeFeedbackOracle oracle(code, global_context, info->isolate(),
+info->isolate()->zone());
+HGraphBuilder builder(info, &oracle, info->isolate()->zone());
 HPhase phase(HPhase::kTotal);
 HGraph* graph = builder.CreateGraph();
 if (info->isolate()->has_pending_exception()) {
@@ -304,7 +305,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
 }
 if (graph != NULL) {
-Handle<Code> optimized_code = graph->Compile(info);
+Handle<Code> optimized_code = graph->Compile(info, graph->zone());
 if (!optimized_code.is_null()) {
 info->SetCode(optimized_code);
 FinishOptimization(info->closure(), start);
@@ -346,7 +347,8 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
 // the compilation info is set if compilation succeeded.
 bool succeeded = MakeCode(info);
 if (!info->shared_info().is_null()) {
-Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
+info->isolate()->zone());
 info->shared_info()->set_scope_info(*scope_info);
 }
 return succeeded;
@@ -420,7 +422,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
 lit->name(),
 lit->materialized_literal_count(),
 info->code(),
-ScopeInfo::Create(info->scope()));
+ScopeInfo::Create(info->scope(), info->isolate()->zone()));
 ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
 Compiler::SetFunctionInfo(result, lit, true, script);
@@ -462,7 +464,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
 script, Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif
-live_edit_tracker.RecordFunctionInfo(result, lit);
+live_edit_tracker.RecordFunctionInfo(result, lit, isolate->zone());
 return result;
 }
@@ -651,7 +653,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
 // info initialization is important since set_scope_info might
 // trigger a GC, causing the ASSERT below to be invalid if the code
 // was flushed. By setting the code object last we avoid this.
-Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+Handle<ScopeInfo> scope_info =
+ScopeInfo::Create(info->scope(), info->isolate()->zone());
 shared->set_scope_info(*scope_info);
 shared->set_code(*code);
 if (!function.is_null()) {
@@ -728,7 +731,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
 } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
 (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
 ASSERT(!info.code().is_null());
-scope_info = ScopeInfo::Create(info.scope());
+scope_info = ScopeInfo::Create(info.scope(), info.isolate()->zone());
 } else {
 return Handle<SharedFunctionInfo>::null();
 }
@@ -747,7 +750,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
 // the resulting function.
 SetExpectedNofPropertiesFromEstimate(result,
 literal->expected_property_count());
-live_edit_tracker.RecordFunctionInfo(result, literal);
+live_edit_tracker.RecordFunctionInfo(result, literal, info.isolate()->zone());
 return result;
 }

22
deps/v8/src/contexts.h

@@ -106,9 +106,7 @@ enum BindingFlags {
 V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
 V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
 V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
-V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
-V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
-V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
+V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
 V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
 V(JSON_OBJECT_INDEX, JSObject, json_object) \
 V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -248,9 +246,7 @@ class Context: public FixedArray {
 OBJECT_FUNCTION_INDEX,
 INTERNAL_ARRAY_FUNCTION_INDEX,
 ARRAY_FUNCTION_INDEX,
-SMI_JS_ARRAY_MAP_INDEX,
-DOUBLE_JS_ARRAY_MAP_INDEX,
-OBJECT_JS_ARRAY_MAP_INDEX,
+JS_ARRAY_MAPS_INDEX,
 DATE_FUNCTION_INDEX,
 JSON_OBJECT_INDEX,
 REGEXP_FUNCTION_INDEX,
@@ -373,18 +369,6 @@ class Context: public FixedArray {
 Object* OptimizedFunctionsListHead();
 void ClearOptimizedFunctions();
-static int GetContextMapIndexFromElementsKind(
-ElementsKind elements_kind) {
-if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
-} else if (elements_kind == FAST_ELEMENTS) {
-return Context::OBJECT_JS_ARRAY_MAP_INDEX;
-} else {
-ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
-return Context::SMI_JS_ARRAY_MAP_INDEX;
-}
-}
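With the removed helper gone, the single JS_ARRAY_MAPS_INDEX slot takes over the job of the three per-kind slots. A hedged sketch of the resulting lookup shape — the layout and names below are assumptions for illustration, not code from this commit:

  struct Map;  // stand-in for v8::internal::Map

  enum ElementsKind { FAST_SMI = 0, FAST_OBJECT = 1, FAST_DOUBLE = 2,
                      kNumKinds = 3 };

  struct GlobalContext {
    // One slot holding all initial JSArray maps side by side, replacing the
    // smi/double/object_js_array_map trio.
    Map* js_array_maps[kNumKinds];
    Map* initial_js_array_map(ElementsKind kind) const {
      return js_array_maps[kind];  // indexed load instead of a switch
    }
  };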
 #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
 void set_##name(type* value) { \
 ASSERT(IsGlobalContext()); \
@@ -397,7 +381,7 @@ class Context: public FixedArray {
 GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
 #undef GLOBAL_CONTEXT_FIELD_ACCESSORS
-// Lookup the the slot called name, starting with the current context.
+// Lookup the slot called name, starting with the current context.
 // There are three possibilities:
 //
 // 1) result->IsContext():

4
deps/v8/src/conversions-inl.h

@@ -228,9 +228,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
 }
 ASSERT(number != 0);
-// The double could be constructed faster from number (mantissa), exponent
-// and sign. Assuming it's a rare case more simple code is used.
-return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
+return ldexp(static_cast<double>(negative ? -number : number), exponent);
 }
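The rewrite is both shorter and numerically safer: scaling by a power of two is a single exact operation in binary floating point, so ldexp avoids relying on pow(2.0, exponent) being exact on every libm. A small illustration (the values are chosen arbitrarily):

  #include <cassert>
  #include <cmath>

  int main() {
    double mantissa = 255.0;  // e.g. digits parsed from a hex literal
    int exponent = 16;        // accumulated binary exponent
    // 255 * 2^16 is exactly representable; ldexp computes it exactly.
    assert(std::ldexp(mantissa, exponent) == 16711680.0);
    return 0;
  }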

245
deps/v8/src/d8.cc

@@ -26,7 +26,8 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows.
+// Defined when linking against shared lib on Windows.
+#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
 #define V8_SHARED
 #endif
@@ -315,151 +316,143 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
 }
-const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
-const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
+const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
-ExternalArrayType type,
-size_t element_size) {
-TryCatch try_catch;
-bool is_array_buffer_construct = element_size == 0;
-if (is_array_buffer_construct) {
-type = v8::kExternalByteArray;
-element_size = 1;
-}
-ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
-element_size == 8);
+Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
+static const int32_t kMaxSize = 0x7fffffff;
+// Make sure the total size fits into a (signed) int.
+if (length < 0 || length > kMaxSize) {
+return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
+}
+uint8_t* data = new uint8_t[length];
+if (data == NULL) {
+return ThrowException(String::New("Memory allocation failed."));
+}
+memset(data, 0, length);
+Handle<Object> buffer = Object::New();
+buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
+Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
+persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
+persistent_array.MarkIndependent();
+V8::AdjustAmountOfExternalAllocatedMemory(length);
+buffer->SetIndexedPropertiesToExternalArrayData(
+data, v8::kExternalByteArray, length);
+buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
+return buffer;
+}
+Handle<Value> Shell::CreateExternalArrayBuffer(const Arguments& args) {
 if (args.Length() == 0) {
 return ThrowException(
-String::New("Array constructor must have at least one "
-"parameter."));
+String::New("ArrayBuffer constructor must have one parameter."));
 }
-bool first_arg_is_array_buffer =
-args[0]->IsObject() &&
-args[0]->ToObject()->Get(
-String::New(kArrayBufferMarkerPropName))->IsTrue();
+TryCatch try_catch;
+int32_t length = convertToUint(args[0], &try_catch);
+if (try_catch.HasCaught()) return try_catch.Exception();
+return CreateExternalArrayBuffer(length);
+}
+Handle<Value> Shell::CreateExternalArray(const Arguments& args,
+ExternalArrayType type,
+int32_t element_size) {
+TryCatch try_catch;
+ASSERT(element_size == 1 || element_size == 2 ||
+element_size == 4 || element_size == 8);
 // Currently, only the following constructors are supported:
 // TypedArray(unsigned long length)
 // TypedArray(ArrayBuffer buffer,
 // optional unsigned long byteOffset,
 // optional unsigned long length)
-if (args.Length() > 3) {
-return ThrowException(
-String::New("Array constructor from ArrayBuffer must "
-"have 1-3 parameters."));
-}
-Local<Value> length_value = (args.Length() < 3)
-? (first_arg_is_array_buffer
-? args[0]->ToObject()->Get(String::New("length"))
-: args[0])
-: args[2];
-size_t length = convertToUint(length_value, &try_catch);
-if (try_catch.HasCaught()) return try_catch.Exception();
-void* data = NULL;
-size_t offset = 0;
-Handle<Object> array = Object::New();
-if (first_arg_is_array_buffer) {
-Handle<Object> derived_from = args[0]->ToObject();
-data = derived_from->GetIndexedPropertiesExternalArrayData();
-size_t array_buffer_length = convertToUint(
-derived_from->Get(String::New("length")),
-&try_catch);
-if (try_catch.HasCaught()) return try_catch.Exception();
-if (data == NULL && array_buffer_length != 0) {
-return ThrowException(
-String::New("ArrayBuffer doesn't have data"));
-}
-if (args.Length() > 1) {
-offset = convertToUint(args[1], &try_catch);
-if (try_catch.HasCaught()) return try_catch.Exception();
-// The given byteOffset must be a multiple of the element size of the
-// specific type, otherwise an exception is raised.
-if (offset % element_size != 0) {
-return ThrowException(
-String::New("offset must be multiple of element_size"));
-}
-}
-if (offset > array_buffer_length) {
-return ThrowException(
-String::New("byteOffset must be less than ArrayBuffer length."));
-}
-if (args.Length() == 2) {
-// If length is not explicitly specified, the length of the ArrayBuffer
-// minus the byteOffset must be a multiple of the element size of the
-// specific type, or an exception is raised.
-length = array_buffer_length - offset;
-}
-if (args.Length() != 3) {
-if (length % element_size != 0) {
-return ThrowException(
-String::New("ArrayBuffer length minus the byteOffset must be a "
-"multiple of the element size"));
-}
-length /= element_size;
-}
-// If a given byteOffset and length references an area beyond the end of
-// the ArrayBuffer an exception is raised.
-if (offset + (length * element_size) > array_buffer_length) {
-return ThrowException(
-String::New("length references an area beyond the end of the "
-"ArrayBuffer"));
-}
-// Hold a reference to the ArrayBuffer so its buffer doesn't get collected.
-array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly);
-}
-if (is_array_buffer_construct) {
-array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
-}
-Persistent<Object> persistent_array = Persistent<Object>::New(array);
-persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
-persistent_array.MarkIndependent();
-if (data == NULL && length != 0) {
-data = calloc(length, element_size);
-if (data == NULL) {
-return ThrowException(String::New("Memory allocation failed."));
-}
-}
+Handle<Object> buffer;
+int32_t length;
+int32_t byteLength;
+int32_t byteOffset;
+if (args.Length() == 0) {
+return ThrowException(
+String::New("Array constructor must have at least one parameter."));
+}
+if (args[0]->IsObject() &&
+!args[0]->ToObject()->GetHiddenValue(
+String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
+buffer = args[0]->ToObject();
+int32_t bufferLength =
+convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
+if (try_catch.HasCaught()) return try_catch.Exception();
+if (args.Length() < 2 || args[1]->IsUndefined()) {
+byteOffset = 0;
+} else {
+byteOffset = convertToUint(args[1], &try_catch);
+if (try_catch.HasCaught()) return try_catch.Exception();
+if (byteOffset > bufferLength) {
+return ThrowException(String::New("byteOffset out of bounds"));
+}
+if (byteOffset % element_size != 0) {
+return ThrowException(
+String::New("byteOffset must be multiple of element_size"));
+}
+}
+if (args.Length() < 3 || args[2]->IsUndefined()) {
+byteLength = bufferLength - byteOffset;
+length = byteLength / element_size;
+if (byteLength % element_size != 0) {
+return ThrowException(
+String::New("buffer size must be multiple of element_size"));
+}
+} else {
+length = convertToUint(args[2], &try_catch);
+if (try_catch.HasCaught()) return try_catch.Exception();
+byteLength = length * element_size;
+if (byteOffset + byteLength > bufferLength) {
+return ThrowException(String::New("length out of bounds"));
+}
+}
+} else {
+length = convertToUint(args[0], &try_catch);
+byteLength = length * element_size;
+byteOffset = 0;
+Handle<Value> result = CreateExternalArrayBuffer(byteLength);
+if (!result->IsObject()) return result;
+buffer = result->ToObject();
+}
+void* data = buffer->GetIndexedPropertiesExternalArrayData();
+ASSERT(data != NULL);
+Handle<Object> array = Object::New();
 array->SetIndexedPropertiesToExternalArrayData(
-reinterpret_cast<uint8_t*>(data) + offset, type,
-static_cast<int>(length));
-array->Set(String::New("length"),
-Int32::New(static_cast<int32_t>(length)), ReadOnly);
-array->Set(String::New("BYTES_PER_ELEMENT"),
-Int32::New(static_cast<int32_t>(element_size)));
+static_cast<uint8_t*>(data) + byteOffset, type, length);
+array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
+array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
+array->Set(String::New("length"), Int32::New(length), ReadOnly);
+array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
+array->Set(String::New("buffer"), buffer, ReadOnly);
 return array;
 }
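The constructor path above enforces the TypedArray(buffer, byteOffset, length) rules in order: the offset must lie inside the buffer and be element-aligned, a defaulted length must divide the remaining bytes evenly, and an explicit length must fit. A sketch of the same checks with plain ints standing in for the V8 handles (the function and parameter names are illustrative):

  #include <cstdio>

  // Returns true and fills *length when the requested view fits the buffer.
  static bool ValidateView(int bufferLength, int byteOffset, int elementSize,
                           bool lengthGiven, int givenLength, int* length) {
    if (byteOffset > bufferLength) return false;       // offset in range
    if (byteOffset % elementSize != 0) return false;   // element-aligned
    if (!lengthGiven) {
      int byteLength = bufferLength - byteOffset;      // take the rest
      if (byteLength % elementSize != 0) return false; // must divide evenly
      *length = byteLength / elementSize;
      return true;
    }
    int byteLength = givenLength * elementSize;
    if (byteOffset + byteLength > bufferLength) return false;  // fits buffer
    *length = givenLength;
    return true;
  }

  int main() {
    int len = 0;
    // Int32 view over a 16-byte buffer at offset 4, length defaulted: 3 elems.
    if (ValidateView(16, 4, 4, false, 0, &len)) std::printf("length=%d\n", len);
    return 0;
  }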
 void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
 HandleScope scope;
-Handle<String> prop_name = String::New(kArrayBufferReferencePropName);
-Handle<Object> converted_object = object->ToObject();
-Local<Value> prop_value = converted_object->Get(prop_name);
-if (data != NULL && !prop_value->IsObject()) {
-free(data);
-}
+int32_t length =
+object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
+V8::AdjustAmountOfExternalAllocatedMemory(-length);
+delete[] static_cast<uint8_t*>(data);
 object.Dispose();
 }
 Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
-return CreateExternalArray(args, v8::kExternalByteArray, 0);
+return CreateExternalArrayBuffer(args);
 }
@@ -806,8 +799,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
 global_template->Set(String::New("print"), FunctionTemplate::New(Print));
 global_template->Set(String::New("write"), FunctionTemplate::New(Write));
 global_template->Set(String::New("read"), FunctionTemplate::New(Read));
-global_template->Set(String::New("readbinary"),
-FunctionTemplate::New(ReadBinary));
+global_template->Set(String::New("readbuffer"),
+FunctionTemplate::New(ReadBuffer));
 global_template->Set(String::New("readline"),
 FunctionTemplate::New(ReadLine));
 global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@@ -977,8 +970,8 @@ void Shell::OnExit() {
 printf("+--------------------------------------------+-------------+\n");
 delete [] counters;
 }
-if (counters_file_ != NULL) delete counters_file_;
+delete counters_file_;
+delete counter_map_;
 }
 #endif // V8_SHARED
@@ -1026,20 +1019,30 @@ static char* ReadChars(const char* name, int* size_out) {
 }
-Handle<Value> Shell::ReadBinary(const Arguments& args) {
+Handle<Value> Shell::ReadBuffer(const Arguments& args) {
+ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
 String::Utf8Value filename(args[0]);
-int size;
+int length;
 if (*filename == NULL) {
 return ThrowException(String::New("Error loading file"));
 }
-char* chars = ReadChars(*filename, &size);
-if (chars == NULL) {
+uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
+if (data == NULL) {
 return ThrowException(String::New("Error reading file"));
 }
-// We skip checking the string for UTF8 characters and use it raw as
-// backing store for the external string with 8-bit characters.
-BinaryResource* resource = new BinaryResource(chars, size);
-return String::NewExternal(resource);
+Handle<Object> buffer = Object::New();
+buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
+Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
+persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
+persistent_buffer.MarkIndependent();
+V8::AdjustAmountOfExternalAllocatedMemory(length);
+buffer->SetIndexedPropertiesToExternalArrayData(
+data, kExternalUnsignedByteArray, length);
+buffer->Set(String::New("byteLength"),
+Int32::New(static_cast<int32_t>(length)), ReadOnly);
+return buffer;
 }
@@ -1203,7 +1206,7 @@ void SourceGroup::Execute() {
 Handle<String> SourceGroup::ReadFile(const char* name) {
 int size;
-const char* chars = ReadChars(name, &size);
+char* chars = ReadChars(name, &size);
 if (chars == NULL) return Handle<String>();
 Handle<String> result = String::New(chars, size);
 delete[] chars;

6
deps/v8/src/d8.h

@@ -307,7 +307,7 @@ class Shell : public i::AllStatic {
 static Handle<Value> EnableProfiler(const Arguments& args);
 static Handle<Value> DisableProfiler(const Arguments& args);
 static Handle<Value> Read(const Arguments& args);
-static Handle<Value> ReadBinary(const Arguments& args);
+static Handle<Value> ReadBuffer(const Arguments& args);
 static Handle<String> ReadFromStdin();
 static Handle<Value> ReadLine(const Arguments& args) {
 return ReadFromStdin();
@@ -383,9 +383,11 @@ class Shell : public i::AllStatic {
 static void RunShell();
 static bool SetOptions(int argc, char* argv[]);
 static Handle<ObjectTemplate> CreateGlobalTemplate();
+static Handle<Value> CreateExternalArrayBuffer(int32_t size);
+static Handle<Value> CreateExternalArrayBuffer(const Arguments& args);
 static Handle<Value> CreateExternalArray(const Arguments& args,
 ExternalArrayType type,
-size_t element_size);
+int32_t element_size);
 static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
 };

2
deps/v8/src/d8.js

@@ -2174,7 +2174,7 @@ function DebugResponseDetails(response) {
 }
 var current_line = from_line + num;
-spacer = maxdigits - (1 + Math.floor(log10(current_line)));
+var spacer = maxdigits - (1 + Math.floor(log10(current_line)));
 if (current_line == Debug.State.currentSourceLine + 1) {
 for (var i = 0; i < maxdigits; i++) {
 result += '>';

3
deps/v8/src/dateparser-inl.h

@@ -148,6 +148,9 @@ bool DateParser::Parse(Vector<Char> str,
 } else {
 // Garbage words are illegal if a number has been read.
 if (has_read_number) return false;
+// The first number has to be separated from garbage words by
+// whitespace or other separators.
+if (scanner.Peek().IsNumber()) return false;
 }
 } else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
 // Parse UTC offset (only after UTC or time).

38
deps/v8/src/debug-agent.cc

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -157,7 +157,9 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
 ScopedLock with(session_access_);
 ASSERT(session == session_);
 if (session == session_) {
-CloseSession();
+session_->Shutdown();
+delete session_;
+session_ = NULL;
 }
 }
@@ -247,7 +249,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
 while (!(c == '\n' && prev_c == '\r')) {
 prev_c = c;
 received = conn->Receive(&c, 1);
-if (received <= 0) {
+if (received == 0) {
 PrintF("Error %d\n", Socket::LastError());
 return SmartArrayPointer<char>();
 }
@@ -323,41 +325,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
 const char* embedding_host) {
 static const int kBufferSize = 80;
 char buffer[kBufferSize]; // Sending buffer.
+bool ok;
 int len;
-int r;
 // Send the header.
 len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
 "Type: connect\r\n");
-r = conn->Send(buffer, len);
-if (r != len) return false;
+ok = conn->Send(buffer, len);
+if (!ok) return false;
 len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
 "V8-Version: %s\r\n", v8::V8::GetVersion());
-r = conn->Send(buffer, len);
-if (r != len) return false;
+ok = conn->Send(buffer, len);
+if (!ok) return false;
 len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
 "Protocol-Version: 1\r\n");
-r = conn->Send(buffer, len);
-if (r != len) return false;
+ok = conn->Send(buffer, len);
+if (!ok) return false;
 if (embedding_host != NULL) {
 len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
 "Embedding-Host: %s\r\n", embedding_host);
-r = conn->Send(buffer, len);
-if (r != len) return false;
+ok = conn->Send(buffer, len);
+if (!ok) return false;
 }
 len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
 "%s: 0\r\n", kContentLength);
-r = conn->Send(buffer, len);
-if (r != len) return false;
+ok = conn->Send(buffer, len);
+if (!ok) return false;
 // Terminate header with empty line.
 len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
-r = conn->Send(buffer, len);
-if (r != len) return false;
+ok = conn->Send(buffer, len);
+if (!ok) return false;
 // No body for connect message.
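Assembled in one piece, the handshake the rewritten sender emits looks like the following. The version and host values are illustrative, and kContentLength is assumed to be the agent's "Content-Length" header name per the surrounding code:

  #include <cstdio>

  int main() {
    // The Embedding-Host line is only sent when an embedding host is set.
    std::printf(
        "Type: connect\r\n"
        "V8-Version: %s\r\n"
        "Protocol-Version: 1\r\n"
        "Embedding-Host: %s\r\n"
        "Content-Length: 0\r\n"
        "\r\n",  // the blank line terminates the header; there is no body
        "3.11.10", "node");
    return 0;
  }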
@@ -397,7 +399,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
 uint16_t character = message[i];
 buffer_position +=
 unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
-ASSERT(buffer_position < kBufferSize);
+ASSERT(buffer_position <= kBufferSize);
 // Send buffer if full or last character is encoded.
 if (kBufferSize - buffer_position <
@@ -454,7 +456,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
 int total_received = 0;
 while (total_received < len) {
 int received = conn->Receive(data + total_received, len - total_received);
-if (received <= 0) {
+if (received == 0) {
 return total_received;
 }
 total_received += received;

57
deps/v8/src/debug-debugger.js

@@ -1957,7 +1957,7 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
 if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
 frame_index = request.arguments.frameNumber;
 if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
-return response.failed('Invalid frame number');
+throw new Error('Invalid frame number');
 }
 return this.exec_state_.frame(frame_index);
 } else {
@@ -1966,20 +1966,44 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
 };
-DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
-// No frames no scopes.
-if (this.exec_state_.frameCount() == 0) {
-return response.failed('No scopes');
-}
-// Get the frame for which the scopes are requested.
-var frame = this.frameForScopeRequest_(request);
-// Fill all scopes for this frame.
-var total_scopes = frame.scopeCount();
+// Gets scope host object from request. It is either a function
+// ('functionHandle' argument must be specified) or a stack frame
+// ('frameNumber' may be specified and the current frame is taken by default).
+DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
+function(request) {
+if (request.arguments && "functionHandle" in request.arguments) {
+if (!IS_NUMBER(request.arguments.functionHandle)) {
+throw new Error('Function handle must be a number');
+}
+var function_mirror = LookupMirror(request.arguments.functionHandle);
+if (!function_mirror) {
+throw new Error('Failed to find function object by handle');
+}
+if (!function_mirror.isFunction()) {
+throw new Error('Value of non-function type is found by handle');
+}
+return function_mirror;
+} else {
+// No frames no scopes.
+if (this.exec_state_.frameCount() == 0) {
+throw new Error('No scopes');
+}
+// Get the frame for which the scopes are requested.
+var frame = this.frameForScopeRequest_(request);
+return frame;
+}
+}
+DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
+var scope_holder = this.scopeHolderForScopeRequest_(request);
+// Fill all scopes for this frame or function.
+var total_scopes = scope_holder.scopeCount();
 var scopes = [];
 for (var i = 0; i < total_scopes; i++) {
-scopes.push(frame.scope(i));
+scopes.push(scope_holder.scope(i));
 }
 response.body = {
 fromScope: 0,
@@ -1991,24 +2015,19 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
 DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
-// No frames no scopes.
-if (this.exec_state_.frameCount() == 0) {
-return response.failed('No scopes');
-}
-// Get the frame for which the scope is requested.
-var frame = this.frameForScopeRequest_(request);
+// Get the frame or function for which the scope is requested.
+var scope_holder = this.scopeHolderForScopeRequest_(request);
 // With no scope argument just return top scope.
 var scope_index = 0;
 if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
 scope_index = %ToNumber(request.arguments.number);
-if (scope_index < 0 || frame.scopeCount() <= scope_index) {
+if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
 return response.failed('Invalid scope number');
 }
 }
-response.body = frame.scope(scope_index);
+response.body = scope_holder.scope(scope_index);
 };

86
deps/v8/src/debug.cc

@@ -892,6 +892,16 @@ void Debug::Iterate(ObjectVisitor* v) {
 }
+void Debug::PutValuesOnStackAndDie(int start,
+Address c_entry_fp,
+Address last_fp,
+Address larger_fp,
+int count,
+int end) {
+OS::Abort();
+}
 Object* Debug::Break(Arguments args) {
 Heap* heap = isolate_->heap();
 HandleScope scope(isolate_);
@@ -984,11 +994,34 @@ Object* Debug::Break(Arguments args) {
 // Count frames until target frame
 int count = 0;
 JavaScriptFrameIterator it(isolate_);
-while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
+while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
 count++;
 it.Advance();
 }
+// Catch the cases that would lead to crashes and capture
+// - C entry FP at which to start stack crawl.
+// - FP of the frame at which we plan to stop stepping out (last FP).
+// - current FP that's larger than last FP.
+// - Counter for the number of steps to step out.
+if (it.done()) {
+// We crawled the entire stack, never reaching last_fp_.
+PutValuesOnStackAndDie(0xBEEEEEEE,
+frame->fp(),
+thread_local_.last_fp_,
+NULL,
+count,
+0xFEEEEEEE);
+} else if (it.frame()->fp() != thread_local_.last_fp_) {
+// We crawled over last_fp_, without getting a match.
+PutValuesOnStackAndDie(0xBEEEEEEE,
+frame->fp(),
+thread_local_.last_fp_,
+it.frame()->fp(),
+count,
+0xFEEEEEEE);
+}
 // If we found original frame
 if (it.frame()->fp() == thread_local_.last_fp_) {
 if (step_count > 1) {
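The switch from `!=` to `<` changes the walk's termination condition: if the recorded fp is never matched exactly, `!=` walks off the end of the stack, while `<` stops at the first frame at or above it and lets the new diagnostics fire. A small simulation (the addresses are made up):

  #include <cassert>
  #include <cstdint>

  int main() {
    // Frame pointers grow toward higher addresses as we Advance() outward.
    const uintptr_t frames[] = {0x1000, 0x1040, 0x10c0};
    const uintptr_t last_fp = 0x1080;  // recorded fp no frame matches exactly
    int count = 0;
    int i = 0;
    while (i < 3 && frames[i] < last_fp) {  // '<' stops at 0x10c0
      ++count;
      ++i;
    }
    // With '!=' the loop would have run off the end (it.done()); with '<'
    // we halt on a live frame and the mismatch can be reported.
    assert(count == 2 && i < 3 && frames[i] != last_fp);
    return 0;
  }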
@@ -1418,7 +1451,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
 // Remember source position and frame to handle step next.
 thread_local_.last_statement_position_ =
 debug_info->code()->SourceStatementPosition(frame->pc());
-thread_local_.last_fp_ = frame->fp();
+thread_local_.last_fp_ = frame->UnpaddedFP();
 } else {
 // If there's restarter frame on top of the stack, just get the pointer
 // to function which is going to be restarted.
@@ -1487,7 +1520,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
 // propagated on the next Debug::Break.
 thread_local_.last_statement_position_ =
 debug_info->code()->SourceStatementPosition(frame->pc());
-thread_local_.last_fp_ = frame->fp();
+thread_local_.last_fp_ = frame->UnpaddedFP();
 }
 // Step in or Step in min
@@ -1522,7 +1555,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
 // Continue if we are still on the same frame and in the same statement.
 int current_statement_position =
 break_location_iterator->code()->SourceStatementPosition(frame->pc());
-return thread_local_.last_fp_ == frame->fp() &&
+return thread_local_.last_fp_ == frame->UnpaddedFP() &&
 thread_local_.last_statement_position_ == current_statement_position;
 }
@@ -1723,7 +1756,7 @@ void Debug::ClearOneShot() {
 void Debug::ActivateStepIn(StackFrame* frame) {
 ASSERT(!StepOutActive());
-thread_local_.step_into_fp_ = frame->fp();
+thread_local_.step_into_fp_ = frame->UnpaddedFP();
 }
@@ -1734,7 +1767,7 @@
 void Debug::ActivateStepOut(StackFrame* frame) {
 ASSERT(!StepInActive());
-thread_local_.step_out_fp_ = frame->fp();
+thread_local_.step_out_fp_ = frame->UnpaddedFP();
 }
@@ -1751,20 +1784,19 @@ void Debug::ClearStepNext() {
 // Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization
-// information. Deoptimization information is required in case that an
-// optimized version of this function is still activated on the
-// stack. It will also make sure that the full code is compiled with
-// the same flags as the previous version - that is flags which can
-// change the code generated. The current method of mapping from
-// already compiled full code without debug break slots to full code
-// with debug break slots depends on the generated code is otherwise
-// exactly the same.
-static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
+// have debug break slots and deoptimization information. Deoptimization
+// information is required in case that an optimized version of this
+// function is still activated on the stack. It will also make sure that
+// the full code is compiled with the same flags as the previous version,
+// that is flags which can change the code generated. The current method
+// of mapping from already compiled full code without debug break slots
+// to full code with debug break slots depends on the generated code is
+// otherwise exactly the same.
+static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
 Handle<Code> current_code) {
 ASSERT(!current_code->has_debug_break_slots());
-CompilationInfo info(shared);
+CompilationInfo info(function);
 info.MarkCompilingForDebugging(current_code);
 ASSERT(!info.shared_info()->is_compiled());
 ASSERT(!info.isolate()->has_pending_exception());
@@ -1776,7 +1808,7 @@ static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
 info.isolate()->clear_pending_exception();
#if DEBUG #if DEBUG
if (result) { if (result) {
Handle<Code> new_code(shared->code()); Handle<Code> new_code(function->shared()->code());
ASSERT(new_code->has_debug_break_slots()); ASSERT(new_code->has_debug_break_slots());
ASSERT(current_code->is_compiled_optimizable() == ASSERT(current_code->is_compiled_optimizable() ==
new_code->is_compiled_optimizable()); new_code->is_compiled_optimizable());
@ -1857,13 +1889,6 @@ static void RedirectActivationsToRecompiledCodeOnThread(
// break slots. // break slots.
debug_break_slot_count++; debug_break_slot_count++;
} }
if (frame_code->has_self_optimization_header() &&
!new_code->has_self_optimization_header()) {
delta -= FullCodeGenerator::self_optimization_header_size();
} else {
ASSERT(frame_code->has_self_optimization_header() ==
new_code->has_self_optimization_header());
}
int debug_break_slot_bytes = int debug_break_slot_bytes =
debug_break_slot_count * Assembler::kDebugBreakSlotLength; debug_break_slot_count * Assembler::kDebugBreakSlotLength;
if (FLAG_trace_deopt) { if (FLAG_trace_deopt) {
@ -1987,6 +2012,7 @@ void Debug::PrepareForBreakPoints() {
// patch the return address to run in the new compiled code. // patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) { for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i]; Handle<JSFunction> function = active_functions[i];
Handle<SharedFunctionInfo> shared(function->shared());
if (function->code()->kind() == Code::FUNCTION && if (function->code()->kind() == Code::FUNCTION &&
function->code()->has_debug_break_slots()) { function->code()->has_debug_break_slots()) {
@ -1994,7 +2020,6 @@ void Debug::PrepareForBreakPoints() {
continue; continue;
} }
Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it. // If recompilation is not possible just skip it.
if (shared->is_toplevel() || if (shared->is_toplevel() ||
!shared->allows_lazy_compilation() || !shared->allows_lazy_compilation() ||
@ -2014,7 +2039,7 @@ void Debug::PrepareForBreakPoints() {
isolate_->debugger()->force_debugger_active(); isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true); isolate_->debugger()->set_force_debugger_active(true);
ASSERT(current_code->kind() == Code::FUNCTION); ASSERT(current_code->kind() == Code::FUNCTION);
CompileFullCodeForDebugging(shared, current_code); CompileFullCodeForDebugging(function, current_code);
isolate_->debugger()->set_force_debugger_active( isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active); prev_force_debugger_active);
if (!shared->is_compiled()) { if (!shared->is_compiled()) {
@ -2234,6 +2259,13 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
} }
const int Debug::FramePaddingLayout::kInitialSize = 1;
// Any even value bigger than kInitialSize as needed for stack scanning.
const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
bool Debug::IsDebugGlobal(GlobalObject* global) { bool Debug::IsDebugGlobal(GlobalObject* global) {
return IsLoaded() && global == debug_context()->global(); return IsLoaded() && global == debug_context()->global();
} }
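The crawl above is subtle: frame pointers grow toward higher addresses as the iterator walks toward older frames, so comparing with < instead of != lets the loop stop at the first fp at or beyond last_fp_, and the two PutValuesOnStackAndDie branches turn the previously silent crash cases into deliberate aborts tagged with the 0xBEEEEEEE/0xFEEEEEEE sentinels. A minimal stand-alone sketch of that decision logic (the vector stand-in and names are mine, not V8's):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// fps holds the frame pointers the iterator would visit, in crawl order.
int CountFramesUntil(const std::vector<uintptr_t>& fps, uintptr_t last_fp) {
  size_t i = 0;
  int count = 0;
  while (i < fps.size() && fps[i] < last_fp) {
    count++;
    i++;
  }
  if (i == fps.size()) {
    // Crawled the entire stack without ever reaching last_fp: abort
    // loudly, as the first PutValuesOnStackAndDie branch does.
    std::fprintf(stderr, "never reached last_fp after %d frames\n", count);
    std::abort();
  } else if (fps[i] != last_fp) {
    // Overshot: the first fp at or beyond last_fp is not equal to it,
    // matching the second PutValuesOnStackAndDie branch.
    std::fprintf(stderr, "crawled over last_fp\n");
    std::abort();
  }
  return count;  // fps[i] == last_fp: found the original frame.
}

int main() {
  std::vector<uintptr_t> fps = {0x1000, 0x1040, 0x1080};
  assert(CountFramesUntil(fps, 0x1080) == 2);
  return 0;
}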

55
deps/v8/src/debug.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -232,6 +232,12 @@ class Debug {
   void PreemptionWhileInDebugger();
   void Iterate(ObjectVisitor* v);

+  NO_INLINE(void PutValuesOnStackAndDie(int start,
+                                        Address c_entry_fp,
+                                        Address last_fp,
+                                        Address larger_fp,
+                                        int count,
+                                        int end));
   Object* Break(Arguments args);
   void SetBreakPoint(Handle<SharedFunctionInfo> shared,
                      Handle<Object> break_point_object,
@@ -245,6 +251,8 @@ class Debug {
   bool IsBreakOnException(ExceptionBreakType type);
   void PrepareStep(StepAction step_action, int step_count);
   void ClearStepping();
+  void ClearStepOut();
+  bool IsStepping() { return thread_local_.step_count_ > 0; }
   bool StepNextContinue(BreakLocationIterator* break_location_iterator,
                         JavaScriptFrame* frame);
   static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -455,6 +463,50 @@ class Debug {
   // Architecture-specific constant.
   static const bool kFrameDropperSupported;

+  /**
+   * Defines layout of a stack frame that supports padding. This is a regular
+   * internal frame that has a flexible stack structure. LiveEdit can shift
+   * its lower part up the stack, taking up the 'padding' space when additional
+   * stack memory is required.
+   * Such frame is expected immediately above the topmost JavaScript frame.
+   *
+   * Stack Layout:
+   *   --- Top
+   *   LiveEdit routine frames
+   *   ---
+   *   C frames of debug handler
+   *   ---
+   *   ...
+   *   ---
+   *      An internal frame that has n padding words:
+   *      - any number of words as needed by code -- upper part of frame
+   *      - padding size: a Smi storing n -- current size of padding
+   *      - padding: n words filled with kPaddingValue in form of Smi
+   *      - 3 context/type words of a regular InternalFrame
+   *      - fp
+   *   ---
+   *   Topmost JavaScript frame
+   *   ---
+   *   ...
+   *   --- Bottom
+   */
+  class FramePaddingLayout : public AllStatic {
+   public:
+    // Architecture-specific constant.
+    static const bool kIsSupported;
+
+    // A size of frame base including fp. Padding words starts right above
+    // the base.
+    static const int kFrameBaseSize = 4;
+
+    // A number of words that should be reserved on stack for the LiveEdit use.
+    // Normally equals 1. Stored on stack in form of Smi.
+    static const int kInitialSize;
+    // A value that padding words are filled with (in form of Smi). Going
+    // bottom-top, the first word not having this value is a counter word.
+    static const int kPaddingValue;
+  };
+
  private:
   explicit Debug(Isolate* isolate);
   ~Debug();
@@ -464,7 +516,6 @@ class Debug {
   void ActivateStepIn(StackFrame* frame);
   void ClearStepIn();
   void ActivateStepOut(StackFrame* frame);
-  void ClearStepOut();
   void ClearStepNext();
   // Returns whether the compile succeeded.
   void RemoveDebugInfo(Handle<DebugInfo> debug_info);
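A toy model of the scanning contract the FramePaddingLayout comment describes: walking bottom-to-top, padding words all carry kPaddingValue, and the first word holding a different value is the counter storing the current padding size n. Everything below is a hypothetical restatement with plain ints instead of Smis, not code from the tree; the toy also assumes the counter value differs from kPaddingValue, as a well-formed frame would arrange.

#include <cassert>
#include <vector>

// Constants mirror the definitions added to debug.cc above.
const int kInitialSize = 1;
const int kPaddingValue = kInitialSize + 1;

// words runs bottom-to-top, starting at the first padding word.
int ReadPaddingSize(const std::vector<int>& words) {
  size_t i = 0;
  while (i < words.size() && words[i] == kPaddingValue) {
    i++;  // skip padding words
  }
  assert(i < words.size());
  int n = words[i];                      // the counter word
  assert(static_cast<size_t>(n) == i);   // counter matches words skipped
  return n;
}

int main() {
  // Three padding words followed by the counter storing 3.
  std::vector<int> words = {kPaddingValue, kPaddingValue, kPaddingValue, 3};
  assert(ReadPaddingSize(words) == 3);
  return 0;
}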

68
deps/v8/src/deoptimizer.cc

@@ -354,6 +354,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
       bailout_type_(type),
       from_(from),
       fp_to_sp_delta_(fp_to_sp_delta),
+      has_alignment_padding_(0),
       input_(NULL),
       output_count_(0),
       jsframe_count_(0),
@@ -378,6 +379,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
            reinterpret_cast<intptr_t>(from),
            fp_to_sp_delta - (2 * kPointerSize));
   }
+  function->shared()->increment_deopt_count();
   // Find the optimized code.
   if (type == EAGER) {
     ASSERT(from == NULL);
@@ -593,12 +595,14 @@ void Deoptimizer::DoComputeOutputFrames() {
     PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
            reinterpret_cast<intptr_t>(function));
     function->PrintName();
-    PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
+    PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+           " took %0.3f ms]\n",
            node_id,
            output_[index]->GetPc(),
            FullCodeGenerator::State2String(
                static_cast<FullCodeGenerator::State>(
                    output_[index]->GetState()->value())),
+           has_alignment_padding_ ? "with padding" : "no padding",
            ms);
   }
 }
@@ -769,7 +773,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
-        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
+        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
                output_offset,
                input_value,
                input_offset);
@@ -789,7 +793,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
-        PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
+        PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
                output_offset,
                value,
                input_offset,
@@ -815,7 +819,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
           input_->GetOffsetFromSlotIndex(input_slot_index);
       double value = input_->GetDoubleFrameSlot(input_offset);
       if (FLAG_trace_deopt) {
-        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
+        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
                output_[frame_index]->GetTop() + output_offset,
                output_offset,
                value,
@@ -1290,7 +1294,7 @@ Object* FrameDescription::GetExpression(int index) {
 }


-void TranslationBuffer::Add(int32_t value) {
+void TranslationBuffer::Add(int32_t value, Zone* zone) {
   // Encode the sign bit in the least significant bit.
   bool is_negative = (value < 0);
   uint32_t bits = ((is_negative ? -value : value) << 1) |
@@ -1299,7 +1303,7 @@ void TranslationBuffer::Add(int32_t value) {
   // each byte to indicate whether or not more bytes follow.
   do {
     uint32_t next = bits >> 7;
-    contents_.Add(((bits << 1) & 0xFF) | (next != 0));
+    contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
     bits = next;
   } while (bits != 0);
 }
@@ -1332,76 +1336,76 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray() {
 void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
-  buffer_->Add(CONSTRUCT_STUB_FRAME);
-  buffer_->Add(literal_id);
-  buffer_->Add(height);
+  buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
+  buffer_->Add(literal_id, zone());
+  buffer_->Add(height, zone());
 }


 void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
-  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
-  buffer_->Add(literal_id);
-  buffer_->Add(height);
+  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
+  buffer_->Add(literal_id, zone());
+  buffer_->Add(height, zone());
 }


 void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
-  buffer_->Add(JS_FRAME);
-  buffer_->Add(node_id);
-  buffer_->Add(literal_id);
-  buffer_->Add(height);
+  buffer_->Add(JS_FRAME, zone());
+  buffer_->Add(node_id, zone());
+  buffer_->Add(literal_id, zone());
+  buffer_->Add(height, zone());
 }


 void Translation::StoreRegister(Register reg) {
-  buffer_->Add(REGISTER);
-  buffer_->Add(reg.code());
+  buffer_->Add(REGISTER, zone());
+  buffer_->Add(reg.code(), zone());
 }


 void Translation::StoreInt32Register(Register reg) {
-  buffer_->Add(INT32_REGISTER);
-  buffer_->Add(reg.code());
+  buffer_->Add(INT32_REGISTER, zone());
+  buffer_->Add(reg.code(), zone());
 }


 void Translation::StoreDoubleRegister(DoubleRegister reg) {
-  buffer_->Add(DOUBLE_REGISTER);
-  buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
+  buffer_->Add(DOUBLE_REGISTER, zone());
+  buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
 }


 void Translation::StoreStackSlot(int index) {
-  buffer_->Add(STACK_SLOT);
-  buffer_->Add(index);
+  buffer_->Add(STACK_SLOT, zone());
+  buffer_->Add(index, zone());
 }


 void Translation::StoreInt32StackSlot(int index) {
-  buffer_->Add(INT32_STACK_SLOT);
-  buffer_->Add(index);
+  buffer_->Add(INT32_STACK_SLOT, zone());
+  buffer_->Add(index, zone());
 }


 void Translation::StoreDoubleStackSlot(int index) {
-  buffer_->Add(DOUBLE_STACK_SLOT);
-  buffer_->Add(index);
+  buffer_->Add(DOUBLE_STACK_SLOT, zone());
+  buffer_->Add(index, zone());
 }


 void Translation::StoreLiteral(int literal_id) {
-  buffer_->Add(LITERAL);
-  buffer_->Add(literal_id);
+  buffer_->Add(LITERAL, zone());
+  buffer_->Add(literal_id, zone());
 }


 void Translation::StoreArgumentsObject() {
-  buffer_->Add(ARGUMENTS_OBJECT);
+  buffer_->Add(ARGUMENTS_OBJECT, zone());
 }


 void Translation::MarkDuplicate() {
-  buffer_->Add(DUPLICATE);
+  buffer_->Add(DUPLICATE, zone());
 }
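The zone-threading change leaves the encoding itself untouched: TranslationBuffer::Add stores the sign in the least significant bit, then emits the value in 7-bit groups, low bits first, with each byte's lowest bit flagging whether more bytes follow. A self-contained sketch of that scheme; Decode is my inverse (only the encoder appears in the diff), and the names are mine:

#include <cassert>
#include <cstdint>
#include <vector>

void Encode(int32_t value, std::vector<uint8_t>* out) {
  // Sign goes into the least significant bit, as in TranslationBuffer::Add.
  bool is_negative = (value < 0);
  uint32_t bits =
      ((is_negative ? -value : value) << 1) | (is_negative ? 1 : 0);
  do {
    uint32_t next = bits >> 7;
    // Low 7 payload bits in bits 1..7, continuation flag in bit 0.
    out->push_back(static_cast<uint8_t>(((bits << 1) & 0xFF) | (next != 0)));
    bits = next;
  } while (bits != 0);
}

int32_t Decode(const std::vector<uint8_t>& in, size_t* pos) {
  uint32_t bits = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = in[(*pos)++];
    bits |= static_cast<uint32_t>(byte >> 1) << shift;  // 7 payload bits
    shift += 7;
  } while (byte & 1);  // continuation flag set: more bytes follow
  int32_t magnitude = static_cast<int32_t>(bits >> 1);
  return (bits & 1) ? -magnitude : magnitude;
}

int main() {
  std::vector<uint8_t> buf;
  Encode(-1234, &buf);
  size_t pos = 0;
  assert(Decode(buf, &pos) == -1234);
  return 0;
}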

24
deps/v8/src/deoptimizer.h

@@ -221,6 +221,10 @@ class Deoptimizer : public Malloced {
   }
   static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }

+  static int has_alignment_padding_offset() {
+    return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+  }
+
   static int GetDeoptimizedCodeCount(Isolate* isolate);

   static const int kNotDeoptimizationEntry = -1;
@@ -322,6 +326,7 @@ class Deoptimizer : public Malloced {
   BailoutType bailout_type_;
   Address from_;
   int fp_to_sp_delta_;
+  int has_alignment_padding_;

   // Input frame description.
   FrameDescription* input_;
@@ -515,10 +520,10 @@ class FrameDescription {

 class TranslationBuffer BASE_EMBEDDED {
  public:
-  TranslationBuffer() : contents_(256) { }
+  explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }

   int CurrentIndex() const { return contents_.length(); }
-  void Add(int32_t value);
+  void Add(int32_t value, Zone* zone);

   Handle<ByteArray> CreateByteArray();
@@ -569,12 +574,14 @@ class Translation BASE_EMBEDDED {
     DUPLICATE
   };

-  Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count)
+  Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
+              Zone* zone)
       : buffer_(buffer),
-        index_(buffer->CurrentIndex()) {
-    buffer_->Add(BEGIN);
-    buffer_->Add(frame_count);
-    buffer_->Add(jsframe_count);
+        index_(buffer->CurrentIndex()),
+        zone_(zone) {
+    buffer_->Add(BEGIN, zone);
+    buffer_->Add(frame_count, zone);
+    buffer_->Add(jsframe_count, zone);
   }

   int index() const { return index_; }
@@ -593,6 +600,8 @@ class Translation BASE_EMBEDDED {
   void StoreArgumentsObject();
   void MarkDuplicate();

+  Zone* zone() const { return zone_; }
+
   static int NumberOfOperandsFor(Opcode opcode);

 #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
@@ -602,6 +611,7 @@ class Translation BASE_EMBEDDED {
  private:
   TranslationBuffer* buffer_;
   int index_;
+  Zone* zone_;
 };
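Call sites now have to thread the Zone explicitly, constructing something like TranslationBuffer buffer(zone) and Translation translation(&buffer, frame_count, jsframe_count, zone); that call-site shape is inferred from the new signatures above rather than shown in this hunk.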

6
deps/v8/src/double.h

@@ -130,12 +130,6 @@ class Double {
     return (d64 & kExponentMask) == kExponentMask;
   }

-  bool IsNan() const {
-    uint64_t d64 = AsUint64();
-    return ((d64 & kExponentMask) == kExponentMask) &&
-           ((d64 & kSignificandMask) != 0);
-  }
-
   bool IsInfinite() const {
     uint64_t d64 = AsUint64();
     return ((d64 & kExponentMask) == kExponentMask) &&
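The deleted predicate checked the classic IEEE-754 NaN pattern: an all-ones exponent with a non-zero significand, whereas infinity keeps the significand zero. A stand-alone restatement, with the mask constants assumed from this header's definitions:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

const uint64_t kExponentMask    = 0x7FF0000000000000ULL;
const uint64_t kSignificandMask = 0x000FFFFFFFFFFFFFULL;

bool IsNanBits(double d) {
  uint64_t d64;
  std::memcpy(&d64, &d, sizeof(d64));  // reinterpret the bit pattern
  return ((d64 & kExponentMask) == kExponentMask) &&
         ((d64 & kSignificandMask) != 0);
}

int main() {
  assert(IsNanBits(std::nan("")));
  // Infinity: exponent all ones, but significand zero, so not a NaN.
  assert(!IsNanBits(std::numeric_limits<double>::infinity()));
  return 0;
}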

134
deps/v8/src/elements-kind.cc

@@ -0,0 +1,134 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "elements-kind.h"
#include "api.h"
#include "elements.h"
#include "objects.h"
namespace v8 {
namespace internal {
void PrintElementsKind(FILE* out, ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
}
ElementsKind GetInitialFastElementsKind() {
if (FLAG_packed_arrays) {
return FAST_SMI_ELEMENTS;
} else {
return FAST_HOLEY_SMI_ELEMENTS;
}
}
struct InitializeFastElementsKindSequence {
static void Construct(
ElementsKind** fast_elements_kind_sequence_ptr) {
ElementsKind* fast_elements_kind_sequence =
new ElementsKind[kFastElementsKindCount];
*fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[4] = FAST_ELEMENTS;
fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
}
};
static LazyInstance<ElementsKind*,
InitializeFastElementsKindSequence>::type
fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
ASSERT(sequence_number >= 0 &&
sequence_number < kFastElementsKindCount);
return fast_elements_kind_sequence.Get()[sequence_number];
}
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
return i;
}
}
UNREACHABLE();
return 0;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
int index =
GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
elements_kind = GetFastElementsKindFromSequenceIndex(index);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
}
UNREACHABLE();
return TERMINAL_FAST_ELEMENTS_KIND;
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS;
case FAST_HOLEY_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS;
case FAST_DOUBLE_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS &&
to_kind != FAST_DOUBLE_ELEMENTS;
case FAST_HOLEY_DOUBLE_ELEMENTS:
return to_kind == FAST_ELEMENTS ||
to_kind == FAST_HOLEY_ELEMENTS;
case FAST_ELEMENTS:
return to_kind == FAST_HOLEY_ELEMENTS;
case FAST_HOLEY_ELEMENTS:
return false;
default:
return false;
}
}
} } // namespace v8::internal
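GetNextMoreGeneralFastElementsKind simply steps along the sequence constructed above, skipping holey kinds when allow_only_packed is set. A mini-model with my own reduced enum in the same sequence order (callers must not pass the terminal kind, mirroring the ASSERT in the real code):

#include <cassert>

enum Kind { SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT };

bool IsHoley(Kind k) {
  return k == HOLEY_SMI || k == HOLEY_DOUBLE || k == HOLEY_OBJECT;
}

Kind NextMoreGeneral(Kind k, bool allow_only_packed) {
  for (;;) {
    k = static_cast<Kind>(k + 1);  // step along the generalization sequence
    if (!IsHoley(k) || !allow_only_packed) return k;
  }
}

int main() {
  // Packed-only walk skips the holey kinds: SMI -> DOUBLE.
  assert(NextMoreGeneral(SMI, true) == DOUBLE);
  // Unrestricted walk takes the next kind in sequence: SMI -> HOLEY_SMI.
  assert(NextMoreGeneral(SMI, false) == HOLEY_SMI);
  return 0;
}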

221
deps/v8/src/elements-kind.h

@@ -0,0 +1,221 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
#include "v8checks.h"
namespace v8 {
namespace internal {
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
// The "fast" kind for tagged values. Must be second to make it possible to
// efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
// together at once.
FAST_ELEMENTS,
FAST_HOLEY_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
FAST_DOUBLE_ELEMENTS,
FAST_HOLEY_DOUBLE_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
NON_STRICT_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
EXTERNAL_INT_ELEMENTS,
EXTERNAL_UNSIGNED_INT_ELEMENTS,
EXTERNAL_FLOAT_ELEMENTS,
EXTERNAL_DOUBLE_ELEMENTS,
EXTERNAL_PIXEL_ELEMENTS,
// Derived constants from ElementsKind
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
inline bool IsDictionaryElementsKind(ElementsKind kind) {
return kind == DICTIONARY_ELEMENTS;
}
inline bool IsExternalArrayElementsKind(ElementsKind kind) {
return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
}
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastSmiElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS;
}
inline bool IsFastObjectElementsKind(ElementsKind kind) {
return kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastHoleyElementsKind(ElementsKind kind) {
return kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsHoleyElementsKind(ElementsKind kind) {
return IsFastHoleyElementsKind(kind) ||
kind == DICTIONARY_ELEMENTS;
}
inline bool IsFastPackedElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_ELEMENTS;
}
inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
return FAST_SMI_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
return FAST_DOUBLE_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_ELEMENTS) {
return FAST_ELEMENTS;
}
return holey_kind;
}
inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
if (packed_kind == FAST_SMI_ELEMENTS) {
return FAST_HOLEY_SMI_ELEMENTS;
}
if (packed_kind == FAST_DOUBLE_ELEMENTS) {
return FAST_HOLEY_DOUBLE_ELEMENTS;
}
if (packed_kind == FAST_ELEMENTS) {
return FAST_HOLEY_ELEMENTS;
}
return packed_kind;
}
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
ASSERT(IsFastSmiElementsKind(from_kind));
return (from_kind == FAST_SMI_ELEMENTS)
? FAST_ELEMENTS
: FAST_HOLEY_ELEMENTS;
}
inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return (GetHoleyElementsKind(from_kind) == to_kind) ||
(IsFastSmiElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind));
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
from_kind != TERMINAL_FAST_ELEMENTS_KIND;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed);
inline bool CanTransitionToMoreGeneralFastElementsKind(
ElementsKind elements_kind,
bool allow_only_packed) {
return IsFastElementsKind(elements_kind) &&
(elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
(!allow_only_packed || elements_kind != FAST_ELEMENTS));
}
} } // namespace v8::internal
#endif // V8_ELEMENTS_KIND_H_
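Several predicates above lean on the enum layout rather than a switch: because the six fast kinds occupy values 0 through 5 (the ASSERT that FIRST_FAST_ELEMENTS_KIND == 0 pins this down), IsFastElementsKind collapses to a single comparison. A reduced sketch of that range-check trick with stand-in names:

#include <cassert>

enum Kind {
  FAST_SMI, FAST_HOLEY_SMI, FAST_OBJECT, FAST_HOLEY_OBJECT,
  FAST_DOUBLE, FAST_HOLEY_DOUBLE,  // values 0..5: the fast kinds
  DICTIONARY                       // first non-fast kind
};

bool IsFast(Kind k) { return k <= FAST_HOLEY_DOUBLE; }

int main() {
  assert(IsFast(FAST_HOLEY_DOUBLE));
  assert(!IsFast(DICTIONARY));
  return 0;
}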

681
deps/v8/src/elements.cc

File diff suppressed because it is too large

65
deps/v8/src/elements.h

@@ -28,6 +28,7 @@
 #ifndef V8_ELEMENTS_H_
 #define V8_ELEMENTS_H_

+#include "elements-kind.h"
 #include "objects.h"
 #include "heap.h"
 #include "isolate.h"
@@ -45,6 +46,10 @@ class ElementsAccessor {
   virtual ElementsKind kind() const = 0;
   const char* name() const { return name_; }

+  // Checks the elements of an object for consistency, asserting when a problem
+  // is found.
+  virtual void Validate(JSObject* obj) = 0;
+
   // Returns true if a holder contains an element with the specified key
   // without iterating up the prototype chain. The caller can optionally pass
   // in the backing store to use for the check, which must be compatible with
@@ -60,18 +65,19 @@ class ElementsAccessor {
   // can optionally pass in the backing store to use for the check, which must
   // be compatible with the ElementsKind of the ElementsAccessor. If
   // backing_store is NULL, the holder->elements() is used as the backing store.
-  virtual MaybeObject* Get(Object* receiver,
-                           JSObject* holder,
-                           uint32_t key,
-                           FixedArrayBase* backing_store = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Get(
+      Object* receiver,
+      JSObject* holder,
+      uint32_t key,
+      FixedArrayBase* backing_store = NULL) = 0;

   // Modifies the length data property as specified for JSArrays and resizes the
   // underlying backing store accordingly. The method honors the semantics of
   // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
   // have non-deletable elements can only be shrunk to the size of highest
   // element that is non-deletable.
-  virtual MaybeObject* SetLength(JSArray* holder,
-                                 Object* new_length) = 0;
+  MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
+                                                 Object* new_length) = 0;

   // Modifies both the length and capacity of a JSArray, resizing the underlying
   // backing store as necessary. This method does NOT honor the semantics of
@@ -79,14 +85,14 @@ class ElementsAccessor {
   // elements. This method should only be called for array expansion OR by
   // runtime JavaScript code that use InternalArrays and don't care about
   // EcmaScript 5.1 semantics.
-  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
-                                            int capacity,
-                                            int length) = 0;
+  MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                                            int capacity,
+                                                            int length) = 0;

   // Deletes an element in an object, returning a new elements backing store.
-  virtual MaybeObject* Delete(JSObject* holder,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) = 0;

   // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
   // of elements from source after source_start to the destination array.
@@ -101,26 +107,28 @@ class ElementsAccessor {
   // the source JSObject or JSArray in source_holder. If the holder's backing
   // store is available, it can be passed in source and source_holder is
   // ignored.
-  virtual MaybeObject* CopyElements(JSObject* source_holder,
-                                    uint32_t source_start,
-                                    FixedArrayBase* destination,
-                                    ElementsKind destination_kind,
-                                    uint32_t destination_start,
-                                    int copy_size,
-                                    FixedArrayBase* source = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* CopyElements(
+      JSObject* source_holder,
+      uint32_t source_start,
+      FixedArrayBase* destination,
+      ElementsKind destination_kind,
+      uint32_t destination_start,
+      int copy_size,
+      FixedArrayBase* source = NULL) = 0;

-  MaybeObject* CopyElements(JSObject* from_holder,
-                            FixedArrayBase* to,
-                            ElementsKind to_kind,
-                            FixedArrayBase* from = NULL) {
+  MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
+                                            FixedArrayBase* to,
+                                            ElementsKind to_kind,
+                                            FixedArrayBase* from = NULL) {
     return CopyElements(from_holder, 0, to, to_kind, 0,
                         kCopyToEndAndInitializeToHole, from);
   }

-  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
-                                               JSObject* holder,
-                                               FixedArray* to,
-                                               FixedArrayBase* from = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
+      Object* receiver,
+      JSObject* holder,
+      FixedArray* to,
+      FixedArrayBase* from = NULL) = 0;

   // Returns a shared ElementsAccessor for the specified ElementsKind.
   static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -131,6 +139,7 @@ class ElementsAccessor {
   static ElementsAccessor* ForArray(FixedArrayBase* array);

   static void InitializeOncePerProcess();
+  static void TearDown();

  protected:
   friend class NonStrictArgumentsElementsAccessor;
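The sweep adds MUST_USE_RESULT to every MaybeObject*-returning method so that a silently dropped allocation-failure result becomes a compiler diagnostic. On GCC-compatible compilers the macro presumably expands to warn_unused_result (an assumption; the actual definition lives elsewhere in the tree), which behaves like this sketch:

#include <cstdio>

#define MUST_USE_RESULT __attribute__((warn_unused_result))

MUST_USE_RESULT int TryThing() { return -1; }  // stand-in for MaybeObject*

int main() {
  TryThing();             // compiler warns: result ignored
  if (TryThing() < 0) {   // checked use compiles quietly
    std::puts("failed");
  }
  return 0;
}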

7
deps/v8/src/extensions/externalize-string-extension.cc

@@ -133,11 +133,8 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(

 void ExternalizeStringExtension::Register() {
-  static ExternalizeStringExtension* externalize_extension = NULL;
-  if (externalize_extension == NULL)
-    externalize_extension = new ExternalizeStringExtension;
-  static v8::DeclareExtension externalize_extension_declaration(
-      externalize_extension);
+  static ExternalizeStringExtension externalize_extension;
+  static v8::DeclareExtension declaration(&externalize_extension);
 }

 } }  // namespace v8::internal

5
deps/v8/src/extensions/gc-extension.cc

@@ -46,9 +46,8 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {

 void GCExtension::Register() {
-  static GCExtension* gc_extension = NULL;
-  if (gc_extension == NULL) gc_extension = new GCExtension();
-  static v8::DeclareExtension gc_extension_declaration(gc_extension);
+  static GCExtension gc_extension;
+  static v8::DeclareExtension declaration(&gc_extension);
 }

 } }  // namespace v8::internal
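Both Register() bodies move from a NULL-checked, never-freed heap allocation to function-local statics, which are constructed once on first call. The idiom, with stand-in types rather than the V8 classes:

#include <cstdio>

struct Extension {
  Extension() { std::puts("extension constructed"); }
};

struct DeclareExtension {
  explicit DeclareExtension(Extension* e) { (void)e; /* register e */ }
};

void Register() {
  static Extension extension;                    // constructed on first call
  static DeclareExtension declaration(&extension);
}

int main() {
  Register();
  Register();  // second call constructs nothing; statics already exist
  return 0;
}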

80
deps/v8/src/factory.cc

@@ -34,6 +34,7 @@
 #include "macro-assembler.h"
 #include "objects.h"
 #include "objects-visiting.h"
+#include "platform.h"
 #include "scopeinfo.h"

 namespace v8 {
@@ -114,7 +115,8 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
 Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
   ASSERT(0 <= number_of_descriptors);
   CALL_HEAP_FUNCTION(isolate(),
-                     DescriptorArray::Allocate(number_of_descriptors),
+                     DescriptorArray::Allocate(number_of_descriptors,
+                                               DescriptorArray::MAY_BE_SHARED),
                      DescriptorArray);
 }
@@ -291,6 +293,15 @@ Handle<Context> Factory::NewGlobalContext() {
 }


+Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
+                                          Handle<ScopeInfo> scope_info) {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
+      Context);
+}
+
+
 Handle<Context> Factory::NewFunctionContext(int length,
                                             Handle<JSFunction> function) {
   CALL_HEAP_FUNCTION(
@@ -324,10 +335,9 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
 }


-Handle<Context> Factory::NewBlockContext(
-    Handle<JSFunction> function,
-    Handle<Context> previous,
-    Handle<ScopeInfo> scope_info) {
+Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
+                                         Handle<Context> previous,
+                                         Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateBlockContext(*function,
@@ -487,7 +497,9 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
 Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map);
+  CALL_HEAP_FUNCTION(isolate(),
+                     src->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED),
+                     Map);
 }
@@ -667,6 +679,43 @@ Handle<Object> Factory::NewError(const char* type,
 }


+Handle<String> Factory::EmergencyNewError(const char* type,
+                                          Handle<JSArray> args) {
+  const int kBufferSize = 1000;
+  char buffer[kBufferSize];
+  size_t space = kBufferSize;
+  char* p = &buffer[0];
+
+  Vector<char> v(buffer, kBufferSize);
+  OS::StrNCpy(v, type, space);
+  space -= Min(space, strlen(type));
+  p = &buffer[kBufferSize] - space;
+
+  for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
+    if (space > 0) {
+      *p++ = ' ';
+      space--;
+      if (space > 0) {
+        MaybeObject* maybe_arg = args->GetElement(i);
+        Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
+        const char* arg = *arg_str->ToCString();
+        Vector<char> v2(p, space);
+        OS::StrNCpy(v2, arg, space);
+        space -= Min(space, strlen(arg));
+        p = &buffer[kBufferSize] - space;
+      }
+    }
+  }
+
+  if (space > 0) {
+    *p = '\0';
+  } else {
+    buffer[kBufferSize - 1] = '\0';
+  }
+  Handle<String> error_string = NewStringFromUtf8(CStrVector(buffer), TENURED);
+  return error_string;
+}
+
+
 Handle<Object> Factory::NewError(const char* maker,
                                  const char* type,
                                  Handle<JSArray> args) {
@@ -675,8 +724,9 @@ Handle<Object> Factory::NewError(const char* maker,
       isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
   // If the builtins haven't been properly configured yet this error
   // constructor may not have been defined.  Bail out.
-  if (!fun_obj->IsJSFunction())
-    return undefined_value();
+  if (!fun_obj->IsJSFunction()) {
+    return EmergencyNewError(type, args);
+  }
   Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
   Handle<Object> type_obj = LookupAsciiSymbol(type);
   Handle<Object> argv[] = { type_obj, args };
@@ -767,7 +817,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
       instance_size != JSObject::kHeaderSize) {
     Handle<Map> initial_map = NewMap(type,
                                      instance_size,
-                                     FAST_SMI_ONLY_ELEMENTS);
+                                     GetInitialFastElementsKind());
     function->set_initial_map(*initial_map);
     initial_map->set_constructor(*function);
   }
@@ -892,7 +942,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
     Handle<String> key =
         SymbolFromString(Handle<String>(String::cast(entry->name())));
     // Check if a descriptor with this name already exists before writing.
-    if (result->LinearSearch(*key, descriptor_count) ==
+    if (result->LinearSearch(EXPECT_UNSORTED, *key, descriptor_count) ==
         DescriptorArray::kNotFound) {
       CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
       result->Set(descriptor_count, &desc, witness);
@@ -928,6 +978,13 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
 }


+Handle<JSModule> Factory::NewJSModule() {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateJSModule(), JSModule);
+}
+
+
 Handle<GlobalObject> Factory::NewGlobalObject(
     Handle<JSFunction> constructor) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -998,10 +1055,11 @@ void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {

 void Factory::EnsureCanContainElements(Handle<JSArray> array,
                                        Handle<FixedArrayBase> elements,
+                                       uint32_t length,
                                        EnsureElementsMode mode) {
   CALL_HEAP_FUNCTION_VOID(
       isolate(),
-      array->EnsureCanContainElements(*elements, mode));
+      array->EnsureCanContainElements(*elements, length, mode));
 }
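EmergencyNewError's buffer handling keeps two pieces of state, the remaining space and a write pointer re-derived from it, so an overlong message truncates instead of overflowing. A stand-alone sketch of that invariant (p == buffer + capacity - space) with invented names:

#include <algorithm>
#include <cstdio>
#include <cstring>

static void AppendBounded(char* buffer, size_t capacity, size_t* space,
                          const char* word) {
  char* p = buffer + (capacity - *space);      // re-derive write position
  size_t n = std::min(*space, std::strlen(word));
  std::memcpy(p, word, n);                     // copy is clamped to space
  *space -= n;
}

int main() {
  char buffer[16];
  size_t space = sizeof(buffer) - 1;           // reserve one byte for '\0'
  AppendBounded(buffer, sizeof(buffer) - 1, &space, "unhandled ");
  AppendBounded(buffer, sizeof(buffer) - 1, &space, "exception");
  buffer[sizeof(buffer) - 1 - space] = '\0';   // terminate at write position
  std::printf("%s\n", buffer);                 // prints "unhandled excep"
  return 0;
}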

30
deps/v8/src/factory.h

@@ -162,9 +162,12 @@ class Factory {
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();

+  // Create a module context.
+  Handle<Context> NewModuleContext(Handle<Context> previous,
+                                   Handle<ScopeInfo> scope_info);
+
   // Create a function context.
-  Handle<Context> NewFunctionContext(int length,
-                                     Handle<JSFunction> function);
+  Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);

   // Create a catch context.
   Handle<Context> NewCatchContext(Handle<JSFunction> function,
@@ -177,7 +180,7 @@ class Factory {
                                  Handle<Context> previous,
                                  Handle<JSObject> extension);

-  // Create a 'block' context.
+  // Create a block context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
                                   Handle<ScopeInfo> scope_info);
@@ -213,9 +216,10 @@ class Factory {
   Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
       Handle<Object> value);

-  Handle<Map> NewMap(InstanceType type,
-                     int instance_size,
-                     ElementsKind elements_kind = FAST_ELEMENTS);
+  Handle<Map> NewMap(
+      InstanceType type,
+      int instance_size,
+      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);

@@ -262,14 +266,18 @@ class Factory {
   // runtime.
   Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);

+  // JS modules are pretenured.
+  Handle<JSModule> NewJSModule();
+
   // JS arrays are pretenured when allocated by the parser.
-  Handle<JSArray> NewJSArray(int capacity,
-                             ElementsKind elements_kind = FAST_ELEMENTS,
-                             PretenureFlag pretenure = NOT_TENURED);
+  Handle<JSArray> NewJSArray(
+      int capacity,
+      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+      PretenureFlag pretenure = NOT_TENURED);

   Handle<JSArray> NewJSArrayWithElements(
       Handle<FixedArrayBase> elements,
-      ElementsKind elements_kind = FAST_ELEMENTS,
+      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
       PretenureFlag pretenure = NOT_TENURED);

   void SetElementsCapacityAndLength(Handle<JSArray> array,
@@ -281,6 +289,7 @@ class Factory {
   void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
   void EnsureCanContainElements(Handle<JSArray> array,
                                 Handle<FixedArrayBase> elements,
+                                uint32_t length,
                                 EnsureElementsMode mode);

   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
@@ -329,6 +338,7 @@ class Factory {
   Handle<Object> NewError(const char* maker, const char* type,
                           Handle<JSArray> args);
+  Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
   Handle<Object> NewError(const char* maker, const char* type,
                           Vector< Handle<Object> > args);
   Handle<Object> NewError(const char* type,

16
deps/v8/src/flag-definitions.h

@@ -132,6 +132,10 @@ public:

 // Flags for language modes and experimental language features.
 DEFINE_bool(use_strict, false, "enforce strict mode")
+DEFINE_bool(es5_readonly, false,
+            "activate correct semantics for inheriting readonliness")
+DEFINE_bool(es52_globals, false,
+            "activate new semantics for global var declarations")

 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
 DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
@@ -148,6 +152,7 @@ DEFINE_implication(harmony, harmony_collections)
 DEFINE_implication(harmony_modules, harmony_scoping)

 // Flags for experimental implementation features.
+DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
 DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
 DEFINE_bool(clever_optimizations,
             true,
@@ -165,7 +170,12 @@ DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
+DEFINE_int(max_inlined_source_size, 600,
+           "maximum source size in bytes considered for a single inlining")
+DEFINE_int(max_inlined_nodes, 196,
+           "maximum number of AST nodes considered for a single inlining")
+DEFINE_int(max_inlined_nodes_cumulative, 196,
+           "maximum cumulative number of AST nodes considered for inlining")
 DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
 DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
             true,
@@ -188,6 +198,10 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
 DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(use_osr, true, "use on-stack replacement")
+DEFINE_bool(array_bounds_checks_elimination, false,
+            "perform array bounds checks elimination")
+DEFINE_bool(array_index_dehoisting, false,
+            "perform array index dehoisting")
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")

 DEFINE_int(stress_runs, 0, "number of stress runs")
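Note that every feature flag added here defaults to false, so behavior is unchanged unless a flag is passed explicitly on the V8 command line (e.g. --packed_arrays; the exact plumbing depends on the embedder), while the inlining change replaces a single on/off limit with three tunable integer budgets.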

54
deps/v8/src/frames.cc

@@ -469,6 +469,20 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
 }


+Address StackFrame::UnpaddedFP() const {
+#if defined(V8_TARGET_ARCH_IA32)
+  if (!is_optimized()) return fp();
+  int32_t alignment_state = Memory::int32_at(
+      fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
+
+  return (alignment_state == kAlignmentPaddingPushed) ?
+      (fp() + kPointerSize) : fp();
+#else
+  return fp();
+#endif
+}
+
+
 Code* EntryFrame::unchecked_code() const {
   return HEAP->raw_unchecked_js_entry_code();
 }
@@ -1359,34 +1373,28 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
 // -------------------------------------------------------------------------

 int NumRegs(RegList reglist) {
-  int n = 0;
-  while (reglist != 0) {
-    n++;
-    reglist &= reglist - 1;  // clear one bit
-  }
-  return n;
+  return CompilerIntrinsics::CountSetBits(reglist);
 }


 struct JSCallerSavedCodeData {
-  JSCallerSavedCodeData() {
-    int i = 0;
-    for (int r = 0; r < kNumRegs; r++)
-      if ((kJSCallerSaved & (1 << r)) != 0)
-        reg_code[i++] = r;
-    ASSERT(i == kNumJSCallerSaved);
-  }
   int reg_code[kNumJSCallerSaved];
 };

+JSCallerSavedCodeData caller_saved_code_data;

-static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
-    LAZY_INSTANCE_INITIALIZER;
+void SetUpJSCallerSavedCodeData() {
+  int i = 0;
+  for (int r = 0; r < kNumRegs; r++)
+    if ((kJSCallerSaved & (1 << r)) != 0)
+      caller_saved_code_data.reg_code[i++] = r;
+  ASSERT(i == kNumJSCallerSaved);
+}

 int JSCallerSavedCode(int n) {
   ASSERT(0 <= n && n < kNumJSCallerSaved);
-  return caller_saved_code_data.Get().reg_code[n];
+  return caller_saved_code_data.reg_code[n];
 }
@@ -1400,11 +1408,11 @@ class field##_Wrapper : public ZoneObject { \
 STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
 #undef DEFINE_WRAPPER

-static StackFrame* AllocateFrameCopy(StackFrame* frame) {
+static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
 #define FRAME_TYPE_CASE(type, field) \
   case StackFrame::type: { \
     field##_Wrapper* wrapper = \
-        new field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
+        new(zone) field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
     return &wrapper->frame_; \
   }
@@ -1416,11 +1424,11 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame) {
   return NULL;
 }

-Vector<StackFrame*> CreateStackMap() {
-  ZoneList<StackFrame*> list(10);
+Vector<StackFrame*> CreateStackMap(Zone* zone) {
+  ZoneList<StackFrame*> list(10, zone);
   for (StackFrameIterator it; !it.done(); it.Advance()) {
-    StackFrame* frame = AllocateFrameCopy(it.frame());
-    list.Add(frame);
+    StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
+    list.Add(frame, zone);
   }
   return list.ToVector();
 }
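The NumRegs rewrite swaps Kernighan's bit-clearing loop for CompilerIntrinsics::CountSetBits, which presumably lowers to a popcount builtin on GCC-compatible compilers (an assumption; the intrinsic's definition lives in compiler-intrinsics.h, diffed elsewhere in this commit). Both compute the same population count:

#include <cassert>

int CountSetBitsLoop(unsigned reglist) {
  int n = 0;
  while (reglist != 0) {
    n++;
    reglist &= reglist - 1;  // clear the lowest set bit
  }
  return n;
}

int main() {
  assert(CountSetBitsLoop(0xF0u) == 4);
  assert(CountSetBitsLoop(0xF0u) == __builtin_popcount(0xF0u));
  return 0;
}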

12
deps/v8/src/frames.h

@@ -40,6 +40,8 @@ typedef uint32_t RegList;
 // Get the number of registers in a given register list.
 int NumRegs(RegList list);
 
+void SetUpJSCallerSavedCodeData();
+
 // Return the code of the n-th saved register available to JavaScript.
 int JSCallerSavedCode(int n);
@@ -204,11 +206,19 @@ class StackFrame BASE_EMBEDDED {
   Address fp() const { return state_.fp; }
   Address caller_sp() const { return GetCallerStackPointer(); }
 
+  // If this frame is optimized and was dynamically aligned, return its old
+  // unaligned frame pointer. When the frame is deoptimized its FP will shift
+  // up one word and become unaligned.
+  Address UnpaddedFP() const;
+
   Address pc() const { return *pc_address(); }
   void set_pc(Address pc) { *pc_address() = pc; }
 
   virtual void SetCallerFp(Address caller_fp) = 0;
 
+  // Manually changes the value of fp in this object.
+  void UpdateFp(Address fp) { state_.fp = fp; }
+
   Address* pc_address() const { return state_.pc_address; }
 
   // Get the id of this stack frame.
@@ -883,7 +893,7 @@ class StackFrameLocator BASE_EMBEDDED {
 // Reads all frames on the current stack and copies them into the current
 // zone memory.
-Vector<StackFrame*> CreateStackMap();
+Vector<StackFrame*> CreateStackMap(Zone* zone);
 
 } }  // namespace v8::internal

141  deps/v8/src/full-codegen.cc

@@ -303,7 +303,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
 
-  FullCodeGenerator cgen(&masm, info);
+  FullCodeGenerator cgen(&masm, info, isolate->zone());
   cgen.Generate();
   if (cgen.HasStackOverflow()) {
     ASSERT(!isolate->has_pending_exception());
@@ -316,7 +316,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   code->set_optimizable(info->IsOptimizable() &&
                         !info->function()->flags()->Contains(kDontOptimize) &&
                         info->function()->scope()->AllowsLazyRecompilation());
-  code->set_self_optimization_header(cgen.has_self_optimization_header_);
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
   cgen.PopulateTypeFeedbackCells(code);
@@ -332,9 +331,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // May be an empty handle.
-  if (!code.is_null()) {
-    isolate->runtime_profiler()->NotifyCodeGenerated(code->instruction_size());
-  }
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit && !code.is_null()) {
     GDBJITLineInfo* lineinfo =
@@ -444,14 +440,14 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
     }
   }
 #endif  // DEBUG
-  bailout_entries_.Add(entry);
+  bailout_entries_.Add(entry, zone());
 }
 
 
 void FullCodeGenerator::RecordTypeFeedbackCell(
     unsigned id, Handle<JSGlobalPropertyCell> cell) {
   TypeFeedbackCellEntry entry = { id, cell };
-  type_feedback_cells_.Add(entry);
+  type_feedback_cells_.Add(entry, zone());
 }
@@ -460,7 +456,7 @@ void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
   // state.
   ASSERT(masm_->pc_offset() > 0);
   BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
-  stack_checks_.Add(entry);
+  stack_checks_.Add(entry, zone());
 }
@@ -573,88 +569,91 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
 
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
-  int save_global_count = global_count_;
-  global_count_ = 0;
+  ZoneList<Handle<Object> >* saved_globals = globals_;
+  ZoneList<Handle<Object> > inner_globals(10, zone());
+  globals_ = &inner_globals;
 
   AstVisitor::VisitDeclarations(declarations);
 
-  // Batch declare global functions and variables.
-  if (global_count_ > 0) {
-    Handle<FixedArray> array =
-        isolate()->factory()->NewFixedArray(2 * global_count_, TENURED);
-    int length = declarations->length();
-    for (int j = 0, i = 0; i < length; i++) {
-      Declaration* decl = declarations->at(i);
-      Variable* var = decl->proxy()->var();
-      if (var->IsUnallocated()) {
-        array->set(j++, *(var->name()));
-        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-        if (fun_decl == NULL) {
-          if (var->binding_needs_init()) {
-            // In case this binding needs initialization use the hole.
-            array->set_the_hole(j++);
-          } else {
-            array->set_undefined(j++);
-          }
-        } else {
-          Handle<SharedFunctionInfo> function =
-              Compiler::BuildFunctionInfo(fun_decl->fun(), script());
-          // Check for stack-overflow exception.
-          if (function.is_null()) {
-            SetStackOverflow();
-            return;
-          }
-          array->set(j++, *function);
-        }
-      }
-    }
+  if (!globals_->is_empty()) {
     // Invoke the platform-dependent code generator to do the actual
-    // declaration the global functions and variables.
+    // declaration of the global functions and variables.
+    Handle<FixedArray> array =
+        isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
+    for (int i = 0; i < globals_->length(); ++i)
+      array->set(i, *globals_->at(i));
     DeclareGlobals(array);
   }
 
-  global_count_ = save_global_count;
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
-  // TODO(rossberg)
+  globals_ = saved_globals;
 }
 
 
 void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
-  // TODO(rossberg)
+  Handle<JSModule> instance = module->interface()->Instance();
+  ASSERT(!instance.is_null());
+
+  // Allocate a module context statically.
+  Block* block = module->body();
+  Scope* saved_scope = scope();
+  scope_ = block->scope();
+  Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+
+  // Generate code for module creation and linking.
+  Comment cmnt(masm_, "[ ModuleLiteral");
+  SetStatementPosition(block);
+
+  if (scope_info->HasContext()) {
+    // Set up module context.
+    __ Push(scope_info);
+    __ Push(instance);
+    __ CallRuntime(Runtime::kPushModuleContext, 2);
+    StoreToFrameField(
+        StandardFrameConstants::kContextOffset, context_register());
+  }
+
+  {
+    Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(scope_->declarations());
+  }
+
+  scope_ = saved_scope;
+  if (scope_info->HasContext()) {
+    // Pop module context.
+    LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+    // Update local stack frame context field.
+    StoreToFrameField(
+        StandardFrameConstants::kContextOffset, context_register());
+  }
+
+  // Populate the module instance object.
+  const PropertyAttributes attr =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
+  for (Interface::Iterator it = module->interface()->iterator();
+       !it.done(); it.Advance()) {
+    if (it.interface()->IsModule()) {
+      Handle<Object> value = it.interface()->Instance();
+      ASSERT(!value.is_null());
+      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
+    } else {
+      // TODO(rossberg): set proper getters instead of undefined...
+      // instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
+      Handle<Object> value(isolate()->heap()->undefined_value());
+      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
+    }
+  }
+  USE(instance->PreventExtensions());
 }
 
 
 void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
-  // TODO(rossberg)
+  // Nothing to do.
+  // The instance object is resolved statically through the module's interface.
 }
 
 
 void FullCodeGenerator::VisitModulePath(ModulePath* module) {
-  // TODO(rossberg)
+  // Nothing to do.
+  // The instance object is resolved statically through the module's interface.
 }
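The shape of global-declaration batching changes here: rather than counting unallocated globals and re-walking the declaration list to build a name/value array, each visitor appends handles to the zone-allocated globals_ list as it goes, and VisitDeclarations flattens the list into a FixedArray afterwards. A rough sketch of that control flow with plain C++ stand-ins for V8's handle types:

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    typedef std::pair<std::string, std::string> Global;  // name/value stand-in

    // The per-scope accumulator that replaces the old global_count_ counter.
    static std::vector<Global>* globals_ = NULL;

    // Platform visitors now append to globals_ instead of bumping a counter.
    static void DeclareGlobal(const std::string& name, const std::string& value) {
      globals_->push_back(Global(name, value));
    }

    static void VisitDeclarations(const std::vector<std::string>& names) {
      std::vector<Global>* saved_globals = globals_;  // allows nested scopes
      std::vector<Global> inner_globals;
      globals_ = &inner_globals;
      for (size_t i = 0; i < names.size(); ++i) DeclareGlobal(names[i], "undefined");
      if (!globals_->empty()) {
        // The real code copies the accumulated pairs into a FixedArray and
        // issues one DeclareGlobals runtime call for the whole batch.
      }
      globals_ = saved_globals;
    }

    int main() {
      std::vector<std::string> names;
      names.push_back("x");
      names.push_back("f");
      VisitDeclarations(names);
      return 0;
    }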
@@ -916,9 +915,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   Scope* saved_scope = scope();
   // Push a block context when entering a block with block scoped variables.
-  if (stmt->block_scope() != NULL) {
+  if (stmt->scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
-      scope_ = stmt->block_scope();
+      scope_ = stmt->scope();
       Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
       int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
       __ Push(scope_info);
@@ -945,7 +944,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 
   // Pop block context if necessary.
-  if (stmt->block_scope() != NULL) {
+  if (stmt->scope() != NULL) {
     LoadContextField(context_register(), Context::PREVIOUS_INDEX);
     // Update local stack frame context field.
     StoreToFrameField(StandardFrameConstants::kContextOffset,

54  deps/v8/src/full-codegen.h

@@ -77,28 +77,25 @@ class FullCodeGenerator: public AstVisitor {
     TOS_REG
   };
 
-  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
+  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
+                    Zone* zone)
       : masm_(masm),
         info_(info),
         scope_(info->scope()),
         nesting_stack_(NULL),
        loop_depth_(0),
-        global_count_(0),
+        globals_(NULL),
         context_(NULL),
         bailout_entries_(info->HasDeoptimizationSupport()
-                         ? info->function()->ast_node_count() : 0),
-        stack_checks_(2),  // There's always at least one.
+                         ? info->function()->ast_node_count() : 0, zone),
+        stack_checks_(2, zone),  // There's always at least one.
         type_feedback_cells_(info->HasDeoptimizationSupport()
-                             ? info->function()->ast_node_count() : 0),
+                             ? info->function()->ast_node_count() : 0, zone),
         ic_total_count_(0),
-        has_self_optimization_header_(false) { }
+        zone_(zone) { }
 
   static bool MakeCode(CompilationInfo* info);
 
-  // Returns the platform-specific size in bytes of the self-optimization
-  // header.
-  static int self_optimization_header_size();
-
   // Encode state and pc-offset as a BitField<type, start, size>.
   // Only use 30 bits because we encode the result as a smi.
   class StateField : public BitField<State, 0, 1> { };
@@ -113,6 +110,8 @@ class FullCodeGenerator: public AstVisitor {
     return NULL;
   }
 
+  Zone* zone() const { return zone_; }
+
  private:
   class Breakable;
   class Iteration;
@@ -207,7 +206,7 @@ class FullCodeGenerator: public AstVisitor {
     virtual ~NestedBlock() {}
 
     virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
-      if (statement()->AsBlock()->block_scope() != NULL) {
+      if (statement()->AsBlock()->scope() != NULL) {
         ++(*context_length);
       }
       return previous_;
@@ -241,7 +240,7 @@ class FullCodeGenerator: public AstVisitor {
   // The finally block of a try/finally statement.
   class Finally : public NestedStatement {
    public:
-    static const int kElementCount = 2;
+    static const int kElementCount = 5;
 
     explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
     virtual ~Finally() {}
@@ -418,12 +417,9 @@ class FullCodeGenerator: public AstVisitor {
                          Label* if_true,
                          Label* if_false);
 
-  // Platform-specific code for a variable, constant, or function
-  // declaration.  Functions have an initial value.
-  // Increments global_count_ for unallocated variables.
-  void EmitDeclaration(VariableProxy* proxy,
-                       VariableMode mode,
-                       FunctionLiteral* function);
+  // If enabled, emit debug code for checking that the current context is
+  // neither a with nor a catch context.
+  void EmitDebugCheckDeclarationContext(Variable* variable);
 
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
@@ -553,12 +549,8 @@ class FullCodeGenerator: public AstVisitor {
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   bool is_native() { return info_->is_native(); }
-  bool is_classic_mode() {
-    return language_mode() == CLASSIC_MODE;
-  }
-  LanguageMode language_mode() {
-    return function()->language_mode();
-  }
+  bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
+  LanguageMode language_mode() { return function()->language_mode(); }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return scope_; }
@@ -790,15 +782,15 @@ class FullCodeGenerator: public AstVisitor {
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
-  int global_count_;
+  ZoneList<Handle<Object> >* globals_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
   ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
   int ic_total_count_;
-  bool has_self_optimization_header_;
   Handle<FixedArray> handler_table_;
   Handle<JSGlobalPropertyCell> profiling_counter_;
+  Zone* zone_;
 
   friend class NestedStatement;
@@ -809,16 +801,16 @@ class FullCodeGenerator: public AstVisitor {
 // A map from property names to getter/setter pairs allocated in the zone.
 class AccessorTable: public TemplateHashMap<Literal,
                                             ObjectLiteral::Accessors,
-                                            ZoneListAllocationPolicy> {
+                                            ZoneAllocationPolicy> {
  public:
   explicit AccessorTable(Zone* zone) :
-      TemplateHashMap<Literal,
-                      ObjectLiteral::Accessors,
-                      ZoneListAllocationPolicy>(Literal::Match),
+      TemplateHashMap<Literal, ObjectLiteral::Accessors,
+                      ZoneAllocationPolicy>(Literal::Match,
+                                            ZoneAllocationPolicy(zone)),
       zone_(zone) { }
 
   Iterator lookup(Literal* literal) {
-    Iterator it = find(literal, true);
+    Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
     if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
     return it;
   }
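AccessorTable is the consumer side of the allocation-policy rework visible in hashmap.h below: the policy becomes an object carrying its zone, passed at construction and again at every inserting find. A minimal sketch contrasting a stateless policy with a stateful one; all names here are invented for illustration:

    #include <cstddef>
    #include <cstdlib>

    // A trivial arena; stands in for v8::internal::Zone.
    class Arena {
     public:
      Arena() : top_(buffer_) {}
      void* Allocate(size_t size) {
        char* result = top_;
        top_ += (size + 7) & ~static_cast<size_t>(7);
        return result;
      }
     private:
      char buffer_[4096];
      char* top_;
    };

    // Stateless policy: the old shape, with all behavior reachable from the type.
    struct FreeStorePolicy {
      void* New(size_t size) { return std::malloc(size); }
      static void Delete(void* p) { std::free(p); }
    };

    // Stateful policy: carries its arena, which is why the hash map now accepts
    // a policy instance (with a default argument) instead of naming only a type.
    class ArenaPolicy {
     public:
      explicit ArenaPolicy(Arena* arena) : arena_(arena) {}
      void* New(size_t size) { return arena_->Allocate(size); }
      static void Delete(void*) {}  // arena memory is reclaimed in bulk
     private:
      Arena* arena_;
    };

    int main() {
      Arena arena;
      ArenaPolicy policy(&arena);
      int* a = static_cast<int*>(policy.New(sizeof(int)));
      *a = 0;
      void* b = FreeStorePolicy().New(16);
      FreeStorePolicy::Delete(b);
      return *a;
    }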

15  deps/v8/src/func-name-inferrer.cc

@@ -34,11 +34,12 @@
 namespace v8 {
 namespace internal {
 
-FuncNameInferrer::FuncNameInferrer(Isolate* isolate)
+FuncNameInferrer::FuncNameInferrer(Isolate* isolate, Zone* zone)
     : isolate_(isolate),
-      entries_stack_(10),
-      names_stack_(5),
-      funcs_to_infer_(4) {
+      entries_stack_(10, zone),
+      names_stack_(5, zone),
+      funcs_to_infer_(4, zone),
+      zone_(zone) {
 }
@@ -48,21 +49,21 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
   // and starts with a capital letter.
   if (name->length() > 0 && Runtime::IsUpperCaseChar(
       isolate()->runtime_state(), name->Get(0))) {
-    names_stack_.Add(Name(name, kEnclosingConstructorName));
+    names_stack_.Add(Name(name, kEnclosingConstructorName), zone());
   }
 }
 
 
 void FuncNameInferrer::PushLiteralName(Handle<String> name) {
   if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
-    names_stack_.Add(Name(name, kLiteralName));
+    names_stack_.Add(Name(name, kLiteralName), zone());
   }
 }
 
 
 void FuncNameInferrer::PushVariableName(Handle<String> name) {
   if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
-    names_stack_.Add(Name(name, kVariableName));
+    names_stack_.Add(Name(name, kVariableName), zone());
   }
 }

10  deps/v8/src/func-name-inferrer.h

@@ -45,7 +45,7 @@ class Isolate;
 // a name.
 class FuncNameInferrer : public ZoneObject {
  public:
-  explicit FuncNameInferrer(Isolate* isolate);
+  FuncNameInferrer(Isolate* isolate, Zone* zone);
 
   // Returns whether we have entered name collection state.
   bool IsOpen() const { return !entries_stack_.is_empty(); }
@@ -55,7 +55,7 @@ class FuncNameInferrer : public ZoneObject {
   // Enters name collection state.
   void Enter() {
-    entries_stack_.Add(names_stack_.length());
+    entries_stack_.Add(names_stack_.length(), zone());
   }
 
   // Pushes an encountered name onto names stack when in collection state.
@@ -66,7 +66,7 @@ class FuncNameInferrer : public ZoneObject {
   // Adds a function to infer name for.
   void AddFunction(FunctionLiteral* func_to_infer) {
     if (IsOpen()) {
-      funcs_to_infer_.Add(func_to_infer);
+      funcs_to_infer_.Add(func_to_infer, zone());
     }
   }
@@ -88,6 +88,8 @@ class FuncNameInferrer : public ZoneObject {
   void Leave() {
     ASSERT(IsOpen());
     names_stack_.Rewind(entries_stack_.RemoveLast());
+    if (entries_stack_.is_empty())
+      funcs_to_infer_.Clear();
   }
 
  private:
@@ -103,6 +105,7 @@ class FuncNameInferrer : public ZoneObject {
   };
 
   Isolate* isolate() { return isolate_; }
+  Zone* zone() const { return zone_; }
 
   // Constructs a full name in dotted notation from gathered names.
   Handle<String> MakeNameFromStack();
@@ -117,6 +120,7 @@ class FuncNameInferrer : public ZoneObject {
   ZoneList<int> entries_stack_;
   ZoneList<Name> names_stack_;
   ZoneList<FunctionLiteral*> funcs_to_infer_;
+  Zone* zone_;
 
   DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
 };

6  deps/v8/src/handles.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -729,9 +729,9 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
       Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
+    if (descs->IsProperty(i) && !descs->GetDetails(i).IsDontEnum()) {
       storage->set(index, descs->GetKey(i));
-      PropertyDetails details(descs->GetDetails(i));
+      PropertyDetails details = descs->GetDetails(i);
       sort_array->set(index, Smi::FromInt(details.index()));
       if (!indices.is_null()) {
         if (details.type() != FIELD) {

102  deps/v8/src/hashmap.h

@@ -40,9 +40,16 @@ class TemplateHashMapImpl {
  public:
   typedef bool (*MatchFun) (void* key1, void* key2);
 
+  // The default capacity.  This is used by the call sites which want
+  // to pass in a non-default AllocationPolicy but want to use the
+  // default value of capacity specified by the implementation.
+  static const uint32_t kDefaultHashMapCapacity = 8;
+
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
-  TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
+  TemplateHashMapImpl(MatchFun match,
+                      uint32_t capacity = kDefaultHashMapCapacity,
+                      AllocationPolicy allocator = AllocationPolicy());
 
   ~TemplateHashMapImpl();
@@ -60,10 +67,13 @@ class TemplateHashMapImpl {
   // but insert is set, a new entry is inserted with
   // corresponding key, key hash, and NULL value.
   // Otherwise, NULL is returned.
-  Entry* Lookup(void* key, uint32_t hash, bool insert);
+  Entry* Lookup(void* key, uint32_t hash, bool insert,
+                AllocationPolicy allocator = AllocationPolicy());
 
   // Removes the entry with matching key.
-  void Remove(void* key, uint32_t hash);
+  // It returns the value of the deleted entry
+  // or null if there is no value for such key.
+  void* Remove(void* key, uint32_t hash);
 
   // Empties the hash map (occupancy() == 0).
   void Clear();
@@ -95,29 +105,30 @@ class TemplateHashMapImpl {
   Entry* map_end() const { return map_ + capacity_; }
   Entry* Probe(void* key, uint32_t hash);
-  void Initialize(uint32_t capacity);
-  void Resize();
+  void Initialize(uint32_t capacity, AllocationPolicy allocator);
+  void Resize(AllocationPolicy allocator);
 };
 
 typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
 
-template<class P>
-TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
-                                            uint32_t initial_capacity) {
+template<class AllocationPolicy>
+TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
+    MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
   match_ = match;
-  Initialize(initial_capacity);
+  Initialize(initial_capacity, allocator);
 }
 
 
-template<class P>
-TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
-  P::Delete(map_);
+template<class AllocationPolicy>
+TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
+  AllocationPolicy::Delete(map_);
 }
 
 
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
-    void* key, uint32_t hash, bool insert) {
+template<class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+TemplateHashMapImpl<AllocationPolicy>::Lookup(
+    void* key, uint32_t hash, bool insert, AllocationPolicy allocator) {
   // Find a matching entry.
   Entry* p = Probe(key, hash);
   if (p->key != NULL) {
@@ -133,7 +144,7 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
 
     // Grow the map if we reached >= 80% occupancy.
     if (occupancy_ + occupancy_/4 >= capacity_) {
-      Resize();
+      Resize(allocator);
       p = Probe(key, hash);
     }
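The growth test stays in integer arithmetic: occupancy_ + occupancy_/4 >= capacity_ approximates occupancy >= 0.8 * capacity without floating point. A quick check of the boundary at the default capacity of 8:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t capacity = 8;  // kDefaultHashMapCapacity
      for (uint32_t occupancy = 0; occupancy <= capacity; ++occupancy) {
        bool grow = occupancy + occupancy / 4 >= capacity;
        std::printf("occupancy=%u grow=%d\n", occupancy, static_cast<int>(grow));
      }
      // First true at occupancy 7: 7 + 7/4 = 8 >= 8, the first value where
      // occupancy * 1.25 reaches the capacity.
      return 0;
    }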
@@ -145,15 +156,16 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
 }
 
 
-template<class P>
-void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
+template<class AllocationPolicy>
+void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
   // Lookup the entry for the key to remove.
   Entry* p = Probe(key, hash);
   if (p->key == NULL) {
     // Key not found; nothing to remove.
-    return;
+    return NULL;
   }
 
+  void* value = p->value;
   // To remove an entry we need to ensure that it does not create an empty
   // entry that will cause the search for another entry to stop too soon. If all
   // the entries between the entry to remove and the next empty slot have their
@@ -202,11 +214,12 @@ void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
 
-  // Clear the entry which is allowed to en emptied.
+  // Clear the entry which is allowed to be emptied.
   p->key = NULL;
   occupancy_--;
+  return value;
 }
 
 
-template<class P>
-void TemplateHashMapImpl<P>::Clear() {
+template<class AllocationPolicy>
+void TemplateHashMapImpl<AllocationPolicy>::Clear() {
   // Mark all entries as empty.
   const Entry* end = map_end();
   for (Entry* p = map_; p < end; p++) {
@@ -216,15 +229,16 @@ void TemplateHashMapImpl<P>::Clear() {
 }
 
 
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
+template<class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+TemplateHashMapImpl<AllocationPolicy>::Start() const {
   return Next(map_ - 1);
 }
 
 
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
-    const {
+template<class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
   const Entry* end = map_end();
   ASSERT(map_ - 1 <= p && p < end);
   for (p++; p < end; p++) {
} }
template<class P> template<class AllocationPolicy>
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key, typename TemplateHashMapImpl<AllocationPolicy>::Entry*
uint32_t hash) { TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) {
ASSERT(key != NULL); ASSERT(key != NULL);
ASSERT(IsPowerOf2(capacity_)); ASSERT(IsPowerOf2(capacity_));
@@ -258,10 +272,11 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
 }
 
 
-template<class P>
-void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
+template<class AllocationPolicy>
+void TemplateHashMapImpl<AllocationPolicy>::Initialize(
+    uint32_t capacity, AllocationPolicy allocator) {
   ASSERT(IsPowerOf2(capacity));
-  map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
+  map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
   if (map_ == NULL) {
     v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
     return;
@@ -271,24 +286,24 @@ void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
 }
 
 
-template<class P>
-void TemplateHashMapImpl<P>::Resize() {
+template<class AllocationPolicy>
+void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
   Entry* map = map_;
   uint32_t n = occupancy_;
 
   // Allocate larger map.
-  Initialize(capacity_ * 2);
+  Initialize(capacity_ * 2, allocator);
 
   // Rehash all current entries.
   for (Entry* p = map; n > 0; p++) {
     if (p->key != NULL) {
-      Lookup(p->key, p->hash, true)->value = p->value;
+      Lookup(p->key, p->hash, true, allocator)->value = p->value;
       n--;
     }
   }
 
   // Delete old map.
-  P::Delete(map);
+  AllocationPolicy::Delete(map);
 }
@@ -325,13 +340,18 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
   };
 
   TemplateHashMap(
-      typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
-    : TemplateHashMapImpl<AllocationPolicy>(match) { }
+      typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
+      AllocationPolicy allocator = AllocationPolicy())
+    : TemplateHashMapImpl<AllocationPolicy>(
+          match,
+          TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
+          allocator) { }
 
   Iterator begin() const { return Iterator(this, this->Start()); }
   Iterator end() const { return Iterator(this, NULL); }
-  Iterator find(Key* key, bool insert = false) {
-    return Iterator(this, this->Lookup(key, key->Hash(), insert));
+  Iterator find(Key* key, bool insert = false,
+                AllocationPolicy allocator = AllocationPolicy()) {
+    return Iterator(this, this->Lookup(key, key->Hash(), insert, allocator));
   }
 };
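With Remove now reporting the evicted value, callers can find and unlink an entry with a single probe instead of a Lookup followed by a Remove. A usage sketch against the HashMap typedef above; the match function and hash are invented for the example:

    #include <cstdint>
    #include <cstring>

    static bool MatchCStrings(void* a, void* b) {
      return std::strcmp(static_cast<char*>(a), static_cast<char*>(b)) == 0;
    }

    static uint32_t HashCString(const char* s) {
      uint32_t h = 2166136261u;  // FNV-1a, just for the example
      for (; *s != '\0'; ++s) h = (h ^ static_cast<uint8_t>(*s)) * 16777619u;
      return h;
    }

    static void Demo() {
      HashMap map(MatchCStrings);  // default capacity, default allocator
      char key[] = "answer";
      static int value = 42;
      map.Lookup(key, HashCString(key), true)->value = &value;
      // One probe both finds and unlinks the entry; NULL means "was absent".
      void* removed = map.Remove(key, HashCString(key));
      // removed == &value here; a second Remove would return NULL.
    }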

27  deps/v8/src/heap-inl.h

@@ -460,15 +460,16 @@ MaybeObject* Heap::PrepareForCompare(String* str) {
 }
 
 
-int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
+    intptr_t change_in_bytes) {
   ASSERT(HasBeenSetUp());
-  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
+  intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
   if (change_in_bytes >= 0) {
     // Avoid overflow.
     if (amount > amount_of_external_allocated_memory_) {
       amount_of_external_allocated_memory_ = amount;
     }
-    int amount_since_last_global_gc =
+    intptr_t amount_since_last_global_gc =
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
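Widening the external-memory counters from int to intptr_t matters for 64-bit embedders: more than 2 GB of externally allocated memory (node Buffers being an obvious source) would wrap a 32-bit running total. A small demonstration of the failure mode, assuming a 64-bit host:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t total = 0;
      const int64_t chunk = 512 * 1024 * 1024;  // each report adds 512 MB
      for (int i = 0; i < 5; ++i) total += chunk;
      // 2.5 GB fits comfortably in a 64-bit intptr_t...
      std::printf("wide   = %lld\n", static_cast<long long>(total));
      // ...but truncated to the old 32-bit counter the running total goes
      // negative (wraps modulo 2^32 on typical targets).
      std::printf("narrow = %d\n", static_cast<int32_t>(total));
      return 0;
    }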
@@ -594,12 +595,24 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
 void ExternalStringTable::Verify() {
 #ifdef DEBUG
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    ASSERT(heap_->InNewSpace(new_space_strings_[i]));
-    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
+    Object* obj = Object::cast(new_space_strings_[i]);
+    // TODO(yangguo): check that the object is indeed an external string.
+    ASSERT(heap_->InNewSpace(obj));
+    ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
+    if (obj->IsExternalAsciiString()) {
+      ExternalAsciiString* string = ExternalAsciiString::cast(obj);
+      ASSERT(String::IsAscii(string->GetChars(), string->length()));
+    }
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
-    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
+    Object* obj = Object::cast(old_space_strings_[i]);
+    // TODO(yangguo): check that the object is indeed an external string.
+    ASSERT(!heap_->InNewSpace(obj));
+    ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
+    if (obj->IsExternalAsciiString()) {
+      ExternalAsciiString* string = ExternalAsciiString::cast(obj);
+      ASSERT(String::IsAscii(string->GetChars(), string->length()));
+    }
   }
 #endif
 }
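The new assertions defend the invariant that an external ASCII string's backing store really is 7-bit data. The predicate is a single pass over the bytes; a standalone sketch, not V8's actual String::IsAscii:

    // Returns true when every byte is 7-bit ASCII (high bit clear).
    static bool IsAsciiOnly(const char* chars, int length) {
      for (int i = 0; i < length; ++i) {
        if (static_cast<unsigned char>(chars[i]) & 0x80) return false;
      }
      return true;
    }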

50  deps/v8/src/heap-profiler.cc

@@ -33,7 +33,6 @@
 namespace v8 {
 namespace internal {
 
-
 HeapProfiler::HeapProfiler()
     : snapshots_(new HeapSnapshotsCollection()),
       next_snapshot_uid_(1) {
@@ -86,6 +85,24 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
 }
 
 
+void HeapProfiler::StartHeapObjectsTracking() {
+  ASSERT(Isolate::Current()->heap_profiler() != NULL);
+  Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
+}
+
+
+void HeapProfiler::StopHeapObjectsTracking() {
+  ASSERT(Isolate::Current()->heap_profiler() != NULL);
+  Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
+}
+
+
+SnapshotObjectId HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
+  ASSERT(Isolate::Current()->heap_profiler() != NULL);
+  return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
+}
+
+
 void HeapProfiler::DefineWrapperClass(
     uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
   ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
@@ -136,6 +153,28 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
   return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
 }
 
+
+void HeapProfiler::StartHeapObjectsTrackingImpl() {
+  snapshots_->StartHeapObjectsTracking();
+}
+
+
+SnapshotObjectId HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
+  return snapshots_->PushHeapObjectsStats(stream);
+}
+
+
+void HeapProfiler::StopHeapObjectsTrackingImpl() {
+  snapshots_->StopHeapObjectsTracking();
+}
+
+
+size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
+  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+  ASSERT(profiler != NULL);
+  size_t size = profiler->snapshots_->GetUsedMemorySize();
+  return size;
+}
+
 
 int HeapProfiler::GetSnapshotsCount() {
   HeapProfiler* profiler = Isolate::Current()->heap_profiler();
@@ -158,6 +197,15 @@ HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
 }
 
 
+SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
+  if (!obj->IsHeapObject())
+    return v8::HeapProfiler::kUnknownObjectId;
+  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+  ASSERT(profiler != NULL);
+  return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
+}
+
+
 void HeapProfiler::DeleteAllSnapshots() {
   HeapProfiler* profiler = Isolate::Current()->heap_profiler();
   ASSERT(profiler != NULL);

13  deps/v8/src/heap-profiler.h

@@ -44,22 +44,27 @@ class HeapSnapshotsCollection;
   } \
   } while (false)
 
-// The HeapProfiler writes data to the log files, which can be postprocessed
-// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
 class HeapProfiler {
  public:
   static void SetUp();
   static void TearDown();
 
+  static size_t GetMemorySizeUsedByProfiler();
+
   static HeapSnapshot* TakeSnapshot(const char* name,
                                     int type,
                                     v8::ActivityControl* control);
   static HeapSnapshot* TakeSnapshot(String* name,
                                     int type,
                                     v8::ActivityControl* control);
+
+  static void StartHeapObjectsTracking();
+  static void StopHeapObjectsTracking();
+  static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+
   static int GetSnapshotsCount();
   static HeapSnapshot* GetSnapshot(int index);
   static HeapSnapshot* FindSnapshot(unsigned uid);
+  static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
   static void DeleteAllSnapshots();
 
   void ObjectMoveEvent(Address from, Address to);
@@ -84,6 +89,10 @@ class HeapProfiler {
                                  v8::ActivityControl* control);
   void ResetSnapshots();
 
+  void StartHeapObjectsTrackingImpl();
+  void StopHeapObjectsTrackingImpl();
+  SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
+
   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
   List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;

325  deps/v8/src/heap.cc

@@ -42,6 +42,7 @@
 #include "natives.h"
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
+#include "once.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
@@ -60,8 +61,6 @@
 namespace v8 {
 namespace internal {
 
-static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
-
 
 Heap::Heap()
     : isolate_(NULL),
@@ -177,6 +176,9 @@ Heap::Heap()
   global_contexts_list_ = NULL;
   mark_compact_collector_.heap_ = this;
   external_string_table_.heap_ = this;
+  // Put a dummy entry in the remembered pages so we can find the list in
+  // the minidump even if there are no real unmapped pages.
+  RememberUnmappedPage(NULL, false);
 }
@@ -244,12 +246,17 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                               const char** reason) {
   // Is global GC requested?
-  if (space != NEW_SPACE || FLAG_gc_global) {
+  if (space != NEW_SPACE) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }
 
+  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
+    *reason = "GC in old space forced by flags";
+    return MARK_COMPACTOR;
+  }
+
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
@@ -806,7 +813,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
 
   UpdateSurvivalRateTrend(start_new_space_size);
 
-  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
 
   if (high_survival_rate_during_scavenges &&
       IsStableOrIncreasingSurvivalTrend()) {
@@ -1130,6 +1137,27 @@ void PromotionQueue::RelocateQueueHead() {
 }
 
 
+class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
+
+  virtual Object* RetainAs(Object* object) {
+    if (!heap_->InFromSpace(object)) {
+      return object;
+    }
+
+    MapWord map_word = HeapObject::cast(object)->map_word();
+    if (map_word.IsForwardingAddress()) {
+      return map_word.ToForwardingAddress();
+    }
+    return NULL;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
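The retainer answers one question per weak reference after a scavenge: the object either survived in place, survived by being evacuated (leaving a forwarding map word behind in from-space), or died. A tiny model of that decision with stand-in types:

    #include <cstddef>

    // Stand-ins: a from-space object either died or was copied and left a
    // forwarding pointer behind.
    struct Obj {
      Obj* forwarding;     // non-NULL after the object was evacuated
      bool in_from_space;
    };

    // Mirrors ScavengeWeakObjectRetainer::RetainAs.
    static Obj* RetainAs(Obj* object) {
      if (!object->in_from_space) return object;  // untouched by this scavenge
      if (object->forwarding != NULL) return object->forwarding;  // moved
      return NULL;  // dead: the weak list drops this entry
    }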
@@ -1228,6 +1256,9 @@ void Heap::Scavenge() {
   }
   incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
+  ScavengeWeakObjectRetainer weak_object_retainer(this);
+  ProcessWeakReferences(&weak_object_retainer);
+
   ASSERT(new_space_front == new_space_.top());
 
   // Set age mark.
static Object* ProcessFunctionWeakReferences(Heap* heap, static Object* ProcessFunctionWeakReferences(Heap* heap,
Object* function, Object* function,
WeakObjectRetainer* retainer) { WeakObjectRetainer* retainer,
bool record_slots) {
Object* undefined = heap->undefined_value(); Object* undefined = heap->undefined_value();
Object* head = undefined; Object* head = undefined;
JSFunction* tail = NULL; JSFunction* tail = NULL;
@@ -1331,6 +1363,12 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
       // Subsequent elements in the list.
       ASSERT(tail != NULL);
       tail->set_next_function_link(retain);
+      if (record_slots) {
+        Object** next_function =
+            HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
+        heap->mark_compact_collector()->RecordSlot(
+            next_function, next_function, retain);
+      }
     }
     // Retained function is new tail.
     candidate_function = reinterpret_cast<JSFunction*>(retain);
@@ -1359,6 +1397,15 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   Object* head = undefined;
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
+
+  // We don't record weak slots during marking or scavenges.
+  // Instead we do it once when we complete a mark-compact cycle.
+  // Note that the write barrier has no effect if we are already in the middle
+  // of a compacting mark-sweep cycle, so we have to record slots manually.
+  bool record_slots =
+      gc_state() == MARK_COMPACT &&
+      mark_compact_collector()->is_compacting();
+
   while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
@@ -1374,6 +1421,14 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
                                        Context::NEXT_CONTEXT_LINK,
                                        retain,
                                        UPDATE_WRITE_BARRIER);
+      if (record_slots) {
+        Object** next_context =
+            HeapObject::RawField(
+                tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
+        mark_compact_collector()->RecordSlot(
+            next_context, next_context, retain);
+      }
     }
     // Retained context is new tail.
     candidate_context = reinterpret_cast<Context*>(retain);
@@ -1386,11 +1441,19 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
         ProcessFunctionWeakReferences(
             this,
             candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
-            retainer);
+            retainer,
+            record_slots);
     candidate_context->set_unchecked(this,
                                      Context::OPTIMIZED_FUNCTIONS_LIST,
                                      function_list_head,
                                      UPDATE_WRITE_BARRIER);
+    if (record_slots) {
+      Object** optimized_functions =
+          HeapObject::RawField(
+              tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
+      mark_compact_collector()->RecordSlot(
+          optimized_functions, optimized_functions, function_list_head);
+    }
   }
 
   // Move to next element in the list.
@@ -1490,6 +1553,27 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
 }
 
 
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+
+
+INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
+                                              HeapObject* object,
+                                              int size));
+
+static HeapObject* EnsureDoubleAligned(Heap* heap,
+                                       HeapObject* object,
+                                       int size) {
+  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+    heap->CreateFillerObjectAt(object->address(), kPointerSize);
+    return HeapObject::FromAddress(object->address() + kPointerSize);
+  } else {
+    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
+                               kPointerSize);
+    return object;
+  }
+}
+
+
 enum LoggingAndProfiling {
   LOGGING_AND_PROFILING_ENABLED,
   LOGGING_AND_PROFILING_DISABLED
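EvacuateObject over-allocates double-aligned objects by one pointer, and EnsureDoubleAligned spends the spare word on whichever side needs it: a filler in front when the raw address is misaligned, otherwise a filler at the tail. The arithmetic, worked for a 32-bit heap where kPointerSize is 4 and kDoubleAlignmentMask is 7:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPointerSize = 4;          // 32-bit heap
      const uintptr_t kDoubleAlignmentMask = 7;  // 8-byte alignment
      // An allocation of size + kPointerSize starting at any pointer-aligned
      // address can always yield an 8-byte-aligned object.
      for (uintptr_t addr = 0x1000; addr <= 0x1004; addr += kPointerSize) {
        uintptr_t object = addr;
        if ((object & kDoubleAlignmentMask) != 0) {
          object += kPointerSize;  // skip a filler word in front
        }
        std::printf("raw=%#lx aligned=%#lx\n",
                    static_cast<unsigned long>(addr),
                    static_cast<unsigned long>(object));
      }
      return 0;
    }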
@@ -1613,7 +1697,10 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
   }
 
-  template<ObjectContents object_contents, SizeRestriction size_restriction>
+
+  template<ObjectContents object_contents,
+           SizeRestriction size_restriction,
+           int alignment>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object,
@@ -1622,19 +1709,26 @@ class ScavengingVisitor : public StaticVisitorBase {
                  (object_size <= Page::kMaxNonCodeHeapObjectSize));
     SLOW_ASSERT(object->Size() == object_size);
 
+    int allocation_size = object_size;
+    if (alignment != kObjectAlignment) {
+      ASSERT(alignment == kDoubleAlignment);
+      allocation_size += kPointerSize;
+    }
+
     Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRaw(object_size,
+          (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
+        maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
                                                      NOT_EXECUTABLE);
       } else {
         if (object_contents == DATA_OBJECT) {
-          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
+          maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
         } else {
-          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
+          maybe_result =
+              heap->old_pointer_space()->AllocateRaw(allocation_size);
         }
       }
@@ -1642,6 +1736,10 @@ class ScavengingVisitor : public StaticVisitorBase {
       if (maybe_result->ToObject(&result)) {
         HeapObject* target = HeapObject::cast(result);
 
+        if (alignment != kObjectAlignment) {
+          target = EnsureDoubleAligned(heap, target, allocation_size);
+        }
+
         // Order is important: slot might be inside of the target if target
         // was allocated over a dead object and slot comes from the store
         // buffer.
@@ -1649,18 +1747,27 @@ class ScavengingVisitor : public StaticVisitorBase {
         MigrateObject(heap, object, target, object_size);
 
         if (object_contents == POINTER_OBJECT) {
-          heap->promotion_queue()->insert(target, object_size);
+          if (map->instance_type() == JS_FUNCTION_TYPE) {
+            heap->promotion_queue()->insert(
+                target, JSFunction::kNonWeakFieldsEndOffset);
+          } else {
+            heap->promotion_queue()->insert(target, object_size);
+          }
         }
 
         heap->tracer()->increment_promoted_objects_size(object_size);
         return;
       }
     }
 
-    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+    MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
     Object* result = allocation->ToObjectUnchecked();
     HeapObject* target = HeapObject::cast(result);
 
+    if (alignment != kObjectAlignment) {
+      target = EnsureDoubleAligned(heap, target, allocation_size);
+    }
+
     // Order is important: slot might be inside of the target if target
     // was allocated over a dead object and slot comes from the store
     // buffer.
@@ -1696,7 +1803,7 @@ class ScavengingVisitor : public StaticVisitorBase {
                                         HeapObject** slot,
                                         HeapObject* object) {
     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
-                                                 slot,
-                                                 object,
-                                                 object_size);
+    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
+                                                                   slot,
+                                                                   object,
+                                                                   object_size);
@@ -1708,10 +1815,11 @@ class ScavengingVisitor : public StaticVisitorBase {
                                               HeapObject* object) {
     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
     int object_size = FixedDoubleArray::SizeFor(length);
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
-                                              slot,
-                                              object,
-                                              object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
+        map,
+        slot,
+        object,
+        object_size);
   }
@@ -1719,7 +1827,8 @@ class ScavengingVisitor : public StaticVisitorBase {
                                        HeapObject** slot,
                                        HeapObject* object) {
     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+        map, slot, object, object_size);
   }
@@ -1728,7 +1837,8 @@ class ScavengingVisitor : public StaticVisitorBase {
                                                HeapObject* object) {
     int object_size = SeqAsciiString::cast(object)->
         SeqAsciiStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+        map, slot, object, object_size);
   }
@@ -1737,7 +1847,8 @@ class ScavengingVisitor : public StaticVisitorBase {
                                                  HeapObject* object) {
     int object_size = SeqTwoByteString::cast(object)->
         SeqTwoByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+        map, slot, object, object_size);
   }
@@ -1780,7 +1891,8 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
 
     int object_size = ConsString::kSize;
-    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
+    EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
+        map, slot, object, object_size);
   }
 
   template<ObjectContents object_contents>
@@ -1790,14 +1902,16 @@ class ScavengingVisitor : public StaticVisitorBase {
     static inline void VisitSpecialized(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
-      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+          map, slot, object, object_size);
     }
 
     static inline void Visit(Map* map,
                              HeapObject** slot,
                              HeapObject* object) {
       int object_size = map->instance_size();
-      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+          map, slot, object, object_size);
     }
   };
@@ -1914,7 +2028,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->init_prototype_transitions(undefined_value());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
@@ -2053,15 +2167,15 @@ bool Heap::CreateInitialMaps() {
   // Fix the instance_descriptors for the existing maps.
   meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->set_prototype_transitions(empty_fixed_array());
+  meta_map()->init_prototype_transitions(undefined_value());
 
   fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->set_prototype_transitions(empty_fixed_array());
+  fixed_array_map()->init_prototype_transitions(undefined_value());
 
   oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
-  oddball_map()->set_prototype_transitions(empty_fixed_array());
+  oddball_map()->init_prototype_transitions(undefined_value());
 
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -2360,7 +2474,7 @@ bool Heap::CreateApiObjects() {
   // bottleneck to trap the Smi-only -> fast elements transition, and there
   // appears to be no benefit for optimizing this case.
   Map* new_neander_map = Map::cast(obj);
-  new_neander_map->set_elements_kind(FAST_ELEMENTS);
+  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
   set_neander_map(new_neander_map);
 
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@ -2908,8 +3022,8 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER); share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER); share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0); share->set_ast_node_count(0);
share->set_deopt_counter(FLAG_deopt_every_n_times); share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
share->set_ic_age(0); share->set_counters(0);
// Set integer fields (smi or int, depending on the architecture). // Set integer fields (smi or int, depending on the architecture).
share->set_length(0); share->set_length(0);
@ -2941,6 +3055,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
} }
JSMessageObject* message = JSMessageObject::cast(result); JSMessageObject* message = JSMessageObject::cast(result);
message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER); message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->initialize_elements();
message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER); message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->set_type(type); message->set_type(type);
message->set_arguments(arguments); message->set_arguments(arguments);
@ -3217,6 +3332,8 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
return Failure::OutOfMemoryException(); return Failure::OutOfMemoryException();
} }
ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
Map* map = external_ascii_string_map(); Map* map = external_ascii_string_map();
Object* result; Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE); { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@ -3554,7 +3671,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Map* new_map; Map* new_map;
ASSERT(object_function->has_initial_map()); ASSERT(object_function->has_initial_map());
{ MaybeObject* maybe_map = { MaybeObject* maybe_map =
object_function->initial_map()->CopyDropTransitions(); object_function->initial_map()->CopyDropTransitions(
DescriptorArray::MAY_BE_SHARED);
if (!maybe_map->To<Map>(&new_map)) return maybe_map; if (!maybe_map->To<Map>(&new_map)) return maybe_map;
} }
Object* prototype; Object* prototype;
@ -3642,7 +3760,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Check the state of the object // Check the state of the object
ASSERT(JSObject::cast(result)->HasFastProperties()); ASSERT(JSObject::cast(result)->HasFastProperties());
ASSERT(JSObject::cast(result)->HasFastElements()); ASSERT(JSObject::cast(result)->HasFastObjectElements());
return result; return result;
} }
@ -3687,7 +3805,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_inobject_properties(in_object_properties); map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties); map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype); map->set_prototype(prototype);
ASSERT(map->has_fast_elements()); ASSERT(map->has_fast_object_elements());
// If the function has only simple this property assignments add // If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object // field descriptors for these to the initial map as the object
@ -3702,7 +3820,8 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
fun->shared()->ForbidInlineConstructor(); fun->shared()->ForbidInlineConstructor();
} else { } else {
DescriptorArray* descriptors; DescriptorArray* descriptors;
{ MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count); { MaybeObject* maybe_descriptors_obj =
DescriptorArray::Allocate(count, DescriptorArray::MAY_BE_SHARED);
if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) { if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
return maybe_descriptors_obj; return maybe_descriptors_obj;
} }
@ -3804,8 +3923,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj), InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties), FixedArray::cast(properties),
map); map);
ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() || ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
JSObject::cast(obj)->HasFastElements());
return obj; return obj;
} }
@ -3833,6 +3951,16 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
} }
MaybeObject* Heap::AllocateJSModule() {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
if (!maybe_map->To(&map)) return maybe_map;
// Allocate the object based on the map.
return AllocateJSObjectFromMap(map, TENURED);
}
MaybeObject* Heap::AllocateJSArrayAndStorage( MaybeObject* Heap::AllocateJSArrayAndStorage(
ElementsKind elements_kind, ElementsKind elements_kind,
int length, int length,
@ -3840,6 +3968,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode, ArrayStorageAllocationMode mode,
PretenureFlag pretenure) { PretenureFlag pretenure) {
ASSERT(capacity >= length); ASSERT(capacity >= length);
if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure); MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array; JSArray* array;
if (!maybe_array->To(&array)) return maybe_array; if (!maybe_array->To(&array)) return maybe_array;
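
The hunk above is the allocation-side half of the packed/holey elements-kind split this upgrade introduces: an array whose backing store is initialized with holes is tagged with the holey variant of the requested kind up front, instead of transitioning on first access. A minimal sketch of the mapping, using made-up enumerator names and values rather than V8's actual ElementsKind ordering:

    #include <cassert>

    // Each packed kind has a holey counterpart (illustrative only).
    enum ElementsKindSketch {
      kFastSmi, kFastHoleySmi,
      kFast, kFastHoley,
      kFastDouble, kFastHoleyDouble
    };

    ElementsKindSketch GetHoleyKind(ElementsKindSketch kind) {
      switch (kind) {
        case kFastSmi:    return kFastHoleySmi;
        case kFast:       return kFastHoley;
        case kFastDouble: return kFastHoleyDouble;
        default:          return kind;  // already a holey kind
      }
    }

    int main() {
      assert(GetHoleyKind(kFastSmi) == kFastHoleySmi);
      assert(GetHoleyKind(kFastHoley) == kFastHoley);
    }
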
@@ -3860,8 +3991,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
     }
   } else {
-    ASSERT(elements_kind == FAST_ELEMENTS ||
-           elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
       maybe_elms = AllocateUninitializedFixedArray(capacity);
     } else {
@@ -3887,6 +4017,7 @@ MaybeObject* Heap::AllocateJSArrayWithElements(

   array->set_elements(elements);
   array->set_length(Smi::FromInt(elements->length()));
+  array->ValidateElements();
   return array;
 }
@@ -3969,7 +4100,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   // Fill these accessors into the dictionary.
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details(descs->GetDetails(i));
+    PropertyDetails details = descs->GetDetails(i);
     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
     PropertyDetails d =
         PropertyDetails(details.attributes(), CALLBACKS, details.index());
@@ -4371,6 +4502,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
+
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    // Initialize string's content to ensure ASCII-ness (character range 0-127)
+    // as required when verifying the heap.
+    char* dest = SeqAsciiString::cast(result)->GetChars();
+    memset(dest, 0x0F, length * kCharSize);
+  }
+#endif  // DEBUG
+
   return result;
 }
@@ -4417,13 +4558,13 @@ MaybeObject* Heap::AllocateJSArray(
   Context* global_context = isolate()->context()->global_context();
   JSFunction* array_function = global_context->array_function();
   Map* map = array_function->initial_map();
-  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-    map = Map::cast(global_context->double_js_array_map());
-  } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
-    map = Map::cast(global_context->object_js_array_map());
-  } else {
-    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
-    ASSERT(map == global_context->smi_js_array_map());
+  Object* maybe_map_array = global_context->js_array_maps();
+  if (!maybe_map_array->IsUndefined()) {
+    Object* maybe_transitioned_map =
+        FixedArray::cast(maybe_map_array)->get(elements_kind);
+    if (!maybe_transitioned_map->IsUndefined()) {
+      map = Map::cast(maybe_transitioned_map);
+    }
   }

   return AllocateJSObjectFromMap(map, pretenure);
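
The rewritten lookup replaces three dedicated context slots (smi_js_array_map, double_js_array_map, object_js_array_map) with a single js_array_maps FixedArray indexed by elements kind, so any number of kinds can share one cache. A rough sketch of that lookup shape, assuming a plain pointer array where an empty slot means "fall back to the initial map" (MapCache and Lookup are hypothetical names, not V8 API):

    // Forward declaration stands in for V8's Map.
    struct Map;

    struct MapCache {
      static const int kNumKinds = 6;
      Map* maps[kNumKinds];  // one transitioned map per elements kind, or nullptr

      Map* Lookup(int elements_kind, Map* fallback) const {
        Map* m = maps[elements_kind];
        return m != nullptr ? m : fallback;  // nullptr plays undefined_value()
      }
    };
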
@@ -4662,6 +4803,11 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   int size = FixedDoubleArray::SizeFor(length);

+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+
   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
     // Too big for new space.
     space = LO_SPACE;
@@ -4674,7 +4820,12 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
   AllocationSpace retry_space =
       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;

-  return AllocateRaw(size, space, retry_space);
+  HeapObject* object;
+  { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+    if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
+  }
+
+  return EnsureDoubleAligned(this, object, size);
 }
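
The alignment trick here: on 32-bit targets the allocation is padded by one pointer-size word so that EnsureDoubleAligned has room to bump the object to an 8-byte boundary (presumably plugging the skipped word with a filler). A minimal sketch of the address arithmetic, assuming 4-byte pointers; AlignForDoubles is a hypothetical helper, not V8's:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kDoubleAlignment = 8;

    uintptr_t AlignForDoubles(uintptr_t raw) {
      // The extra word reserved by "size += kPointerSize" guarantees the
      // bumped address still fits inside the allocation.
      return (raw + kDoubleAlignment - 1) & ~(kDoubleAlignment - 1);
    }

    int main() {
      assert(AlignForDoubles(0x1004) == 0x1008);  // misaligned: bump by 4
      assert(AlignForDoubles(0x1008) == 0x1008);  // already aligned: no-op
    }
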
@@ -4698,15 +4849,29 @@ MaybeObject* Heap::AllocateGlobalContext() {
   }
   Context* context = reinterpret_cast<Context*>(result);
   context->set_map_no_write_barrier(global_context_map());
-  context->set_smi_js_array_map(undefined_value());
-  context->set_double_js_array_map(undefined_value());
-  context->set_object_js_array_map(undefined_value());
+  context->set_js_array_maps(undefined_value());
   ASSERT(context->IsGlobalContext());
   ASSERT(result->IsContext());
   return result;
 }


+MaybeObject* Heap::AllocateModuleContext(Context* previous,
+                                         ScopeInfo* scope_info) {
+  Object* result;
+  { MaybeObject* maybe_result =
+        AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map_no_write_barrier(module_context_map());
+  context->set_previous(previous);
+  context->set_extension(scope_info);
+  context->set_global(previous->global());
+  return context;
+}
+
+
 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
   Object* result;
@@ -4849,8 +5014,10 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {

 bool Heap::IdleNotification(int hint) {
   const int kMaxHint = 1000;
-  intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
-  // The size factor is in range [3..100].
+  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
+  // The size factor is in range [5..250]. The numbers here are chosen from
+  // experiments. If you changes them, make sure to test with
+  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
   intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;

   if (contexts_disposed_ > 0) {
@@ -4874,11 +5041,14 @@ bool Heap::IdleNotification(int hint) {
     // Take into account that we might have decided to delay full collection
     // because incremental marking is in progress.
     ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+    // After context disposal there is likely a lot of garbage remaining, reset
+    // the idle notification counters in order to trigger more incremental GCs
+    // on subsequent idle notifications.
+    StartIdleRound();
     return false;
   }

-  if (hint >= kMaxHint || !FLAG_incremental_marking ||
-      FLAG_expose_gc || Serializer::enabled()) {
+  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
     return IdleGlobalGC();
   }
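
A worked example of the retuned step-size arithmetic: a 100 ms idle hint used to produce size_factor = min(max(100, 30), 1000) / 10 = 10, and now produces min(max(100, 20), 1000) / 4 = 25, so each idle notification drives a considerably larger incremental-marking step. The same computation, stand-alone:

    #include <algorithm>
    #include <cstdio>
    #include <initializer_list>

    int main() {
      const int kMaxHint = 1000;
      for (int hint : {5, 100, 5000}) {
        // Clamp the hint to [20, 1000], then divide by 4: range [5, 250].
        int size_factor = std::min(std::max(hint, 20), kMaxHint) / 4;
        std::printf("hint=%d -> size_factor=%d\n", hint, size_factor);
      }
      // Prints: hint=5 -> 5, hint=100 -> 25, hint=5000 -> 250.
    }
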
@@ -4917,10 +5087,6 @@ bool Heap::IdleNotification(int hint) {
   }

   if (incremental_marking()->IsStopped()) {
-    if (!WorthStartingGCWhenIdle()) {
-      FinishIdleRound();
-      return true;
-    }
     incremental_marking()->Start();
   }
@@ -5558,6 +5724,11 @@ bool Heap::ConfigureHeap(int max_semispace_size,
                          intptr_t max_executable_size) {
   if (HasBeenSetUp()) return false;

+  if (FLAG_stress_compaction) {
+    // This will cause more frequent GCs when stressing.
+    max_semispace_size_ = Page::kPageSize;
+  }
+
   if (max_semispace_size > 0) {
     if (max_semispace_size < Page::kPageSize) {
       max_semispace_size = Page::kPageSize;
@@ -5662,16 +5833,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
 }


-intptr_t Heap::PromotedSpaceSize() {
-  return old_pointer_space_->Size()
-      + old_data_space_->Size()
-      + code_space_->Size()
-      + map_space_->Size()
-      + cell_space_->Size()
-      + lo_space_->Size();
-}
-
-
 intptr_t Heap::PromotedSpaceSizeOfObjects() {
   return old_pointer_space_->SizeOfObjects()
       + old_data_space_->SizeOfObjects()
@@ -5682,7 +5843,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
 }


-int Heap::PromotedExternalMemorySize() {
+intptr_t Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
   return amount_of_external_allocated_memory_
@@ -5855,6 +6016,15 @@ class HeapDebugUtils {
 #endif


+V8_DECLARE_ONCE(initialize_gc_once);
+
+static void InitializeGCOnce() {
+  InitializeScavengingVisitorsTables();
+  NewSpaceScavenger::Initialize();
+  MarkCompactCollector::Initialize();
+}
+
+
 bool Heap::SetUp(bool create_heap_objects) {
 #ifdef DEBUG
   allocation_timeout_ = FLAG_gc_interval;
@@ -5873,15 +6043,7 @@ bool Heap::SetUp(bool create_heap_objects) {
     if (!ConfigureHeapDefault()) return false;
   }

-  gc_initializer_mutex.Pointer()->Lock();
-  static bool initialized_gc = false;
-  if (!initialized_gc) {
-    initialized_gc = true;
-    InitializeScavengingVisitorsTables();
-    NewSpaceScavenger::Initialize();
-    MarkCompactCollector::Initialize();
-  }
-  gc_initializer_mutex.Pointer()->Unlock();
+  CallOnce(&initialize_gc_once, &InitializeGCOnce);

   MarkMapPointersAsEncoded(false);
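
The explicit mutex plus static flag is replaced by V8's CallOnce primitive, which encapsulates the same "exactly once, even under concurrent callers" guarantee. The equivalent pattern expressed with the standard library (std::call_once is the closest analog to the CallOnce used in this hunk):

    #include <cstdio>
    #include <mutex>

    static std::once_flag initialize_gc_once;

    static void InitializeGCOnce() {
      std::puts("one-time GC setup");  // visitor tables, collector init, etc.
    }

    void SetUp() {
      // Safe to call from multiple threads; the body runs exactly once.
      std::call_once(initialize_gc_once, InitializeGCOnce);
    }

    int main() {
      SetUp();
      SetUp();  // second call is a no-op
    }
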
@@ -5993,6 +6155,11 @@ void Heap::SetStackLimits() {

 void Heap::TearDown() {
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+
   if (FLAG_print_cumulative_gc_stat) {
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);

59  deps/v8/src/heap.h
@@ -243,7 +243,8 @@ namespace internal {
   V(compare_ic_symbol, ".compare_ic")                                  \
   V(infinity_symbol, "Infinity")                                       \
   V(minus_infinity_symbol, "-Infinity")                                \
-  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")
+  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")               \
+  V(query_colon_symbol, "(?:)")

 // Forward declarations.
 class GCTracer;
@@ -529,6 +530,8 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);

+  MUST_USE_RESULT MaybeObject* AllocateJSModule();
+
   // Allocate a JSArray with no elements
   MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
       ElementsKind elements_kind,
@@ -618,7 +621,7 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateMap(
       InstanceType instance_type,
       int instance_size,
-      ElementsKind elements_kind = FAST_ELEMENTS);
+      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

   // Allocates a partial map for bootstrapping.
   MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -820,6 +823,10 @@ class Heap {
   // Allocate a global (but otherwise uninitialized) context.
   MUST_USE_RESULT MaybeObject* AllocateGlobalContext();

+  // Allocate a module context.
+  MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
+                                                     ScopeInfo* scope_info);
+
   // Allocate a function context.
   MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
                                                        JSFunction* function);
@@ -1326,7 +1333,8 @@ class Heap {
   // Adjusts the amount of registered external memory.
   // Returns the adjusted value.
-  inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+  inline intptr_t AdjustAmountOfExternalAllocatedMemory(
+      intptr_t change_in_bytes);

   // Allocate uninitialized fixed array.
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
@@ -1334,7 +1342,7 @@ class Heap {
                                                      PretenureFlag pretenure);

   inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSize() + PromotedExternalMemorySize();
+    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }

   // True if we have reached the allocation limit in the old generation that
@@ -1355,19 +1363,6 @@ class Heap {
   static const intptr_t kMinimumAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

-  // When we sweep lazily we initially guess that there is no garbage on the
-  // heap and set the limits for the next GC accordingly. As we sweep we find
-  // out that some of the pages contained garbage and we have to adjust
-  // downwards the size of the heap. This means the limits that control the
-  // timing of the next GC also need to be adjusted downwards.
-  void LowerOldGenLimits(intptr_t adjustment) {
-    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-  }
-
   intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
     const int divisor = FLAG_stress_compaction ? 10 : 3;
     intptr_t limit =
@@ -1411,6 +1406,12 @@ class Heap {
     kRootListLength
   };

+  STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
+  STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
+  STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+  STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+  STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
+
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
   MUST_USE_RESULT MaybeObject* Uint32ToString(
@@ -1442,6 +1443,8 @@ class Heap {
   inline bool NextGCIsLikelyToBeFull() {
     if (FLAG_gc_global) return true;

+    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
+
     intptr_t total_promoted = PromotedTotalSize();

     intptr_t adjusted_promotion_limit =
@@ -1452,7 +1455,7 @@ class Heap {
     intptr_t adjusted_allocation_limit =
         old_gen_allocation_limit_ - new_space_.Capacity() / 5;

-    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+    if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;

     return false;
   }
@@ -1490,7 +1493,6 @@ class Heap {
   GCTracer* tracer() { return tracer_; }

   // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSize();
   intptr_t PromotedSpaceSizeOfObjects();

   double total_regexp_code_generated() { return total_regexp_code_generated_; }
@@ -1595,7 +1597,7 @@ class Heap {
   }

   void AgeInlineCaches() {
-    ++global_ic_age_;
+    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
   }

  private:
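
AgeInlineCaches no longer lets global_ic_age_ grow without bound; the increment wraps within the bit field that SharedFunctionInfo reserves for the IC age in its new packed counters word (set_counters(0) in the heap.cc hunk above). A sketch of the wrap-around, assuming an 8-bit field purely for illustration; V8 sizes the real field via SharedFunctionInfo::ICAgeBits:

    #include <cassert>

    const int kICAgeMax = (1 << 8) - 1;  // all-ones mask for the field

    int AgeInlineCaches(int global_ic_age) {
      // Masking keeps the counter inside the field instead of overflowing
      // into neighbouring bits of the packed counters word.
      return (global_ic_age + 1) & kICAgeMax;
    }

    int main() {
      assert(AgeInlineCaches(0) == 1);
      assert(AgeInlineCaches(kICAgeMax) == 0);  // wraps back to zero
    }
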
@@ -1605,6 +1607,8 @@ class Heap {
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;

+  Object* roots_[kRootListLength];
+
   intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
@@ -1646,7 +1650,7 @@ class Heap {
   int gc_post_processing_depth_;

   // Returns the amount of external memory registered since last global gc.
-  int PromotedExternalMemorySize();
+  intptr_t PromotedExternalMemorySize();

   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
@@ -1711,17 +1715,15 @@ class Heap {
   // The amount of external memory registered through the API kept alive
   // by global handles
-  int amount_of_external_allocated_memory_;
+  intptr_t amount_of_external_allocated_memory_;

   // Caches the amount of external memory registered at the last global gc.
-  int amount_of_external_allocated_memory_at_last_global_gc_;
+  intptr_t amount_of_external_allocated_memory_at_last_global_gc_;

   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   int old_gen_exhausted_;

-  Object* roots_[kRootListLength];
-
   Object* global_contexts_list_;

   StoreBufferRebuilder store_buffer_rebuilder_;
@@ -1974,13 +1976,6 @@ class Heap {
     return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
   }

-  bool WorthStartingGCWhenIdle() {
-    if (contexts_disposed_ > 0) {
-      return true;
-    }
-    return incremental_marking()->WorthActivating();
-  }
-
   // Estimates how many milliseconds a Mark-Sweep would take to complete.
   // In idle notification handler we assume that this function will return:
   // - a number less than 10 for small heaps, which are less than 8Mb.

249  deps/v8/src/hydrogen-instructions.cc
@@ -336,7 +336,8 @@ HUseListNode* HValue::RemoveUse(HValue* value, int index) {
   // Do not reuse use list nodes in debug mode, zap them.
   if (current != NULL) {
     HUseListNode* temp =
-        new HUseListNode(current->value(), current->index(), NULL);
+        new(block()->zone())
+        HUseListNode(current->value(), current->index(), NULL);
     current->Zap();
     current = temp;
   }
@@ -416,6 +417,7 @@ void HValue::Kill() {
   SetFlag(kIsDead);
   for (int i = 0; i < OperandCount(); ++i) {
     HValue* operand = OperandAt(i);
+    if (operand == NULL) continue;
     HUseListNode* first = operand->use_list_;
     if (first != NULL && first->value() == this && first->index() == i) {
       operand->use_list_ = first->tail();
@@ -462,7 +464,8 @@ void HValue::PrintChangesTo(StringStream* stream) {
       add_comma = true;                         \
       stream->Add(#type);                       \
     }
-    GVN_FLAG_LIST(PRINT_DO);
+    GVN_TRACKED_FLAG_LIST(PRINT_DO);
+    GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
 #undef PRINT_DO
   }
   stream->Add("]");
@@ -493,8 +496,8 @@ void HValue::RegisterUse(int index, HValue* new_value) {
   if (new_value != NULL) {
     if (removed == NULL) {
-      new_value->use_list_ =
-          new HUseListNode(this, index, new_value->use_list_);
+      new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
+          this, index, new_value->use_list_);
     } else {
       removed->set_tail(new_value->use_list_);
       new_value->use_list_ = removed;
@@ -599,6 +602,9 @@ void HInstruction::InsertAfter(HInstruction* previous) {
   SetBlock(block);
   previous->next_ = this;
   if (next != NULL) next->previous_ = this;
+  if (block->last() == previous) {
+    block->set_last(this);
+  }
 }
@@ -608,6 +614,7 @@ void HInstruction::Verify() {
   HBasicBlock* cur_block = block();
   for (int i = 0; i < OperandCount(); ++i) {
     HValue* other_operand = OperandAt(i);
+    if (other_operand == NULL) continue;
     HBasicBlock* other_block = other_operand->block();
     if (cur_block == other_block) {
       if (!other_operand->IsPhi()) {
@@ -866,6 +873,17 @@ HValue* HBitwise::Canonicalize() {
 }


+HValue* HBitNot::Canonicalize() {
+  // Optimize ~~x, a common pattern used for ToInt32(x).
+  if (value()->IsBitNot()) {
+    HValue* result = HBitNot::cast(value())->value();
+    ASSERT(result->representation().IsInteger32());
+    return result;
+  }
+  return this;
+}
+
+
 HValue* HAdd::Canonicalize() {
   if (!representation().IsInteger32()) return this;
   if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
@@ -916,6 +934,62 @@ void HJSArrayLength::PrintDataTo(StringStream* stream) {
 }


+HValue* HUnaryMathOperation::Canonicalize() {
+  if (op() == kMathFloor) {
+    // If the input is integer32 then we replace the floor instruction
+    // with its input. This happens before the representation changes are
+    // introduced.
+    if (value()->representation().IsInteger32()) return value();
+
+#ifdef V8_TARGET_ARCH_ARM
+    if (value()->IsDiv() && (value()->UseCount() == 1)) {
+      // TODO(2038): Implement this optimization for non ARM architectures.
+      HDiv* hdiv = HDiv::cast(value());
+      HValue* left = hdiv->left();
+      HValue* right = hdiv->right();
+      // Try to simplify left and right values of the division.
+      HValue* new_left =
+          LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
+      HValue* new_right =
+          LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
+
+      // Return if left or right are not optimizable.
+      if ((new_left == NULL) || (new_right == NULL)) return this;
+
+      // Insert the new values in the graph.
+      if (new_left->IsInstruction() &&
+          !HInstruction::cast(new_left)->IsLinked()) {
+        HInstruction::cast(new_left)->InsertBefore(this);
+      }
+      if (new_right->IsInstruction() &&
+          !HInstruction::cast(new_right)->IsLinked()) {
+        HInstruction::cast(new_right)->InsertBefore(this);
+      }
+      HMathFloorOfDiv* instr = new(block()->zone()) HMathFloorOfDiv(context(),
+                                                                    new_left,
+                                                                    new_right);
+      // Replace this HMathFloor instruction by the new HMathFloorOfDiv.
+      instr->InsertBefore(this);
+      ReplaceAllUsesWith(instr);
+      Kill();
+      // We know the division had no other uses than this HMathFloor. Delete it.
+      // Also delete the arguments of the division if they are not used any
+      // more.
+      hdiv->DeleteAndReplaceWith(NULL);
+      ASSERT(left->IsChange() || left->IsConstant());
+      ASSERT(right->IsChange() || right->IsConstant());
+      if (left->HasNoUses()) left->DeleteAndReplaceWith(NULL);
+      if (right->HasNoUses()) right->DeleteAndReplaceWith(NULL);
+
+      // Return NULL to remove this instruction from the graph.
+      return NULL;
+    }
+#endif  // V8_TARGET_ARCH_ARM
+  }
+  return this;
+}
+
+
 HValue* HCheckInstanceType::Canonicalize() {
   if (check_ == IS_STRING &&
       !value()->type().IsUninitialized() &&
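
The new Canonicalize above folds Math.floor(a / b) over int32 operands into a single HMathFloorOfDiv instruction (ARM only for now, per the TODO). The integer identity the rewrite relies on, sketched outside Hydrogen (FlooredDiv is a hypothetical helper, and b must be non-zero):

    #include <cassert>
    #include <cmath>

    // floor(double(a) / double(b)) for int32 a, b equals a floored integer
    // division, so the double divide plus floor can collapse into one op.
    int FlooredDiv(int a, int b) {
      int q = a / b;  // C++ division truncates toward zero
      if ((a % b != 0) && ((a < 0) != (b < 0))) --q;  // adjust toward -infinity
      return q;
    }

    int main() {
      assert(FlooredDiv(7, 2) == 3);
      assert(FlooredDiv(-7, 2) == -4);
      assert(FlooredDiv(-7, 2) == static_cast<int>(std::floor(-7.0 / 2.0)));
    }
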
@@ -965,16 +1039,13 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
 }


-void HCheckMap::PrintDataTo(StringStream* stream) {
+void HCheckMaps::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" %p", *map());
-  if (mode() == REQUIRE_EXACT_MAP) {
-    stream->Add(" [EXACT]");
-  } else if (!has_element_transitions_) {
-    stream->Add(" [EXACT*]");
-  } else {
-    stream->Add(" [MATCH ELEMENTS]");
+  stream->Add(" [%p", *map_set()->first());
+  for (int i = 1; i < map_set()->length(); ++i) {
+    stream->Add(",%p", *map_set()->at(i));
   }
+  stream->Add("]");
 }
@@ -1181,7 +1252,7 @@ void HPhi::PrintTo(StringStream* stream) {

 void HPhi::AddInput(HValue* value) {
-  inputs_.Add(NULL);
+  inputs_.Add(NULL, value->block()->zone());
   SetOperandAt(OperandCount() - 1, value);
   // Mark phis that may have 'arguments' directly or indirectly as an operand.
   if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
@@ -1228,14 +1299,33 @@ void HPhi::InitRealUses(int phi_id) {
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* value = it.value();
     if (!value->IsPhi()) {
-      Representation rep = value->RequiredInputRepresentation(it.index());
+      Representation rep = value->ObservedInputRepresentation(it.index());
       non_phi_uses_[rep.kind()] += value->LoopWeight();
+      if (FLAG_trace_representation) {
+        PrintF("%d %s is used by %d %s as %s\n",
+               this->id(),
+               this->Mnemonic(),
+               value->id(),
+               value->Mnemonic(),
+               rep.Mnemonic());
+      }
     }
   }
 }


 void HPhi::AddNonPhiUsesFrom(HPhi* other) {
+  if (FLAG_trace_representation) {
+    PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
+           this->id(),
+           this->Mnemonic(),
+           other->id(),
+           other->Mnemonic(),
+           other->non_phi_uses_[Representation::kInteger32],
+           other->non_phi_uses_[Representation::kDouble],
+           other->non_phi_uses_[Representation::kTagged]);
+  }
+
   for (int i = 0; i < Representation::kNumRepresentations; i++) {
     indirect_uses_[i] += other->non_phi_uses_[i];
   }
@@ -1249,6 +1339,12 @@ void HPhi::AddIndirectUsesTo(int* dest) {
 }


+void HPhi::ResetInteger32Uses() {
+  non_phi_uses_[Representation::kInteger32] = 0;
+  indirect_uses_[Representation::kInteger32] = 0;
+}
+
+
 void HSimulate::PrintDataTo(StringStream* stream) {
   stream->Add("id=%d", ast_id());
   if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
@@ -1302,18 +1398,18 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
 }


-HConstant* HConstant::CopyToRepresentation(Representation r) const {
+HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
   if (r.IsInteger32() && !has_int32_value_) return NULL;
   if (r.IsDouble() && !has_double_value_) return NULL;
-  return new HConstant(handle_, r);
+  return new(zone) HConstant(handle_, r);
 }


-HConstant* HConstant::CopyToTruncatedInt32() const {
+HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
   if (!has_double_value_) return NULL;
   int32_t truncated = NumberToInt32(*handle_);
-  return new HConstant(FACTORY->NewNumberFromInt(truncated),
-                       Representation::Integer32());
+  return new(zone) HConstant(FACTORY->NewNumberFromInt(truncated),
+                             Representation::Integer32());
 }
@@ -1522,17 +1618,51 @@ void HLoadNamedField::PrintDataTo(StringStream* stream) {
 }


+// Returns true if an instance of this map can never find a property with this
+// name in its prototype chain. This means all prototypes up to the top are
+// fast and don't have the name in them. It would be good if we could optimize
+// polymorphic loads where the property is sometimes found in the prototype
+// chain.
+static bool PrototypeChainCanNeverResolve(
+    Handle<Map> map, Handle<String> name) {
+  Isolate* isolate = map->GetIsolate();
+  Object* current = map->prototype();
+  while (current != isolate->heap()->null_value()) {
+    if (current->IsJSGlobalProxy() ||
+        current->IsGlobalObject() ||
+        !current->IsJSObject() ||
+        JSObject::cast(current)->IsAccessCheckNeeded() ||
+        !JSObject::cast(current)->HasFastProperties()) {
+      return false;
+    }
+
+    LookupResult lookup(isolate);
+    JSObject::cast(current)->map()->LookupInDescriptors(NULL, *name, &lookup);
+    if (lookup.IsFound()) {
+      if (lookup.type() != MAP_TRANSITION) return false;
+    } else if (!lookup.IsCacheable()) {
+      return false;
+    }
+
+    current = JSObject::cast(current)->GetPrototype();
+  }
+  return true;
+}
+
+
 HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
                                                        HValue* object,
                                                        SmallMapList* types,
-                                                       Handle<String> name)
-    : types_(Min(types->length(), kMaxLoadPolymorphism)),
+                                                       Handle<String> name,
+                                                       Zone* zone)
+    : types_(Min(types->length(), kMaxLoadPolymorphism), zone),
       name_(name),
       need_generic_(false) {
   SetOperandAt(0, context);
   SetOperandAt(1, object);
   set_representation(Representation::Tagged());
   SetGVNFlag(kDependsOnMaps);
+  SmallMapList negative_lookups;
   for (int i = 0;
        i < types->length() && types_.length() < kMaxLoadPolymorphism;
        ++i) {
@@ -1548,21 +1678,39 @@
         } else {
           SetGVNFlag(kDependsOnBackingStoreFields);
         }
-        types_.Add(types->at(i));
+        types_.Add(types->at(i), zone);
         break;
       }
       case CONSTANT_FUNCTION:
-        types_.Add(types->at(i));
+        types_.Add(types->at(i), zone);
+        break;
+      case MAP_TRANSITION:
+        if (PrototypeChainCanNeverResolve(map, name)) {
+          negative_lookups.Add(types->at(i), zone);
+        }
         break;
       default:
         break;
     }
+    } else if (lookup.IsCacheable()) {
+      if (PrototypeChainCanNeverResolve(map, name)) {
+        negative_lookups.Add(types->at(i), zone);
+      }
     }
   }

-  if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
+  bool need_generic =
+      (types->length() != negative_lookups.length() + types_.length());
+  if (!need_generic && FLAG_deoptimize_uncommon_cases) {
     SetFlag(kUseGVN);
+    for (int i = 0; i < negative_lookups.length(); i++) {
+      types_.Add(negative_lookups.at(i), zone);
+    }
   } else {
+    // We don't have an easy way to handle both a call (to the generic stub) and
+    // a deopt in the same hydrogen instruction, so in this case we don't add
+    // the negative lookups which can deopt - just let the generic stub handle
+    // them.
     SetAllSideEffects();
     need_generic_ = true;
   }
@@ -1607,11 +1755,14 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
   stream->Add("[");
   key()->PrintNameTo(stream);
   stream->Add("]");
+  if (RequiresHoleCheck()) {
+    stream->Add(" check_hole");
+  }
 }


 bool HLoadKeyedFastElement::RequiresHoleCheck() {
-  if (hole_check_mode_ == OMIT_HOLE_CHECK) {
+  if (IsFastPackedElementsKind(elements_kind())) {
     return false;
   }
@@ -1657,12 +1808,11 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
           new(block()->zone()) HCheckMapValue(object(), names_cache->map());
       HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
           index_cache,
-          key_load->key(),
-          HLoadKeyedFastElement::OMIT_HOLE_CHECK);
-      HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
-          object(), index);
+          key_load->key());
       map_check->InsertBefore(this);
       index->InsertBefore(this);
+      HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
+          object(), index);
       load->InsertBefore(this);
       return load;
     }
@@ -1706,8 +1856,11 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
       stream->Add("pixel");
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -1736,6 +1889,9 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
   stream->Add(" = ");
   value()->PrintNameTo(stream);
   stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
+  if (NeedsWriteBarrier()) {
+    stream->Add(" (write-barrier)");
+  }
   if (!transition().is_null()) {
     stream->Add(" (transition map %p)", *transition());
   }
@@ -1801,9 +1957,12 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
     case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
-    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -1818,7 +1977,13 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(

 void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+  ElementsKind from_kind = original_map()->elements_kind();
+  ElementsKind to_kind = transitioned_map()->elements_kind();
+  stream->Add(" %p [%s] -> %p [%s]",
+              *original_map(),
+              ElementsAccessor::ForKind(from_kind)->name(),
+              *transitioned_map(),
+              ElementsAccessor::ForKind(to_kind)->name());
 }
@@ -1879,7 +2044,7 @@ HType HValue::CalculateInferredType() {
 }


-HType HCheckMap::CalculateInferredType() {
+HType HCheckMaps::CalculateInferredType() {
   return value()->type();
 }
@@ -2089,6 +2254,17 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
 }


+bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
+  // If value was loaded from unboxed double backing store or
+  // converted from an integer then we don't have to canonicalize it.
+  if (value()->IsLoadKeyedFastDoubleElement() ||
+      (value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
+    return false;
+  }
+  return true;
+}
+
+
 #define H_CONSTANT_INT32(val)                                                  \
 new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED),                   \
                     Representation::Integer32())
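
Context for NeedsCanonicalization: an unboxed double backing store reserves a particular NaN bit pattern to represent "the hole", so an arbitrary NaN produced by arithmetic has to be rewritten to one canonical NaN before it is stored, or it could be mistaken for a hole; the bit-level details here are illustrative rather than V8's actual encoding. Values that were themselves loaded from such a store, or that come from an int32-to-double conversion, are already canonical, which is exactly the early-out above.

    #include <cmath>
    #include <limits>

    // Rewrite any NaN to the single canonical quiet NaN before storing it
    // into a raw double array (sketch; CanonicalizeNaN is a made-up name).
    double CanonicalizeNaN(double value) {
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                               : value;
    }
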
@@ -2257,6 +2433,13 @@ void HIn::PrintDataTo(StringStream* stream) {
 }


+void HBitwise::PrintDataTo(StringStream* stream) {
+  stream->Add(Token::Name(op_));
+  stream->Add(" ");
+  HBitwiseBinaryOperation::PrintDataTo(stream);
+}
+
+
 Representation HPhi::InferredRepresentation() {
   bool double_occurred = false;
   bool int32_occurred = false;

466  deps/v8/src/hydrogen-instructions.h
@@ -85,7 +85,7 @@ class LChunkBuilder;
   V(Change)                                    \
   V(CheckFunction)                             \
   V(CheckInstanceType)                         \
-  V(CheckMap)                                  \
+  V(CheckMaps)                                 \
   V(CheckNonSmi)                               \
   V(CheckPrototypeMaps)                        \
   V(CheckSmi)                                  \
@@ -140,6 +140,7 @@ class LChunkBuilder;
   V(LoadNamedField)                            \
   V(LoadNamedFieldPolymorphic)                 \
   V(LoadNamedGeneric)                          \
+  V(MathFloorOfDiv)                            \
   V(Mod)                                       \
   V(Mul)                                       \
   V(ObjectLiteral)                             \
@@ -188,7 +189,10 @@ class LChunkBuilder;
   V(DateField)                                 \
   V(WrapReceiver)

-#define GVN_FLAG_LIST(V)                       \
+#define GVN_TRACKED_FLAG_LIST(V)               \
+  V(NewSpacePromotion)
+
+#define GVN_UNTRACKED_FLAG_LIST(V)             \
   V(Calls)                                     \
   V(InobjectFields)                            \
   V(BackingStoreFields)                        \
@@ -506,14 +510,18 @@ class HUseIterator BASE_EMBEDDED {
 // There must be one corresponding kDepends flag for every kChanges flag and
 // the order of the kChanges flags must be exactly the same as of the kDepends
-// flags.
+// flags. All tracked flags should appear before untracked ones.
 enum GVNFlag {
   // Declare global value numbering flags.
 #define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
-  GVN_FLAG_LIST(DECLARE_FLAG)
+  GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+  GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
 #undef DECLARE_FLAG
   kAfterLastFlag,
-  kLastFlag = kAfterLastFlag - 1
+  kLastFlag = kAfterLastFlag - 1,
+#define COUNT_FLAG(type) + 1
+  kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
+#undef COUNT_FLAG
 };

 typedef EnumSet<GVNFlag> GVNFlagSet;
@@ -530,6 +538,10 @@ class HValue: public ZoneObject {
     // implement DataEquals(), which will be used to determine if other
     // occurrences of the instruction are indeed the same.
     kUseGVN,
+    // Track instructions that are dominating side effects. If an instruction
+    // sets this flag, it must implement SetSideEffectDominator() and should
+    // indicate which side effects to track by setting GVN flags.
+    kTrackSideEffectDominators,
     kCanOverflow,
     kBailoutOnMinusZero,
     kCanBeDivByZero,
@@ -544,6 +556,12 @@ class HValue: public ZoneObject {

   static const int kChangesToDependsFlagsLeftShift = 1;

+  static GVNFlag ChangesFlagFromInt(int x) {
+    return static_cast<GVNFlag>(x * 2);
+  }
+  static GVNFlag DependsOnFlagFromInt(int x) {
+    return static_cast<GVNFlag>(x * 2 + 1);
+  }
   static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
     return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
   }
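
These helpers lean on the enum layout declared just above: the macro expands every flag to a kChanges/kDependsOn pair, so changes flags occupy even positions, their matching depends-on flags the odd ones, and converting a whole changes set to a depends-on set is a single left shift of the bit mask. A self-contained sketch of that arithmetic (the enumerator names and values are illustrative, not the real expansion):

    #include <cassert>

    enum GVNFlagSketch { kChangesMaps = 0, kDependsOnMaps = 1,
                         kChangesElements = 2, kDependsOnElements = 3 };

    int ChangesFlagFromInt(int x) { return x * 2; }        // even slots
    int DependsOnFlagFromInt(int x) { return x * 2 + 1; }  // odd slots

    unsigned ConvertChangesToDepends(unsigned changes_mask) {
      return changes_mask << 1;  // kChangesToDependsFlagsLeftShift == 1
    }

    int main() {
      assert(ChangesFlagFromInt(1) == kChangesElements);
      assert(DependsOnFlagFromInt(1) == kDependsOnElements);
      // Bit for kChangesMaps (bit 0) shifts onto kDependsOnMaps (bit 1).
      assert(ConvertChangesToDepends(1u << kChangesMaps) ==
             (1u << kDependsOnMaps));
    }
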
@ -702,6 +720,11 @@ class HValue: public ZoneObject {
return representation(); return representation();
} }
// Type feedback access.
virtual Representation ObservedInputRepresentation(int index) {
return RequiredInputRepresentation(index);
}
// This gives the instruction an opportunity to replace itself with an // This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an // instruction that does the same in some better way. To replace an
// instruction with a new one, first add the new instruction to the graph, // instruction with a new one, first add the new instruction to the graph,
@ -726,6 +749,13 @@ class HValue: public ZoneObject {
virtual HType CalculateInferredType(); virtual HType CalculateInferredType();
// This function must be overridden for instructions which have the
// kTrackSideEffectDominators flag set, to track instructions that are
// dominating side effects.
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
UNREACHABLE();
}
#ifdef DEBUG #ifdef DEBUG
virtual void Verify() = 0; virtual void Verify() = 0;
#endif #endif
@ -756,7 +786,8 @@ class HValue: public ZoneObject {
GVNFlagSet result; GVNFlagSet result;
// Create changes mask. // Create changes mask.
#define ADD_FLAG(type) result.Add(kDependsOn##type); #define ADD_FLAG(type) result.Add(kDependsOn##type);
GVN_FLAG_LIST(ADD_FLAG) GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG #undef ADD_FLAG
return result; return result;
} }
@ -765,7 +796,8 @@ class HValue: public ZoneObject {
GVNFlagSet result; GVNFlagSet result;
// Create changes mask. // Create changes mask.
#define ADD_FLAG(type) result.Add(kChanges##type); #define ADD_FLAG(type) result.Add(kChanges##type);
GVN_FLAG_LIST(ADD_FLAG) GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG #undef ADD_FLAG
return result; return result;
} }
@ -781,6 +813,7 @@ class HValue: public ZoneObject {
// an executing program (i.e. are not safe to repeat, move or remove); // an executing program (i.e. are not safe to repeat, move or remove);
static GVNFlagSet AllObservableSideEffectsFlagSet() { static GVNFlagSet AllObservableSideEffectsFlagSet() {
GVNFlagSet result = AllChangesFlagSet(); GVNFlagSet result = AllChangesFlagSet();
result.Remove(kChangesNewSpacePromotion);
result.Remove(kChangesElementsKind); result.Remove(kChangesElementsKind);
result.Remove(kChangesElementsPointer); result.Remove(kChangesElementsPointer);
result.Remove(kChangesMaps); result.Remove(kChangesMaps);
@ -959,7 +992,8 @@ class HSoftDeoptimize: public HTemplateInstruction<0> {
class HDeoptimize: public HControlInstruction { class HDeoptimize: public HControlInstruction {
public: public:
explicit HDeoptimize(int environment_length) : values_(environment_length) { } HDeoptimize(int environment_length, Zone* zone)
: values_(environment_length, zone) { }
virtual Representation RequiredInputRepresentation(int index) { virtual Representation RequiredInputRepresentation(int index) {
return Representation::None(); return Representation::None();
@ -978,8 +1012,8 @@ class HDeoptimize: public HControlInstruction {
UNREACHABLE(); UNREACHABLE();
} }
void AddEnvironmentValue(HValue* value) { void AddEnvironmentValue(HValue* value, Zone* zone) {
values_.Add(NULL); values_.Add(NULL, zone);
SetOperandAt(values_.length() - 1, value); SetOperandAt(values_.length() - 1, value);
} }
@ -1196,6 +1230,7 @@ class HChange: public HUnaryOperation {
SetFlag(kUseGVN); SetFlag(kUseGVN);
if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined); if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
if (is_truncating) SetFlag(kTruncatingToInt32); if (is_truncating) SetFlag(kTruncatingToInt32);
if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
} }
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@ -1246,11 +1281,12 @@ class HClampToUint8: public HUnaryOperation {
class HSimulate: public HInstruction { class HSimulate: public HInstruction {
public: public:
HSimulate(int ast_id, int pop_count) HSimulate(int ast_id, int pop_count, Zone* zone)
: ast_id_(ast_id), : ast_id_(ast_id),
pop_count_(pop_count), pop_count_(pop_count),
values_(2), values_(2, zone),
assigned_indexes_(2) {} assigned_indexes_(2, zone),
zone_(zone) {}
virtual ~HSimulate() {} virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream); virtual void PrintDataTo(StringStream* stream);
@ -1298,9 +1334,9 @@ class HSimulate: public HInstruction {
private: private:
static const int kNoIndex = -1; static const int kNoIndex = -1;
void AddValue(int index, HValue* value) { void AddValue(int index, HValue* value) {
assigned_indexes_.Add(index); assigned_indexes_.Add(index, zone_);
// Resize the list of pushed values. // Resize the list of pushed values.
values_.Add(NULL); values_.Add(NULL, zone_);
// Set the operand through the base method in HValue to make sure that the // Set the operand through the base method in HValue to make sure that the
// use lists are correctly updated. // use lists are correctly updated.
SetOperandAt(values_.length() - 1, value); SetOperandAt(values_.length() - 1, value);
@ -1309,6 +1345,7 @@ class HSimulate: public HInstruction {
int pop_count_; int pop_count_;
ZoneList<HValue*> values_; ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_; ZoneList<int> assigned_indexes_;
Zone* zone_;
}; };
@ -1321,6 +1358,7 @@ class HStackCheck: public HTemplateInstruction<1> {
HStackCheck(HValue* context, Type type) : type_(type) { HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context); SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
} }
HValue* context() { return OperandAt(0); } HValue* context() { return OperandAt(0); }
@ -1354,13 +1392,15 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function, FunctionLiteral* function,
CallKind call_kind, CallKind call_kind,
bool is_construct, bool is_construct,
Variable* arguments) Variable* arguments_var,
ZoneList<HValue*>* arguments_values)
: closure_(closure), : closure_(closure),
arguments_count_(arguments_count), arguments_count_(arguments_count),
function_(function), function_(function),
call_kind_(call_kind), call_kind_(call_kind),
is_construct_(is_construct), is_construct_(is_construct),
arguments_(arguments) { arguments_var_(arguments_var),
arguments_values_(arguments_values) {
} }
virtual void PrintDataTo(StringStream* stream); virtual void PrintDataTo(StringStream* stream);
@ -1375,7 +1415,8 @@ class HEnterInlined: public HTemplateInstruction<0> {
return Representation::None(); return Representation::None();
} }
Variable* arguments() { return arguments_; } Variable* arguments_var() { return arguments_var_; }
ZoneList<HValue*>* arguments_values() { return arguments_values_; }
DECLARE_CONCRETE_INSTRUCTION(EnterInlined) DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
@ -1385,19 +1426,28 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function_; FunctionLiteral* function_;
CallKind call_kind_; CallKind call_kind_;
bool is_construct_; bool is_construct_;
Variable* arguments_; Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
}; };
class HLeaveInlined: public HTemplateInstruction<0> { class HLeaveInlined: public HTemplateInstruction<0> {
public: public:
HLeaveInlined() {} explicit HLeaveInlined(bool arguments_pushed)
: arguments_pushed_(arguments_pushed) { }
virtual Representation RequiredInputRepresentation(int index) { virtual Representation RequiredInputRepresentation(int index) {
return Representation::None(); return Representation::None();
} }
bool arguments_pushed() {
return arguments_pushed_;
}
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined) DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
private:
bool arguments_pushed_;
}; };
@ -1605,14 +1655,26 @@ class HInvokeFunction: public HBinaryCall {
: HBinaryCall(context, function, argument_count) { : HBinaryCall(context, function, argument_count) {
} }
HInvokeFunction(HValue* context,
HValue* function,
Handle<JSFunction> known_function,
int argument_count)
: HBinaryCall(context, function, argument_count),
known_function_(known_function) {
}
virtual Representation RequiredInputRepresentation(int index) { virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged(); return Representation::Tagged();
} }
HValue* context() { return first(); } HValue* context() { return first(); }
HValue* function() { return second(); } HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction) DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
private:
Handle<JSFunction> known_function_;
}; };
@@ -1786,7 +1848,9 @@ class HCallRuntime: public HCall<1> {
 class HJSArrayLength: public HTemplateInstruction<2> {
  public:
-  HJSArrayLength(HValue* value, HValue* typecheck) {
+  HJSArrayLength(HValue* value, HValue* typecheck,
+                 HType type = HType::Tagged()) {
+    set_type(type);
     // The length of an array is stored as a tagged value in the array
     // object. It is guaranteed to be 32 bit integer, but it can be
     // represented as either a smi or heap number.
@@ -1810,7 +1874,7 @@ class HJSArrayLength: public HTemplateInstruction<2> {
   DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other_raw) { return true; }
 };
@@ -1865,6 +1929,8 @@ class HBitNot: public HUnaryOperation {
   }
   virtual HType CalculateInferredType();
 
+  virtual HValue* Canonicalize();
+
   DECLARE_CONCRETE_INSTRUCTION(BitNot)
 
  protected:
@@ -1887,6 +1953,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
       case kMathAbs:
         set_representation(Representation::Tagged());
         SetFlag(kFlexibleRepresentation);
+        SetGVNFlag(kChangesNewSpacePromotion);
         break;
       case kMathSqrt:
       case kMathPowHalf:
@@ -1895,6 +1962,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
       case kMathCos:
      case kMathTan:
         set_representation(Representation::Double());
+        SetGVNFlag(kChangesNewSpacePromotion);
         break;
       default:
         UNREACHABLE();
@@ -1935,15 +2003,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
     }
   }
 
-  virtual HValue* Canonicalize() {
-    // If the input is integer32 then we replace the floor instruction
-    // with its inputs. This happens before the representation changes are
-    // introduced.
-    if (op() == kMathFloor) {
-      if (value()->representation().IsInteger32()) return value();
-    }
-    return this;
-  }
+  virtual HValue* Canonicalize();
 
   BuiltinFunctionId op() const { return op_; }
   const char* OpName() const;
@@ -2003,14 +2063,10 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
 };
 
 
-class HCheckMap: public HTemplateInstruction<2> {
+class HCheckMaps: public HTemplateInstruction<2> {
  public:
-  HCheckMap(HValue* value,
-            Handle<Map> map,
-            HValue* typecheck = NULL,
-            CompareMapMode mode = REQUIRE_EXACT_MAP)
-      : map_(map),
-        mode_(mode) {
+  HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
+             HValue* typecheck = NULL) {
     SetOperandAt(0, value);
     // If callers don't depend on a typecheck, they can pass in NULL. In that
     // case we use a copy of the |value| argument as a dummy value.
@@ -2018,14 +2074,43 @@ class HCheckMap: public HTemplateInstruction<2> {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnMaps);
-    // If the map to check doesn't have the untransitioned elements, it must not
-    // be hoisted above TransitionElements instructions.
-    if (mode == REQUIRE_EXACT_MAP || !map->has_fast_smi_only_elements()) {
-      SetGVNFlag(kDependsOnElementsKind);
-    }
-    has_element_transitions_ =
-        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
-        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
+    SetGVNFlag(kDependsOnElementsKind);
+    map_set()->Add(map, zone);
+  }
+  HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
+    SetOperandAt(0, value);
+    SetOperandAt(1, value);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetGVNFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnElementsKind);
+    for (int i = 0; i < maps->length(); i++) {
+      map_set()->Add(maps->at(i), zone);
+    }
+    map_set()->Sort();
+  }
+
+  static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
+                                        Zone* zone) {
+    HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
+    SmallMapList* map_set = check_map->map_set();
+
+    // Since transitioned elements maps of the initial map don't fail the map
+    // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
+    check_map->ClearGVNFlag(kDependsOnElementsKind);
+
+    ElementsKind kind = map->elements_kind();
+    bool packed = IsFastPackedElementsKind(kind);
+    while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+      kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+      Map* transitioned_map =
+          map->LookupElementsTransitionMap(kind);
+      if (transitioned_map) {
+        map_set->Add(Handle<Map>(transitioned_map), zone);
+      }
+    };
+    map_set->Sort();
+    return check_map;
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -2035,25 +2120,23 @@ class HCheckMap: public HTemplateInstruction<2> {
   virtual HType CalculateInferredType();
 
   HValue* value() { return OperandAt(0); }
-  Handle<Map> map() const { return map_; }
-  CompareMapMode mode() const { return mode_; }
+  SmallMapList* map_set() { return &map_set_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
 
  protected:
   virtual bool DataEquals(HValue* other) {
-    HCheckMap* b = HCheckMap::cast(other);
-    // Two CheckMaps instructions are DataEqual if their maps are identical and
-    // they have the same mode. The mode comparison can be ignored if the map
-    // has no elements transitions.
-    return map_.is_identical_to(b->map()) &&
-           (b->mode() == mode() || !has_element_transitions_);
+    HCheckMaps* b = HCheckMaps::cast(other);
+    // Relies on the fact that map_set has been sorted before.
+    if (map_set()->length() != b->map_set()->length()) return false;
+    for (int i = 0; i < map_set()->length(); i++) {
+      if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
+    }
+    return true;
   }
 
  private:
-  bool has_element_transitions_;
-  Handle<Map> map_;
-  CompareMapMode mode_;
+  SmallMapList map_set_;
 };
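
Context for NewWithTransitions above: V8's fast elements kinds form a small lattice (smi elements generalize to doubles, doubles to tagged objects, and packed kinds to their holey variants), so a map check seeded with the initial map can also accept every more general transitioned map and drop its ElementsKind dependency. The real predicates live in deps/v8/src/elements-kind.h; below is a minimal standalone sketch of the same walk over a simplified three-kind lattice (the enum values and stride here are illustrative, not V8's actual constants).

    #include <cstdio>

    // Simplified model: even values are packed, odd values are holey, and
    // each kind generalizes to the kind two steps further along.
    enum ElementsKind {
      FAST_SMI_ELEMENTS,
      FAST_HOLEY_SMI_ELEMENTS,
      FAST_DOUBLE_ELEMENTS,
      FAST_HOLEY_DOUBLE_ELEMENTS,
      FAST_ELEMENTS,
      FAST_HOLEY_ELEMENTS  // terminal kind in this model
    };

    static bool IsFastPackedElementsKind(ElementsKind kind) {
      return (kind % 2) == 0;
    }

    static bool CanTransitionToMoreGeneralFastElementsKind(ElementsKind kind,
                                                           bool packed) {
      return packed ? kind != FAST_ELEMENTS : kind != FAST_HOLEY_ELEMENTS;
    }

    static ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind kind,
                                                           bool packed) {
      (void) packed;  // same stride on the packed and holey rows here
      return static_cast<ElementsKind>(kind + 2);
    }

    int main() {
      ElementsKind kind = FAST_SMI_ELEMENTS;
      bool packed = IsFastPackedElementsKind(kind);
      // Collect every kind the check should also accept, exactly like the
      // while loop in NewWithTransitions.
      while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
        kind = GetNextMoreGeneralFastElementsKind(kind, packed);
        std::printf("also accept kind %d\n", static_cast<int>(kind));
      }
      return 0;
    }

Sorting the resulting map set (map_set->Sort()) is what lets DataEquals above compare two HCheckMaps with a single linear scan.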
@@ -2092,17 +2175,17 @@ class HCheckFunction: public HUnaryOperation {
 class HCheckInstanceType: public HUnaryOperation {
  public:
-  static HCheckInstanceType* NewIsSpecObject(HValue* value) {
-    return new HCheckInstanceType(value, IS_SPEC_OBJECT);
+  static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
+    return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
   }
-  static HCheckInstanceType* NewIsJSArray(HValue* value) {
-    return new HCheckInstanceType(value, IS_JS_ARRAY);
+  static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
+    return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
   }
-  static HCheckInstanceType* NewIsString(HValue* value) {
-    return new HCheckInstanceType(value, IS_STRING);
+  static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
+    return new(zone) HCheckInstanceType(value, IS_STRING);
   }
-  static HCheckInstanceType* NewIsSymbol(HValue* value) {
-    return new HCheckInstanceType(value, IS_SYMBOL);
+  static HCheckInstanceType* NewIsSymbol(HValue* value, Zone* zone) {
+    return new(zone) HCheckInstanceType(value, IS_SYMBOL);
   }
 
   virtual void PrintDataTo(StringStream* stream);
@@ -2251,8 +2334,8 @@ class HCheckSmi: public HUnaryOperation {
 class HPhi: public HValue {
  public:
-  explicit HPhi(int merged_index)
-      : inputs_(2),
+  HPhi(int merged_index, Zone* zone)
+      : inputs_(2, zone),
         merged_index_(merged_index),
         phi_id_(-1),
         is_live_(false),
@@ -2331,11 +2414,15 @@ class HPhi: public HValue {
   bool AllOperandsConvertibleToInteger() {
     for (int i = 0; i < OperandCount(); ++i) {
-      if (!OperandAt(i)->IsConvertibleToInteger()) return false;
+      if (!OperandAt(i)->IsConvertibleToInteger()) {
+        return false;
+      }
     }
     return true;
   }
 
+  void ResetInteger32Uses();
+
  protected:
   virtual void DeleteFromGraph();
   virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -2407,8 +2494,8 @@ class HConstant: public HTemplateInstruction<0> {
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
   bool IsInteger() const { return handle_->IsSmi(); }
-  HConstant* CopyToRepresentation(Representation r) const;
-  HConstant* CopyToTruncatedInt32() const;
+  HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
+  HConstant* CopyToTruncatedInt32(Zone* zone) const;
   bool HasInteger32Value() const { return has_int32_value_; }
   int32_t Integer32Value() const {
     ASSERT(HasInteger32Value());
@@ -2485,6 +2572,7 @@ class HBinaryOperation: public HTemplateInstruction<3> {
     if (IsCommutative() && left()->IsConstant()) return right();
     return left();
   }
+
   HValue* MostConstantOperand() {
     if (IsCommutative() && left()->IsConstant()) return left();
     return right();
@@ -2549,7 +2637,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
 class HArgumentsElements: public HTemplateInstruction<0> {
  public:
-  HArgumentsElements() {
+  explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
     // The value produced by this instruction is a pointer into the stack
     // that looks as if it was a smi because of alignment.
     set_representation(Representation::Tagged());
@@ -2562,8 +2650,12 @@ class HArgumentsElements: public HTemplateInstruction<0> {
     return Representation::None();
   }
 
+  bool from_inlined() const { return from_inlined_; }
+
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
+
+  bool from_inlined_;
 };
@@ -2646,6 +2738,9 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
     set_representation(Representation::Tagged());
     SetFlag(kFlexibleRepresentation);
     SetAllSideEffects();
+    observed_input_representation_[0] = Representation::Tagged();
+    observed_input_representation_[1] = Representation::None();
+    observed_input_representation_[2] = Representation::None();
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -2665,7 +2760,38 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
 
   virtual HType CalculateInferredType();
 
+  virtual Representation ObservedInputRepresentation(int index) {
+    return observed_input_representation_[index];
+  }
+
+  void InitializeObservedInputRepresentation(Representation r) {
+    observed_input_representation_[1] = r;
+    observed_input_representation_[2] = r;
+  }
+
   DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
+
+ private:
+  Representation observed_input_representation_[3];
+};
+
+
+class HMathFloorOfDiv: public HBinaryOperation {
+ public:
+  HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Integer32();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
 };
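
The new HMathFloorOfDiv above models Math.floor(a / b) over Integer32 inputs as one GVN-able instruction. C++ (and x86) integer division truncates toward zero, so flooring needs an adjustment whenever the signs differ and the division leaves a remainder. A minimal sketch of the semantics the instruction's code generator has to implement (standalone, not V8's actual lowering):

    #include <cassert>

    // floor(a / b) for 32-bit integers; built-in '/' truncates toward zero.
    static int FloorOfDiv(int a, int b) {
      int q = a / b;
      int r = a % b;
      // If there is a remainder and the operands have opposite signs,
      // truncation rounded the quotient toward zero; step it back down.
      if (r != 0 && ((a < 0) != (b < 0))) q -= 1;
      return q;
    }

    int main() {
      assert(FloorOfDiv(7, 2) == 3);
      assert(FloorOfDiv(-7, 2) == -4);   // truncation alone would give -3
      assert(FloorOfDiv(7, -2) == -4);
      assert(FloorOfDiv(-7, -2) == 3);
      return 0;
    }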
@@ -3083,6 +3209,7 @@ class HPower: public HTemplateInstruction<2> {
     SetOperandAt(1, right);
     set_representation(Representation::Double());
     SetFlag(kUseGVN);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* left() { return OperandAt(0); }
@@ -3282,6 +3409,8 @@ class HBitwise: public HBitwiseBinaryOperation {
                            HValue* left,
                            HValue* right);
 
+  virtual void PrintDataTo(StringStream* stream);
+
   DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
@@ -3529,6 +3658,12 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
 }
 
 
+inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
+                                            HValue* new_space_dominator) {
+  return !object->IsAllocateObject() || (object != new_space_dominator);
+}
+
+
 class HStoreGlobalCell: public HUnaryOperation {
  public:
   HStoreGlobalCell(HValue* value,
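
The helper added above encodes the rule that lets HStoreNamedField (further down in this file) skip the generational write barrier: if the receiver is itself a fresh inline allocation, and that allocation is still the dominating new-space side effect at the store, the receiver is guaranteed to still be in new space, so nothing old-to-new needs recording. A standalone sketch of the predicate, with the two HValue queries reduced to plain booleans (the parameter names here are hypothetical):

    #include <cstdio>

    // Mirrors ReceiverObjectNeedsWriteBarrier's logic: the barrier can only
    // be omitted when the receiver was just allocated (IsAllocateObject) and
    // that allocation is still the dominating new-space side effect.
    static bool ReceiverNeedsWriteBarrier(bool receiver_is_fresh_allocation,
                                          bool allocation_still_dominates) {
      return !receiver_is_fresh_allocation || !allocation_still_dominates;
    }

    int main() {
      // Only the fresh, still-dominating allocation may skip the barrier.
      std::printf("%d\n", ReceiverNeedsWriteBarrier(true, true));   // 0
      std::printf("%d\n", ReceiverNeedsWriteBarrier(true, false));  // 1
      std::printf("%d\n", ReceiverNeedsWriteBarrier(false, true));  // 1
      return 0;
    }

This is also why so many instructions in this diff gain SetGVNFlag(kChangesNewSpacePromotion): anything that can allocate (and thus trigger a scavenge) invalidates the "still dominates" half of the test.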
@@ -3759,7 +3894,8 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
   HLoadNamedFieldPolymorphic(HValue* context,
                              HValue* object,
                              SmallMapList* types,
-                             Handle<String> name);
+                             Handle<String> name,
+                             Zone* zone);
 
   HValue* context() { return OperandAt(0); }
   HValue* object() { return OperandAt(1); }
@@ -3836,15 +3972,29 @@ class HLoadFunctionPrototype: public HUnaryOperation {
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
+class ArrayInstructionInterface {
+ public:
+  virtual HValue* GetKey() = 0;
+  virtual void SetKey(HValue* key) = 0;
+  virtual void SetIndexOffset(uint32_t index_offset) = 0;
+  virtual bool IsDehoisted() = 0;
+  virtual void SetDehoisted(bool is_dehoisted) = 0;
+  virtual ~ArrayInstructionInterface() { };
+};
+
+
-class HLoadKeyedFastElement: public HTemplateInstruction<2> {
+class HLoadKeyedFastElement
+    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
  public:
-  enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
-
   HLoadKeyedFastElement(HValue* obj,
                         HValue* key,
-                        HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
-      : hole_check_mode_(hole_check_mode) {
+                        ElementsKind elements_kind = FAST_ELEMENTS)
+      : bit_field_(0) {
+    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+    bit_field_ = ElementsKindField::encode(elements_kind);
+    if (IsFastSmiElementsKind(elements_kind) &&
+        IsFastPackedElementsKind(elements_kind)) {
+      set_type(HType::Smi());
+    }
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     set_representation(Representation::Tagged());
@@ -3854,6 +4004,19 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
+  uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
+  void SetIndexOffset(uint32_t index_offset) {
+    bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
+  }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
+  void SetDehoisted(bool is_dehoisted) {
+    bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
+  }
+  ElementsKind elements_kind() const {
+    return ElementsKindField::decode(bit_field_);
+  }
 
   virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
@@ -3872,17 +4035,32 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
   virtual bool DataEquals(HValue* other) {
     if (!other->IsLoadKeyedFastElement()) return false;
     HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
-    return hole_check_mode_ == other_load->hole_check_mode_;
+    if (IsDehoisted() && index_offset() != other_load->index_offset())
+      return false;
+    return elements_kind() == other_load->elements_kind();
   }
 
  private:
-  HoleCheckMode hole_check_mode_;
+  class ElementsKindField: public BitField<ElementsKind, 0, 4> {};
+  class IndexOffsetField: public BitField<uint32_t, 4, 27> {};
+  class IsDehoistedField: public BitField<bool, 31, 1> {};
+  uint32_t bit_field_;
 };
 
 
-class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
+enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
+
+
+class HLoadKeyedFastDoubleElement
+    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
  public:
-  HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
+  HLoadKeyedFastDoubleElement(
+      HValue* elements,
+      HValue* key,
+      HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
+      : index_offset_(0),
+        is_dehoisted_(false),
+        hole_check_mode_(hole_check_mode) {
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     set_representation(Representation::Double());
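
The three BitField helpers introduced above pack HLoadKeyedFastElement's state into a single 32-bit word: the elements kind in bits 0-3, the dehoisted index offset in bits 4-30, and the dehoisted flag in bit 31. The real template lives in V8's utils.h; below is a self-contained rendering of the same encode/decode/update contract, runnable on its own:

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      static uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | encode(value);
      }
    };

    typedef BitField<int, 0, 4>       ElementsKindField;  // bits 0..3
    typedef BitField<uint32_t, 4, 27> IndexOffsetField;   // bits 4..30
    typedef BitField<bool, 31, 1>     IsDehoistedField;   // bit 31

    int main() {
      uint32_t bits = ElementsKindField::encode(2);  // some fast kind
      bits = IndexOffsetField::update(bits, 16);
      bits = IsDehoistedField::update(bits, true);
      assert(ElementsKindField::decode(bits) == 2);
      assert(IndexOffsetField::decode(bits) == 16);
      assert(IsDehoistedField::decode(bits));
      return 0;
    }

The index offset and dehoisted bit exist for the new DehoistSimpleArrayIndexComputations pass declared in hydrogen.h below: a load of a[i + 16] can keep key i and fold the constant 16 into the instruction itself.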
@@ -3892,6 +4070,12 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
@@ -3900,21 +4084,38 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
         : Representation::Integer32();
   }
 
+  bool RequiresHoleCheck() {
+    return hole_check_mode_ == PERFORM_HOLE_CHECK;
+  }
+
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    if (!other->IsLoadKeyedFastDoubleElement()) return false;
+    HLoadKeyedFastDoubleElement* other_load =
+        HLoadKeyedFastDoubleElement::cast(other);
+    return hole_check_mode_ == other_load->hole_check_mode_;
+  }
+
+ private:
+  uint32_t index_offset_;
+  bool is_dehoisted_;
+  HoleCheckMode hole_check_mode_;
 };
 
 
-class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
+class HLoadKeyedSpecializedArrayElement
+    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
  public:
   HLoadKeyedSpecializedArrayElement(HValue* external_elements,
                                     HValue* key,
                                     ElementsKind elements_kind)
-      : elements_kind_(elements_kind) {
+      : elements_kind_(elements_kind),
+        index_offset_(0),
+        is_dehoisted_(false) {
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
@@ -3942,6 +4143,12 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
   HValue* external_pointer() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   ElementsKind elements_kind() const { return elements_kind_; }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   virtual Range* InferRange(Zone* zone);
@@ -3957,6 +4164,8 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
  private:
   ElementsKind elements_kind_;
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
@@ -3995,9 +4204,12 @@ class HStoreNamedField: public HTemplateInstruction<2> {
                    int offset)
       : name_(name),
         is_in_object_(in_object),
-        offset_(offset) {
+        offset_(offset),
+        new_space_dominator_(NULL) {
     SetOperandAt(0, obj);
     SetOperandAt(1, val);
+    SetFlag(kTrackSideEffectDominators);
+    SetGVNFlag(kDependsOnNewSpacePromotion);
     if (is_in_object_) {
       SetGVNFlag(kChangesInobjectFields);
     } else {
@@ -4010,6 +4222,10 @@ class HStoreNamedField: public HTemplateInstruction<2> {
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+    ASSERT(side_effect == kChangesNewSpacePromotion);
+    new_space_dominator_ = dominator;
+  }
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* object() { return OperandAt(0); }
@@ -4020,9 +4236,15 @@ class HStoreNamedField: public HTemplateInstruction<2> {
   int offset() const { return offset_; }
   Handle<Map> transition() const { return transition_; }
   void set_transition(Handle<Map> map) { transition_ = map; }
+  HValue* new_space_dominator() const { return new_space_dominator_; }
 
   bool NeedsWriteBarrier() {
-    return StoringValueNeedsWriteBarrier(value());
+    return StoringValueNeedsWriteBarrier(value()) &&
+        ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
+  }
+
+  bool NeedsWriteBarrierForMap() {
+    return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
   }
 
  private:
@@ -4030,6 +4252,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
   bool is_in_object_;
   int offset_;
   Handle<Map> transition_;
+  HValue* new_space_dominator_;
 };
@@ -4068,11 +4291,12 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
 };
 
 
-class HStoreKeyedFastElement: public HTemplateInstruction<3> {
+class HStoreKeyedFastElement
+    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
                          ElementsKind elements_kind = FAST_ELEMENTS)
-      : elements_kind_(elements_kind) {
+      : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -4090,8 +4314,14 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   bool value_is_smi() {
-    return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
+    return IsFastSmiElementsKind(elements_kind_);
   }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   bool NeedsWriteBarrier() {
     if (value_is_smi()) {
@@ -4107,14 +4337,18 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
 
  private:
   ElementsKind elements_kind_;
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
-class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
+class HStoreKeyedFastDoubleElement
+    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   HStoreKeyedFastDoubleElement(HValue* elements,
                                HValue* key,
-                               HValue* val) {
+                               HValue* val)
+      : index_offset_(0), is_dehoisted_(false) {
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -4134,24 +4368,37 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   bool NeedsWriteBarrier() {
     return StoringValueNeedsWriteBarrier(value());
   }
 
+  bool NeedsCanonicalization();
+
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
+
+ private:
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
-class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
+class HStoreKeyedSpecializedArrayElement
    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   HStoreKeyedSpecializedArrayElement(HValue* external_elements,
                                      HValue* key,
                                      HValue* val,
                                      ElementsKind elements_kind)
-      : elements_kind_(elements_kind) {
+      : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
     SetGVNFlag(kChangesSpecializedArrayElements);
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
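
NeedsCanonicalization, added to HStoreKeyedFastDoubleElement above, exists because unboxed double arrays represent "the hole" (a missing element) as one reserved NaN bit pattern. A NaN computed by arithmetic can carry an arbitrary payload, so it must be rewritten to the canonical quiet NaN before being stored, or a later load could mistake it for a hole. A sketch of the idea, using an invented hole pattern rather than V8's actual constant:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    static uint64_t BitsOf(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      return bits;
    }

    // Illustrative only: the array reserves one NaN payload to mean "hole";
    // every other NaN must be canonicalized before being stored.
    static const uint64_t kHoleBits = 0x7FF7DEADDEADBEEFull;  // hypothetical

    static double CanonicalizeForStore(double v) {
      // Collapse every NaN to the single canonical quiet NaN so that no
      // stored value can alias the hole pattern.
      return std::isnan(v) ? std::numeric_limits<double>::quiet_NaN() : v;
    }

    int main() {
      double weird_nan;
      uint64_t payload = 0x7FF8000000001234ull;  // NaN with a nonzero payload
      std::memcpy(&weird_nan, &payload, sizeof weird_nan);
      double stored = CanonicalizeForStore(weird_nan);
      assert(std::isnan(stored));
      assert(BitsOf(stored) != kHoleBits);  // cannot collide with the hole
      return 0;
    }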
@@ -4179,11 +4426,19 @@ class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   ElementsKind elements_kind() const { return elements_kind_; }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
 
  private:
   ElementsKind elements_kind_;
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
@@ -4230,8 +4485,19 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
         transitioned_map_(transitioned_map) {
     SetOperandAt(0, object);
     SetFlag(kUseGVN);
+    // Don't set GVN DependOn flags here. That would defeat GVN's detection of
+    // congruent HTransitionElementsKind instructions. Instruction hoisting
+    // handles HTransitionElementsKind instruction specially, explicitly adding
+    // DependsOn flags during its dependency calculations.
     SetGVNFlag(kChangesElementsKind);
-    SetGVNFlag(kChangesElementsPointer);
+    if (original_map->has_fast_double_elements()) {
+      SetGVNFlag(kChangesElementsPointer);
+      SetGVNFlag(kChangesNewSpacePromotion);
+    }
+    if (transitioned_map->has_fast_double_elements()) {
+      SetGVNFlag(kChangesElementsPointer);
+      SetGVNFlag(kChangesNewSpacePromotion);
+    }
     set_representation(Representation::Tagged());
   }
@@ -4293,6 +4559,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnMaps);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -4324,6 +4591,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
     SetOperandAt(1, char_code);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -4376,8 +4644,12 @@ class HAllocateObject: public HTemplateInstruction<1> {
       : constructor_(constructor) {
     SetOperandAt(0, context);
     set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
+  // Maximum instance size for which allocations will be inlined.
+  static const int kMaxSize = 64 * kPointerSize;
+
   HValue* context() { return OperandAt(0); }
   Handle<JSFunction> constructor() { return constructor_; }
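
The kMaxSize bound added above caps inline object allocation at 64 words, which works out to 256 bytes on a 32-bit build and 512 bytes on a 64-bit build; larger instances fall back to the runtime. A one-liner to make the arithmetic concrete:

    #include <cstdio>

    int main() {
      // HAllocateObject::kMaxSize = 64 * kPointerSize.
      const int kWords = 64;
      std::printf("32-bit: %d bytes, 64-bit: %d bytes\n",
                  kWords * 4, kWords * 8);
      return 0;  // prints: 32-bit: 256 bytes, 64-bit: 512 bytes
    }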
@@ -4421,6 +4693,7 @@ class HFastLiteral: public HMaterializedLiteral<1> {
         boilerplate_(boilerplate),
         total_size_(total_size) {
     SetOperandAt(0, context);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   // Maximum depth and total number of elements and properties for literal
@@ -4456,12 +4729,13 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
         length_(length),
         boilerplate_object_(boilerplate_object) {
     SetOperandAt(0, context);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* context() { return OperandAt(0); }
   ElementsKind boilerplate_elements_kind() const {
     if (!boilerplate_object_->IsJSObject()) {
-      return FAST_ELEMENTS;
+      return TERMINAL_FAST_ELEMENTS_KIND;
     }
     return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
   }
@@ -4496,6 +4770,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
         fast_elements_(fast_elements),
         has_function_(has_function) {
     SetOperandAt(0, context);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* context() { return OperandAt(0); }
@@ -4557,6 +4832,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
       : shared_info_(shared), pretenure_(pretenure) {
     SetOperandAt(0, context);
     set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* context() { return OperandAt(0); }

2015
deps/v8/src/hydrogen.cc

File diff suppressed because it is too large

147
deps/v8/src/hydrogen.h

@@ -42,6 +42,7 @@ namespace internal {
 // Forward declarations.
 class BitVector;
+class FunctionState;
 class HEnvironment;
 class HGraph;
 class HLoopInformation;
@@ -76,7 +77,7 @@ class HBasicBlock: public ZoneObject {
     return &deleted_phis_;
   }
   void RecordDeletedPhi(int merge_index) {
-    deleted_phis_.Add(merge_index);
+    deleted_phis_.Add(merge_index, zone());
   }
   HBasicBlock* dominator() const { return dominator_; }
   HEnvironment* last_environment() const { return last_environment_; }
@@ -121,7 +122,7 @@ class HBasicBlock: public ZoneObject {
   void Finish(HControlInstruction* last);
   void FinishExit(HControlInstruction* instruction);
-  void Goto(HBasicBlock* block, bool drop_extra = false);
+  void Goto(HBasicBlock* block, FunctionState* state = NULL);
 
   int PredecessorIndexOf(HBasicBlock* predecessor) const;
   void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
@@ -136,7 +137,7 @@ class HBasicBlock: public ZoneObject {
   // instruction and updating the bailout environment.
   void AddLeaveInlined(HValue* return_value,
                        HBasicBlock* target,
-                       bool drop_extra = false);
+                       FunctionState* state = NULL);
 
   // If a target block is tagged as an inline function return, all
   // predecessors should contain the inlined exit sequence:
@@ -157,7 +158,7 @@ class HBasicBlock: public ZoneObject {
     dominates_loop_successors_ = true;
   }
 
-  inline Zone* zone();
+  inline Zone* zone() const;
 
 #ifdef DEBUG
   void Verify();
@@ -211,12 +212,12 @@ class HPredecessorIterator BASE_EMBEDDED {
 class HLoopInformation: public ZoneObject {
  public:
-  explicit HLoopInformation(HBasicBlock* loop_header)
-      : back_edges_(4),
+  HLoopInformation(HBasicBlock* loop_header, Zone* zone)
+      : back_edges_(4, zone),
         loop_header_(loop_header),
-        blocks_(8),
+        blocks_(8, zone),
         stack_check_(NULL) {
-    blocks_.Add(loop_header);
+    blocks_.Add(loop_header, zone);
   }
 
   virtual ~HLoopInformation() {}
@@ -240,13 +241,13 @@ class HLoopInformation: public ZoneObject {
   HStackCheck* stack_check_;
 };
 
+class BoundsCheckTable;
 class HGraph: public ZoneObject {
  public:
-  explicit HGraph(CompilationInfo* info);
+  HGraph(CompilationInfo* info, Zone* zone);
 
   Isolate* isolate() { return isolate_; }
-  Zone* zone() { return isolate_->zone(); }
+  Zone* zone() const { return zone_; }
 
   const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
   const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -265,6 +266,8 @@ class HGraph: public ZoneObject {
   void OrderBlocks();
   void AssignDominators();
   void ReplaceCheckedValues();
+  void EliminateRedundantBoundsChecks();
+  void DehoistSimpleArrayIndexComputations();
   void PropagateDeoptimizingMark();
 
   // Returns false if there are phi-uses of the arguments-object
@@ -277,7 +280,7 @@ class HGraph: public ZoneObject {
   void CollectPhis();
 
-  Handle<Code> Compile(CompilationInfo* info);
+  Handle<Code> Compile(CompilationInfo* info, Zone* zone);
 
   void set_undefined_constant(HConstant* constant) {
     undefined_constant_.set(constant);
int GetMaximumValueID() const { return values_.length(); } int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; } int GetNextBlockID() { return next_block_id_++; }
int GetNextValueID(HValue* value) { int GetNextValueID(HValue* value) {
values_.Add(value); values_.Add(value, zone());
return values_.length() - 1; return values_.length() - 1;
} }
HValue* LookupValue(int id) const { HValue* LookupValue(int id) const {
@@ -333,6 +336,14 @@ class HGraph: public ZoneObject {
     osr_values_.set(values);
   }
 
+  void MarkRecursive() {
+    is_recursive_ = true;
+  }
+
+  bool is_recursive() const {
+    return is_recursive_;
+  }
+
  private:
   void Postorder(HBasicBlock* block,
                  BitVector* visited,
@@ -357,6 +368,7 @@ class HGraph: public ZoneObject {
   void InferTypes(ZoneList<HValue*>* worklist);
   void InitializeInferredTypes(int from_inclusive, int to_inclusive);
   void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
+  void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
 
   Isolate* isolate_;
   int next_block_id_;
@@ -376,11 +388,15 @@ class HGraph: public ZoneObject {
   SetOncePointer<HBasicBlock> osr_loop_entry_;
   SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
 
+  Zone* zone_;
+
+  bool is_recursive_;
+
   DISALLOW_COPY_AND_ASSIGN(HGraph);
 };
 
 
-Zone* HBasicBlock::zone() { return graph_->zone(); }
+Zone* HBasicBlock::zone() const { return graph_->zone(); }
 
 
 // Type of stack frame an environment might refer to.
@@ -391,7 +407,8 @@ class HEnvironment: public ZoneObject {
  public:
   HEnvironment(HEnvironment* outer,
                Scope* scope,
-               Handle<JSFunction> closure);
+               Handle<JSFunction> closure,
+               Zone* zone);
 
   HEnvironment* DiscardInlined(bool drop_extra) {
     HEnvironment* outer = outer_;
void Push(HValue* value) { void Push(HValue* value) {
ASSERT(value != NULL); ASSERT(value != NULL);
++push_count_; ++push_count_;
values_.Add(value); values_.Add(value, zone());
} }
HValue* Pop() { HValue* Pop() {
@@ -515,13 +532,16 @@ class HEnvironment: public ZoneObject {
   void PrintTo(StringStream* stream);
   void PrintToStd();
 
+  Zone* zone() const { return zone_; }
+
  private:
-  explicit HEnvironment(const HEnvironment* other);
+  HEnvironment(const HEnvironment* other, Zone* zone);
   HEnvironment(HEnvironment* outer,
                Handle<JSFunction> closure,
                FrameType frame_type,
-               int arguments);
+               int arguments,
+               Zone* zone);
 
   // Create an artificial stub environment (e.g. for argument adaptor or
   // constructor stub).
@@ -559,6 +579,7 @@ class HEnvironment: public ZoneObject {
   int pop_count_;
   int push_count_;
   int ast_id_;
+  Zone* zone_;
 };
@@ -603,7 +624,7 @@ class AstContext {
   HGraphBuilder* owner() const { return owner_; }
 
-  inline Zone* zone();
+  inline Zone* zone() const;
 
   // We want to be able to assert, in a context-specific way, that the stack
   // height makes sense when the context is filled.
@@ -715,6 +736,16 @@ class FunctionState {
 
   FunctionState* outer() { return outer_; }
 
+  HEnterInlined* entry() { return entry_; }
+  void set_entry(HEnterInlined* entry) { entry_ = entry; }
+
+  HArgumentsElements* arguments_elements() { return arguments_elements_; }
+  void set_arguments_elements(HArgumentsElements* arguments_elements) {
+    arguments_elements_ = arguments_elements;
+  }
+
+  bool arguments_pushed() { return arguments_elements() != NULL; }
+
  private:
   HGraphBuilder* owner_;
@@ -741,6 +772,12 @@ class FunctionState {
   // return blocks. NULL in all other cases.
   TestContext* test_context_;
 
+  // When inlining HEnterInlined instruction corresponding to the function
+  // entry.
+  HEnterInlined* entry_;
+
+  HArgumentsElements* arguments_elements_;
+
   FunctionState* outer_;
 };
@@ -801,7 +838,7 @@ class HGraphBuilder: public AstVisitor {
     BreakAndContinueScope* next_;
   };
 
-  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle, Zone* zone);
 
   HGraph* CreateGraph();
@@ -851,15 +888,11 @@ class HGraphBuilder: public AstVisitor {
   static const int kMaxLoadPolymorphism = 4;
   static const int kMaxStorePolymorphism = 4;
 
-  static const int kMaxInlinedNodes = 196;
-  static const int kMaxInlinedSize = 196;
-  static const int kMaxSourceSize = 600;
-
   // Even in the 'unlimited' case we have to have some limit in order not to
   // overflow the stack.
-  static const int kUnlimitedMaxInlinedNodes = 1000;
-  static const int kUnlimitedMaxInlinedSize = 1000;
-  static const int kUnlimitedMaxSourceSize = 600;
+  static const int kUnlimitedMaxInlinedSourceSize = 100000;
+  static const int kUnlimitedMaxInlinedNodes = 10000;
+  static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
 
   // Simple accessors.
   void set_function_state(FunctionState* state) { function_state_ = state; }
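
The hunk above reworks the inliner's budgets: the old fixed node/size/source constants are dropped, and even the "unlimited" mode now distinguishes a per-function source-size cap, a per-function node cap, and a cumulative node cap across all inlined functions. A hedged sketch of how three such budgets compose (the struct and accessor names here are made up; the real checks are threaded through TryInline in hydrogen.cc, whose diff is suppressed above):

    #include <cstdio>

    // Hypothetical distillation of the new inlining gate: three independent
    // budgets, each with an "unlimited" ceiling that still bounds stack use.
    struct InliningLimits {
      int max_source_size;
      int max_nodes;
      int max_nodes_cumulative;
    };

    static bool MayInline(const InliningLimits& l, int source_size, int nodes,
                          int nodes_inlined_so_far) {
      if (source_size > l.max_source_size) return false;
      if (nodes > l.max_nodes) return false;
      if (nodes_inlined_so_far + nodes > l.max_nodes_cumulative) return false;
      return true;
    }

    int main() {
      // The "unlimited" caps from the diff: 100000 / 10000 / 10000.
      InliningLimits unlimited = { 100000, 10000, 10000 };
      std::printf("%d\n", MayInline(unlimited, 600, 200, 9000));  // 1
      std::printf("%d\n", MayInline(unlimited, 600, 200, 9900));  // 0
      return 0;
    }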
@@ -896,11 +929,6 @@ class HGraphBuilder: public AstVisitor {
   INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
 #undef INLINE_FUNCTION_GENERATOR_DECLARATION
 
-  void HandleDeclaration(VariableProxy* proxy,
-                         VariableMode mode,
-                         FunctionLiteral* function,
-                         int* global_count);
-
   void VisitDelete(UnaryOperation* expr);
   void VisitVoid(UnaryOperation* expr);
   void VisitTypeof(UnaryOperation* expr);
@@ -994,11 +1022,13 @@ class HGraphBuilder: public AstVisitor {
                               LookupResult* lookup,
                               bool is_store);
 
+  void EnsureArgumentsArePushedForAccess();
   bool TryArgumentsAccess(Property* expr);
 
   // Try to optimize fun.apply(receiver, arguments) pattern.
   bool TryCallApply(Call* expr);
 
+  int InliningAstSize(Handle<JSFunction> target);
   bool TryInline(CallKind call_kind,
                  Handle<JSFunction> target,
                  ZoneList<Expression*>* arguments,
@@ -1029,6 +1059,10 @@ class HGraphBuilder: public AstVisitor {
   void HandlePropertyAssignment(Assignment* expr);
   void HandleCompoundAssignment(Assignment* expr);
+  void HandlePolymorphicLoadNamedField(Property* expr,
+                                       HValue* object,
+                                       SmallMapList* types,
+                                       Handle<String> name);
   void HandlePolymorphicStoreNamedField(Assignment* expr,
                                         HValue* object,
                                         HValue* value,
@@ -1076,6 +1110,7 @@ class HGraphBuilder: public AstVisitor {
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
+                                              HValue* dependency,
                                               Handle<Map> map,
                                               bool is_store);
   HValue* HandlePolymorphicElementAccess(HValue* object,
@@ -1126,7 +1161,7 @@ class HGraphBuilder: public AstVisitor {
                               Handle<Map> receiver_map,
                               bool smi_and_map_check);
 
-  Zone* zone() { return zone_; }
+  Zone* zone() const { return zone_; }
 
   // The translation state of the currently-being-translated function.
   FunctionState* function_state_;
@@ -1145,6 +1180,7 @@ class HGraphBuilder: public AstVisitor {
   HBasicBlock* current_block_;
 
   int inlined_count_;
+  ZoneList<Handle<Object> > globals_;
 
   Zone* zone_;
}; };
Zone* AstContext::zone() { return owner_->zone(); } Zone* AstContext::zone() const { return owner_->zone(); }
class HValueMap: public ZoneObject { class HValueMap: public ZoneObject {
public: public:
HValueMap() explicit HValueMap(Zone* zone)
: array_size_(0), : array_size_(0),
lists_size_(0), lists_size_(0),
count_(0), count_(0),
@ -1170,15 +1206,15 @@ class HValueMap: public ZoneObject {
array_(NULL), array_(NULL),
lists_(NULL), lists_(NULL),
free_list_head_(kNil) { free_list_head_(kNil) {
ResizeLists(kInitialSize); ResizeLists(kInitialSize, zone);
Resize(kInitialSize); Resize(kInitialSize, zone);
} }
void Kill(GVNFlagSet flags); void Kill(GVNFlagSet flags);
void Add(HValue* value) { void Add(HValue* value, Zone* zone) {
present_flags_.Add(value->gvn_flags()); present_flags_.Add(value->gvn_flags());
Insert(value); Insert(value, zone);
} }
HValue* Lookup(HValue* value) const; HValue* Lookup(HValue* value) const;
@@ -1202,9 +1238,9 @@ class HValueMap: public ZoneObject {
   HValueMap(Zone* zone, const HValueMap* other);
 
-  void Resize(int new_size);
-  void ResizeLists(int new_size);
-  void Insert(HValue* value);
+  void Resize(int new_size, Zone* zone);
+  void ResizeLists(int new_size, Zone* zone);
+  void Insert(HValue* value, Zone* zone);
   uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
 
   int array_size_;
@@ -1219,6 +1255,31 @@ class HValueMap: public ZoneObject {
 };
 
 
+class HSideEffectMap BASE_EMBEDDED {
+ public:
+  HSideEffectMap();
+  explicit HSideEffectMap(HSideEffectMap* other);
+  HSideEffectMap& operator= (const HSideEffectMap& other);
+
+  void Kill(GVNFlagSet flags);
+
+  void Store(GVNFlagSet flags, HInstruction* instr);
+
+  bool IsEmpty() const { return count_ == 0; }
+
+  inline HInstruction* operator[](int i) const {
+    ASSERT(0 <= i);
+    ASSERT(i < kNumberOfTrackedSideEffects);
+    return data_[i];
+  }
+  inline HInstruction* at(int i) const { return operator[](i); }
+
+ private:
+  int count_;
+  HInstruction* data_[kNumberOfTrackedSideEffects];
+};
+
+
 class HStatistics: public Malloced {
  public:
   void Initialize(CompilationInfo* info);
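
HSideEffectMap, declared above, gives the GVN pass a per-side-effect "last dominating instruction" table: Store records the instruction that produced a tracked effect, Kill clears entries whose effects were clobbered, and instructions that set kTrackSideEffectDominators (such as HStoreNamedField in hydrogen-instructions.h above) get the lookup result via SetSideEffectDominator. A minimal sketch with a fixed effect count and plain pointers (simplified; the real class keys entries by GVNFlagSet):

    #include <cassert>
    #include <cstring>

    struct Instruction { int id; };

    static const int kNumberOfTrackedSideEffects = 4;  // simplified

    class SideEffectMap {
     public:
      SideEffectMap() : count_(0) { std::memset(data_, 0, sizeof(data_)); }

      // Remember |instr| as the most recent producer of effect |effect|.
      void Store(int effect, Instruction* instr) {
        if (data_[effect] == 0) count_++;
        data_[effect] = instr;
      }

      // Forget a producer whose effect was clobbered (e.g. at merge points).
      void Kill(int effect) {
        if (data_[effect] != 0) count_--;
        data_[effect] = 0;
      }

      bool IsEmpty() const { return count_ == 0; }
      Instruction* at(int i) const { return data_[i]; }

     private:
      int count_;
      Instruction* data_[kNumberOfTrackedSideEffects];
    };

    int main() {
      Instruction alloc = { 1 };
      SideEffectMap map;
      map.Store(0, &alloc);         // e.g. slot 0 = "new-space promotion"
      assert(map.at(0) == &alloc);  // a later store finds its dominator
      map.Kill(0);
      assert(map.IsEmpty());
      return 0;
    }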
@@ -1332,7 +1393,7 @@ class HTracer: public Malloced {
     WriteChars(filename, "", 0, false);
   }
 
-  void TraceLiveRange(LiveRange* range, const char* type);
+  void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
   void Trace(const char* name, HGraph* graph, LChunk* chunk);
   void FlushToFile();

3
deps/v8/src/ia32/assembler-ia32.h

@@ -640,6 +640,9 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
+
   // ---------------------------------------------------------------------------
   // Code generation
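
The two constants added above follow the x86 short-jump encoding: a conditional short jump opcode is the 0x70 prefix byte OR'ed with the 4-bit condition code, so JNZ and JZ come out as 0x75 and 0x74 alongside the existing JNC (0x73) and JC (0x72). A quick standalone check of the arithmetic (the numeric condition codes below match the x86 tttn encoding, which V8's ia32 Condition enum mirrors):

    #include <cstdio>

    int main() {
      const unsigned char kJccShortPrefix = 0x70;
      // x86 condition codes: carry = 2, not_carry = 3, zero = 4, not_zero = 5.
      const unsigned char kJcShortOpcode  = kJccShortPrefix | 2;  // 0x72
      const unsigned char kJncShortOpcode = kJccShortPrefix | 3;  // 0x73
      const unsigned char kJzShortOpcode  = kJccShortPrefix | 4;  // 0x74
      const unsigned char kJnzShortOpcode = kJccShortPrefix | 5;  // 0x75
      std::printf("%02x %02x %02x %02x\n", kJcShortOpcode, kJncShortOpcode,
                  kJzShortOpcode, kJnzShortOpcode);
      return 0;
    }

The debugger uses these byte values to recognize and patch conditional short jumps in generated code.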

20
deps/v8/src/ia32/builtins-ia32.cc

@@ -831,7 +831,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 
     // Copy all arguments from the array to the stack.
     Label entry, loop;
-    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ mov(ecx, Operand(ebp, kIndexOffset));
     __ jmp(&entry);
     __ bind(&loop);
     __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
@@ -848,16 +848,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     __ push(eax);
 
     // Update the index on the stack and in register eax.
-    __ mov(eax, Operand(ebp, kIndexOffset));
-    __ add(eax, Immediate(1 << kSmiTagSize));
-    __ mov(Operand(ebp, kIndexOffset), eax);
+    __ mov(ecx, Operand(ebp, kIndexOffset));
+    __ add(ecx, Immediate(1 << kSmiTagSize));
+    __ mov(Operand(ebp, kIndexOffset), ecx);
 
     __ bind(&entry);
-    __ cmp(eax, Operand(ebp, kLimitOffset));
+    __ cmp(ecx, Operand(ebp, kLimitOffset));
     __ j(not_equal, &loop);
 
     // Invoke the function.
     Label call_proxy;
+    __ mov(eax, ecx);
     ParameterCount actual(eax);
     __ SmiUntag(eax);
     __ mov(edi, Operand(ebp, kFunctionOffset));
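
The loop above keeps the index in smi form throughout, which is why it advances by Immediate(1 << kSmiTagSize) rather than by 1: on ia32 a smi is the integer shifted left by one tag bit, so adding 2 to the tagged word adds 1 to the value (the final SmiUntag recovers the plain count). A sketch of the tagging arithmetic:

    #include <cassert>

    const int kSmiTagSize = 1;  // ia32: 31-bit payload, low bit is the tag

    static int SmiTag(int value) { return value << kSmiTagSize; }
    static int SmiUntag(int smi) { return smi >> kSmiTagSize; }

    int main() {
      int index = SmiTag(7);
      index += 1 << kSmiTagSize;     // the loop's add of Immediate(2)
      assert(SmiUntag(index) == 8);  // tagged add advanced the value by one
      return 0;
    }

The register change from eax to ecx in this hunk frees eax until the invoke, where the final mov(eax, ecx) restores the calling convention's expectation that the argument count arrives in eax.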
@@ -899,7 +900,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
   const int initial_capacity = JSArray::kPreallocatedArrayElements;
   STATIC_ASSERT(initial_capacity >= 0);
 
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -1002,7 +1003,8 @@ static void AllocateJSArray(MacroAssembler* masm,
   ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
   ASSERT(!fill_with_hole || !result.is(eax));  // result is never eax
 
-  __ LoadInitialArrayMap(array_function, scratch, elements_array);
+  __ LoadInitialArrayMap(array_function, scratch,
+                         elements_array, fill_with_hole);
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
@@ -1273,11 +1275,11 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ jmp(&prepare_generic_code_call);
 
   __ bind(&not_double);
-  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
   __ mov(ebx, Operand(esp, 0));
   __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
   __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ONLY_ELEMENTS,
+      FAST_SMI_ELEMENTS,
       FAST_ELEMENTS,
       edi,
       eax,

150
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -1681,6 +1681,11 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
 }
 
 
+// Input:
+//    edx: left operand (tagged)
+//    eax: right operand (tagged)
+// Output:
+//    eax: result (tagged)
 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   Label call_runtime;
   ASSERT(operands_type_ == BinaryOpIC::INT32);
@@ -1690,31 +1695,37 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
-    case Token::DIV: {
+    case Token::DIV:
+    case Token::MOD: {
      Label not_floats;
       Label not_int32;
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
         FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        // Check result type if it is currently Int32.
-        if (result_type_ <= BinaryOpIC::INT32) {
-          __ cvttsd2si(ecx, Operand(xmm0));
-          __ cvtsi2sd(xmm2, ecx);
-          __ ucomisd(xmm0, xmm2);
-          __ j(not_zero, &not_int32);
-          __ j(carry, &not_int32);
+        if (op_ == Token::MOD) {
+          GenerateRegisterArgsPush(masm);
+          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+        } else {
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          // Check result type if it is currently Int32.
+          if (result_type_ <= BinaryOpIC::INT32) {
+            __ cvttsd2si(ecx, Operand(xmm0));
+            __ cvtsi2sd(xmm2, ecx);
+            __ ucomisd(xmm0, xmm2);
+            __ j(not_zero, &not_int32);
+            __ j(carry, &not_int32);
+          }
+          GenerateHeapResultAllocation(masm, &call_runtime);
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          __ ret(0);
         }
-        GenerateHeapResultAllocation(masm, &call_runtime);
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        __ ret(0);
       } else {  // SSE2 not available, use FPU.
         FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
         FloatingPointHelper::LoadFloatOperands(
@@ -1722,20 +1733,28 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
             ecx,
             FloatingPointHelper::ARGS_IN_REGISTERS);
         FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
+        if (op_ == Token::MOD) {
+          // The operands are now on the FPU stack, but we don't need them.
+          __ fstp(0);
+          __ fstp(0);
+          GenerateRegisterArgsPush(masm);
+          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+        } else {
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          Label after_alloc_failure;
+          GenerateHeapResultAllocation(masm, &after_alloc_failure);
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          __ ret(0);
+          __ bind(&after_alloc_failure);
+          __ fstp(0);  // Pop FPU stack before calling runtime.
+          __ jmp(&call_runtime);
         }
-        Label after_alloc_failure;
-        GenerateHeapResultAllocation(masm, &after_alloc_failure);
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        __ ret(0);
-        __ bind(&after_alloc_failure);
-        __ ffree();
-        __ jmp(&call_runtime);
       }
 
       __ bind(&not_floats);
@@ -1744,10 +1763,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       break;
     }
-    case Token::MOD: {
-      // For MOD we go directly to runtime in the non-smi case.
-      break;
-    }
     case Token::BIT_OR:
     case Token::BIT_AND:
     case Token::BIT_XOR:
@@ -1758,11 +1773,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       Label not_floats;
       Label not_int32;
       Label non_smi_result;
-      /* {
-        CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
-      }*/
       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
                                                   use_sse3_,
                                                   &not_floats);
@@ -1833,8 +1843,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       default: UNREACHABLE(); break;
     }
-  // If an allocation fails, or SHR or MOD hit a hard case,
-  // use the runtime system to get the correct result.
+  // If an allocation fails, or SHR hits a hard case, use the runtime system to
+  // get the correct result.
   __ bind(&call_runtime);

   switch (op_) {
@@ -1855,8 +1865,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
     case Token::MOD:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
       break;
     case Token::BIT_OR:
       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
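Net effect of the hunks above: Token::MOD now rides the same operand loading and int32 checks as ADD/SUB/MUL/DIV inside the Int32 stub and is then handed to Builtins::MOD, instead of breaking straight out to call_runtime. A minimal plain-C++ sketch of that dispatch shape (std::fmod stands in for the builtin; this is an illustration, not the V8 stub):

    #include <cmath>
    #include <cstdio>

    enum Op { ADD, SUB, MUL, DIV, MOD };

    double Int32StubSketch(Op op, double lhs, double rhs) {
      if (op == MOD) {
        // Delegated, as the real stub does via GenerateRegisterArgsPush
        // followed by InvokeBuiltin(Builtins::MOD, ...).
        return std::fmod(lhs, rhs);
      }
      switch (op) {
        case ADD: return lhs + rhs;
        case SUB: return lhs - rhs;
        case MUL: return lhs * rhs;
        case DIV: return lhs / rhs;
        default:  return 0.0;  // unreachable
      }
    }

    int main() {
      // Like JS %, fmod keeps the dividend's sign: 7 % -3 == 1.
      std::printf("%g\n", Int32StubSketch(MOD, 7.0, -3.0));
    }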
@@ -1957,7 +1965,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         __ ret(0);
         __ bind(&after_alloc_failure);
-        __ ffree();
+        __ fstp(0);  // Pop FPU stack before calling runtime.
         __ jmp(&call_runtime);
       }
@@ -2161,8 +2169,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         __ ret(0);
         __ bind(&after_alloc_failure);
-        __ ffree();
+        __ fstp(0);  // Pop FPU stack before calling runtime.
         __ jmp(&call_runtime);
       }
       __ bind(&not_floats);
       break;
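GenerateHeapNumberStub and GenerateGeneric get the same one-instruction fix as the Int32 stub: ffree only tags st(0) as empty without moving the x87 top-of-stack pointer, while fstp(0) actually pops, so the register stack is balanced again before the runtime call. A toy model of the difference, with a vector standing in for the FPU stack:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<double> fpu_stack;
      fpu_stack.push_back(3.25);   // the computed result, waiting to be stored

      // Heap allocation failed; clean up before jumping to the runtime.
      // ffree-style cleanup would only forget the value, leaving depth 1;
      // fstp(0)-style cleanup pops, restoring depth 0:
      fpu_stack.pop_back();
      std::printf("FPU stack depth: %zu\n", fpu_stack.size());
    }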
@@ -3814,20 +3822,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(counters->regexp_entry_native(), 1);

   // Isolates: note we add an additional parameter here (isolate pointer).
-  static const int kRegExpExecuteArguments = 8;
+  static const int kRegExpExecuteArguments = 9;
   __ EnterApiExitFrame(kRegExpExecuteArguments);

-  // Argument 8: Pass current isolate address.
-  __ mov(Operand(esp, 7 * kPointerSize),
+  // Argument 9: Pass current isolate address.
+  __ mov(Operand(esp, 8 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));

-  // Argument 7: Indicate that this is a direct call from JavaScript.
-  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
+  // Argument 8: Indicate that this is a direct call from JavaScript.
+  __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));

-  // Argument 6: Start (high end) of backtracking stack memory area.
+  // Argument 7: Start (high end) of backtracking stack memory area.
   __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
   __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ mov(Operand(esp, 5 * kPointerSize), esi);
+  __ mov(Operand(esp, 6 * kPointerSize), esi);
+
+  // Argument 6: Set the number of capture registers to zero to force global
+  // regexps to behave as non-global. This does not affect non-global regexps.
+  __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));

   // Argument 5: static offsets vector buffer.
   __ mov(Operand(esp, 4 * kPointerSize),
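With the new capture-count slot, the native regexp entry now takes nine stack arguments. A hypothetical mirror of the layout as a C++ struct; slots 1 through 4 are not visible in this hunk, so those names are assumptions, and the real stub writes raw stack slots rather than a struct:

    #include <cstdio>

    struct RegExpExecArgs {
      const void* subject;         // 1: original subject string (assumed)
      int         previous_index;  // 2: start index (assumed)
      const void* input_start;     // 3: start of string data (assumed)
      const void* input_end;       // 4: end of string data (assumed)
      int*        offsets_vector;  // 5: static offsets vector buffer
      int         num_captures;    // 6: 0 forces global regexps to act non-global
      const void* stack_high;      // 7: high end of the backtracking stack
      int         direct_call;     // 8: 1 = direct call from JavaScript
      const void* isolate;         // 9: current isolate address
    };

    int main() {
      RegExpExecArgs args = {};
      args.num_captures = 0;  // the new slot: behave as non-global
      std::printf("num_captures = %d\n", args.num_captures);
    }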
@@ -3890,7 +3902,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check the result.
   Label success;
-  __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+  __ cmp(eax, 1);
+  // We expect exactly one result since we force the called regexp to behave
+  // as non-global.
   __ j(equal, &success);
   Label failure;
   __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
@@ -5006,11 +5020,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   __ j(not_equal, &not_outermost_js, Label::kNear);
   __ mov(Operand::StaticVariable(js_entry_sp), ebp);
   __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
-  Label cont;
-  __ jmp(&cont, Label::kNear);
+  __ jmp(&invoke, Label::kNear);
   __ bind(&not_outermost_js);
   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
-  __ bind(&cont);

   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
@@ -6162,7 +6174,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   __ sub(ecx, edx);
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
   Label not_original_string;
-  __ j(not_equal, &not_original_string, Label::kNear);
+  // Shorter than original string's length: an actual substring.
+  __ j(below, &not_original_string, Label::kNear);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ j(above, &runtime);
+  // Return original string.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
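Because the requested length is compared unsigned against the string's length, one cmp now separates three cases, and a negative length wraps around into the "above" branch. A standalone sketch of the same boundary logic:

    #include <cstdint>
    #include <cstdio>

    const char* Classify(uint32_t requested_length, uint32_t original_length) {
      if (requested_length < original_length) return "below: real substring";
      if (requested_length > original_length) return "above: unsafe, go to runtime";
      return "equal: return the original string";
    }

    int main() {
      std::printf("%s\n", Classify(3, 5));
      std::printf("%s\n", Classify(5, 5));
      // A negative length wraps to a huge unsigned value and lands in "above".
      std::printf("%s\n", Classify(static_cast<uint32_t>(-2), 5));
    }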
@@ -7047,8 +7063,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
   { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
-  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+  // and ElementsTransitionGenerator::GenerateSmiToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
   { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
@@ -7320,9 +7336,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ CheckFastElements(edi, &double_elements);

-  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
   __ JumpIfSmi(eax, &smi_element);
-  __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
+  __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);

   // Store into the array literal requires a elements transition. Call into
   // the runtime.
@@ -7344,7 +7360,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ pop(edx);
   __ jmp(&slow_elements);

-  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
   __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
@@ -7357,15 +7373,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
                  OMIT_SMI_CHECK);
   __ ret(0);

-  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-  // FAST_ELEMENTS, and value is Smi.
+  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+  // and value is Smi.
   __ bind(&smi_element);
   __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
   __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
                       FixedArrayBase::kHeaderSize), eax);
   __ ret(0);

-  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
   __ bind(&double_elements);

   __ push(edx);

22
deps/v8/src/ia32/codegen-ia32.cc

@@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
 #define __ ACCESS_MASM(masm)

-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
 }

-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+void ElementsTransitionGenerator::GenerateSmiToDouble(
     MacroAssembler* masm, Label* fail) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -397,9 +397,25 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   // Allocate new FixedDoubleArray.
   // edx: receiver
   // edi: length of source FixedArray (smi-tagged)
-  __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
+  __ lea(esi, Operand(edi,
+                      times_4,
+                      FixedDoubleArray::kHeaderSize + kPointerSize));
   __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);

+  Label aligned, aligned_done;
+  __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
+  __ j(zero, &aligned, Label::kNear);
+  __ mov(FieldOperand(eax, 0),
+         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
+  __ add(eax, Immediate(kPointerSize));
+  __ jmp(&aligned_done);
+
+  __ bind(&aligned);
+  __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
+         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
+
+  __ bind(&aligned_done);
+
   // eax: destination FixedDoubleArray
   // edi: number of elements
   // edx: receiver
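The over-allocation by kPointerSize plus the filler store guarantees the double payload ends up 8-byte aligned whichever way the 4-byte-granular allocator answers. A minimal standalone sketch of the same trick, with an assumed marker value standing in for one_pointer_filler_map():

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    const uint32_t kFillerWord = 0xF1;  // assumed stand-in for the filler map

    uint32_t* AlignForDoubles(uint32_t* raw, size_t payload_words) {
      if (reinterpret_cast<uintptr_t>(raw) % 8 != 0) {
        raw[0] = kFillerWord;            // filler at the front
        return raw + 1;                  // bumped pointer is now 8-aligned
      }
      raw[payload_words] = kFillerWord;  // filler takes the spare trailing word
      return raw;
    }

    int main() {
      alignas(8) uint32_t buffer[8];
      // Already aligned: filler goes at the end.
      uint32_t* a = AlignForDoubles(buffer, 7);
      // Misaligned by one word: filler goes first, pointer is bumped.
      uint32_t* b = AlignForDoubles(buffer + 1, 6);
      std::printf("a mod 8 = %zu, b mod 8 = %zu\n",
                  reinterpret_cast<uintptr_t>(a) % 8,
                  reinterpret_cast<uintptr_t>(b) % 8);
    }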

39
deps/v8/src/ia32/debug-ia32.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,9 +91,11 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }

+// All debug break stubs support padding for LiveEdit.
+const bool Debug::FramePaddingLayout::kIsSupported = true;
+
 #define __ ACCESS_MASM(masm)

 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
@@ -103,6 +105,13 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

+    // Load padding words on stack.
+    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
+      __ push(Immediate(Smi::FromInt(
+          Debug::FramePaddingLayout::kPaddingValue)));
+    }
+    __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
+
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as a smi causing it to be untouched by GC.
@@ -134,6 +143,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
     CEntryStub ceb(1);
     __ CallStub(&ceb);

+    // Automatically find register that could be used after register restore.
+    // We need one register for padding skip instructions.
+    Register unused_reg = { -1 };
+
     // Restore the register values containing object pointers from the
     // expression stack.
     for (int i = kNumJSCallerSaved; --i >= 0;) {
@@ -142,15 +155,29 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       if (FLAG_debug_code) {
         __ Set(reg, Immediate(kDebugZapValue));
       }
+      bool taken = reg.code() == esi.code();
       if ((object_regs & (1 << r)) != 0) {
         __ pop(reg);
+        taken = true;
       }
       if ((non_object_regs & (1 << r)) != 0) {
         __ pop(reg);
         __ SmiUntag(reg);
+        taken = true;
+      }
+      if (!taken) {
+        unused_reg = reg;
       }
     }

+    ASSERT(unused_reg.code() != -1);
+
+    // Read current padding counter and skip corresponding number of words.
+    __ pop(unused_reg);
+    // We divide stored value by 2 (untagging) and multiply it by word's size.
+    STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
+    __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
+
     // Get rid of the internal frame.
   }
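The padding words give LiveEdit room to grow a frame in place, and the count itself travels on the stack, so the exit path needs no compile-time knowledge of how many words remain. A plain C++ model of the protocol (the kInitialSize value here is assumed, for illustration only):

    #include <cstdio>
    #include <vector>

    const int kInitialSize  = 10;  // assumed value
    const int kPaddingValue = -1;

    int main() {
      std::vector<int> stack;

      // Entry: load padding words, then the counter on top.
      for (int i = 0; i < kInitialSize; i++) stack.push_back(kPaddingValue);
      stack.push_back(kInitialSize);

      // Exit: read the counter and skip that many words. LiveEdit may have
      // changed the count, which is why it is stored on the stack at all.
      int padding = stack.back();
      stack.pop_back();
      stack.resize(stack.size() - padding);

      std::printf("stack depth after unwind: %zu\n", stack.size());  // 0
    }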
@@ -172,10 +199,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for IC load call (from ic-ia32.cc).
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
+  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
 }
@@ -194,10 +221,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for keyed IC load call (from ic-ia32.cc).
   // ----------- S t a t e -------------
+  //  -- ecx    : key
   //  -- edx    : receiver
-  //  -- eax    : key
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
+  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
 }

98
deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -239,13 +239,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
   //     ok:

   if (FLAG_count_based_interrupts) {
-    ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
-    ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
+    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
   } else {
-    ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
-    ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
+    ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
+    ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
   }
-  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
   *(call_target_address - 3) = kNopByteOne;
   *(call_target_address - 2) = kNopByteTwo;
   Assembler::set_target_address_at(call_target_address,
@@ -266,9 +266,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
-  ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
-  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+  ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
+  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
   if (FLAG_count_based_interrupts) {
     *(call_target_address - 3) = kJnsInstruction;
     *(call_target_address - 2) = kJnsOffset;
@@ -351,10 +351,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
     PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
            reinterpret_cast<intptr_t>(function_));
     function_->PrintName();
-    PrintF(" => node=%u, frame=%d->%d]\n",
+    PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
            ast_id,
            input_frame_size,
-           output_frame_size);
+           output_frame_size,
+           input_->GetRegister(ebp.code()),
+           input_->GetRegister(esp.code()));
   }

   // There's only one output frame in the OSR case.
@@ -404,7 +406,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
         name = "function";
         break;
     }
-    PrintF("    [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
+    PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
            output_offset,
            input_value,
            input_offset,
@@ -415,6 +417,24 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
     output_offset -= kPointerSize;
   }

+  // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+  int frame_pointer = input_->GetRegister(ebp.code());
+  if ((frame_pointer & kPointerSize) != 0) {
+    frame_pointer -= kPointerSize;
+    has_alignment_padding_ = 1;
+  }
+
+  int32_t alignment_state = (has_alignment_padding_ == 1) ?
+      kAlignmentPaddingPushed :
+      kNoAlignmentPadding;
+  if (FLAG_trace_osr) {
+    PrintF("    [sp + %d] <- 0x%08x ; (alignment state)\n",
+           output_offset,
+           alignment_state);
+  }
+  output_[0]->SetFrameSlot(output_offset, alignment_state);
+  output_offset -= kPointerSize;
+
   // Translate the rest of the frame.
   while (ok && input_offset >= 0) {
     ok = DoOsrTranslateCommand(&iterator, &input_offset);
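The padding detection relies on ia32 frames being 4-byte granular: testing the single kPointerSize bit of ebp is exactly the 8-byte alignment test. A minimal sketch, with a made-up sample frame pointer:

    #include <cstdint>
    #include <cstdio>

    const uint32_t kPointerSize = 4;

    int main() {
      uint32_t frame_pointer = 0x0804f00c;  // sample ebp: 4-aligned, not 8-aligned
      int has_alignment_padding = 0;
      if ((frame_pointer & kPointerSize) != 0) {
        frame_pointer -= kPointerSize;      // account for one padding word
        has_alignment_padding = 1;
      }
      std::printf("padding=%d adjusted_fp=0x%08x\n",
                  has_alignment_padding, frame_pointer);
    }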
@@ -427,7 +447,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
     output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
   } else {
     // Set up the frame pointer and the context pointer.
-    output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+    output_[0]->SetRegister(ebp.code(), frame_pointer);
     output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));

     unsigned pc_offset = data->OsrPcOffset()->value();
@@ -688,24 +708,38 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   ASSERT(output_[frame_index] == NULL);
   output_[frame_index] = output_frame;

+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+
+  unsigned alignment_state_offset =
+      input_offset - parameter_count * kPointerSize -
+      StandardFrameConstants::kFixedFrameSize -
+      kPointerSize;
+  ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+         JavaScriptFrameConstants::kLocal0Offset);
+
   // The top address for the bottommost output frame can be computed from
   // the input frame pointer and the output frame's height.  For all
   // subsequent output frames, it can be computed from the previous one's
   // top address and the current frame's size.
   uint32_t top_address;
   if (is_bottommost) {
+    int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+    has_alignment_padding_ =
+        (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
     // 2 = context and function in the frame.
-    top_address =
-        input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+    // If the optimized frame had alignment padding, adjust the frame pointer
+    // to point to the new position of the old frame pointer after padding
+    // is removed. Subtract 2 * kPointerSize for the context and function slots.
+    top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+        height_in_bytes + has_alignment_padding_ * kPointerSize;
   } else {
     top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
   }
   output_frame->SetTop(top_address);

-  // Compute the incoming parameter translation.
-  int parameter_count = function->shared()->formal_parameter_count() + 1;
-  unsigned output_offset = output_frame_size;
-  unsigned input_offset = input_frame_size;
   for (int i = 0; i < parameter_count; ++i) {
     output_offset -= kPointerSize;
     DoTranslateCommand(iterator, frame_index, output_offset);
@@ -747,13 +781,17 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   }
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+  ASSERT(!is_bottommost ||
+      (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
+      fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
   if (FLAG_trace_deopt) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
+  ASSERT(!is_bottommost || !has_alignment_padding_ ||
+      (fp_value & kPointerSize) != 0);

   // For the bottommost output frame the context can be gotten from the input
   // frame. For all subsequent output frames it can be gotten from the function
@@ -948,6 +986,28 @@ void Deoptimizer::EntryGenerator::Generate() {
   }
   __ pop(eax);

+  if (type() != OSR) {
+    // If frame was dynamically aligned, pop padding.
+    Label no_padding;
+    __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+           Immediate(0));
+    __ j(equal, &no_padding);
+    __ pop(ecx);
+    if (FLAG_debug_code) {
+      __ cmp(ecx, Immediate(kAlignmentZapValue));
+      __ Assert(equal, "alignment marker expected");
+    }
+    __ bind(&no_padding);
+  } else {
+    // If frame needs dynamic alignment push padding.
+    Label no_padding;
+    __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+           Immediate(0));
+    __ j(equal, &no_padding);
+    __ push(Immediate(kAlignmentZapValue));
+    __ bind(&no_padding);
+  }
+
   // Replace the current frame with the output frames.
   Label outer_push_loop, inner_push_loop;
   // Outer loop state: eax = current FrameDescription**, edx = one past the

6
deps/v8/src/ia32/frames-ia32.h

@@ -53,6 +53,10 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 // Number of registers for which space is reserved in safepoints.
 const int kNumSafepointRegisters = 8;

+const int kNoAlignmentPadding = 0;
+const int kAlignmentPaddingPushed = 2;
+const int kAlignmentZapValue = 0x12345678;  // Not heap object tagged.
+
 // ----------------------------------------------------
@@ -119,6 +123,8 @@ class JavaScriptFrameConstants : public AllStatic {
   // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
   static const int kReceiverOffset = -1 * kPointerSize;
+
+  static const int kDynamicAlignmentStateOffset = kLocal0Offset;
 };
307
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -101,13 +101,6 @@ class JumpPatchSite BASE_EMBEDDED {
 };

-// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
-int FullCodeGenerator::self_optimization_header_size() {
-  UNREACHABLE();
-  return 13;
-}
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
@@ -269,11 +262,11 @@ void FullCodeGenerator::Generate() {
     // For named function expressions, declare the function name as a
     // constant.
     if (scope()->is_function_scope() && scope()->function() != NULL) {
-      VariableProxy* proxy = scope()->function();
-      ASSERT(proxy->var()->mode() == CONST ||
-             proxy->var()->mode() == CONST_HARMONY);
-      ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-      EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+      VariableDeclaration* function = scope()->function();
+      ASSERT(function->proxy()->var()->mode() == CONST ||
+             function->proxy()->var()->mode() == CONST_HARMONY);
+      ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+      VisitVariableDeclaration(function);
     }
     VisitDeclarations(scope()->declarations());
   }
@@ -763,60 +756,51 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }

-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+    __ cmp(ebx, isolate()->factory()->with_context_map());
+    __ Check(not_equal, "Declaration in with context.");
+    __ cmp(ebx, isolate()->factory()->catch_context_map());
+    __ Check(not_equal, "Declaration in catch context.");
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      globals_->Add(variable->name(), zone());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value(), zone());
       break;

     case Variable::PARAMETER:
     case Variable::LOCAL:
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ mov(StackOperand(variable), result_register());
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
         __ mov(StackOperand(variable),
                Immediate(isolate()->factory()->the_hole_value()));
       }
       break;

     case Variable::CONTEXT:
-      // The variable in the decl always resides in the current function
-      // context.
-      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-      if (FLAG_debug_code) {
-        // Check that we're not inside a with or catch context.
-        __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
-        __ cmp(ebx, isolate()->factory()->with_context_map());
-        __ Check(not_equal, "Declaration in with context.");
-        __ cmp(ebx, isolate()->factory()->catch_context_map());
-        __ Check(not_equal, "Declaration in catch context.");
-      }
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ mov(ContextOperand(esi, variable->index()), result_register());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(esi,
-                                  Context::SlotOffset(variable->index()),
-                                  result_register(),
-                                  ecx,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
         __ mov(ContextOperand(esi, variable->index()),
                Immediate(isolate()->factory()->the_hole_value()));
         // No write barrier since the hole value is in old space.
@@ -825,14 +809,12 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       break;

     case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Declaration");
+      Comment cmnt(masm_, "[ VariableDeclaration");
       __ push(esi);
       __ push(Immediate(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
+      // VariableDeclaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR || mode == LET ||
+             mode == CONST || mode == CONST_HARMONY);
       PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
           ? READ_ONLY : NONE;
       __ push(Immediate(Smi::FromInt(attr)));
@@ -840,9 +822,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      if (function != NULL) {
-        VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      if (hole_init) {
        __ push(Immediate(isolate()->factory()->the_hole_value()));
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
@@ -854,6 +834,118 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }

+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name(), zone());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function, zone());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ mov(StackOperand(variable), result_register());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ mov(ContextOperand(esi, variable->index()), result_register());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(esi,
+                                Context::SlotOffset(variable->index()),
+                                result_register(),
+                                ecx,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ push(esi);
+      __ push(Immediate(variable->name()));
+      __ push(Immediate(Smi::FromInt(NONE)));
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  Handle<JSModule> instance = declaration->module()->interface()->Instance();
+  ASSERT(!instance.is_null());
+
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      globals_->Add(variable->name(), zone());
+      globals_->Add(instance, zone());
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      __ mov(ContextOperand(esi, variable->index()), Immediate(instance));
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
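Taken together, these hunks replace the old EmitDeclaration(proxy, mode, function) entry point with one visitor per declaration kind. A schematic in plain C++ (not the real V8 classes, and deliberately minimal) of the resulting shape:

    #include <cstdio>

    struct VariableDeclaration { bool hole_init; };
    struct FunctionDeclaration { const char* name; };

    struct FullCodegenSketch {
      void VisitVariableDeclaration(const VariableDeclaration& decl) {
        // let/const/harmony-const bindings get the-hole initialization.
        std::printf("variable declaration, hole-init=%d\n", decl.hole_init);
      }
      void VisitFunctionDeclaration(const FunctionDeclaration& decl) {
        // functions compile their literal and store the closure directly.
        std::printf("function declaration: %s\n", decl.name);
      }
    };

    int main() {
      FullCodegenSketch gen;
      gen.VisitVariableDeclaration({true});
      gen.VisitFunctionDeclaration({"f"});
    }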
@@ -1194,7 +1286,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
   // All extension objects were empty and it is safe to use a global
   // load IC call.
-  __ mov(eax, GlobalObjectOperand());
+  __ mov(edx, GlobalObjectOperand());
   __ mov(ecx, var->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1278,7 +1370,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in ecx and the global
     // object in eax.
-    __ mov(eax, GlobalObjectOperand());
+    __ mov(edx, GlobalObjectOperand());
     __ mov(ecx, var->name());
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1465,7 +1557,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   // Mark all computed expressions that are bound to a key that
   // is shadowed by a later occurrence of the same key. For the
   // marked expressions, no store code is emitted.
-  expr->CalculateEmitStore();
+  expr->CalculateEmitStore(zone());

   AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
@@ -1557,7 +1649,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  bool has_constant_fast_elements =
+      IsFastObjectElementsKind(constant_elements_kind);
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
@@ -1568,7 +1661,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Heap* heap = isolate()->heap();
   if (has_constant_fast_elements &&
       constant_elements_values->map() == heap->fixed_cow_array_map()) {
-    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
     // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
     FastCloneShallowArrayStub stub(
@@ -1580,10 +1673,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
-           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
            FLAG_smi_only_arrays);
-    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
     // change, so it's possible to specialize the stub in advance.
     FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
         ? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1611,9 +1703,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     }
     VisitForAccumulatorValue(subexpr);

-    if (constant_elements_kind == FAST_ELEMENTS) {
-      // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
-      // transition and don't need to call the runtime stub.
+    if (IsFastObjectElementsKind(constant_elements_kind)) {
+      // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
+      // cannot transition and don't need to call the runtime stub.
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
       __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
@@ -1672,9 +1764,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in the accumulator.
-        VisitForAccumulatorValue(property->obj());
-        __ push(result_register());
+        // We need the receiver both on the stack and in edx.
+        VisitForStackValue(property->obj());
+        __ mov(edx, Operand(esp, 0));
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1682,9 +1774,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
-        __ mov(edx, Operand(esp, 0));
-        __ push(eax);
+        VisitForStackValue(property->key());
+        __ mov(edx, Operand(esp, kPointerSize));  // Object.
+        __ mov(ecx, Operand(esp, 0));             // Key.
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1927,7 +2019,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(ecx, eax);
-      __ pop(edx);
+      __ pop(edx);  // Receiver.
       __ pop(eax);  // Restore value.
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize()
@@ -2033,6 +2125,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
+  // eax    : value
+  // esp[0] : receiver
+
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   ASSERT(prop->key()->AsLiteral() != NULL);
@@ -2075,6 +2170,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
+  // eax               : value
+  // esp[0]            : key
+  // esp[kPointerSize] : receiver

   // If the assignment starts a block of assignments to the same object,
   // change to slow case to avoid the quadratic behavior of repeatedly
@@ -2087,7 +2185,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
     __ pop(result_register());
   }

-  __ pop(ecx);
+  __ pop(ecx);  // Key.
   if (expr->ends_initialization_block()) {
     __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
   } else {
@@ -2120,12 +2218,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
+    __ mov(edx, result_register());
     EmitNamedPropertyLoad(expr);
     context()->Plug(eax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ pop(edx);
+    __ pop(edx);                     // Object.
+    __ mov(ecx, result_register());  // Key.
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
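The common thread in these property-load hunks is a fixed-register convention: the receiver travels in edx and the name or key in ecx, rather than through the accumulator and the stack top. A tabulated illustration (not a V8 structure, just the convention as it appears in these hunks):

    #include <cstdio>

    int main() {
      struct { const char* site; const char* edx; const char* ecx; } rows[] = {
          {"named load (LoadIC)", "receiver", "name"},
          {"keyed load (KeyedLoadIC)", "receiver", "key"},
      };
      for (const auto& row : rows)
        std::printf("%-26s edx=%-8s ecx=%s\n", row.site, row.edx, row.ecx);
    }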
@@ -3924,15 +4024,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       __ push(Immediate(Smi::FromInt(0)));
     }
     if (assign_type == NAMED_PROPERTY) {
-      // Put the object both on the stack and in the accumulator.
+      // Put the object both on the stack and in edx.
       VisitForAccumulatorValue(prop->obj());
       __ push(eax);
+      __ mov(edx, eax);
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
-      __ mov(edx, Operand(esp, 0));
-      __ push(eax);
+      VisitForStackValue(prop->key());
+      __ mov(edx, Operand(esp, kPointerSize));  // Object.
+      __ mov(ecx, Operand(esp, 0));             // Key.
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4079,7 +4180,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
-    __ mov(eax, GlobalObjectOperand());
+    __ mov(edx, GlobalObjectOperand());
     __ mov(ecx, Immediate(proxy->name()));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
@@ -4344,7 +4445,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope()) {
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code. Pass a smi sentinel and let the runtime look up the empty
@@ -4374,14 +4476,49 @@ void FullCodeGenerator::EnterFinallyBlock() {
   STATIC_ASSERT(kSmiTag == 0);
   __ SmiTag(edx);
   __ push(edx);
+
   // Store result register while executing finally block.
   __ push(result_register());
+
+  // Store pending message while executing finally block.
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(edx, Operand::StaticVariable(pending_message_obj));
+  __ push(edx);
+
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ mov(edx, Operand::StaticVariable(has_pending_message));
+  __ push(edx);
+
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ mov(edx, Operand::StaticVariable(pending_message_script));
+  __ push(edx);
 }


 void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(edx));
+  // Restore pending message from stack.
+  __ pop(edx);
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ mov(Operand::StaticVariable(pending_message_script), edx);
+
+  __ pop(edx);
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ mov(Operand::StaticVariable(has_pending_message), edx);
+
+  __ pop(edx);
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(Operand::StaticVariable(pending_message_obj), edx);
+
+  // Restore result register from stack.
   __ pop(result_register());
+
   // Uncook return address.
   __ pop(edx);
   __ SmiUntag(edx);
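The three pending-message words are saved and restored strictly LIFO: obj, has, script on entry; script, has, obj on exit. A toy model of that symmetry:

    #include <cstdio>
    #include <stack>

    int main() {
      std::stack<const char*> frame;

      // EnterFinallyBlock: save.
      frame.push("pending_message_obj");
      frame.push("has_pending_message");
      frame.push("pending_message_script");

      // ExitFinallyBlock: restore in exactly the reverse order.
      while (!frame.empty()) {
        std::printf("restore %s\n", frame.top());
        frame.pop();
      }
    }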

Some files were not shown because too many files changed in this diff
